author     dakovalkov <dakovalkov@yandex-team.com>	2023-12-03 13:33:55 +0300
committer  dakovalkov <dakovalkov@yandex-team.com>	2023-12-03 14:04:39 +0300
commit     2a718325637e5302334b6d0a6430f63168f8dbb3 (patch)
tree       64be81080b7df9ec1d86d053a0c394ae53fcf1fe
parent     e0d94a470142d95c3007e9c5d80380994940664a (diff)
download   ydb-2a718325637e5302334b6d0a6430f63168f8dbb3.tar.gz
Update contrib/libs/aws-sdk-cpp to 1.11.37
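
For consumers of the vendored SDK, a minimal sketch (not part of this change) of how the bump to 1.11.37 can be sanity-checked: the AWS_SDK_VERSION_* macros come from aws/core/VersionConfig.h and InitAPI/ShutdownAPI from aws/core/Aws.h, both of which are touched by this commit; the main() wrapper below is illustrative only and assumes the contrib headers are on the include path.

    // sketch: confirm the expected SDK line at build time and print the exact version
    #include <aws/core/Aws.h>
    #include <aws/core/VersionConfig.h>
    #include <cstdio>

    int main() {
        static_assert(AWS_SDK_VERSION_MAJOR == 1 && AWS_SDK_VERSION_MINOR == 11,
                      "expected the 1.11.x line of aws-sdk-cpp");

        Aws::SDKOptions options;
        Aws::InitAPI(options);                         // global init is required before creating any client
        std::printf("aws-sdk-cpp %s\n", AWS_SDK_VERSION_STRING);
        Aws::ShutdownAPI(options);                     // matching shutdown releases global SDK state
        return 0;
    }
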
-rw-r--r--.mapping.json42
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-arm64.txt75
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-x86_64.txt75
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-aarch64.txt75
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-x86_64.txt75
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.windows-x86_64.txt75
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceRequest.h46
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceResult.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Aws.h35
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Core_EXPORTS.h6
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Globals.h48
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Region.h57
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/VersionConfig.h6
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSigner.h422
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSignerProvider.h47
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSBearerToken.h113
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentials.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProvider.h1
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProviderChain.h4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/SSOCredentialsProvider.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/STSCredentialsProvider.h1
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderChainBase.h31
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h49
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h63
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h33
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/BearerTokenAuthSignerProvider.h38
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/DefaultAuthSignerProvider.h42
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthBearerSigner.h106
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthEventStreamV4Signer.h114
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerBase.h142
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerCommon.h23
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerHelper.h45
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthV4Signer.h214
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSNullSigner.h72
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSAsyncOperationTemplate.h191
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClient.h321
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClientAsyncCRTP.h125
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSError.h26
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSJsonClient.h110
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSUrlPresigner.h176
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSXmlClient.h135
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AdaptiveRetryStrategy.h154
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AsyncCallerContext.h5
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/ClientConfiguration.h113
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/CoreErrors.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/GenericClientConfiguration.h86
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RequestCompression.h66
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RetryStrategy.h13
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSConfigFileProfileConfigLoader.h51
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfig.h120
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoader.h261
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoaderBase.h71
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/ConfigAndCredentialsCacheManager.h104
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/EC2InstanceProfileConfigLoader.h47
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/defaults/ClientConfigurationDefaults.h124
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSEndpoint.h71
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSPartitions.h23
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/BuiltInParameters.h44
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/ClientContextParameters.h37
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/DefaultEndpointProvider.h116
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointParameter.h139
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointProviderBase.h76
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/internal/AWSEndpointAttribute.h89
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/external/cjson/cJSON.h4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpRequest.h39
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpResponse.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/URI.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/curl/CurlHandleContainer.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/standard/StandardHttpRequest.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/internal/AWSHttpResourceClient.h37
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringInterface.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringManager.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/net/SimpleUDP.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/DateTime.h7
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/Document.h370
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/FileSystemUtils.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/HashingUtils.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/ResourceManager.h4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/StringUtils.h17
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/CRC32.h147
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Factories.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Hash.h10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/MD5.h10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha1.h9
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha256.h10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/openssl/CryptoImpl.h39
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventHeader.h39
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventMessage.h6
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamEncoder.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamHandler.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/json/JsonSerializer.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogSystem.h67
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogging.h31
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/ErrorMacros.h57
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/AWSMemory.h57
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSAllocator.h51
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSArray.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSSet.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSString.h15
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/stream/ResponseStream.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/threading/Executor.h10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/xml/XmlSerializer.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonWebServiceRequest.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Aws.cpp45
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp48
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp17
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp806
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp74
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp6
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp32
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp11
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp35
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp244
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp46
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp (renamed from contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp)13
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp320
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp103
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp580
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp686
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp19
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp212
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp236
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp180
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp228
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp322
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp103
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp336
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp629
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp571
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp206
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp112
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp197
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp86
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp153
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp135
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp61
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp236
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp82
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp31
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp105
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp24
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp181
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHandleContainer.cpp5
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHttpClient.cpp108
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpRequest.cpp11
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpResponse.cpp8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/internal/AWSHttpResourceClient.cpp175
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/monitoring/MonitoringManager.cpp69
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp673
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/HashingUtils.cpp27
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp218
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/MD5.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha1.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha256.cpp12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/factory/Factories.cpp73
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/openssl/CryptoImpl.cpp136
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamDecoder.cpp8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamEncoder.cpp101
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp17
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp107
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp31
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp6
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/ResponseStream.cpp63
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/SimpleStreamBuf.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/threading/Executor.cpp15
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/xml/XmlSerializer.cpp2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/ya.make75
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-arm64.txt50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-x86_64.txt50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-aarch64.txt50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-x86_64.txt50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.windows-x86_64.txt50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ARN.h64
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Client.h9861
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ClientConfiguration.h63
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Endpoint.h55
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointProvider.h114
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointRules.h23
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Request.h4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ServiceClientModel.h503
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortIncompleteMultipartUpload.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadRequest.h218
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccelerateConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlPolicy.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlTranslation.h30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsAndOperator.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsConfiguration.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsExportDestination.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsFilter.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsS3BucketDestination.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Bucket.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLifecycleConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLocationConstraint.h2
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLoggingStatus.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSRule.h22
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVInput.h56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVOutput.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Checksum.h381
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumAlgorithm.h33
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumMode.h30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CloudFunctionConfiguration.h22
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CommonPrefix.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadRequest.h761
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadResult.h623
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedMultipartUpload.h44
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedPart.h350
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Condition.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectRequest.h670
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResult.h98
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResultDetails.h374
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyPartResult.h350
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketRequest.h89
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketResult.h36
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadRequest.h495
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadResult.h316
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DefaultRetention.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Delete.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketCorsRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketEncryptionRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketInventoryConfigurationRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketLifecycleRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketMetricsConfigurationRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketOwnershipControlsRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketPolicyRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketReplicationRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketTaggingRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketWebsiteRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerEntry.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerReplication.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectRequest.h234
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingRequest.h216
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsRequest.h339
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletePublicAccessBlockRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletedObject.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Destination.h174
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Encryption.h88
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EncryptionConfiguration.h116
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Error.h1938
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ErrorDocument.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Event.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EventBridgeConfiguration.h42
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ExistingObjectReplication.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/FilterRule.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationRequest.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationResult.h65
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsResult.h33
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyResult.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclRequest.h122
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesParts.h238
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesRequest.h732
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesResult.h319
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldRequest.h170
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldResult.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationRequest.h116
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRequest.h407
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectResult.h438
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionRequest.h122
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingRequest.h218
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentRequest.h56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentResult.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockRequest.h52
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GlacierJobParameters.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grant.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grantee.h140
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadBucketRequest.h212
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h431
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectResult.h585
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Initiator.h46
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InputSerialization.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringAndOperator.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringConfiguration.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringFilter.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InvalidObjectState.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryConfiguration.h28
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryDestination.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryEncryption.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryFilter.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryOptionalField.h4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryS3BucketDestination.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventorySchedule.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONInput.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONOutput.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LambdaFunctionConfiguration.h84
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleExpiration.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRule.h38
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleAndOperator.h62
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleFilter.h64
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsRequest.h50
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h51
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsRequest.h54
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketsResult.h22
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsRequest.h224
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsResult.h29
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsRequest.h64
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsRequest.h248
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h228
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Result.h148
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsRequest.h450
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsResult.h88
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LoggingEnabled.h68
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetadataEntry.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Metrics.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsAndOperator.h58
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsConfiguration.h43
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsFilter.h67
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MultipartUpload.h57
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h84
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h65
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfiguration.h83
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationDeprecated.h28
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationFilter.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Object.h220
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectAttributes.h34
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectIdentifier.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockConfiguration.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockLegalHold.h26
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRetention.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRule.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectOwnership.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectPart.h433
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectStorageClass.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectVersion.h72
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputLocation.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputSerialization.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Owner.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControls.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControlsRule.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ParquetInput.h10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Part.h354
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PolicyStatus.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Progress.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ProgressEvent.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PublicAccessBlockConfiguration.h74
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAccelerateConfigurationRequest.h145
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAclRequest.h223
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAnalyticsConfigurationRequest.h56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketCorsRequest.h179
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketEncryptionRequest.h227
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketInventoryConfigurationRequest.h56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLifecycleConfigurationRequest.h145
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLoggingRequest.h179
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketMetricsConfigurationRequest.h56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketNotificationConfigurationRequest.h82
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketOwnershipControlsRequest.h120
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketPolicyRequest.h193
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketReplicationRequest.h181
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketRequestPaymentRequest.h179
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketTaggingRequest.h179
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketVersioningRequest.h181
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketWebsiteRequest.h179
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclRequest.h453
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldRequest.h325
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationRequest.h199
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRequest.h883
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectResult.h486
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionRequest.h267
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingRequest.h361
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutPublicAccessBlockRequest.h179
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfiguration.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfigurationDeprecated.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RecordsEvent.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Redirect.h20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RedirectAllRequestsTo.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicaModifications.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationConfiguration.h62
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRule.h38
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleAndOperator.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleFilter.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTime.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTimeValue.h24
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestPaymentConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestProgress.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectRequest.h311
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectResult.h8
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreRequest.h24
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RoutingRule.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Rule.h30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3KeyFilter.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3Location.h26
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSEKMS.h44
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSES3.h10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ScanRange.h34
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentHandler.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentRequest.h266
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectParameters.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionByDefault.h192
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionConfiguration.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionRule.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SourceSelectionCriteria.h48
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SseKmsEncryptedObjects.h38
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Stats.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StatsEvent.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClass.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysis.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysisDataExport.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tag.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tagging.h12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TargetGrant.h21
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tiering.h14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfiguration.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfigurationDeprecated.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Transition.h16
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TransitionStorageClass.h3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyRequest.h399
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyResult.h56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartRequest.h659
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartResult.h348
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/VersioningConfiguration.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WebsiteConfiguration.h18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WriteGetObjectResponseRequest.h838
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp210
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp3439
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp89
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp221
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp72
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp4191
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Bucket.cpp4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/BucketLocationConstraint.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp106
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp84
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp63
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp88
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadResult.cpp20
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompletedPart.cpp56
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectRequest.cpp47
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectResultDetails.cpp64
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyPartResult.cpp64
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateBucketRequest.cpp19
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadRequest.cpp21
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadResult.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketAnalyticsConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketCorsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketEncryptionRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketIntelligentTieringConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketInventoryConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketLifecycleRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketMetricsConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketOwnershipControlsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketPolicyRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketReplicationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketTaggingRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketWebsiteRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteMarkerEntry.cpp4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectTaggingRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectsRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeletePublicAccessBlockRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp70
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp51
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAccelerateConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAclRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAnalyticsConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketCorsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketEncryptionRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketIntelligentTieringConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketInventoryConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLifecycleConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationResult.cpp3
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLoggingRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketMetricsConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationResult.cpp5
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketOwnershipControlsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyStatusRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketReplicationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketRequestPaymentRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketTaggingRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketVersioningRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp163
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp144
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp96
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRequest.cpp38
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectResult.cpp38
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRetentionRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTaggingRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTorrentRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetPublicAccessBlockRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadBucketRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp21
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectResult.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/InventoryOptionalField.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleExpiration.cpp4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleAndOperator.cpp40
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleFilter.cpp36
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketAnalyticsConfigurationsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketIntelligentTieringConfigurationsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketInventoryConfigurationsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketMetricsConfigurationsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListMultipartUploadsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectVersionsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsV2Request.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsRequest.cpp34
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsResult.cpp13
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsAndOperator.cpp18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsFilter.cpp14
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MultipartUpload.cpp24
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionExpiration.cpp22
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionTransition.cpp22
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfiguration.cpp18
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp28
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp91
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetention.cpp4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp7
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp142
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp7
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp28
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Part.cpp64
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAccelerateConfigurationRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAclRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAnalyticsConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketCorsRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketEncryptionRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketIntelligentTieringConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketInventoryConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLifecycleConfigurationRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLoggingRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketMetricsConfigurationRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketNotificationConfigurationRequest.cpp19
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketOwnershipControlsRequest.cpp10
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketPolicyRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketReplicationRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketRequestPaymentRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketTaggingRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketVersioningRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketWebsiteRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectAclRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLegalHoldRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLockConfigurationRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRequest.cpp66
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectResult.cpp24
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRetentionRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectTaggingRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutPublicAccessBlockRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/RestoreObjectRequest.cpp30
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/SelectObjectContentRequest.cpp12
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/StorageClass.cpp7
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Transition.cpp4
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/TransitionStorageClass.cpp7
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartCopyRequest.cpp36
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartRequest.cpp62
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartResult.cpp24
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/WriteGetObjectResponseRequest.cpp46
-rw-r--r--contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/ya.make50
-rw-r--r--contrib/restricted/aws/CMakeLists.txt7
-rw-r--r--contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-arm64.txt77
-rw-r--r--contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-x86_64.txt77
-rw-r--r--contrib/restricted/aws/aws-c-auth/CMakeLists.linux-aarch64.txt78
-rw-r--r--contrib/restricted/aws/aws-c-auth/CMakeLists.linux-x86_64.txt78
-rw-r--r--contrib/restricted/aws/aws-c-auth/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-auth/CMakeLists.windows-x86_64.txt77
-rw-r--r--contrib/restricted/aws/aws-c-auth/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-auth/CONTRIBUTING.md61
-rw-r--r--contrib/restricted/aws/aws-c-auth/LICENSE202
-rw-r--r--contrib/restricted/aws/aws-c-auth/NOTICE3
-rw-r--r--contrib/restricted/aws/aws-c-auth/README.md71
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h83
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h481
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h1011
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/exports.h29
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_profile.h25
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h128
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h165
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/private/key_derivation.h51
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/private/sigv4_http_request.h14
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h234
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h135
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h310
-rw-r--r--contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h166
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/auth.c160
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c1753
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/aws_profile.c40
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/aws_signing.c2669
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials.c339
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c60
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c312
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c195
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c859
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c423
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c64
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c590
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c79
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c208
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c258
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c454
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c71
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c848
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c1188
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c629
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/credentials_utils.c294
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/key_derivation.c370
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signable.c165
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signable_chunk.c116
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signable_http_request.c130
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signable_trailer.c138
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signing.c183
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signing_config.c133
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/signing_result.c247
-rw-r--r--contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c168
-rw-r--r--contrib/restricted/aws/aws-c-auth/ya.make87
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.darwin-arm64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.darwin-x86_64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.linux-aarch64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.linux-x86_64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-common/CMakeLists.windows-x86_64.txt2
-rw-r--r--contrib/restricted/aws/aws-c-common/README.md53
-rw-r--r--contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h2
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl7
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/assert.h20
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/bus.h97
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/common.h4
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h1
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/encoding.h74
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/error.h5
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h3
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/file.h5
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/json.h4
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/logging.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/macros.h4
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/math.gcc_builtin.inl26
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/math.h6
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/math.inl2
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/math.msvc.inl162
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/private/hash_table_impl.h2
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl4
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/string.h5
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/thread.h28
-rw-r--r--contrib/restricted/aws/aws-c-common/include/aws/common/uri.h2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/allocator.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c15
-rw-r--r--contrib/restricted/aws/aws-c-common/source/array_list.c5
-rw-r--r--contrib/restricted/aws/aws-c-common/source/bus.c724
-rw-r--r--contrib/restricted/aws/aws-c-common/source/byte_buf.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/command_line_parser.c10
-rw-r--r--contrib/restricted/aws/aws-c-common/source/common.c8
-rw-r--r--contrib/restricted/aws/aws-c-common/source/date_time.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/encoding.c115
-rw-r--r--contrib/restricted/aws/aws-c-common/source/error.c18
-rw-r--r--contrib/restricted/aws/aws-c-common/source/external/cJSON.c11
-rw-r--r--contrib/restricted/aws/aws-c-common/source/file.c29
-rw-r--r--contrib/restricted/aws/aws-c-common/source/json.c49
-rw-r--r--contrib/restricted/aws/aws-c-common/source/log_formatter.c2
-rw-r--r--contrib/restricted/aws/aws-c-common/source/log_writer.c5
-rw-r--r--contrib/restricted/aws/aws-c-common/source/logging.c13
-rw-r--r--contrib/restricted/aws/aws-c-common/source/memtrace.c81
-rw-r--r--contrib/restricted/aws/aws-c-common/source/posix/file.c102
-rw-r--r--contrib/restricted/aws/aws-c-common/source/posix/thread.c40
-rw-r--r--contrib/restricted/aws/aws-c-common/source/priority_queue.c10
-rw-r--r--contrib/restricted/aws/aws-c-common/source/uri.c22
-rw-r--r--contrib/restricted/aws/aws-c-common/source/xml_parser.c12
-rw-r--r--contrib/restricted/aws/aws-c-common/ya.make8
-rw-r--r--contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-arm64.txt27
-rw-r--r--contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-x86_64.txt27
-rw-r--r--contrib/restricted/aws/aws-c-compression/CMakeLists.linux-aarch64.txt28
-rw-r--r--contrib/restricted/aws/aws-c-compression/CMakeLists.linux-x86_64.txt28
-rw-r--r--contrib/restricted/aws/aws-c-compression/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-compression/CMakeLists.windows-x86_64.txt27
-rw-r--r--contrib/restricted/aws/aws-c-compression/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-compression/CONTRIBUTING.md61
-rw-r--r--contrib/restricted/aws/aws-c-compression/LICENSE202
-rw-r--r--contrib/restricted/aws/aws-c-compression/NOTICE3
-rw-r--r--contrib/restricted/aws/aws-c-compression/README.md183
-rw-r--r--contrib/restricted/aws/aws-c-compression/include/aws/compression/compression.h35
-rw-r--r--contrib/restricted/aws/aws-c-compression/include/aws/compression/exports.h28
-rw-r--r--contrib/restricted/aws/aws-c-compression/include/aws/compression/huffman.h161
-rw-r--r--contrib/restricted/aws/aws-c-compression/include/aws/compression/private/huffman_testing.h99
-rw-r--r--contrib/restricted/aws/aws-c-compression/source/compression.c44
-rw-r--r--contrib/restricted/aws/aws-c-compression/source/huffman.c285
-rw-r--r--contrib/restricted/aws/aws-c-compression/source/huffman_testing.c173
-rw-r--r--contrib/restricted/aws/aws-c-compression/ya.make37
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-arm64.txt9
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-x86_64.txt9
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-aarch64.txt9
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-x86_64.txt9
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/CMakeLists.windows-x86_64.txt9
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/README.md53
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream.h218
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_client.h16
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_server.h13
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/private/event_stream_rpc_priv.h14
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/source/event_stream.c774
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/source/event_stream_channel_handler.c2
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_client.c50
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_server.c73
-rw-r--r--contrib/restricted/aws/aws-c-event-stream/ya.make15
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt70
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt70
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt71
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt71
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt70
-rw-r--r--contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-http/CONTRIBUTING.md61
-rw-r--r--contrib/restricted/aws/aws-c-http/LICENSE202
-rw-r--r--contrib/restricted/aws/aws-c-http/NOTICE3
-rw-r--r--contrib/restricted/aws/aws-c-http/README.md61
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/connection.h679
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h194
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/exports.h29
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/http.h158
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h215
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h210
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h50
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h46
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h201
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h90
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h140
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h123
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h289
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h121
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h299
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h190
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h297
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def74
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h199
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h100
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h236
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h86
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h69
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h84
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h79
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h57
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h115
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h570
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h1072
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/server.h198
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h75
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h82
-rw-r--r--contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h483
-rw-r--r--contrib/restricted/aws/aws-c-http/source/connection.c1200
-rw-r--r--contrib/restricted/aws/aws-c-http/source/connection_manager.c1560
-rw-r--r--contrib/restricted/aws/aws-c-http/source/connection_monitor.c235
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_connection.c2064
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_decoder.c761
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_encoder.c915
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h1_stream.c535
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_connection.c2850
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_decoder.c1592
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_frames.c1233
-rw-r--r--contrib/restricted/aws/aws-c-http/source/h2_stream.c1321
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack.c525
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack_decoder.c446
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack_encoder.c418
-rw-r--r--contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c2337
-rw-r--r--contrib/restricted/aws/aws-c-http/source/http.c565
-rw-r--r--contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c1238
-rw-r--r--contrib/restricted/aws/aws-c-http/source/proxy_connection.c1658
-rw-r--r--contrib/restricted/aws/aws-c-http/source/proxy_strategy.c1703
-rw-r--r--contrib/restricted/aws/aws-c-http/source/random_access_set.c187
-rw-r--r--contrib/restricted/aws/aws-c-http/source/request_response.c1228
-rw-r--r--contrib/restricted/aws/aws-c-http/source/statistics.c35
-rw-r--r--contrib/restricted/aws/aws-c-http/source/strutil.c232
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket.c1790
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c866
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket_decoder.c387
-rw-r--r--contrib/restricted/aws/aws-c-http/source/websocket_encoder.c375
-rw-r--r--contrib/restricted/aws/aws-c-http/ya.make80
-rw-r--r--contrib/restricted/aws/aws-c-io/include/aws/io/uri.h11
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-arm64.txt61
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-x86_64.txt61
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-aarch64.txt62
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-x86_64.txt62
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CMakeLists.windows-x86_64.txt61
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/CONTRIBUTING.md61
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/LICENSE202
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/NOTICE3
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/README.md208
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/client.h647
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/exports.h27
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/mqtt.h120
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/client_impl.h399
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/fixed_header.h62
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/mqtt_client_test_helper.h37
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/packets.h351
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/shared_constants.h18
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/topic_tree.h174
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_callbacks.h90
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_client_impl.h648
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_decoder.h264
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_encoder.h357
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_options_storage.h343
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_topic_alias.h66
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_utils.h363
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/rate_limiters.h110
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_client.h809
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_listener.h85
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_packet_storage.h336
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_types.h486
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/client.c3232
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c1061
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c144
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/mqtt.c282
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/packets.c1142
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c22
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c929
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c159
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c3344
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c1174
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c1283
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c121
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c3984
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c586
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c333
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c574
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c217
-rw-r--r--contrib/restricted/aws/aws-c-mqtt/ya.make71
-rw-r--r--contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-arm64.txt68
-rw-r--r--contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-x86_64.txt68
-rw-r--r--contrib/restricted/aws/aws-c-s3/CMakeLists.linux-aarch64.txt69
-rw-r--r--contrib/restricted/aws/aws-c-s3/CMakeLists.linux-x86_64.txt69
-rw-r--r--contrib/restricted/aws/aws-c-s3/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-s3/CMakeLists.windows-x86_64.txt68
-rw-r--r--contrib/restricted/aws/aws-c-s3/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-s3/CONTRIBUTING.md61
-rw-r--r--contrib/restricted/aws/aws-c-s3/LICENSE175
-rw-r--r--contrib/restricted/aws/aws-c-s3/NOTICE1
-rw-r--r--contrib/restricted/aws/aws-c-s3/README.md74
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/exports.h29
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h64
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h117
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_checksums.h149
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h439
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h76
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h37
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h121
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h124
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h352
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h169
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h152
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h197
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h255
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h95
-rw-r--r--contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h664
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3.c174
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c722
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c1305
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c116
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_checksums.c310
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c278
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_client.c2038
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c787
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c298
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c334
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c300
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c294
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c1607
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_paginator.c466
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_request.c97
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c1142
-rw-r--r--contrib/restricted/aws/aws-c-s3/source/s3_util.c558
-rw-r--r--contrib/restricted/aws/aws-c-s3/ya.make78
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt33
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt33
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt34
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt34
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt33
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md59
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/LICENSE175
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/NOTICE1
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/README.md29
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h218
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h303
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h30
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h38
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h314
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h136
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h44
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h51
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c1592
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c1132
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c958
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c639
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c235
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c588
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/partitions.c283
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c108
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c67
-rw-r--r--contrib/restricted/aws/aws-c-sdkutils/ya.make43
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-arm64.txt90
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-x86_64.txt90
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-aarch64.txt91
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-x86_64.txt91
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CMakeLists.txt19
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CMakeLists.windows-x86_64.txt90
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CODE_OF_CONDUCT.md4
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/CONTRIBUTING.md62
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/LICENSE202
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/NOTICE3
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/README.md144
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Allocator.h47
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Api.h218
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Config.h11
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/DateTime.h198
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Exports.h39
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/ImdsClient.h386
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/JsonObject.h406
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Optional.h203
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/RefCounted.h68
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StlAllocator.h63
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringUtils.h21
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringView.h864
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Types.h165
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/UUID.h42
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Credentials.h585
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Signing.h99
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Sigv4Signing.h352
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/HMAC.h150
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/Hash.h168
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/endpoints/RuleEngine.h155
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/external/cJSON.h309
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnection.h514
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnectionManager.h127
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpProxyStrategy.h116
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpRequestResponse.h160
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Bootstrap.h104
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/ChannelHandler.h238
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/EventLoopGroup.h74
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/HostResolver.h123
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Pkcs11.h116
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/SocketOptions.h157
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Stream.h173
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/TlsOptions.h453
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Uri.h102
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Client.h770
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Packets.h2286
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Types.h120
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/MqttClient.h532
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/iot/Mqtt5Client.h548
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttClient.h450
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttCommon.h103
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp21
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/Api.cpp405
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp200
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp457
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp596
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp15
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/Types.cpp103
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp54
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp478
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp274
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp173
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp174
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp169
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp3120
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp400
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp236
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp196
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp151
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp122
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp217
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp71
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp121
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp69
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp28
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp211
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp520
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp145
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp641
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp541
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp88
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp743
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp1236
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp816
-rw-r--r--contrib/restricted/aws/aws-crt-cpp/ya.make102
1055 files changed, 172533 insertions, 25187 deletions
diff --git a/.mapping.json b/.mapping.json
index 457ddf8ee3..72d41bc62c 100644
--- a/.mapping.json
+++ b/.mapping.json
@@ -1319,6 +1319,12 @@
"contrib/restricted/abseil-cpp/absl/utility/CMakeLists.txt":"",
"contrib/restricted/abseil-cpp/absl/utility/CMakeLists.windows-x86_64.txt":"",
"contrib/restricted/aws/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-auth/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-c-auth/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-auth/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-auth/CMakeLists.windows-x86_64.txt":"",
"contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-arm64.txt":"",
"contrib/restricted/aws/aws-c-cal/CMakeLists.darwin-x86_64.txt":"",
"contrib/restricted/aws/aws-c-cal/CMakeLists.linux-aarch64.txt":"",
@@ -1331,24 +1337,60 @@
"contrib/restricted/aws/aws-c-common/CMakeLists.linux-x86_64.txt":"",
"contrib/restricted/aws/aws-c-common/CMakeLists.txt":"",
"contrib/restricted/aws/aws-c-common/CMakeLists.windows-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-compression/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-c-compression/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-compression/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-compression/CMakeLists.windows-x86_64.txt":"",
"contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-arm64.txt":"",
"contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-x86_64.txt":"",
"contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-aarch64.txt":"",
"contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-x86_64.txt":"",
"contrib/restricted/aws/aws-c-event-stream/CMakeLists.txt":"",
"contrib/restricted/aws/aws-c-event-stream/CMakeLists.windows-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-http/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt":"",
"contrib/restricted/aws/aws-c-io/CMakeLists.darwin-arm64.txt":"",
"contrib/restricted/aws/aws-c-io/CMakeLists.darwin-x86_64.txt":"",
"contrib/restricted/aws/aws-c-io/CMakeLists.linux-aarch64.txt":"",
"contrib/restricted/aws/aws-c-io/CMakeLists.linux-x86_64.txt":"",
"contrib/restricted/aws/aws-c-io/CMakeLists.txt":"",
"contrib/restricted/aws/aws-c-io/CMakeLists.windows-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-mqtt/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-mqtt/CMakeLists.windows-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-s3/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-c-s3/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-s3/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-s3/CMakeLists.windows-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt":"",
"contrib/restricted/aws/aws-checksums/CMakeLists.darwin-arm64.txt":"",
"contrib/restricted/aws/aws-checksums/CMakeLists.darwin-x86_64.txt":"",
"contrib/restricted/aws/aws-checksums/CMakeLists.linux-aarch64.txt":"",
"contrib/restricted/aws/aws-checksums/CMakeLists.linux-x86_64.txt":"",
"contrib/restricted/aws/aws-checksums/CMakeLists.txt":"",
"contrib/restricted/aws/aws-checksums/CMakeLists.windows-x86_64.txt":"",
+ "contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-arm64.txt":"",
+ "contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-x86_64.txt":"",
+ "contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-aarch64.txt":"",
+ "contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-x86_64.txt":"",
+ "contrib/restricted/aws/aws-crt-cpp/CMakeLists.txt":"",
+ "contrib/restricted/aws/aws-crt-cpp/CMakeLists.windows-x86_64.txt":"",
"contrib/restricted/aws/s2n/CMakeLists.darwin-arm64.txt":"",
"contrib/restricted/aws/s2n/CMakeLists.darwin-x86_64.txt":"",
"contrib/restricted/aws/s2n/CMakeLists.linux-aarch64.txt":"",
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-arm64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-arm64.txt
index 0754432177..34e01718e7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-arm64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-arm64.txt
@@ -9,32 +9,44 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-core)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
-DCURL_HAS_H2
-DCURL_HAS_TLS_PROXY
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_CURL_LOGGING
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -45,8 +57,17 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-core PUBLIC
contrib-libs-cxxsupp
contrib-libs-curl
contrib-libs-openssl
+ contrib-libs-zlib
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonSerializableWebServiceRequest.cpp
@@ -56,26 +77,52 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Version.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AsyncCallerContext.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/SpecifiedRetryableErrorsRetryStrategy.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpTypes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/Scheme.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
@@ -92,6 +139,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DNS.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Directory.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/EnumParseOverflowContainer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/FileSystemUtils.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/GetTheLights.cpp
@@ -100,6 +148,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/TempFile.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/UUID.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/base64/Base64.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Cipher.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoMaterial.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoScheme.cpp
@@ -123,6 +172,8 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/AWSLogging.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/ConsoleLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/DefaultLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-x86_64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-x86_64.txt
index 0754432177..34e01718e7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-x86_64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.darwin-x86_64.txt
@@ -9,32 +9,44 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-core)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
-DCURL_HAS_H2
-DCURL_HAS_TLS_PROXY
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_CURL_LOGGING
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -45,8 +57,17 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-core PUBLIC
contrib-libs-cxxsupp
contrib-libs-curl
contrib-libs-openssl
+ contrib-libs-zlib
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonSerializableWebServiceRequest.cpp
@@ -56,26 +77,52 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Version.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AsyncCallerContext.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/SpecifiedRetryableErrorsRetryStrategy.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpTypes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/Scheme.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
@@ -92,6 +139,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DNS.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Directory.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/EnumParseOverflowContainer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/FileSystemUtils.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/GetTheLights.cpp
@@ -100,6 +148,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/TempFile.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/UUID.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/base64/Base64.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Cipher.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoMaterial.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoScheme.cpp
@@ -123,6 +172,8 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/AWSLogging.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/ConsoleLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/DefaultLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-aarch64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-aarch64.txt
index 577d096b9a..43002865c6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-aarch64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-aarch64.txt
@@ -9,32 +9,44 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-core)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
-DCURL_HAS_H2
-DCURL_HAS_TLS_PROXY
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_CURL_LOGGING
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -46,8 +58,17 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-core PUBLIC
contrib-libs-cxxsupp
contrib-libs-curl
contrib-libs-openssl
+ contrib-libs-zlib
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonSerializableWebServiceRequest.cpp
@@ -57,26 +78,52 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Version.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AsyncCallerContext.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/SpecifiedRetryableErrorsRetryStrategy.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpTypes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/Scheme.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
@@ -93,6 +140,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DNS.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Directory.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/EnumParseOverflowContainer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/FileSystemUtils.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/GetTheLights.cpp
@@ -101,6 +149,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/TempFile.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/UUID.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/base64/Base64.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Cipher.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoMaterial.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoScheme.cpp
@@ -124,6 +173,8 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/AWSLogging.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/ConsoleLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/DefaultLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-x86_64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-x86_64.txt
index 577d096b9a..43002865c6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-x86_64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.linux-x86_64.txt
@@ -9,32 +9,44 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-core)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
-DCURL_HAS_H2
-DCURL_HAS_TLS_PROXY
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_CURL_LOGGING
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -46,8 +58,17 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-core PUBLIC
contrib-libs-cxxsupp
contrib-libs-curl
contrib-libs-openssl
+ contrib-libs-zlib
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonSerializableWebServiceRequest.cpp
@@ -57,26 +78,52 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Version.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AsyncCallerContext.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/SpecifiedRetryableErrorsRetryStrategy.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpTypes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/Scheme.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
@@ -93,6 +140,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DNS.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Directory.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/EnumParseOverflowContainer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/FileSystemUtils.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/GetTheLights.cpp
@@ -101,6 +149,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/TempFile.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/UUID.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/base64/Base64.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Cipher.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoMaterial.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoScheme.cpp
@@ -124,6 +173,8 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/AWSLogging.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/ConsoleLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/DefaultLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.windows-x86_64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.windows-x86_64.txt
index 604debb4b7..ad17203f81 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.windows-x86_64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/CMakeLists.windows-x86_64.txt
@@ -9,32 +9,44 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-core)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
-DCURL_HAS_H2
-DCURL_HAS_TLS_PROXY
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_CURL_LOGGING
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -45,8 +57,17 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-core PUBLIC
contrib-libs-cxxsupp
contrib-libs-curl
contrib-libs-openssl
+ contrib-libs-zlib
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonSerializableWebServiceRequest.cpp
@@ -56,26 +77,52 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Version.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AsyncCallerContext.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/SpecifiedRetryableErrorsRetryStrategy.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClient.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpTypes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/Scheme.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
@@ -92,6 +139,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DNS.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Directory.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/EnumParseOverflowContainer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/FileSystemUtils.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/GetTheLights.cpp
@@ -100,6 +148,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/TempFile.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/UUID.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/base64/Base64.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Cipher.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoMaterial.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/ContentCryptoScheme.cpp
@@ -123,6 +172,8 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-core PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamErrors.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/AWSLogging.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/ConsoleLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/DefaultLogSystem.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceRequest.h
index 1fc7c0e7e0..3578307022 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceRequest.h
@@ -7,13 +7,16 @@
#include <aws/core/Core_EXPORTS.h>
-#include <aws/core/utils/memory/stl/AWSString.h>
-#include <aws/core/utils/UnreferencedParam.h>
-#include <aws/core/http/HttpTypes.h>
+#include <aws/core/client/RequestCompression.h>
+#include <aws/core/auth/AWSAuthSigner.h>
+#include <aws/core/client/CoreErrors.h>
+#include <aws/core/endpoint/EndpointParameter.h>
#include <aws/core/http/HttpRequest.h>
+#include <aws/core/http/HttpTypes.h>
+#include <aws/core/utils/UnreferencedParam.h>
#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/stream/ResponseStream.h>
-#include <aws/core/auth/AWSAuthSigner.h>
namespace Aws
{
@@ -51,12 +54,21 @@ namespace Aws
*/
virtual Aws::Http::HeaderValueCollection GetHeaders() const = 0;
/**
+ * Get the additional user-set custom headers for the request
+ */
+ virtual const Aws::Http::HeaderValueCollection& GetAdditionalCustomHeaders() const;
+ /**
+ * Set an additional custom header value under a key. This value will overwrite any previously set or regular header.
+ */
+ virtual void SetAdditionalCustomHeaderValue(const Aws::String& headerName, const Aws::String& headerValue);
+
+ /**
* Do nothing virtual, override this to add query strings to the request
*/
virtual void AddQueryStringParameters(Aws::Http::URI& uri) const { AWS_UNREFERENCED_PARAM(uri); }
/**
- * Put the request to a url for later presigning. This will push the body to the url and
+ * Put the request to a url for later presigning. This will push the body to the url and
* then adds the existing query string parameters as normal.
*/
virtual void PutToPresignedUrl(Aws::Http::URI& uri) const { DumpBodyToUrl(uri); AddQueryStringParameters(uri); }
@@ -76,6 +88,15 @@ namespace Aws
virtual bool SignBody() const { return true; }
/**
+ * Defaults to false, if a derived class returns true it indicates that the body has an embedded error.
+ */
+ virtual bool HasEmbeddedError(Aws::IOStream& body, const Aws::Http::HeaderValueCollection& header) const {
+ (void) body;
+ (void) header;
+ return false;
+ }
+
+ /**
* Defaults to false, if this is set to true, it supports chunked transfer encoding.
*/
virtual bool IsChunked() const { return false; }
@@ -150,15 +171,28 @@ namespace Aws
*/
inline virtual bool ShouldComputeContentMd5() const { return false; }
+ inline virtual bool ShouldValidateResponseChecksum() const { return false; }
+
+ inline virtual Aws::Vector<Aws::String> GetResponseChecksumAlgorithmNames() const { return {}; }
+
+ inline virtual Aws::String GetChecksumAlgorithmName() const { return {}; }
+
virtual const char* GetServiceRequestName() const = 0;
+ using EndpointParameters = Aws::Vector<Aws::Endpoint::EndpointParameter>;
+ virtual EndpointParameters GetEndpointContextParams() const;
+
+ virtual Aws::Client::CompressionAlgorithm
+ GetSelectedCompressionAlgorithm(Aws::Client::RequestCompressionConfig) const { return Aws::Client::CompressionAlgorithm::NONE; }
+
protected:
/**
- * Default does nothing. Override this to convert what would otherwise be the payload of the
+ * Default does nothing. Override this to convert what would otherwise be the payload of the
* request to a query string format.
*/
virtual void DumpBodyToUrl(Aws::Http::URI& uri) const { AWS_UNREFERENCED_PARAM(uri); }
+ Aws::Http::HeaderValueCollection m_additionalCustomHeaders;
private:
Aws::IOStreamFactory m_responseStreamFactory;
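Note: the hooks added to AmazonWebServiceRequest above (additional custom headers, embedded-error detection, response-checksum selection, endpoint context parameters and request compression) are ordinary virtuals with defaults, so they can be exercised on any concrete request type. A minimal sketch, assuming a hypothetical MyServiceRequest that overrides only the two pure virtuals visible in this hunk (GetHeaders and GetServiceRequestName); the header name used is purely illustrative:

    #include <aws/core/AmazonWebServiceRequest.h>
    #include <aws/core/http/HttpTypes.h>

    // Hypothetical request type; real service requests derive from
    // Aws::AmazonWebServiceRequest in the same way.
    class MyServiceRequest : public Aws::AmazonWebServiceRequest
    {
    public:
        const char* GetServiceRequestName() const override { return "MyServiceRequest"; }
        Aws::Http::HeaderValueCollection GetHeaders() const override { return {}; }
    };

    void TagRequest(MyServiceRequest& request)
    {
        // Headers set here land in m_additionalCustomHeaders and, per the
        // doc comment above, overwrite any previously set or regular header
        // of the same name when the request is serialized.
        request.SetAdditionalCustomHeaderValue("x-amz-meta-trace-id", "example-trace");

        const Aws::Http::HeaderValueCollection& extra = request.GetAdditionalCustomHeaders();
        (void)extra; // one entry: x-amz-meta-trace-id -> example-trace
    }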
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceResult.h
index cde25d80f6..37538accb7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/AmazonWebServiceResult.h
@@ -57,7 +57,7 @@ namespace Aws
/**
* Get the payload from the response and take ownership of it.
*/
- inline PAYLOAD_TYPE TakeOwnershipOfPayload() { return std::move(m_payload); }
+ inline PAYLOAD_TYPE&& TakeOwnershipOfPayload() { return std::move(m_payload); }
/**
* Get the headers from the response
*/
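Note: TakeOwnershipOfPayload() now returns PAYLOAD_TYPE&& instead of PAYLOAD_TYPE, so the payload is moved only when the caller binds the result to an object. A short sketch of the intended call pattern, using Aws::Utils::Json::JsonValue purely as an illustrative payload type:

    #include <aws/core/AmazonWebServiceResult.h>
    #include <aws/core/utils/json/JsonSerializer.h>

    using JsonResult = Aws::AmazonWebServiceResult<Aws::Utils::Json::JsonValue>;

    Aws::Utils::Json::JsonValue ExtractPayload(JsonResult& result)
    {
        // Binding the returned rvalue reference move-constructs the payload
        // out of the result; the result keeps only a moved-from payload.
        Aws::Utils::Json::JsonValue payload = result.TakeOwnershipOfPayload();
        return payload;
    }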
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Aws.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Aws.h
index 7c2d7af53f..03f965a3e6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Aws.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Aws.h
@@ -6,11 +6,14 @@
#include <aws/core/utils/logging/LogLevel.h>
#include <aws/core/utils/logging/LogSystemInterface.h>
+#include <aws/core/utils/logging/CRTLogSystem.h>
#include <aws/core/utils/memory/MemorySystemInterface.h>
#include <aws/core/utils/crypto/Factories.h>
#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/monitoring/MonitoringManager.h>
#include <aws/core/Core_EXPORTS.h>
+#include <aws/crt/io/Bootstrap.h>
+#include <aws/crt/io/TlsOptions.h>
namespace Aws
{
@@ -35,10 +38,16 @@ namespace Aws
const char* defaultLogPrefix;
/**
- * Defaults to empty, if logLevel has been set and this field is empty, then the default log interface will be used.
+ * Defaults to empty, if logLevel has been set and this field is empty, then the default log system will be used.
* otherwise, we will call this closure to create a logger
*/
std::function<std::shared_ptr<Aws::Utils::Logging::LogSystemInterface>()> logger_create_fn;
+
+ /**
+ * Defaults to empty, if logLevel has been set and this field is empty, then the default CRT log system will be used.
+ * The default CRT log system will redirect all logs from common runtime libraries (CRT) to C++ SDK with the same log level and formatting.
+ */
+ std::function<std::shared_ptr<Aws::Utils::Logging::CRTLogSystemInterface>()> crt_logger_create_fn;
};
/**
@@ -58,11 +67,20 @@ namespace Aws
};
/**
+ * SDK wide options for I/O: client bootstrap and TLS connection options
+ */
+ struct IoOptions
+ {
+ std::function<std::shared_ptr<Aws::Crt::Io::ClientBootstrap>()> clientBootstrap_create_fn;
+ std::function<std::shared_ptr<Aws::Crt::Io::TlsConnectionOptions>()> tlsConnectionOptions_create_fn;
+ };
+
+ /**
* SDK wide options for http
*/
struct HttpOptions
{
- HttpOptions() : initAndCleanupCurl(true), installSigPipeHandler(false)
+ HttpOptions() : initAndCleanupCurl(true), installSigPipeHandler(false), compliantRfc3986Encoding(false)
{ }
/**
@@ -82,6 +100,10 @@ namespace Aws
* NOTE: CURLOPT_NOSIGNAL is already being set.
*/
bool installSigPipeHandler;
+ /**
+ * Disable legacy URL encoding that leaves `$&,:@=` unescaped for legacy purposes.
+ */
+ bool compliantRfc3986Encoding;
};
/**
@@ -194,6 +216,10 @@ namespace Aws
struct SDKOptions
{
/**
+ * SDK wide options for I/O: client bootstrap and TLS connection options
+ */
+ IoOptions ioOptions;
+ /**
* SDK wide options for logging
*/
LoggingOptions loggingOptions;
@@ -260,7 +286,10 @@ namespace Aws
/**
* Shutdown SDK wide state for the SDK. This method must be called when you are finished using the SDK.
- * Do not call any other SDK methods after calling ShutdownAPI.
+ * Notes:
+ * 1) Please call this from the same thread from which InitAPI() has been called (use a dedicated thread
+ * if necessary). This avoids problems in initializing the dependent Common RunTime C libraries.
+ * 2) Do not call any other SDK methods after calling ShutdownAPI.
*/
AWS_CORE_API void ShutdownAPI(const SDKOptions& options);
}
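Note: the new SDKOptions members are plain factories and flags read once by InitAPI, so opting in only means filling the struct up front. A minimal sketch that keeps the default I/O and CRT-logger factories and sets just the logging level and the new RFC 3986 flag; httpOptions is assumed to be the pre-existing HttpOptions member of SDKOptions (not shown in this hunk):

    #include <aws/core/Aws.h>

    int main()
    {
        Aws::SDKOptions options;

        // Strict RFC 3986 encoding instead of leaving `$&,:@=` unescaped.
        options.httpOptions.compliantRfc3986Encoding = true;

        // Leaving ioOptions and crt_logger_create_fn empty keeps the defaults:
        // the SDK creates its own ClientBootstrap/TlsConnectionOptions and
        // forwards CRT logs to the C++ SDK log system at the same level.
        options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Info;

        Aws::InitAPI(options);
        // ... use the SDK ...
        // Per the ShutdownAPI note above: call it from the thread that ran
        // InitAPI and make no further SDK calls afterwards.
        Aws::ShutdownAPI(options);
        return 0;
    }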
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Core_EXPORTS.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Core_EXPORTS.h
index 12e9dfc9be..1d3e312a60 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Core_EXPORTS.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Core_EXPORTS.h
@@ -21,8 +21,14 @@
#else // USE_IMPORT_EXPORT
#define AWS_CORE_API
#endif // USE_IMPORT_EXPORT
+ #define AWS_CORE_LOCAL
#else // defined (USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32)
#define AWS_CORE_API
+ #if __GNUC__ >= 4
+ #define AWS_CORE_LOCAL __attribute__((visibility("hidden")))
+ #else
+ #define AWS_CORE_LOCAL
+ #endif
#endif // defined (USE_WINDOWS_DLL_SEMANTICS) || defined (_WIN32)
#ifdef _MSC_VER
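Note: AWS_CORE_LOCAL is the counterpart of the export macro: on GCC-compatible builds it hides a symbol from the shared object's export table, and it expands to nothing on Windows DLL builds. An illustration of how such a macro is typically applied; both classes here are hypothetical, not part of the SDK:

    #include <aws/core/Core_EXPORTS.h>

    // Exported: visible to consumers of the core library.
    class AWS_CORE_API PublicThing
    {
    public:
        int Value() const { return 42; }
    };

    // Hidden on __GNUC__ >= 4 via __attribute__((visibility("hidden")));
    // no effect where the macro expands to nothing.
    class AWS_CORE_LOCAL InternalHelper
    {
    public:
        int Twice(int x) const { return 2 * x; }
    };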
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Globals.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Globals.h
index cebe4891b7..08dfafce09 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Globals.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Globals.h
@@ -9,6 +9,54 @@
namespace Aws
{
+ namespace Crt
+ {
+ class ApiHandle;
+
+ namespace Io
+ {
+ class ClientBootstrap;
+ class TlsConnectionOptions;
+
+ }
+ }
+
+ /**
+ * Like we need to call InitAPI() to initialize aws-sdk-cpp, we need ApiHandle to initialize aws-crt-cpp, which is a wrapper of a collection of common runtime libraries.
+ * We will wrap the memory management system and pass it to common runtime libraries via ApiHandle.
+ */
+ AWS_CORE_API Aws::Crt::ApiHandle* GetApiHandle();
+
+ /**
+ * Set the default ClientBootStrap for AWS common runtime libraries in the global scope.
+ */
+ AWS_CORE_API void SetDefaultClientBootstrap(const std::shared_ptr<Aws::Crt::Io::ClientBootstrap>& clientBootstrap);
+
+ /**
+ * Get the default ClientBootStrap for AWS common runtime libraries in the global scope.
+ */
+ AWS_CORE_API Aws::Crt::Io::ClientBootstrap* GetDefaultClientBootstrap();
+
+ /**
+ * Set the default TlsConnectionOptions for AWS common runtime libraries in the global scope.
+ */
+ AWS_CORE_API void SetDefaultTlsConnectionOptions(const std::shared_ptr<Aws::Crt::Io::TlsConnectionOptions>& tlsConnectionOptions);
+
+ /**
+ * Get the default TlsConnectionOptions for AWS common runtime libraries in the global scope.
+ */
+ AWS_CORE_API Aws::Crt::Io::TlsConnectionOptions* GetDefaultTlsConnectionOptions();
+
+ /**
+ * Initialize ApiHandle in aws-crt-cpp.
+ */
+ void InitializeCrt();
+
+ /**
+ * Clean up ApiHandle in aws-crt-cpp.
+ */
+ void CleanupCrt();
+
namespace Utils
{
class EnumParseOverflowContainer;
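Note: these globals expose the aws-crt-cpp state the SDK now manages: an ApiHandle plus process-wide default ClientBootstrap and TlsConnectionOptions. A small sketch that only reads the defaults; supplying custom instances is presumably done through SDKOptions::ioOptions during InitAPI rather than by calling the setters directly, which is an assumption about typical usage rather than a documented requirement:

    #include <aws/core/Globals.h>

    void InspectCrtDefaults()
    {
        // Raw pointers owned by the SDK; expect null before InitAPI() has
        // installed the defaults or after ShutdownAPI() has torn them down.
        Aws::Crt::Io::ClientBootstrap* bootstrap = Aws::GetDefaultClientBootstrap();
        Aws::Crt::Io::TlsConnectionOptions* tls = Aws::GetDefaultTlsConnectionOptions();

        if (bootstrap == nullptr || tls == nullptr)
        {
            // CRT defaults are not available yet.
        }
    }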
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Region.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Region.h
index a728c829ed..9bfeba1ad3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Region.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/Region.h
@@ -20,33 +20,42 @@ namespace Aws
// You can specify this region to corresponding environment variable, config file item and in your code.
// For services without global region, the request will be directed to us-east-1
static const char AWS_GLOBAL[] = "aws-global";
- static const char US_EAST_1[] = "us-east-1";
- static const char US_EAST_2[] = "us-east-2";
- static const char US_WEST_1[] = "us-west-1";
- static const char US_WEST_2[] = "us-west-2";
- static const char AF_SOUTH_1[] = "af-south-1";
- static const char EU_WEST_1[] = "eu-west-1";
- static const char EU_WEST_2[] = "eu-west-2";
- static const char EU_WEST_3[] = "eu-west-3";
- static const char EU_CENTRAL_1[] = "eu-central-1";
- static const char EU_NORTH_1[] = "eu-north-1";
- static const char AP_EAST_1[] = "ap-east-1";
- static const char AP_SOUTH_1[] = "ap-south-1";
- static const char AP_SOUTHEAST_1[] = "ap-southeast-1";
- static const char AP_SOUTHEAST_2[] = "ap-southeast-2";
- static const char AP_NORTHEAST_1[] = "ap-northeast-1";
- static const char AP_NORTHEAST_2[] = "ap-northeast-2";
- static const char AP_NORTHEAST_3[] = "ap-northeast-3";
- static const char SA_EAST_1[] = "sa-east-1";
- static const char CA_CENTRAL_1[] = "ca-central-1";
- static const char CN_NORTH_1[] = "cn-north-1";
- static const char CN_NORTHWEST_1[] = "cn-northwest-1";
- static const char ME_SOUTH_1[] = "me-south-1";
- static const char US_GOV_WEST_1[] = "us-gov-west-1";
- static const char US_GOV_EAST_1[] = "us-gov-east-1";
+ static const char US_EAST_1[] = "us-east-1"; // US East (N. Virginia)
+ static const char US_EAST_2[] = "us-east-2"; // US East (Ohio)
+ static const char US_WEST_1[] = "us-west-1"; // US West (N. California)
+ static const char US_WEST_2[] = "us-west-2"; // US West (Oregon)
+ static const char EU_WEST_1[] = "eu-west-1"; // EU (Ireland)
+ static const char EU_WEST_2[] = "eu-west-2"; // EU (London)
+ static const char EU_WEST_3[] = "eu-west-3"; // EU (Paris)
+ static const char EU_CENTRAL_1[] = "eu-central-1"; // EU (Frankfurt)
+ static const char EU_NORTH_1[] = "eu-north-1"; // EU (Stockholm)
+ static const char EU_SOUTH_1[] = "eu-south-1"; // EU (Milan)
+ static const char AP_EAST_1[] = "ap-east-1"; // Asia Pacific (Hong Kong)
+ static const char AP_SOUTH_1[] = "ap-south-1"; // Asia Pacific (Mumbai)
+ static const char AP_SOUTHEAST_1[] = "ap-southeast-1"; // Asia Pacific (Singapore)
+ static const char AP_SOUTHEAST_2[] = "ap-southeast-2"; // Asia Pacific (Sydney)
+ static const char AP_SOUTHEAST_3[] = "ap-southeast-3"; // Asia Pacific (Jakarta)
+ static const char AP_NORTHEAST_1[] = "ap-northeast-1"; // Asia Pacific (Tokyo)
+ static const char AP_NORTHEAST_2[] = "ap-northeast-2"; // Asia Pacific (Seoul)
+ static const char AP_NORTHEAST_3[] = "ap-northeast-3"; // Asia Pacific (Osaka)
+ static const char SA_EAST_1[] = "sa-east-1"; // South America (Sao Paulo)
+ static const char CN_NORTH_1[] = "cn-north-1"; // China (Beijing)
+ static const char CN_NORTHWEST_1[] = "cn-northwest-1"; // China (Ningxia)
+ static const char CA_CENTRAL_1[] = "ca-central-1"; // Canada (Central)
+ static const char ME_SOUTH_1[] = "me-south-1"; // Middle East (Bahrain)
+ static const char ME_CENTRAL_1[] = "me-central-1"; // Middle East (UAE)
+ static const char AF_SOUTH_1[] = "af-south-1"; // Africa (Cape Town)
+ static const char US_GOV_WEST_1[] = "us-gov-west-1"; // AWS GovCloud (US-West)
+ static const char US_GOV_EAST_1[] = "us-gov-east-1"; // AWS GovCloud (US-East)
+ static const char US_ISO_EAST_1[] = "us-iso-east-1"; // US ISO East
+ static const char US_ISOB_EAST_1[] = "us-isob-east-1"; // US ISOB East (Ohio)
+ static const char US_ISO_WEST_1[] = "us-iso-west-1"; // US ISO West
// If a pseudo region, for example, aws-global or us-east-1-fips is provided, it should be converted to the region name used for signing.
Aws::String AWS_CORE_API ComputeSignerRegion(const Aws::String& region);
+
+ // A FIPs region starts with "fips-" or ends with "-fips".
+ bool AWS_CORE_API IsFipsRegion(const Aws::String& region);
}
} // namespace Aws
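Note: the two helpers appended to Region.h normalise region names before signing. A short sketch, assuming (as in the upstream header) that the constants and helpers live in the Aws::Region namespace:

    #include <aws/core/Region.h>

    void NormaliseRegions()
    {
        // Pseudo regions such as "aws-global" are converted to the concrete
        // region name used for signing.
        Aws::String signerRegion = Aws::Region::ComputeSignerRegion(Aws::Region::AWS_GLOBAL);

        // A FIPS region starts with "fips-" or ends with "-fips".
        bool fips  = Aws::Region::IsFipsRegion("us-east-1-fips");       // true
        bool plain = Aws::Region::IsFipsRegion(Aws::Region::US_EAST_1); // false

        (void)signerRegion; (void)fips; (void)plain;
    }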
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/VersionConfig.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/VersionConfig.h
index fd965734a6..0dedf23c9a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/VersionConfig.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/VersionConfig.h
@@ -2,5 +2,9 @@
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
+#pragma once
-#define AWS_SDK_VERSION_STRING "1.8.186"
+#define AWS_SDK_VERSION_STRING "1.11.37"
+#define AWS_SDK_VERSION_MAJOR 1
+#define AWS_SDK_VERSION_MINOR 11
+#define AWS_SDK_VERSION_PATCH 37
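Note: with numeric version macros now available next to the version string, downstream code can gate on the SDK version at compile time. A minimal sketch; the guard macro defined here is illustrative, not part of the SDK:

    #include <aws/core/VersionConfig.h>

    // Enable code paths that rely on 1.11-era additions (request compression,
    // endpoint providers, CRT logging) only when building against 1.11+.
    #if AWS_SDK_VERSION_MAJOR > 1 || (AWS_SDK_VERSION_MAJOR == 1 && AWS_SDK_VERSION_MINOR >= 11)
    #  define HAS_AWS_SDK_1_11_FEATURES 1
    #else
    #  define HAS_AWS_SDK_1_11_FEATURES 0
    #endif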
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSigner.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSigner.h
index 8061ee1e25..0c4a26dbd3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSigner.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSigner.h
@@ -5,421 +5,11 @@
#pragma once
-#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/auth/signer/AWSAuthSignerBase.h>
+#include <aws/core/auth/signer/AWSAuthSignerCommon.h>
-#include <aws/core/Region.h>
-#include <aws/core/utils/memory/AWSMemory.h>
-#include <aws/core/utils/memory/stl/AWSSet.h>
-#include <aws/core/utils/DateTime.h>
-#include <aws/core/utils/Array.h>
-#include <aws/core/utils/threading/ReaderWriterLock.h>
-#include <aws/core/utils/crypto/Sha256.h>
-#include <aws/core/utils/crypto/Sha256HMAC.h>
-
-#include <memory>
-#include <atomic>
-#include <chrono>
-
-namespace Aws
-{
- namespace Http
- {
- class HttpClientFactory;
- class HttpRequest;
- } // namespace Http
-
- namespace Utils
- {
- namespace Event
- {
- class Message;
- }
- } // namespace Utils
-
- namespace Auth
- {
- class AWSCredentials;
- class AWSCredentialsProvider;
- AWS_CORE_API extern const char SIGV4_SIGNER[];
- AWS_CORE_API extern const char EVENTSTREAM_SIGV4_SIGNER[];
- AWS_CORE_API extern const char SIGNATURE[];
- AWS_CORE_API extern const char NULL_SIGNER[];
- } // namespace Auth
-
- namespace Client
- {
- struct ClientConfiguration;
-
- /**
- * Auth Signer interface. Takes a generic AWS request and applies crypto tamper resistent signatures on the request.
- */
- class AWS_CORE_API AWSAuthSigner
- {
- public:
- AWSAuthSigner() : m_clockSkew() { m_clockSkew.store(std::chrono::milliseconds(0L)); }
- virtual ~AWSAuthSigner() = default;
-
- /**
- * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
- */
- virtual bool SignRequest(Aws::Http::HttpRequest& request) const = 0;
-
- /**
- * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
- * If signBody is false and https is being used then the body of the payload will not be signed.
- * The default virtual function, just calls SignRequest.
- */
- virtual bool SignRequest(Aws::Http::HttpRequest& request, bool signBody) const
- {
- AWS_UNREFERENCED_PARAM(signBody);
- return SignRequest(request);
- }
-
- /**
- * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
- * If signBody is false and https is being used then the body of the payload will not be signed.
- * The default virtual function, just calls SignRequest.
- * Using m_region by default if parameter region is nullptr.
- */
- virtual bool SignRequest(Aws::Http::HttpRequest& request, const char* region, bool signBody) const
- {
- AWS_UNREFERENCED_PARAM(signBody);
- AWS_UNREFERENCED_PARAM(region);
- return SignRequest(request);
- }
-
- /**
- * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
- * If signBody is false and https is being used then the body of the payload will not be signed.
- * The default virtual function, just calls SignRequest.
- * Using m_region by default if parameter region is nullptr.
- * Using m_serviceName by default if parameter serviceName is nullptr.
- */
- virtual bool SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const
- {
- AWS_UNREFERENCED_PARAM(signBody);
- AWS_UNREFERENCED_PARAM(region);
- AWS_UNREFERENCED_PARAM(serviceName);
- return SignRequest(request);
- }
-
- /**
- * Signs a single event message in an event stream.
- * The input message buffer is copied and signed. The message's input buffer will be deallocated and a new
- * buffer will be assigned. The new buffer encodes the original message with its headers as the payload of
- * the new message. The signature of the original message will be added as a header to the new message.
- *
- * A Hex encoded signature of the previous event (or of the HTTP request headers in case of the first event)
- * is provided as the 'priorSignature' parameter. 'priorSignature' will contain the value of the new
- * signature after this call returns successfully.
- *
- * The function returns true if the message is successfully signed.
- */
- virtual bool SignEventMessage(Aws::Utils::Event::Message&, Aws::String& /* priorSignature */) const { return false; }
-
- /**
- * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
- * The URI can then be used in a normal HTTP call until expiration.
- */
- virtual bool PresignRequest(Aws::Http::HttpRequest& request, long long expirationInSeconds) const = 0;
-
- /**
- * Generates a signed Uri using the injected signer. for the supplied uri and http method and region. expirationInSeconds defaults
- * to 0 which is the default 7 days.
- * Using m_region by default if parameter region is nullptr.
- */
- virtual bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, long long expirationInSeconds = 0) const = 0;
-
- /**
- * Generates a signed Uri using the injected signer. for the supplied uri and http method, region, and service name. expirationInSeconds defaults
- * to 0 which is the default 7 days.
- * Using m_region by default if parameter region is nullptr.
- * Using m_serviceName by default if parameter serviceName is nullptr.
- */
- virtual bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, long long expirationInSeconds = 0) const = 0;
-
- /**
- * Return the signer's name
- */
- virtual const char* GetName() const = 0;
-
- /**
- * This handles detection of clock skew between clients and the server and adjusts the clock so that the next request will not
- * fail on the timestamp check.
- */
- virtual void SetClockSkew(const std::chrono::milliseconds& clockSkew) { m_clockSkew = clockSkew; }
-
- /**
- * Gets the timestamp being used by the signer. This may include a clock skew if a clock skew has been detected.
- */
- virtual Aws::Utils::DateTime GetSigningTimestamp() const { return Aws::Utils::DateTime::Now() + GetClockSkewOffset(); }
-
- protected:
- virtual std::chrono::milliseconds GetClockSkewOffset() const { return m_clockSkew.load(); }
-
- std::atomic<std::chrono::milliseconds> m_clockSkew;
- };
-
- /**
- * AWS Auth v4 Signer implementation of the AWSAuthSigner interface. More information on AWS Auth v4 Can be found here:
- * http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
- */
- class AWS_CORE_API AWSAuthV4Signer : public AWSAuthSigner
- {
-
- public:
- /**
- * Even though different payload signing polices, HTTP will force payload signing to be on.
- */
- enum class PayloadSigningPolicy
- {
- /**
- * Sign the request based on the value returned by AmazonWebServiceRequest::SignBody()
- */
- RequestDependent,
- /**
- * Always sign the body of the request.
- */
- Always,
- /**
- * Never sign the body of the request
- */
- Never
- };
- /**
- * credentialsProvider, source of AWS Credentials to sign requests with
- * serviceName, canonical service name to sign with
- * region, region string to use in signature
- * signPayloads, if Always, the payload will have a sha256 computed on the body of the request. If this is set
- * to Never, the sha256 will not be computed on the body. This is only useful for Amazon S3 over Https. If
- * Https is not used then this flag will be ignored. If set to RequestDependent, compute or not is based on
- * the value from AmazonWebServiceRequest::SignBody()
- */
- AWSAuthV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>& credentialsProvider,
- const char* serviceName, const Aws::String& region, PayloadSigningPolicy signingPolicy = PayloadSigningPolicy::RequestDependent,
- bool urlEscapePath = true);
-
- virtual ~AWSAuthV4Signer();
-
- /**
- * AWSAuthV4signer's implementation of virtual function from base class
- * Return Auth Signer's name, here the value is specified in Aws::Auth::DEFAULT_AUTHV4_SIGNER.
- */
- const char* GetName() const override { return Aws::Auth::SIGV4_SIGNER; }
-
- /**
- * Signs the request itself based on info in the request and uri.
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
- */
- bool SignRequest(Aws::Http::HttpRequest& request) const override
- {
- return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), true/*signBody*/);
- }
-
- /**
- * Signs the request itself based on info in the request and uri.
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm. If signBody is false
- * and https is being used then the body of the payload will not be signed.
- */
- bool SignRequest(Aws::Http::HttpRequest& request, bool signBody) const override
- {
- return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), signBody);
- }
-
- /**
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm. If signBody is false
- * and https is being used then the body of the payload will not be signed.
- * Using m_region by default if parameter region is nullptr.
- */
- bool SignRequest(Aws::Http::HttpRequest& request, const char* region, bool signBody) const override
- {
- return SignRequest(request, region, m_serviceName.c_str(), signBody);
- }
-
- /**
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm. If signBody is false
- * and https is being used then the body of the payload will not be signed.
- * Using m_region by default if parameter region is nullptr.
- */
- bool SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const override;
-
- /**
- * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
- * the region the signer was initialized with will be used for the signature.
- * The URI can then be used in a normal HTTP call until expiration.
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
- * expirationInSeconds defaults to 0 which provides a URI good for 7 days.
- */
- bool PresignRequest(Aws::Http::HttpRequest& request, long long expirationInSeconds = 0) const override;
-
- /**
- * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
- * The URI can then be used in a normal HTTP call until expiration.
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
- * expirationInSeconds defaults to 0 which provides a URI good for 7 days.
- * Using m_region by default if parameter region is nullptr.
- */
- bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, long long expirationInSeconds = 0) const override;
-
- /**
- * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
- * The URI can then be used in a normal HTTP call until expiration.
- * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
- * expirationInSeconds defaults to 0 which provides a URI good for 7 days.
- * Using m_region by default if parameter region is nullptr.
- * Using m_serviceName by default if parameter serviceName is nullptr.
- */
- bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, long long expirationInSeconds = 0) const override;
-
- Aws::String GetServiceName() const { return m_serviceName; }
- Aws::String GetRegion() const { return m_region; }
- Aws::String GenerateSignature(const Aws::Auth::AWSCredentials& credentials,
- const Aws::String& stringToSign, const Aws::String& simpleDate) const;
- bool ShouldSignHeader(const Aws::String& header) const;
-
- protected:
- bool m_includeSha256HashHeader;
-
- private:
-
- Aws::String GenerateSignature(const Aws::Auth::AWSCredentials& credentials,
- const Aws::String& stringToSign, const Aws::String& simpleDate, const Aws::String& region,
- const Aws::String& serviceName) const;
-
- Aws::String GenerateSignature(const Aws::String& stringToSign, const Aws::Utils::ByteBuffer& key) const;
- bool ServiceRequireUnsignedPayload(const Aws::String& serviceName) const;
- Aws::String ComputePayloadHash(Aws::Http::HttpRequest&) const;
- Aws::String GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
- const Aws::String& canonicalRequestHash, const Aws::String& region,
- const Aws::String& serviceName) const;
- Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey, const Aws::String& simpleDate) const;
- Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey,
- const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const;
-
-
- std::shared_ptr<Auth::AWSCredentialsProvider> m_credentialsProvider;
- const Aws::String m_serviceName;
- const Aws::String m_region;
- Aws::UniquePtr<Aws::Utils::Crypto::Sha256> m_hash;
- Aws::UniquePtr<Aws::Utils::Crypto::Sha256HMAC> m_HMAC;
-
- Aws::Set<Aws::String> m_unsignedHeaders;
-
- //these next four fields are ONLY for caching purposes and do not change
- //the logical state of the signer. They are marked mutable so the
- //interface can remain const.
- mutable Aws::Utils::ByteBuffer m_partialSignature;
- mutable Aws::String m_currentDateStr;
- mutable Aws::String m_currentSecretKey;
- mutable Utils::Threading::ReaderWriterLock m_partialSignatureLock;
- PayloadSigningPolicy m_payloadSigningPolicy;
- bool m_urlEscapePath;
- };
-
- class AWS_CORE_API AWSAuthEventStreamV4Signer : public AWSAuthSigner
- {
- public:
- AWSAuthEventStreamV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>& credentialsProvider,
- const char* serviceName, const Aws::String& region);
-
- const char* GetName() const override { return Aws::Auth::EVENTSTREAM_SIGV4_SIGNER; }
-
- bool SignEventMessage(Aws::Utils::Event::Message&, Aws::String& priorSignature) const override;
-
- bool SignRequest(Aws::Http::HttpRequest& request) const override
- {
- return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), true);
- }
-
- bool SignRequest(Aws::Http::HttpRequest& request, bool signBody) const override
- {
- return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), signBody);
- }
-
- bool SignRequest(Aws::Http::HttpRequest& request, const char* region, bool signBody) const override
- {
- return SignRequest(request, region, m_serviceName.c_str(), signBody);
- }
-
- bool SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const override;
-
- /**
- * Do nothing
- */
- bool PresignRequest(Aws::Http::HttpRequest&, long long) const override { return false; }
-
- /**
- * Do nothing
- */
- bool PresignRequest(Aws::Http::HttpRequest&, const char*, long long) const override { return false; }
-
- /**
- * Do nothing
- */
- bool PresignRequest(Aws::Http::HttpRequest&, const char*, const char*, long long) const override { return false; }
-
- bool ShouldSignHeader(const Aws::String& header) const;
- private:
- Utils::ByteBuffer GenerateSignature(const Aws::Auth::AWSCredentials& credentials,
- const Aws::String& stringToSign, const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const;
- Utils::ByteBuffer GenerateSignature(const Aws::String& stringToSign, const Aws::Utils::ByteBuffer& key) const;
- Aws::String GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
- const Aws::String& canonicalRequestHash, const Aws::String& region,
- const Aws::String& serviceName) const;
- Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey, const Aws::String& simpleDate) const;
- Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey,
- const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const;
- const Aws::String m_serviceName;
- const Aws::String m_region;
- mutable Aws::Utils::Crypto::Sha256 m_hash;
- mutable Aws::Utils::Crypto::Sha256HMAC m_HMAC;
- mutable Utils::Threading::ReaderWriterLock m_derivedKeyLock;
- mutable Aws::Utils::ByteBuffer m_derivedKey;
- mutable Aws::String m_currentDateStr;
- mutable Aws::String m_currentSecretKey;
- Aws::Vector<Aws::String> m_unsignedHeaders;
- std::shared_ptr<Auth::AWSCredentialsProvider> m_credentialsProvider;
- };
-
-
- /**
- * A no-op implementation of the AWSAuthSigner interface
- */
- class AWS_CORE_API AWSNullSigner : public AWSAuthSigner
- {
- public:
- /**
- * AWSNullSigner's implementation of virtual function from base class
- * Here the returned value is specified in Aws::Auth::NULL_SIGNER.
- */
- const char* GetName() const override { return Aws::Auth::NULL_SIGNER; }
-
- /**
- * Do nothing
- */
- bool SignRequest(Aws::Http::HttpRequest&) const override { return true; }
-
- /**
- * Do nothing
- */
- bool SignEventMessage(Aws::Utils::Event::Message&, Aws::String& /* priorSignature */) const override { return true; }
-
- /**
- * Do nothing
- */
- bool PresignRequest(Aws::Http::HttpRequest&, long long) const override { return false; }
-
- /**
- * Do nothing
- */
- bool PresignRequest(Aws::Http::HttpRequest&, const char*, long long) const override { return false; }
-
- /**
- * Do nothing
- */
- bool PresignRequest(Aws::Http::HttpRequest&, const char*, const char*, long long) const override { return false; }
- };
-
- } // namespace Client
-} // namespace Aws
+#include <aws/core/auth/signer/AWSAuthV4Signer.h>
+#include <aws/core/auth/signer/AWSAuthEventStreamV4Signer.h>
+#include <aws/core/auth/signer/AWSNullSigner.h>
+// Legacy all-in-one header, kept so that existing includes of this file keep working (backward compatibility)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSignerProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSignerProvider.h
index 305c58cd9d..b27d079284 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSignerProvider.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSAuthSignerProvider.h
@@ -6,48 +6,7 @@
#pragma once
-#include <aws/core/Core_EXPORTS.h>
-#include <aws/core/utils/memory/stl/AWSString.h>
-#include <aws/core/utils/memory/stl/AWSVector.h>
-#include <memory>
+#include <aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h>
+#include <aws/core/auth/signer-provider/DefaultAuthSignerProvider.h>
-
-namespace Aws
-{
- namespace Client
- {
- class AWSAuthSigner;
- }
- namespace Auth
- {
- class AWSCredentialsProvider;
-
- class AWS_CORE_API AWSAuthSignerProvider
- {
- public:
- virtual std::shared_ptr<Aws::Client::AWSAuthSigner> GetSigner(const Aws::String& signerName) const = 0;
- virtual void AddSigner(std::shared_ptr<Aws::Client::AWSAuthSigner>& signer) = 0;
- virtual ~AWSAuthSignerProvider() = default;
- };
-
- class AWS_CORE_API DefaultAuthSignerProvider : public AWSAuthSignerProvider
- {
- public:
- /**
- * Creates a Signature-V4 signer provider that supports the different implementations of Signature-V4
- * used for standard and event-stream requests.
- *
- * @param credentialsProvider A provider to retrieve the access/secret key used to derive the signing
- * @param serviceName The canonical name of the AWS service to be used in the signature
- * @param region The AWS region in which the requests will be made.
- */
- DefaultAuthSignerProvider(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
- const Aws::String& serviceName, const Aws::String& region);
- explicit DefaultAuthSignerProvider(const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer);
- void AddSigner(std::shared_ptr<Aws::Client::AWSAuthSigner>& signer) override;
- std::shared_ptr<Aws::Client::AWSAuthSigner> GetSigner(const Aws::String& signerName) const override;
- private:
- Aws::Vector<std::shared_ptr<Aws::Client::AWSAuthSigner>> m_signers;
- };
- }
-}
+// Legacy all-in-one header, kept so that existing includes of this file keep working (backward compatibility)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSBearerToken.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSBearerToken.h
new file mode 100644
index 0000000000..9565dd3802
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSBearerToken.h
@@ -0,0 +1,113 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/DateTime.h>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ /**
+ * Simple data object around an AWS bearer token
+ */
+ class AWS_CORE_API AWSBearerToken
+ {
+ public:
+ /**
+ * Initializes an empty token.
+ * Empty token is not expired by default.
+ * Token expires only if an expiration date is explicitly set.
+ */
+ AWSBearerToken() : m_expiration((std::chrono::time_point<std::chrono::system_clock>::max)())
+ {
+ }
+
+ /**
+ * Initializes object with token.
+ * Expiration date is set to "never expire".
+ */
+ AWSBearerToken(const Aws::String& token) :
+ m_token(token),
+ m_expiration((std::chrono::time_point<std::chrono::system_clock>::max)())
+ {
+ }
+
+ /**
+ * Initializes object with token and expiration date.
+ */
+ AWSBearerToken(const Aws::String& token, Aws::Utils::DateTime expiration) :
+ m_token(token),
+ m_expiration(expiration)
+ {
+ }
+
+ /**
+ * True if the token has not been initialized or has been initialized to an empty value.
+ * Expiration date does not affect the result of this function.
+ */
+ inline bool IsEmpty() const { return m_token.empty(); }
+
+ inline bool IsExpired() const { return m_expiration <= Aws::Utils::DateTime::Now(); }
+
+ inline bool IsExpiredOrEmpty() const { return IsEmpty() || IsExpired(); }
+
+ /**
+ * Gets the underlying token
+ */
+ inline const Aws::String& GetToken() const
+ {
+ return m_token;
+ }
+
+ /**
+ * Gets the expiration date of the token
+ */
+ inline Aws::Utils::DateTime GetExpiration() const
+ {
+ return m_expiration;
+ }
+
+ /**
+ * Sets the underlying token. Copies from the parameter token
+ */
+ inline void SetToken(const Aws::String& token)
+ {
+ m_token = token;
+ }
+
+ /**
+ * Sets the underlying token. Moves from the parameter token
+ */
+ inline void SetToken(Aws::String&& token)
+ {
+ m_token = std::move(token);
+ }
+
+ /**
+ * Sets the expiration date of the credential
+ */
+ inline void SetExpiration(const Aws::Utils::DateTime& expiration)
+ {
+ m_expiration = expiration;
+ }
+
+ /**
+ * Sets the expiration date of the credential
+ */
+ inline void SetExpiration(Aws::Utils::DateTime&& expiration)
+ {
+ m_expiration = std::move(expiration);
+ }
+
+ private:
+ Aws::String m_token;
+ Aws::Utils::DateTime m_expiration;
+ };
+ }
+}
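
The new AWSBearerToken is a plain value type: a token string plus an expiration that defaults to "never expires". A minimal usage sketch (the token literals and the one-hour expiry are made up for illustration):

    #include <aws/core/auth/AWSBearerToken.h>
    #include <aws/core/utils/DateTime.h>
    #include <chrono>
    #include <iostream>

    int main()
    {
        // No expiration given, so the token never expires on its own.
        Aws::Auth::AWSBearerToken permanent("example-token-value");

        // A token can also carry an explicit expiration, one hour from now here.
        Aws::Auth::AWSBearerToken shortLived(
            "example-short-lived-token",
            Aws::Utils::DateTime::Now() + std::chrono::milliseconds(3600 * 1000));

        std::cout << "permanent usable: " << !permanent.IsExpiredOrEmpty() << "\n";
        std::cout << "short-lived usable: " << !shortLived.IsExpiredOrEmpty() << "\n";
        return 0;
    }
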
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentials.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentials.h
index 1c14b955fc..0152d3e5a7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentials.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentials.h
@@ -68,7 +68,7 @@ namespace Aws
}
/**
- * If credentials haven't been initialized or been initialized to emtpy values.
+ * If credentials haven't been initialized or been initialized to empty values.
* Expiration date does not affect the result of this function.
*/
inline bool IsEmpty() const { return m_accessKeyId.empty() && m_secretKey.empty(); }
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProvider.h
index a3a46964e8..e8fec174ab 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProvider.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProvider.h
@@ -217,6 +217,7 @@ namespace Aws
void Reload() override;
private:
+ bool ExpiresSoon() const;
void RefreshIfExpired();
std::shared_ptr<Aws::Config::AWSProfileConfigLoader> m_ec2MetadataConfigLoader;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProviderChain.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProviderChain.h
index dae8cf5a42..ffcd57dde4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProviderChain.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/AWSCredentialsProviderChain.h
@@ -32,7 +32,7 @@ namespace Aws
/**
* Gets all providers stored in this chain.
*/
- const Aws::Vector<std::shared_ptr<AWSCredentialsProvider>>& GetProviders() { return m_providerChain; }
+ const Aws::Vector<std::shared_ptr<AWSCredentialsProvider>>& GetProviders() const { return m_providerChain; }
protected:
/**
@@ -62,6 +62,8 @@ namespace Aws
* and InstanceProfileCredentialsProvider in that order.
*/
DefaultAWSCredentialsProviderChain();
+
+ DefaultAWSCredentialsProviderChain(const DefaultAWSCredentialsProviderChain& chain);
};
} // namespace Auth
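
The provider-chain change above is small but convenient: GetProviders() is now const, so a chain held by const reference can still be inspected, and DefaultAWSCredentialsProviderChain gains a copy constructor. A brief sketch of what the const accessor enables (the function name is illustrative):

    #include <aws/core/auth/AWSCredentialsProviderChain.h>
    #include <iostream>

    // Before this change GetProviders() was non-const, so it could not be
    // called through a const reference like this one.
    void DumpChainSize(const Aws::Auth::AWSCredentialsProviderChain& chain)
    {
        std::cout << "providers in chain: " << chain.GetProviders().size() << "\n";
    }
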
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/SSOCredentialsProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/SSOCredentialsProvider.h
index 288beccb3b..3b476177b9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/SSOCredentialsProvider.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/SSOCredentialsProvider.h
@@ -8,6 +8,7 @@
#include <aws/core/Core_EXPORTS.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h>
#include <memory>
namespace Aws {
@@ -39,6 +40,8 @@ namespace Aws {
Aws::String m_ssoRegion;
// The expiration time of the accessToken.
Aws::Utils::DateTime m_expiresAt;
+ // The SSO Token Provider
+ Aws::Auth::SSOBearerTokenProvider m_bearerTokenProvider;
void Reload() override;
void RefreshIfExpired();
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/STSCredentialsProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/STSCredentialsProvider.h
index 92d997c7ca..720006592c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/STSCredentialsProvider.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/STSCredentialsProvider.h
@@ -46,6 +46,7 @@ namespace Aws
Aws::String m_sessionName;
Aws::String m_token;
bool m_initialized;
+ bool ExpiresSoon() const;
};
} // namespace Auth
} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h
new file mode 100644
index 0000000000..d7608a351c
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h
@@ -0,0 +1,30 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/auth/AWSBearerToken.h>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ /**
+ * Abstract class for retrieving Bearer Token. Create a derived class from this to allow
+ * various methods of storing and retrieving auth bearer tokens.
+ */
+ class AWS_CORE_API AWSBearerTokenProviderBase
+ {
+ public:
+ virtual ~AWSBearerTokenProviderBase() = default;
+
+ /**
+ * The core of the bearer token provider interface. Override this method to control how bearer tokens are retrieved.
+ */
+ virtual AWSBearerToken GetAWSBearerToken() = 0;
+ };
+ } // namespace Auth
+} // namespace Aws
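
AWSBearerTokenProviderBase is the extension point of the new bearer-token path: implementing GetAWSBearerToken() is all that is needed to plug in a custom token source. A sketch of a provider that reads the token from an environment variable (the class name and the MY_BEARER_TOKEN variable are hypothetical):

    #include <aws/core/auth/AWSBearerToken.h>
    #include <aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h>
    #include <cstdlib>

    // Hypothetical provider: pulls a bearer token from the process environment.
    class EnvBearerTokenProvider : public Aws::Auth::AWSBearerTokenProviderBase
    {
    public:
        Aws::Auth::AWSBearerToken GetAWSBearerToken() override
        {
            const char* raw = std::getenv("MY_BEARER_TOKEN");
            if (!raw)
            {
                // An empty token tells callers nothing is available
                // (IsExpiredOrEmpty() will return true).
                return Aws::Auth::AWSBearerToken();
            }
            // No expiration is set, so the token is treated as non-expiring.
            return Aws::Auth::AWSBearerToken(Aws::String(raw));
        }
    };
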
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderChainBase.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderChainBase.h
new file mode 100644
index 0000000000..e5f3b828bc
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/AWSBearerTokenProviderChainBase.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <memory>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ /**
+ * Abstract class for providing chains of bearer token providers
+ */
+ class AWS_CORE_API AWSBearerTokenProviderChainBase : public AWSBearerTokenProviderBase
+ {
+ public:
+ virtual ~AWSBearerTokenProviderChainBase() = default;
+
+ /**
+ * Gets all providers stored in this chain.
+ */
+ virtual const Aws::Vector<std::shared_ptr<AWSBearerTokenProviderBase>>& GetProviders() = 0;
+ };
+ } // namespace Auth
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h
new file mode 100644
index 0000000000..8e8a4508a3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h
@@ -0,0 +1,49 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/auth/bearer-token-provider/AWSBearerTokenProviderChainBase.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <memory>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ /**
+ * Default built-in AWSBearerTokenProviderChainBase implementation that includes Aws::Auth::SSOBearerTokenProvider in the chain.
+ */
+ class AWS_CORE_API DefaultBearerTokenProviderChain : public AWSBearerTokenProviderChainBase
+ {
+ public:
+ DefaultBearerTokenProviderChain();
+ virtual ~DefaultBearerTokenProviderChain() = default;
+
+ /**
+ * Return bearer token, implementation of a base class interface
+ */
+ virtual AWSBearerToken GetAWSBearerToken() override;
+
+ /**
+ * Gets all providers stored in this chain.
+ */
+ const Aws::Vector<std::shared_ptr<AWSBearerTokenProviderBase>>& GetProviders() override
+ {
+ return m_providerChain;
+ }
+
+ protected:
+ /**
+ * Adds a provider to the back of the chain.
+ */
+ void AddProvider(const std::shared_ptr<AWSBearerTokenProviderBase>& provider) { m_providerChain.push_back(provider); }
+
+ Aws::Vector<std::shared_ptr<AWSBearerTokenProviderBase> > m_providerChain;
+ };
+
+ } // namespace Auth
+} // namespace Aws
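
Today the default chain only wraps the SSO provider, but it can already be inspected and resolved like any other chain. A hedged sketch of pulling a token through it (SDK init/shutdown shown for completeness):

    #include <aws/core/Aws.h>
    #include <aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::Auth::DefaultBearerTokenProviderChain chain;

            // Walks the chained providers (currently just SSO) until one yields a token.
            Aws::Auth::AWSBearerToken token = chain.GetAWSBearerToken();
            std::cout << "providers in chain: " << chain.GetProviders().size() << "\n";
            std::cout << "token available: " << !token.IsExpiredOrEmpty() << "\n";
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
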
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h
new file mode 100644
index 0000000000..b0656caa8e
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h
@@ -0,0 +1,63 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h>
+
+#include <aws/core/internal/AWSHttpResourceClient.h>
+#include <aws/core/utils/threading/ReaderWriterLock.h>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ /**
+ * Supports usage of an SSO bearer token.
+ * The SSO token provider assumes that an SSO access token has already been resolved and cached to disk.
+ */
+ class AWS_CORE_API SSOBearerTokenProvider : public AWSBearerTokenProviderBase
+ {
+ public:
+ SSOBearerTokenProvider();
+ explicit SSOBearerTokenProvider(const Aws::String& awsProfile);
+ /**
+ * Retrieves the bearer token if found, otherwise returns an empty token.
+ */
+ AWSBearerToken GetAWSBearerToken() override;
+
+ protected:
+ struct CachedSsoToken
+ {
+ public:
+ Aws::String accessToken;
+ Aws::Utils::DateTime expiresAt;
+ Aws::String refreshToken;
+ Aws::String clientId;
+ Aws::String clientSecret;
+ Aws::Utils::DateTime registrationExpiresAt;
+ Aws::String region;
+ Aws::String startUrl;
+ };
+
+ static const size_t REFRESH_ATTEMPT_INTERVAL_S;
+ static const size_t REFRESH_WINDOW_BEFORE_EXPIRATION_S;
+ // Profile description variables
+ Aws::UniquePtr<Aws::Internal::SSOCredentialsClient> m_client;
+ Aws::String m_profileToUse;
+
+ mutable Aws::Auth::AWSBearerToken m_token;
+ mutable Aws::Utils::DateTime m_lastUpdateAttempt;
+
+ mutable Aws::Utils::Threading::ReaderWriterLock m_reloadLock;
+
+ void Reload();
+ void RefreshFromSso();
+ CachedSsoToken LoadAccessTokenFile() const;
+ bool WriteAccessTokenFile(const CachedSsoToken& token) const;
+ };
+ } // namespace Auth
+} // namespace Aws
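
SSOBearerTokenProvider only reads a token that `aws sso login` has already resolved and cached on disk; it does not start a login flow itself. A minimal sketch, assuming a profile named "my-sso-profile" exists in the local AWS config (the profile name is a placeholder):

    #include <aws/core/Aws.h>
    #include <aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            // Resolves the cached SSO access token associated with this profile.
            Aws::Auth::SSOBearerTokenProvider provider("my-sso-profile");
            Aws::Auth::AWSBearerToken token = provider.GetAWSBearerToken();
            if (token.IsExpiredOrEmpty())
            {
                std::cout << "no valid cached SSO token; run `aws sso login` first\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
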
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h
new file mode 100644
index 0000000000..0577cef77e
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+
+#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <memory>
+
+namespace Aws
+{
+ namespace Client
+ {
+ class AWSAuthSigner;
+ }
+ namespace Auth
+ {
+ class AWSCredentialsProvider;
+
+ class AWS_CORE_API AWSAuthSignerProvider
+ {
+ public:
+ virtual std::shared_ptr<Aws::Client::AWSAuthSigner> GetSigner(const Aws::String& signerName) const = 0;
+ virtual void AddSigner(std::shared_ptr<Aws::Client::AWSAuthSigner>& signer) = 0;
+ virtual ~AWSAuthSignerProvider() = default;
+ };
+ }
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/BearerTokenAuthSignerProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/BearerTokenAuthSignerProvider.h
new file mode 100644
index 0000000000..09b83a8e27
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/BearerTokenAuthSignerProvider.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h>
+#include <aws/core/utils/memory/stl/AWSSet.h>
+#include <aws/core/auth/signer/AWSAuthBearerSigner.h>
+
+
+namespace Aws
+{
+ namespace Auth
+ {
+ class AWSCredentialsProvider;
+
+ class AWS_CORE_API BearerTokenAuthSignerProvider : public AWSAuthSignerProvider
+ {
+ public:
+ /**
+ * Creates a bearer token auth signer provider that supplies an AWSAuthBearerSigner.
+ *
+ * @param bearerTokenProvider A provider used to retrieve the bearer token applied to outgoing requests.
+ */
+ BearerTokenAuthSignerProvider() = delete;
+ BearerTokenAuthSignerProvider(const std::shared_ptr<Aws::Auth::AWSBearerTokenProviderBase> bearerTokenProvider);
+ void AddSigner(std::shared_ptr<Aws::Client::AWSAuthSigner>& signer) override;
+ std::shared_ptr<Aws::Client::AWSAuthSigner> GetSigner(const Aws::String& signerName) const override;
+ private:
+ Aws::Vector<std::shared_ptr<Aws::Client::AWSAuthSigner>> m_signers;
+ };
+ }
+}
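
BearerTokenAuthSignerProvider is the signer-provider counterpart: it is seeded with a token provider and hands out the bearer signer by name, with BEARER_SIGNER (declared in AWSAuthBearerSigner.h) as the lookup key. A sketch using the default token chain as the source (the allocation tag is arbitrary):

    #include <aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h>
    #include <aws/core/auth/signer-provider/BearerTokenAuthSignerProvider.h>
    #include <aws/core/auth/signer/AWSAuthBearerSigner.h>
    #include <aws/core/utils/memory/AWSMemory.h>
    #include <memory>

    std::shared_ptr<Aws::Client::AWSAuthSigner> MakeBearerSigner()
    {
        auto tokenProvider =
            Aws::MakeShared<Aws::Auth::DefaultBearerTokenProviderChain>("example-alloc-tag");

        // The provider constructs one AWSAuthBearerSigner on top of tokenProvider.
        Aws::Auth::BearerTokenAuthSignerProvider signerProvider(tokenProvider);

        // Signers are looked up by name; BEARER_SIGNER selects the bearer implementation.
        return signerProvider.GetSigner(Aws::Auth::BEARER_SIGNER);
    }
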
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/DefaultAuthSignerProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/DefaultAuthSignerProvider.h
new file mode 100644
index 0000000000..4c31aaaa32
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer-provider/DefaultAuthSignerProvider.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/auth/signer-provider/AWSAuthSignerProviderBase.h>
+
+#include <aws/core/auth/signer/AWSAuthV4Signer.h>
+
+
+namespace Aws
+{
+ namespace Auth
+ {
+ class AWSCredentialsProvider;
+
+ class AWS_CORE_API DefaultAuthSignerProvider : public AWSAuthSignerProvider
+ {
+ public:
+ /**
+ * Creates a Signature-V4 signer provider that supports the different implementations of Signature-V4
+ * used for standard and event-stream requests.
+ *
+ * @param credentialsProvider A provider to retrieve the access/secret key used to derive the signing key
+ * @param serviceName The canonical name of the AWS service to be used in the signature
+ * @param region The AWS region in which the requests will be made.
+ */
+ DefaultAuthSignerProvider(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
+ const Aws::String& serviceName, const Aws::String& region,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signingPolicy = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent,
+ bool urlEscapePath = true);
+ explicit DefaultAuthSignerProvider(const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer);
+ void AddSigner(std::shared_ptr<Aws::Client::AWSAuthSigner>& signer) override;
+ std::shared_ptr<Aws::Client::AWSAuthSigner> GetSigner(const Aws::String& signerName) const override;
+ private:
+ Aws::Vector<std::shared_ptr<Aws::Client::AWSAuthSigner>> m_signers;
+ };
+ }
+}
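
DefaultAuthSignerProvider keeps its previous role but now also forwards the payload-signing policy and URL-escaping flag to the V4 signer it builds. A sketch of wiring it up with the default credentials chain (the service name, region and allocation tag are placeholders):

    #include <aws/core/auth/AWSCredentialsProviderChain.h>
    #include <aws/core/auth/signer-provider/DefaultAuthSignerProvider.h>
    #include <aws/core/auth/signer/AWSAuthV4Signer.h>
    #include <aws/core/utils/memory/AWSMemory.h>
    #include <memory>

    std::shared_ptr<Aws::Client::AWSAuthSigner> MakeSigV4Signer()
    {
        auto credentials =
            Aws::MakeShared<Aws::Auth::DefaultAWSCredentialsProviderChain>("example-alloc-tag");

        // Registers the standard SigV4 signer and the event-stream SigV4 signer,
        // both retrievable by name.
        Aws::Auth::DefaultAuthSignerProvider provider(
            credentials,
            "s3",           // canonical service name (placeholder)
            "us-east-1",    // region (placeholder)
            Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent,
            /*urlEscapePath=*/true);

        return provider.GetSigner(Aws::Auth::SIGV4_SIGNER);
    }
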
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthBearerSigner.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthBearerSigner.h
new file mode 100644
index 0000000000..655aeb1a00
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthBearerSigner.h
@@ -0,0 +1,106 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include "aws/core/auth/signer/AWSAuthSignerBase.h"
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Utils
+ {
+ namespace Event
+ {
+ class Message;
+ }
+ } // namespace Utils
+
+ namespace Auth
+ {
+ class AWSBearerTokenProviderBase;
+ AWS_CORE_API extern const char BEARER_SIGNER[];
+ } // namespace Auth
+
+ namespace Client
+ {
+ class AWS_CORE_API AWSAuthBearerSigner : public AWSAuthSigner
+ {
+
+ public:
+ /**
+ * An implementation of a signer interface that uses bearer token auth signature.
+ */
+ AWSAuthBearerSigner(const std::shared_ptr<Aws::Auth::AWSBearerTokenProviderBase> bearerTokenProvider)
+ : m_bearerTokenProvider(bearerTokenProvider)
+ {}
+
+ virtual ~AWSAuthBearerSigner() {};
+
+ /**
+ * Return the signer's name
+ */
+ const char* GetName() const override
+ {
+ return Aws::Auth::BEARER_SIGNER;
+ }
+
+ /**
+ * Sign request with a bearer auth token
+ * @return true if success, false if fail to sign
+ */
+ bool SignRequest(Aws::Http::HttpRequest& ) const override;
+
+ /**
+ * Dummy function to satisfy the interface requirements of a base Signer interface
+ * additional arguments are not used.
+ * @return true if success, false if fail to sign
+ */
+ bool SignRequest(Aws::Http::HttpRequest& ioRequest, const char* /*region*/, const char* /*serviceName*/, bool /*signBody*/) const override
+ {
+ return SignRequest(ioRequest);
+ }
+
+ /**
+ * Dummy function to satisfy the interface requirements of a base Signer interface
+ * @return true if success, false if fail to sign
+ */
+ bool PresignRequest(Aws::Http::HttpRequest& ioRequest, long long /*expirationInSeconds = 0*/) const override
+ {
+ return SignRequest(ioRequest);
+ }
+
+ /**
+ * Dummy function to satisfy the interface requirements of a base Signer interface
+ * additional arguments are not used.
+ * @return true if success, false if fail to sign
+ */
+ bool PresignRequest(Aws::Http::HttpRequest& ioRequest, const char* /*region*/, long long expirationInSeconds = 0) const override
+ {
+ return PresignRequest(ioRequest, expirationInSeconds);
+ }
+
+ /**
+ * Dummy function to satisfy the interface requirements of a base Signer interface
+ * additional arguments are not used.
+ * @return true if success, false if fail to sign
+ */
+ bool PresignRequest(Aws::Http::HttpRequest& ioRequest, const char* /*region*/, const char* /*serviceName*/, long long expirationInSeconds = 0) const override
+ {
+ return PresignRequest(ioRequest, expirationInSeconds);
+ }
+
+ protected:
+ std::shared_ptr<Aws::Auth::AWSBearerTokenProviderBase> m_bearerTokenProvider;
+ };
+
+ } // namespace Client
+} // namespace Aws
+
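
AWSAuthBearerSigner is the signer that consumes a bearer token: SignRequest pulls a token from the injected provider and attaches it to the request, while the presign overloads simply fall back to plain signing (the implementation lives in the corresponding .cpp, which is not part of this header diff). A usage sketch against a hand-built request; the endpoint URI is illustrative only:

    #include <aws/core/Aws.h>
    #include <aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h>
    #include <aws/core/auth/signer/AWSAuthBearerSigner.h>
    #include <aws/core/http/HttpClientFactory.h>
    #include <aws/core/http/HttpRequest.h>
    #include <aws/core/utils/stream/ResponseStream.h>
    #include <aws/core/utils/memory/AWSMemory.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            auto tokenProvider =
                Aws::MakeShared<Aws::Auth::DefaultBearerTokenProviderChain>("example-alloc-tag");
            Aws::Client::AWSAuthBearerSigner signer(tokenProvider);

            // Build a request by hand; normally a service client does this.
            auto request = Aws::Http::CreateHttpRequest(
                Aws::String("https://example.amazonaws.com/"),
                Aws::Http::HttpMethod::HTTP_GET,
                Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);

            std::cout << "signed: " << signer.SignRequest(*request) << "\n";
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
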
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthEventStreamV4Signer.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthEventStreamV4Signer.h
new file mode 100644
index 0000000000..f9c4d4fedf
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthEventStreamV4Signer.h
@@ -0,0 +1,114 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include "aws/core/auth/signer/AWSAuthSignerBase.h"
+
+#include <aws/core/utils/Array.h>
+#include <aws/core/utils/memory/stl/AWSSet.h>
+#include <aws/core/utils/threading/ReaderWriterLock.h>
+#include <aws/core/utils/crypto/Sha256.h>
+#include <aws/core/utils/crypto/Sha256HMAC.h>
+
+#include <aws/crt/auth/Sigv4Signing.h>
+
+#include <memory>
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Utils
+ {
+ namespace Event
+ {
+ class Message;
+ }
+ } // namespace Utils
+
+ namespace Auth
+ {
+ class AWSCredentials;
+ class AWSCredentialsProvider;
+
+ AWS_CORE_API extern const char EVENTSTREAM_SIGV4_SIGNER[];
+ } // namespace Auth
+
+ namespace Client
+ {
+ /**
+ * AWS Auth EventStream v4 Signer implementation of the AWSAuthSigner interface.
+ */
+ class AWS_CORE_API AWSAuthEventStreamV4Signer : public AWSAuthSigner
+ {
+ public:
+ AWSAuthEventStreamV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>& credentialsProvider,
+ const char* serviceName, const Aws::String& region);
+
+ const char* GetName() const override { return Aws::Auth::EVENTSTREAM_SIGV4_SIGNER; }
+
+ bool SignEventMessage(Aws::Utils::Event::Message&, Aws::String& priorSignature) const override;
+
+ bool SignRequest(Aws::Http::HttpRequest& request) const override
+ {
+ return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), true);
+ }
+
+ bool SignRequest(Aws::Http::HttpRequest& request, bool signBody) const override
+ {
+ return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), signBody);
+ }
+
+ bool SignRequest(Aws::Http::HttpRequest& request, const char* region, bool signBody) const override
+ {
+ return SignRequest(request, region, m_serviceName.c_str(), signBody);
+ }
+
+ bool SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const override;
+
+ /**
+ * Do nothing
+ */
+ bool PresignRequest(Aws::Http::HttpRequest&, long long) const override { return false; }
+
+ /**
+ * Do nothing
+ */
+ bool PresignRequest(Aws::Http::HttpRequest&, const char*, long long) const override { return false; }
+
+ /**
+ * Do nothing
+ */
+ bool PresignRequest(Aws::Http::HttpRequest&, const char*, const char*, long long) const override { return false; }
+
+ bool ShouldSignHeader(const Aws::String& header) const;
+ private:
+ Utils::ByteBuffer GenerateSignature(const Aws::Auth::AWSCredentials& credentials,
+ const Aws::String& stringToSign, const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const;
+ Utils::ByteBuffer GenerateSignature(const Aws::String& stringToSign, const Aws::Utils::ByteBuffer& key) const;
+ Aws::String GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
+ const Aws::String& canonicalRequestHash, const Aws::String& region,
+ const Aws::String& serviceName) const;
+ Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey, const Aws::String& simpleDate) const;
+ Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey,
+ const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const;
+ const Aws::String m_serviceName;
+ const Aws::String m_region;
+ mutable Aws::Utils::Crypto::Sha256 m_hash;
+ mutable Aws::Utils::Crypto::Sha256HMAC m_HMAC;
+ mutable Utils::Threading::ReaderWriterLock m_derivedKeyLock;
+ mutable Aws::Utils::ByteBuffer m_derivedKey;
+ mutable Aws::String m_currentDateStr;
+ mutable Aws::String m_currentSecretKey;
+ Aws::Vector<Aws::String> m_unsignedHeaders;
+ std::shared_ptr<Auth::AWSCredentialsProvider> m_credentialsProvider;
+ };
+ } // namespace Client
+} // namespace Aws
+
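
The event-stream signer now lives in its own header. Its distinguishing API is SignEventMessage, which, per the base-class contract, signs each event frame with the signature of the previous one (or of the HTTP request for the first frame). A construction-only sketch; the service name, region and allocation tag are placeholders, and driving SignEventMessage is normally left to the SDK's event-stream machinery:

    #include <aws/core/auth/AWSCredentialsProviderChain.h>
    #include <aws/core/auth/signer/AWSAuthEventStreamV4Signer.h>
    #include <aws/core/utils/memory/AWSMemory.h>
    #include <memory>

    std::shared_ptr<Aws::Client::AWSAuthEventStreamV4Signer> MakeEventStreamSigner()
    {
        auto credentials =
            Aws::MakeShared<Aws::Auth::DefaultAWSCredentialsProviderChain>("example-alloc-tag");

        // Event streaming is used by services such as Transcribe and S3 Select.
        return Aws::MakeShared<Aws::Client::AWSAuthEventStreamV4Signer>(
            "example-alloc-tag", credentials, "transcribe", "us-east-1");
    }
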
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerBase.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerBase.h
new file mode 100644
index 0000000000..54be880a37
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerBase.h
@@ -0,0 +1,142 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include "aws/core/Core_EXPORTS.h"
+#include "aws/core/utils/DateTime.h"
+
+#include <memory>
+#include <atomic>
+#include <chrono>
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Utils
+ {
+ namespace Event
+ {
+ class Message;
+ }
+ } // namespace Utils
+
+ namespace Client
+ {
+ /**
+ * Auth Signer interface. Takes a generic AWS request and applies crypto tamper-resistant signatures on the request.
+ */
+ class AWS_CORE_API AWSAuthSigner
+ {
+ public:
+ AWSAuthSigner() : m_clockSkew() { m_clockSkew.store(std::chrono::milliseconds(0L)); }
+ virtual ~AWSAuthSigner() = default;
+
+ /**
+ * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
+ */
+ virtual bool SignRequest(Aws::Http::HttpRequest& request) const = 0;
+
+ /**
+ * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
+ * If signBody is false and https is being used then the body of the payload will not be signed.
+ * The default virtual function, just calls SignRequest.
+ */
+ virtual bool SignRequest(Aws::Http::HttpRequest& request, bool signBody) const
+ {
+ AWS_UNREFERENCED_PARAM(signBody);
+ return SignRequest(request);
+ }
+
+ /**
+ * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
+ * If signBody is false and https is being used then the body of the payload will not be signed.
+ * The default virtual function, just calls SignRequest.
+ * Using m_region by default if parameter region is nullptr.
+ */
+ virtual bool SignRequest(Aws::Http::HttpRequest& request, const char* region, bool signBody) const
+ {
+ AWS_UNREFERENCED_PARAM(signBody);
+ AWS_UNREFERENCED_PARAM(region);
+ return SignRequest(request);
+ }
+
+ /**
+ * Signs the request itself (usually by adding a signature header) based on info in the request and uri.
+ * If signBody is false and https is being used then the body of the payload will not be signed.
+ * The default virtual function, just calls SignRequest.
+ * Using m_region by default if parameter region is nullptr.
+ * Using m_serviceName by default if parameter serviceName is nullptr.
+ */
+ virtual bool SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const
+ {
+ AWS_UNREFERENCED_PARAM(signBody);
+ AWS_UNREFERENCED_PARAM(region);
+ AWS_UNREFERENCED_PARAM(serviceName);
+ return SignRequest(request);
+ }
+
+ /**
+ * Signs a single event message in an event stream.
+ * The input message buffer is copied and signed. The message's input buffer will be deallocated and a new
+ * buffer will be assigned. The new buffer encodes the original message with its headers as the payload of
+ * the new message. The signature of the original message will be added as a header to the new message.
+ *
+ * A Hex encoded signature of the previous event (or of the HTTP request headers in case of the first event)
+ * is provided as the 'priorSignature' parameter. 'priorSignature' will contain the value of the new
+ * signature after this call returns successfully.
+ *
+ * The function returns true if the message is successfully signed.
+ */
+ virtual bool SignEventMessage(Aws::Utils::Event::Message&, Aws::String& /* priorSignature */) const { return false; }
+
+ /**
+ * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
+ * The URI can then be used in a normal HTTP call until expiration.
+ */
+ virtual bool PresignRequest(Aws::Http::HttpRequest& request, long long expirationInSeconds) const = 0;
+
+ /**
+ * Generates a signed Uri using the injected signer. for the supplied uri and http method and region. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ * Using m_region by default if parameter region is nullptr.
+ */
+ virtual bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, long long expirationInSeconds = 0) const = 0;
+
+ /**
+ * Generates a signed Uri using the injected signer. for the supplied uri and http method, region, and service name. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ * Using m_region by default if parameter region is nullptr.
+ * Using m_serviceName by default if parameter serviceName is nullptr.
+ */
+ virtual bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, long long expirationInSeconds = 0) const = 0;
+
+ /**
+ * Return the signer's name
+ */
+ virtual const char* GetName() const = 0;
+
+ /**
+ * This handles detection of clock skew between clients and the server and adjusts the clock so that the next request will not
+ * fail on the timestamp check.
+ */
+ virtual void SetClockSkew(const std::chrono::milliseconds& clockSkew) { m_clockSkew = clockSkew; }
+
+ /**
+ * Gets the timestamp being used by the signer. This may include a clock skew if a clock skew has been detected.
+ */
+ virtual Aws::Utils::DateTime GetSigningTimestamp() const { return Aws::Utils::DateTime::Now() + GetClockSkewOffset(); }
+
+ protected:
+ virtual std::chrono::milliseconds GetClockSkewOffset() const { return m_clockSkew.load(); }
+
+ std::atomic<std::chrono::milliseconds> m_clockSkew;
+ };
+ } // namespace Client
+} // namespace Aws
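
AWSAuthSignerBase.h holds the abstract interface shared by all of the split-out signers; only SignRequest, the three PresignRequest overloads and GetName are pure virtual, while the clock-skew handling is inherited as-is. A sketch of the minimum a custom signer must implement (the class and header name below are invented for the example):

    #include <aws/core/auth/signer/AWSAuthSignerBase.h>
    #include <aws/core/http/HttpRequest.h>

    // Hypothetical signer that stamps requests with a fixed header instead of a
    // cryptographic signature; it implements only the pure virtual surface.
    class StaticHeaderSigner : public Aws::Client::AWSAuthSigner
    {
    public:
        const char* GetName() const override { return "StaticHeaderSigner"; }

        bool SignRequest(Aws::Http::HttpRequest& request) const override
        {
            request.SetHeaderValue("x-custom-auth", "static-value");
            return true;
        }

        // Presigning is not supported by this toy signer.
        bool PresignRequest(Aws::Http::HttpRequest&, long long) const override { return false; }
        bool PresignRequest(Aws::Http::HttpRequest&, const char*, long long) const override { return false; }
        bool PresignRequest(Aws::Http::HttpRequest&, const char*, const char*, long long) const override { return false; }
    };
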
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerCommon.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerCommon.h
new file mode 100644
index 0000000000..7ed72b87b7
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerCommon.h
@@ -0,0 +1,23 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/http/HttpTypes.h>
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Auth
+ {
+ AWS_CORE_API extern const char SIGNATURE[];
+ } // namespace Auth
+} // namespace Aws
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerHelper.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerHelper.h
new file mode 100644
index 0000000000..955445b6b6
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthSignerHelper.h
@@ -0,0 +1,45 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/http/HttpTypes.h>
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Auth
+ {
+ class AWSAuthHelper
+ {
+ public:
+ /**
+ * Helper functions used across different signers
+ */
+ static Aws::String CanonicalizeRequestSigningString(Aws::Http::HttpRequest &request, bool urlEscapePath);
+ static Aws::Http::HeaderValueCollection CanonicalizeHeaders(Http::HeaderValueCollection &&headers);
+
+ /**
+ * Static const variables used across different signers
+ */
+ static const char* EQ;
+ static const char* AWS_HMAC_SHA256;
+ static const char* AWS4_REQUEST;
+ static const char* SIGNED_HEADERS;
+ static const char* CREDENTIAL;
+ static const char* NEWLINE;
+ static const char* X_AMZN_TRACE_ID;
+ static const char* X_AMZ_CONTENT_SHA256;
+ static const char* SIGNING_KEY;
+ static const char* SIMPLE_DATE_FORMAT_STR;
+ };
+ } // namespace Auth
+} // namespace Aws
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthV4Signer.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthV4Signer.h
new file mode 100644
index 0000000000..469391a1ca
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSAuthV4Signer.h
@@ -0,0 +1,214 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include "aws/core/auth/signer/AWSAuthSignerBase.h"
+
+#include <aws/core/utils/Array.h>
+#include <aws/core/utils/memory/stl/AWSSet.h>
+#include <aws/core/utils/threading/ReaderWriterLock.h>
+#include <aws/core/utils/crypto/Sha256.h>
+#include <aws/core/utils/crypto/Sha256HMAC.h>
+
+#include <aws/crt/auth/Sigv4Signing.h>
+
+#include <memory>
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Auth
+ {
+ class AWSCredentials;
+ class AWSCredentialsProvider;
+
+ enum class AWSSigningAlgorithm
+ {
+ SIGV4 = static_cast<int>(Aws::Crt::Auth::SigningAlgorithm::SigV4),
+ ASYMMETRIC_SIGV4 = static_cast<int>(Aws::Crt::Auth::SigningAlgorithm::SigV4A),
+ };
+
+ AWS_CORE_API extern const char SIGV4_SIGNER[];
+ AWS_CORE_API extern const char ASYMMETRIC_SIGV4_SIGNER[];
+ } // namespace Auth
+
+ namespace Client
+ {
+ /**
+ * AWS Auth v4 Signer implementation of the AWSAuthSigner interface. More information on AWS Auth v4 Can be found here:
+ * http://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html
+ */
+ class AWS_CORE_API AWSAuthV4Signer : public AWSAuthSigner
+ {
+
+ public:
+ /**
+ * Even though different payload signing policies exist, plain HTTP (without TLS) will force payload signing to be on.
+ */
+ enum class PayloadSigningPolicy
+ {
+ /**
+ * Sign the request based on the value returned by AmazonWebServiceRequest::SignBody()
+ */
+ RequestDependent,
+ /**
+ * Always sign the body of the request.
+ */
+ Always,
+ /**
+ * Never sign the body of the request
+ */
+ Never
+ };
+ /**
+ * credentialsProvider, source of AWS Credentials to sign requests with
+ * serviceName, canonical service name to sign with
+ * region, region string to use in signature
+ * signingPolicy, if Always, the payload will have a sha256 computed on the body of the request. If this is set
+ * to Never, the sha256 will not be computed on the body. This is only useful for Amazon S3 over Https. If
+ * Https is not used then this flag will be ignored. If set to RequestDependent, compute or not is based on
+ * the value from AmazonWebServiceRequest::SignBody()
+ */
+ AWSAuthV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>& credentialsProvider,
+ const char* serviceName, const Aws::String& region, PayloadSigningPolicy signingPolicy = PayloadSigningPolicy::RequestDependent,
+ bool urlEscapePath = true, Aws::Auth::AWSSigningAlgorithm signingAlgorithm = Aws::Auth::AWSSigningAlgorithm::SIGV4);
+
+ virtual ~AWSAuthV4Signer();
+
+ /**
+ * AWSAuthV4signer's implementation of virtual function from base class
+ * Return Auth Signer's name, here the value is specified in Aws::Auth::DEFAULT_AUTHV4_SIGNER.
+ */
+ const char* GetName() const override
+ {
+ if (m_signingAlgorithm == Aws::Auth::AWSSigningAlgorithm::ASYMMETRIC_SIGV4)
+ {
+ return Aws::Auth::ASYMMETRIC_SIGV4_SIGNER;
+ }
+ else
+ {
+ return Aws::Auth::SIGV4_SIGNER;
+ }
+ }
+
+ /**
+ * Signs the request itself based on info in the request and uri.
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
+ */
+ bool SignRequest(Aws::Http::HttpRequest& request) const override
+ {
+ return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), true/*signBody*/);
+ }
+
+ /**
+ * Signs the request itself based on info in the request and uri.
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm. If signBody is false
+ * and https is being used then the body of the payload will not be signed.
+ */
+ bool SignRequest(Aws::Http::HttpRequest& request, bool signBody) const override
+ {
+ return SignRequest(request, m_region.c_str(), m_serviceName.c_str(), signBody);
+ }
+
+ /**
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm. If signBody is false
+ * and https is being used then the body of the payload will not be signed.
+ * Using m_region by default if parameter region is nullptr.
+ */
+ bool SignRequest(Aws::Http::HttpRequest& request, const char* region, bool signBody) const override
+ {
+ return SignRequest(request, region, m_serviceName.c_str(), signBody);
+ }
+
+ /**
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm. If signBody is false
+ * and https is being used then the body of the payload will not be signed.
+ * Using m_region by default if parameter region is nullptr.
+ */
+ bool SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const override;
+
+ /**
+ * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
+ * the region the signer was initialized with will be used for the signature.
+ * The URI can then be used in a normal HTTP call until expiration.
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
+ * expirationInSeconds defaults to 0 which provides a URI good for 7 days.
+ */
+ bool PresignRequest(Aws::Http::HttpRequest& request, long long expirationInSeconds = 0) const override;
+
+ /**
+ * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
+ * The URI can then be used in a normal HTTP call until expiration.
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
+ * expirationInSeconds defaults to 0 which provides a URI good for 7 days.
+ * Using m_region by default if parameter region is nullptr.
+ */
+ bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, long long expirationInSeconds = 0) const override;
+
+ /**
+ * Takes a request and signs the URI based on the HttpMethod, URI and other info from the request.
+ * The URI can then be used in a normal HTTP call until expiration.
+ * Uses AWS Auth V4 signing method with SHA256 HMAC algorithm.
+ * expirationInSeconds defaults to 0 which provides a URI good for 7 days.
+ * Using m_region by default if parameter region is nullptr.
+ * Using m_serviceName by default if parameter serviceName is nullptr.
+ */
+ bool PresignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, long long expirationInSeconds = 0) const override;
+
+ Aws::String GetServiceName() const { return m_serviceName; }
+ Aws::String GetRegion() const { return m_region; }
+ Aws::String GenerateSignature(const Aws::Auth::AWSCredentials& credentials,
+ const Aws::String& stringToSign, const Aws::String& simpleDate) const;
+ bool ShouldSignHeader(const Aws::String& header) const;
+
+ protected:
+ bool m_includeSha256HashHeader;
+
+ private:
+
+ Aws::String GenerateSignature(const Aws::Auth::AWSCredentials& credentials,
+ const Aws::String& stringToSign, const Aws::String& simpleDate, const Aws::String& region,
+ const Aws::String& serviceName) const;
+
+ Aws::String GenerateSignature(const Aws::String& stringToSign, const Aws::Utils::ByteBuffer& key) const;
+ bool ServiceRequireUnsignedPayload(const Aws::String& serviceName) const;
+ Aws::String ComputePayloadHash(Aws::Http::HttpRequest&) const;
+ Aws::String GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
+ const Aws::String& canonicalRequestHash, const Aws::String& region,
+ const Aws::String& serviceName) const;
+ Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey, const Aws::String& simpleDate) const;
+ Aws::Utils::ByteBuffer ComputeHash(const Aws::String& secretKey,
+ const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const;
+ bool SignRequestWithSigV4a(Aws::Http::HttpRequest& request, const char* region, const char* serviceName,
+ bool signBody, long long expirationTimeInSeconds, Aws::Crt::Auth::SignatureType signatureType) const;
+
+ Aws::Auth::AWSSigningAlgorithm m_signingAlgorithm;
+ std::shared_ptr<Auth::AWSCredentialsProvider> m_credentialsProvider;
+ const Aws::String m_serviceName;
+ const Aws::String m_region;
+ Aws::UniquePtr<Aws::Utils::Crypto::Sha256> m_hash;
+ Aws::UniquePtr<Aws::Utils::Crypto::Sha256HMAC> m_HMAC;
+
+ Aws::Set<Aws::String> m_unsignedHeaders;
+
+ //these next four fields are ONLY for caching purposes and do not change
+ //the logical state of the signer. They are marked mutable so the
+ //interface can remain const.
+ mutable Aws::Utils::ByteBuffer m_partialSignature;
+ mutable Aws::String m_currentDateStr;
+ mutable Aws::String m_currentSecretKey;
+ mutable Utils::Threading::ReaderWriterLock m_partialSignatureLock;
+ PayloadSigningPolicy m_payloadSigningPolicy;
+ bool m_urlEscapePath;
+ };
+ } // namespace Client
+} // namespace Aws
+
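For orientation, below is a minimal presigning sketch against these declarations, assuming the enclosing class is the SigV4 signer Aws::Client::AWSAuthV4Signer and that <aws/core/auth/AWSAuthSigner.h> still aggregates the split signer headers (both are assumptions, not shown in this hunk); the credentials, bucket and object names are placeholders only.

#include <aws/core/Aws.h>
#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/http/HttpRequest.h>
#include <aws/core/utils/stream/ResponseStream.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // Static credentials purely for illustration.
        auto credentials = Aws::MakeShared<Aws::Auth::SimpleAWSCredentialsProvider>(
            "presign-sketch", "AKIDEXAMPLE", "secretExample", "");

        // Signer bound to a default service/region pair; the overloads above allow
        // overriding either one per call by passing a non-null region/serviceName.
        Aws::Client::AWSAuthV4Signer signer(credentials, "s3", "us-east-1");

        auto request = Aws::Http::CreateHttpRequest(
            Aws::Http::URI("https://examplebucket.s3.amazonaws.com/object.txt"),
            Aws::Http::HttpMethod::HTTP_GET,
            Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);

        // 3600 seconds instead of the 7-day default implied by expirationInSeconds = 0.
        if (signer.PresignRequest(*request, 3600))
        {
            std::cout << request->GetURIString() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}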
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSNullSigner.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSNullSigner.h
new file mode 100644
index 0000000000..7c40f44fb4
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/auth/signer/AWSNullSigner.h
@@ -0,0 +1,72 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include "aws/core/auth/signer/AWSAuthSignerBase.h"
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpRequest;
+ } // namespace Http
+
+ namespace Utils
+ {
+ namespace Event
+ {
+ class Message;
+ }
+ } // namespace Utils
+
+ namespace Auth
+ {
+ AWS_CORE_API extern const char NULL_SIGNER[];
+ } // namespace Auth
+
+ namespace Client
+ {
+ /**
+ * A no-op implementation of the AWSAuthSigner interface
+ */
+ class AWS_CORE_API AWSNullSigner : public AWSAuthSigner
+ {
+ public:
+ /**
+             * AWSNullSigner's implementation of the virtual function from the base class.
+             * Here the returned value is specified in Aws::Auth::NULL_SIGNER.
+ */
+ const char* GetName() const override { return Aws::Auth::NULL_SIGNER; }
+
+ /**
+ * Do nothing
+ */
+ bool SignRequest(Aws::Http::HttpRequest&) const override { return true; }
+
+ /**
+ * Do nothing
+ */
+ bool SignEventMessage(Aws::Utils::Event::Message&, Aws::String& /* priorSignature */) const override { return true; }
+
+ /**
+ * Do nothing
+ */
+ bool PresignRequest(Aws::Http::HttpRequest&, long long) const override { return false; }
+
+ /**
+ * Do nothing
+ */
+ bool PresignRequest(Aws::Http::HttpRequest&, const char*, long long) const override { return false; }
+
+ /**
+ * Do nothing
+ */
+ bool PresignRequest(Aws::Http::HttpRequest&, const char*, const char*, long long) const override { return false; }
+ };
+
+ } // namespace Client
+} // namespace Aws
+
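A short illustrative sketch of the no-op contract above, assuming Aws::InitAPI has already been called by the application and that <aws/core/auth/AWSAuthSigner.h> aggregates this header: signing succeeds without touching the request, while presigning always reports failure.

#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/http/HttpRequest.h>
#include <aws/core/utils/stream/ResponseStream.h>
#include <cassert>

void NullSignerSketch()   // assumes Aws::InitAPI() was called elsewhere
{
    Aws::Client::AWSNullSigner nullSigner;

    auto request = Aws::Http::CreateHttpRequest(
        Aws::Http::URI("https://public-bucket.s3.amazonaws.com/index.html"),
        Aws::Http::HttpMethod::HTTP_GET,
        Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);

    // "Signing" succeeds but leaves the request untouched (no Authorization header)...
    assert(nullSigner.SignRequest(*request));
    assert(!request->HasHeader(Aws::Http::AUTHORIZATION_HEADER));

    // ...and presigning is deliberately unsupported.
    assert(!nullSigner.PresignRequest(*request, 3600));
}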
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSAsyncOperationTemplate.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSAsyncOperationTemplate.h
new file mode 100644
index 0000000000..15ee45f885
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSAsyncOperationTemplate.h
@@ -0,0 +1,191 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/utils/memory/AWSMemory.h>
+#include <aws/core/utils/memory/stl/AWSAllocator.h>
+#include <aws/core/utils/threading/Executor.h>
+#include <functional>
+#include <future>
+
+namespace Aws
+{
+namespace Client
+{
+ /**
+ * A template function that is used to create an Async Operation function body for AWS Operations
+ */
+ template<typename ClientT,
+ typename RequestT,
+ typename HandlerT,
+ typename HandlerContextT,
+ typename OperationFuncT,
+ typename ExecutorT>
+ inline void AWS_CORE_LOCAL MakeAsyncOperation(OperationFuncT&& operationFunc,
+ const ClientT* clientThis,
+ const RequestT& request,
+ const HandlerT& handler,
+ const HandlerContextT& context,
+ ExecutorT* pExecutor)
+ {
+ std::function<void()> asyncTask =
+ [operationFunc, clientThis, request, handler, context]() // note capture by value
+ {
+ handler(clientThis,
+ request,
+ (clientThis->*operationFunc)(request),
+ context);
+ };
+
+ pExecutor->Submit(std::move(asyncTask));
+ }
+
+ /**
+ * A template function that is used to create an Async Operation function body for AWS Streaming Operations
+     * The only difference from a regular non-streaming Operation is that the request is passed
+     * by non-const reference, so a virtual copy constructor is not needed. However, the caller
+     * must ensure the request object stays alive for the duration of the async execution.
+ */
+ template<typename ClientT,
+ typename RequestT,
+ typename HandlerT,
+ typename HandlerContextT,
+ typename OperationFuncT,
+ typename ExecutorT>
+ inline void AWS_CORE_LOCAL MakeAsyncStreamingOperation(OperationFuncT&& operationFunc,
+ const ClientT* clientThis,
+ RequestT& request, // note non-const ref
+ const HandlerT& handler,
+ const HandlerContextT& context,
+ ExecutorT* pExecutor)
+ {
+ std::function<void()> asyncTask =
+ [operationFunc, clientThis, &request, handler, context]() // note capture by ref
+ {
+ handler(clientThis,
+ request,
+ (clientThis->*operationFunc)(request),
+ context);
+ };
+
+ pExecutor->Submit(std::move(asyncTask));
+ }
+
+ /**
+     * A template function to create an Async Operation function body for an AWS Operation that takes no request as input.
+ */
+ template<typename ClientT,
+ typename HandlerT,
+ typename HandlerContextT,
+ typename OperationFuncT,
+ typename ExecutorT>
+ inline void AWS_CORE_LOCAL MakeAsyncOperation(OperationFuncT&& operationFunc,
+ const ClientT* clientThis,
+ const HandlerT& handler,
+ const HandlerContextT& context,
+ ExecutorT* pExecutor)
+ {
+ std::function<void()> asyncTask =
+ [operationFunc, clientThis, handler, context]()
+ {
+ handler(clientThis,
+ (clientThis->*operationFunc)(),
+ context);
+ };
+
+ pExecutor->Submit(std::move(asyncTask));
+ }
+
+ /**
+ * A template function that is used to create a Callable Operation function body for AWS Operations
+ */
+ template<typename ClientT,
+ typename RequestT,
+ typename OperationFuncT,
+ typename ExecutorT>
+ inline auto AWS_CORE_LOCAL MakeCallableOperation(const char* ALLOCATION_TAG,
+ OperationFuncT&& operationFunc,
+ const ClientT* clientThis,
+ const RequestT& request,
+ ExecutorT* pExecutor) -> std::future<decltype((clientThis->*operationFunc)(request))>
+ {
+ using OperationOutcomeT = decltype((clientThis->*operationFunc)(request));
+
+ auto task = Aws::MakeShared< std::packaged_task< OperationOutcomeT() > >(
+ ALLOCATION_TAG,
+ [clientThis, operationFunc, request]() // note capture by value
+ {
+ auto futureOutcome = (clientThis->*operationFunc)(request);
+ return futureOutcome;
+ } );
+
+ std::function<void()> packagedFunction =
+ [task]() { (*task)(); };
+ pExecutor->Submit(std::move(packagedFunction));
+ return task->get_future();
+ }
+
+ /**
+ * A template function that is used to create a Callable Operation function body for AWS Streaming Operations
+     * The only difference from a regular non-streaming Operation is that the request is passed
+     * by non-const reference, so a virtual copy constructor is not needed. However, the caller
+     * must ensure the request object stays alive for the duration of the async execution.
+ */
+ template<typename ClientT,
+ typename RequestT,
+ typename OperationFuncT,
+ typename ExecutorT>
+ inline auto AWS_CORE_LOCAL MakeCallableStreamingOperation(const char* ALLOCATION_TAG,
+ OperationFuncT&& operationFunc,
+ const ClientT* clientThis,
+ RequestT& request, // note non-const ref
+ ExecutorT* pExecutor) -> std::future<decltype((clientThis->*operationFunc)(request))>
+ {
+ using OperationOutcomeT = decltype((clientThis->*operationFunc)(request));
+
+ auto task = Aws::MakeShared< std::packaged_task< OperationOutcomeT() > >(
+ ALLOCATION_TAG,
+ [clientThis, operationFunc, &request]() // note capture by ref
+ {
+ return (clientThis->*operationFunc)(request);
+ } );
+
+ std::function<void()> packagedFunction =
+ [task]() { (*task)(); };
+ pExecutor->Submit(std::move(packagedFunction));
+ return task->get_future();
+ }
+
+ /**
+     * A template function that is used to create a Callable Operation function body for an AWS Operation that takes no request as input.
+ */
+ template<typename ClientT,
+ typename OperationFuncT,
+ typename ExecutorT>
+ inline auto AWS_CORE_LOCAL MakeCallableOperation(const char* ALLOCATION_TAG,
+ OperationFuncT&& operationFunc,
+ const ClientT* clientThis,
+ ExecutorT* pExecutor) -> std::future<decltype((clientThis->*operationFunc)())>
+ {
+ using OperationOutcomeT = decltype((clientThis->*operationFunc)());
+
+ auto task = Aws::MakeShared< std::packaged_task< OperationOutcomeT() > >(
+ ALLOCATION_TAG,
+ [clientThis, operationFunc]()
+ {
+ return (clientThis->*operationFunc)();
+ } );
+
+ std::function<void()> packagedFunction =
+ [task]() { (*task)(); };
+ pExecutor->Submit(std::move(packagedFunction));
+ return task->get_future();
+ }
+} // namespace Client
+} // namespace Aws
+
+
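A compile-level sketch of how these templates are meant to be driven; the client, request, and executor types below are purely illustrative (real use goes through generated service clients). Note that ExecutorT only needs to provide Submit() accepting a std::function<void()>, which is all the inline executor here relies on.

#include <aws/core/client/AWSAsyncOperationTemplate.h>
#include <functional>
#include <iostream>
#include <string>

// Purely illustrative stand-ins for a generated service client and its request type.
struct FakeRequest { std::string payload; };

class FakeClient
{
public:
    std::string Echo(const FakeRequest& request) const { return "echo: " + request.payload; }
};

// Minimal executor satisfying the ExecutorT contract: runs each task inline.
struct InlineExecutor
{
    void Submit(std::function<void()>&& task) { task(); }
};

int main()
{
    FakeClient client;
    FakeRequest request{"hello"};
    InlineExecutor executor;

    // Callback style: the request is copied into the task, the handler fires on completion.
    auto handler = [](const FakeClient*, const FakeRequest&, const std::string& outcome, int /*context*/)
    {
        std::cout << outcome << "\n";
    };
    Aws::Client::MakeAsyncOperation(&FakeClient::Echo, &client, request, handler, 0, &executor);

    // Callable style: returns a std::future holding the operation outcome.
    auto future = Aws::Client::MakeCallableOperation("FakeClientTag", &FakeClient::Echo, &client, request, &executor);
    std::cout << future.get() << "\n";
    return 0;
}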
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClient.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClient.h
index 186206a66e..4e67e8a4df 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClient.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClient.h
@@ -4,19 +4,21 @@
*/
#pragma once
+#if !defined(AWS_CLIENT_H)
+#define AWS_CLIENT_H
#include <aws/core/Core_EXPORTS.h>
#include <aws/core/client/CoreErrors.h>
+#include <aws/core/client/AWSUrlPresigner.h>
#include <aws/core/http/HttpTypes.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/AmazonWebServiceResult.h>
#include <aws/core/utils/crypto/Hash.h>
#include <aws/core/auth/AWSAuthSignerProvider.h>
+#include <aws/core/endpoint/AWSEndpoint.h>
#include <memory>
#include <atomic>
-struct aws_array_list;
-
namespace Aws
{
namespace Utils
@@ -24,16 +26,6 @@ namespace Aws
template<typename R, typename E>
class Outcome;
- namespace Xml
- {
- class XmlDocument;
- } // namespace Xml
-
- namespace Json
- {
- class JsonValue;
- } // namespace Json
-
namespace RateLimits
{
class RateLimiterInterface;
@@ -71,7 +63,6 @@ namespace Aws
template<typename ERROR_TYPE>
class AWSError;
class AWSErrorMarshaller;
- class AWSRestfulJsonErrorMarshaller;
class AWSAuthSigner;
struct ClientConfiguration;
class RetryStrategy;
@@ -80,7 +71,7 @@ namespace Aws
typedef Utils::Outcome<AmazonWebServiceResult<Utils::Stream::ResponseStream>, AWSError<CoreErrors>> StreamOutcome;
/**
- * Abstract AWS Client. Contains most of the functionality necessary to build an http request, get it signed, and send it accross the wire.
+ * Abstract AWS Client. Contains most of the functionality necessary to build an http request, get it signed, and send it across the wire.
*/
class AWS_CORE_API AWSClient
{
@@ -91,8 +82,8 @@ namespace Aws
* errorMarshaller tells the client how to convert error payloads into AWSError objects.
*/
AWSClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
+ const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
/**
* Configuration will be used for http client settings, retry strategy, throttles, and signing information.
@@ -100,8 +91,8 @@ namespace Aws
* SigV4 signer. errorMarshaller tells the client how to convert error payloads into AWSError objects.
*/
AWSClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
+ const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
virtual ~AWSClient() { };
@@ -109,46 +100,69 @@ namespace Aws
* Generates a signed Uri using the injected signer. for the supplied uri and http method. expirationInSeconds defaults
* to 0 which is the default 7 days. The implication of this function is using auth signer v4 to sign it.
*/
- Aws::String GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, long long expirationInSeconds = 0);
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, long long expirationInSeconds = 0);
/**
* Generates a signed Uri using the injected signer. for the supplied uri, http method and customized headers. expirationInSeconds defaults
* to 0 which is the default 7 days. The implication of this function is using auth signer v4 to sign it.
*/
- Aws::String GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
/**
* Generates a signed Uri using the injected signer. for the supplied uri and http method and region. expirationInSeconds defaults
* to 0 which is the default 7 days.
*/
- Aws::String GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, long long expirationInSeconds = 0) const;
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, long long expirationInSeconds = 0) const;
/**
* Generates a signed Uri using the injected signer. for the supplied uri, http method and customized headers. expirationInSeconds defaults
* to 0 which is the default 7 days.
*/
- Aws::String GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
/**
* Generates a signed Uri using the injected signer. for the supplied uri and http method, region, and service name. expirationInSeconds defaults
* to 0 which is the default 7 days.
*/
- Aws::String GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, long long expirationInSeconds = 0) const;
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, long long expirationInSeconds = 0) const;
/**
* Generates a signed Uri using the injected signer. for the supplied uri, http method and customized headers. expirationInSeconds defaults
* to 0 which is the default 7 days.
*/
- Aws::String GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
- Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri, Aws::Http::HttpMethod method,
- const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
+ /**
+ * Generates a signed Uri using the injected signer. for the supplied uri and http method, region, service name and signer name. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const char* signerName, long long expirationInSeconds = 0) const;
- Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName,
- const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
+ /**
+ * Generates a signed Uri using the injected signer. for the supplied uri, http method, region, service name, signer name and customized headers. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const char* signerName, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = 0);
- Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region,
- const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
+ Aws::String GeneratePresignedUrl(const Aws::Endpoint::AWSEndpoint& endpoint,
+ Aws::Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const Aws::Http::HeaderValueCollection& customizedHeaders = {},
+ uint64_t expirationInSeconds = 0,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr);
+
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method,
+ const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
+
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName,
+ const char* signerName, const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
+
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName,
+ const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
+
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region,
+ const Aws::Http::QueryStringParameterCollection& extraParams = Aws::Http::QueryStringParameterCollection(), long long expirationInSeconds = 0) const;
/**
* Stop all requests immediately.
@@ -175,11 +189,11 @@ namespace Aws
* or encounters and error that is not retryable.
*/
HttpResponseOutcome AttemptExhaustively(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod httpMethod,
- const char* signerName,
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod httpMethod,
+ const char* signerName,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
/**
* Calls AttemptOneRequest until it either, succeeds, runs out of retries from the retry strategy,
@@ -189,45 +203,45 @@ namespace Aws
* name.
*/
HttpResponseOutcome AttemptExhaustively(const Aws::Http::URI& uri,
- Http::HttpMethod httpMethod,
- const char* signerName,
- const char* requestName = "",
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
+ Http::HttpMethod httpMethod,
+ const char* signerName,
+ const char* requestName = "",
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
/**
- * Build an Http Request from the AmazonWebServiceRequest object. Signs the request, sends it accross the wire
+ * Build an Http Request from the AmazonWebServiceRequest object. Signs the request, sends it across the wire
* then reports the http response.
*/
HttpResponseOutcome AttemptOneRequest(const std::shared_ptr<Http::HttpRequest>& httpRequest,
- const Aws::AmazonWebServiceRequest& request,
- const char* signerName,
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
+ const Aws::AmazonWebServiceRequest& request,
+ const char* signerName,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
/**
- * Signs an Http Request, sends it accross the wire
+ * Signs an Http Request, sends it across the wire
* then reports the http response. This method is for payloadless requests e.g. GET, DELETE, HEAD
*
* requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
* name.
*/
HttpResponseOutcome AttemptOneRequest(const std::shared_ptr<Http::HttpRequest>& httpRequest,
- const char* signerName,
- const char* requestName = "",
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
+ const char* signerName,
+ const char* requestName = "",
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
/**
* This is used for structureless response payloads (file streams, binary data etc...). It calls AttemptExhaustively, but upon
* return transfers ownership of the underlying stream for the http response to the caller.
*/
StreamOutcome MakeRequestWithUnparsedResponse(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
/**
* This is used for structureless response payloads (file streams, binary data etc...). It calls AttemptExhaustively, but upon
@@ -237,11 +251,18 @@ namespace Aws
* name.
*/
StreamOutcome MakeRequestWithUnparsedResponse(const Aws::Http::URI& uri,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* requestName = "",
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* requestName = "",
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ StreamOutcome MakeRequestWithUnparsedResponse(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
/**
* Abstract. Subclassing clients should override this to tell the client how to marshall error payloads
@@ -252,7 +273,7 @@ namespace Aws
* Transforms the AmazonWebServicesResult object into an HttpRequest.
*/
virtual void BuildHttpRequest(const Aws::AmazonWebServiceRequest& request,
- const std::shared_ptr<Aws::Http::HttpRequest>& httpRequest) const;
+ const std::shared_ptr<Aws::Http::HttpRequest>& httpRequest) const;
/**
* Gets the underlying ErrorMarshaller for subclasses to use.
@@ -266,6 +287,8 @@ namespace Aws
* Gets the corresponding signer from the signers map by name.
*/
Aws::Client::AWSAuthSigner* GetSignerByName(const char* name) const;
+
+ friend Aws::Client::AWSAuthSigner* AWSUrlPresigner::GetSignerByName(const char* name) const;
protected:
/**
@@ -276,14 +299,24 @@ namespace Aws
* event-streams.
*/
std::shared_ptr<Aws::Http::HttpRequest> BuildAndSignHttpRequest(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method, const char* signerName) const;
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method, const char* signerName) const;
/**
* Performs the HTTP request via the HTTP client while enforcing rate limiters
*/
std::shared_ptr<Aws::Http::HttpResponse> MakeHttpRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const;
Aws::String m_region;
+
+ /**
+ * Adds "X-Amzn-Trace-Id" header with the value of _X_AMZN_TRACE_ID if both
+ * environment variables AWS_LAMBDA_FUNCTION_NAME and _X_AMZN_TRACE_ID are set.
+ * Does not add/modify header "X-Amzn-Trace-Id" if it is already set.
+ */
+ static void AppendRecursionDetectionHeader(std::shared_ptr<Aws::Http::HttpRequest> ioRequest);
+
+ static CoreErrors GuessBodylessErrorType(Aws::Http::HttpResponseCode responseCode);
+ static bool DoesResponseGenerateError(const std::shared_ptr<Aws::Http::HttpResponse>& response);
private:
/**
* Try to adjust signer's clock
@@ -291,12 +324,11 @@ namespace Aws
*/
bool AdjustClockSkew(HttpResponseOutcome& outcome, const char* signerName) const;
void AddHeadersToRequest(const std::shared_ptr<Aws::Http::HttpRequest>& httpRequest, const Http::HeaderValueCollection& headerValues) const;
+ void AddChecksumToRequest(const std::shared_ptr<Aws::Http::HttpRequest>& HttpRequest, const Aws::AmazonWebServiceRequest& request) const;
void AddContentBodyToRequest(const std::shared_ptr<Aws::Http::HttpRequest>& httpRequest, const std::shared_ptr<Aws::IOStream>& body,
bool needsContentMd5 = false, bool isChunked = false) const;
void AddCommonHeaders(Aws::Http::HttpRequest& httpRequest) const;
- void InitializeGlobalStatics();
- std::shared_ptr<Aws::Http::HttpRequest> ConvertToRequestForPresigning(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri,
- Aws::Http::HttpMethod method, const Aws::Http::QueryStringParameterCollection& extraParams) const;
+ std::shared_ptr<Aws::IOStream> GetBodyStream(const Aws::AmazonWebServiceRequest& request) const;
std::shared_ptr<Aws::Http::HttpClient> m_httpClient;
std::shared_ptr<Aws::Auth::AWSAuthSignerProvider> m_signerProvider;
@@ -310,154 +342,19 @@ namespace Aws
long m_requestTimeoutMs;
bool m_enableClockSkewAdjustment;
Aws::String m_serviceName;
+ Aws::Client::RequestCompressionConfig m_requestCompressionConfig;
+ void AppendHeaderValueToRequest(
+ const std::shared_ptr<Http::HttpRequest> &request, String header,
+ String value) const;
};
- typedef Utils::Outcome<AmazonWebServiceResult<Utils::Json::JsonValue>, AWSError<CoreErrors>> JsonOutcome;
AWS_CORE_API Aws::String GetAuthorizationHeader(const Aws::Http::HttpRequest& httpRequest);
-
- /**
- * AWSClient that handles marshalling json response bodies. You would inherit from this class
- * to create a client that uses Json as its payload format.
- */
- class AWS_CORE_API AWSJsonClient : public AWSClient
- {
- public:
- typedef AWSClient BASECLASS;
-
- /**
- * Simply calls AWSClient constructor.
- */
- AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
-
- /**
- * Simply calls AWSClient constructor.
- */
- AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
-
- virtual ~AWSJsonClient() = default;
-
- protected:
- /**
- * Converts/Parses an http response into a meaningful AWSError object using the json message structure.
- */
- virtual AWSError<CoreErrors> BuildAWSError(const std::shared_ptr<Aws::Http::HttpResponse>& response) const override;
-
- /**
- * Returns a Json document or an error from the request. Does some marshalling json and raw streams,
- * then just calls AttemptExhaustively.
- *
- * method defaults to POST
- */
- JsonOutcome MakeRequest(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
-
- /**
- * Returns a Json document or an error from the request. Does some marshalling json and raw streams,
- * then just calls AttemptExhaustively.
- *
- * requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
- * name.
- *
- * method defaults to POST
- */
- JsonOutcome MakeRequest(const Aws::Http::URI& uri,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* requestName = "",
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
-
- JsonOutcome MakeEventStreamRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const;
- };
-
- typedef Utils::Outcome<AmazonWebServiceResult<Utils::Xml::XmlDocument>, AWSError<CoreErrors>> XmlOutcome;
-
- /**
- * AWSClient that handles marshalling xml response bodies. You would inherit from this class
- * to create a client that uses Xml as its payload format.
- */
- class AWS_CORE_API AWSXMLClient : public AWSClient
- {
- public:
-
- typedef AWSClient BASECLASS;
-
- AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
-
- AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
-
- virtual ~AWSXMLClient() = default;
-
- protected:
- /**
- * Converts/Parses an http response into a meaningful AWSError object. Using the XML message structure.
- */
- virtual AWSError<CoreErrors> BuildAWSError(const std::shared_ptr<Aws::Http::HttpResponse>& response) const override;
-
- /**
- * Returns an xml document or an error from the request. Does some marshalling xml and raw streams,
- * then just calls AttemptExhaustively.
- *
- * method defaults to POST
- */
- XmlOutcome MakeRequest(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
-
-
- /**
- * Returns an xml document or an error from the request. Does some marshalling xml and raw streams,
- * then just calls AttemptExhaustively.
- *
- * requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
- * name.
- *
- * method defaults to POST
- */
- XmlOutcome MakeRequest(const Aws::Http::URI& uri,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* requestName = "",
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
-
- /**
- * This is used for event stream response.
- */
- XmlOutcome MakeRequestWithEventStream(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* singerName = Aws::Auth::SIGV4_SIGNER,
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
-
- /**
- * This is used for event stream response.
- * requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
- * name.
- */
- XmlOutcome MakeRequestWithEventStream(const Aws::Http::URI& uri,
- Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
- const char* signerName = Aws::Auth::SIGV4_SIGNER,
- const char* requestName = "",
- const char* signerRegionOverride = nullptr,
- const char* signerServiceNameOverride = nullptr) const;
- };
-
} // namespace Client
} // namespace Aws
+
+#if !defined(AWS_JSON_CLIENT_H) && !defined(AWS_XML_CLIENT_H)
+/* Legacy backward-compatibility includes so that code which includes only AWSClient.h keeps building */
+#include <aws/core/client/AWSJsonClient.h>
+#include <aws/core/client/AWSXmlClient.h>
+#endif // !defined(AWS_JSON_CLIENT_H) && !defined(AWS_XML_CLIENT_H)
+#endif // !defined(AWS_CLIENT_H)
\ No newline at end of file
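The two trailing conditional includes keep pre-split code building: translation units that historically included only AWSClient.h still see the JSON and XML clients, unless those headers were already included directly (in which case their own guards are set). A quick compile-time sketch of that guarantee:

#include <aws/core/client/AWSClient.h>
#include <type_traits>

// Both split-out clients remain visible through the legacy umbrella include.
static_assert(std::is_base_of<Aws::Client::AWSClient, Aws::Client::AWSJsonClient>::value,
              "AWSJsonClient is still reachable via AWSClient.h");
static_assert(std::is_base_of<Aws::Client::AWSClient, Aws::Client::AWSXMLClient>::value,
              "AWSXMLClient is still reachable via AWSClient.h");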
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClientAsyncCRTP.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClientAsyncCRTP.h
new file mode 100644
index 0000000000..a31b1c1783
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSClientAsyncCRTP.h
@@ -0,0 +1,125 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/client/AWSAsyncOperationTemplate.h>
+
+namespace Aws
+{
+namespace Client
+{
+ class AsyncCallerContext;
+
+ /**
+     * A helper to determine whether an AWS Operation is EventStream-enabled (based on the const-ness of the request)
+ */
+ template<typename T>
+ struct AWS_CORE_LOCAL IsEventStreamOperation : IsEventStreamOperation<decltype(&T::operator())> {};
+
+ template<typename ReturnT, typename ClassT, typename RequestT>
+ struct AWS_CORE_LOCAL IsEventStreamOperation<ReturnT(ClassT::*)(RequestT) const>
+ {
+ static const bool value = !std::is_const<typename std::remove_reference<RequestT>::type>::value;
+ };
+
+ template<typename ReturnT, typename ClassT>
+ struct AWS_CORE_LOCAL IsEventStreamOperation<ReturnT(ClassT::*)() const>
+ {
+ static const bool value = false;
+ };
+
+
+ /**
+     * A CRTP base-class template that adds template methods for calling AWS Operations asynchronously through a thread executor.
+     * An Aws<Service>Client inherits from this class and thereby gains the methods below.
+ */
+ template <typename AwsServiceClientT>
+ class ClientWithAsyncTemplateMethods
+ {
+ public:
+ /**
+         * A template to submit an AwsServiceClient regular operation method for async execution.
+         * This template method copies and queues the request into a thread executor and triggers the associated callback when the operation has finished.
+ */
+ template<typename RequestT, typename HandlerT, typename OperationFuncT, typename std::enable_if<!IsEventStreamOperation<OperationFuncT>::value, int>::type = 0>
+ void SubmitAsync(OperationFuncT operationFunc,
+ const RequestT& request,
+ const HandlerT& handler,
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ const AwsServiceClientT* clientThis = static_cast<const AwsServiceClientT*>(this);
+ Aws::Client::MakeAsyncOperation(operationFunc, clientThis, request, handler, context, clientThis->m_executor.get());
+ }
+
+ /**
+         * A template to submit an AwsServiceClient event-stream-enabled operation method for async execution.
+         * This template method queues the original request object into a thread executor and triggers the associated callback when the operation has finished.
+         * It is the caller's responsibility to ensure the lifetime of the original request object for the duration of the async execution.
+ */
+ template<typename RequestT, typename HandlerT, typename OperationFuncT, typename std::enable_if<IsEventStreamOperation<OperationFuncT>::value, int>::type = 0>
+ void SubmitAsync(OperationFuncT operationFunc,
+ RequestT& request, // note non-const ref
+ const HandlerT& handler,
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ const AwsServiceClientT* clientThis = static_cast<const AwsServiceClientT*>(this);
+ Aws::Client::MakeAsyncStreamingOperation(operationFunc, clientThis, request, handler, context, clientThis->m_executor.get());
+ }
+
+ /**
+         * A template to submit an AwsServiceClient regular operation method without arguments for async execution.
+         * This template method submits a task into a thread executor and triggers the associated callback when the operation has finished.
+ */
+ template<typename HandlerT, typename OperationFuncT>
+ void SubmitAsync(OperationFuncT operationFunc,
+ const HandlerT& handler,
+ const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ const AwsServiceClientT* clientThis = static_cast<const AwsServiceClientT*>(this);
+ Aws::Client::MakeAsyncOperation(operationFunc, clientThis, handler, context, clientThis->m_executor.get());
+ }
+
+ /**
+         * A template to submit an AwsServiceClient regular operation method for async execution that returns a future<OperationOutcome> object.
+         * This template method copies and queues the request into a thread executor and returns a future<OperationOutcome> object that becomes ready when the operation has finished.
+ */
+ template<typename RequestT, typename OperationFuncT, typename std::enable_if<!IsEventStreamOperation<OperationFuncT>::value, int>::type = 0>
+ auto SubmitCallable(OperationFuncT operationFunc,
+ const RequestT& request) const
+ -> std::future<decltype((static_cast<const AwsServiceClientT*>(nullptr)->*operationFunc)(request))>
+ {
+ const AwsServiceClientT* clientThis = static_cast<const AwsServiceClientT*>(this);
+ return Aws::Client::MakeCallableOperation(AwsServiceClientT::ALLOCATION_TAG, operationFunc, clientThis, request, clientThis->m_executor.get());
+ }
+
+ /**
+         * A template to submit an AwsServiceClient event-stream-enabled operation method for async execution that returns a future<OperationOutcome> object.
+         * This template method queues the original request into a thread executor and returns a future<OperationOutcome> object that becomes ready when the operation has finished.
+         * It is the caller's responsibility to ensure the lifetime of the original request object for the duration of the async execution.
+ */
+ template<typename RequestT, typename OperationFuncT, typename std::enable_if<IsEventStreamOperation<OperationFuncT>::value, int>::type = 0>
+ auto SubmitCallable(OperationFuncT operationFunc, /*note non-const ref*/ RequestT& request) const
+ -> std::future<decltype((static_cast<const AwsServiceClientT*>(nullptr)->*operationFunc)(request))>
+ {
+ const AwsServiceClientT* clientThis = static_cast<const AwsServiceClientT*>(this);
+ return Aws::Client::MakeCallableStreamingOperation(AwsServiceClientT::ALLOCATION_TAG, operationFunc, clientThis, request, clientThis->m_executor.get());
+ }
+
+ /**
+         * A template to submit an AwsServiceClient regular operation without a request argument for
+         * async execution that returns a future<OperationOutcome> object.
+         * This template method queues a task into a thread executor and returns a future<OperationOutcome> object that becomes ready when the operation has finished.
+ */
+ template<typename OperationFuncT>
+ auto SubmitCallable(OperationFuncT operationFunc) const
+ -> std::future<decltype((static_cast<const AwsServiceClientT*>(nullptr)->*operationFunc)())>
+ {
+ const AwsServiceClientT* clientThis = static_cast<const AwsServiceClientT*>(this);
+ return Aws::Client::MakeCallableOperation(AwsServiceClientT::ALLOCATION_TAG, operationFunc, clientThis, clientThis->m_executor.get());
+ }
+ };
+} // namespace Client
+} // namespace Aws
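A self-contained sketch of the contract this CRTP base assumes about the derived class: it reaches ALLOCATION_TAG and m_executor through the static_cast, so the derived client must make both visible to it (a real generated service client arranges the same access itself). The fake client, request type, and inline executor below are illustrative only.

#include <aws/core/client/AWSClientAsyncCRTP.h>
#include <functional>
#include <iostream>
#include <memory>
#include <string>

struct FakeRequest { std::string payload; };

// Any executor-like type with Submit() works, because ExecutorT is a template parameter.
struct InlineExecutor
{
    void Submit(std::function<void()>&& task) { task(); }
};

// Hypothetical service client exposing what the CRTP base needs: ALLOCATION_TAG and m_executor.
class FakeServiceClient : public Aws::Client::ClientWithAsyncTemplateMethods<FakeServiceClient>
{
public:
    static constexpr const char* ALLOCATION_TAG = "FakeServiceClient";

    std::string Echo(const FakeRequest& request) const { return "echo: " + request.payload; }

    std::shared_ptr<InlineExecutor> m_executor = std::make_shared<InlineExecutor>();
};

int main()
{
    FakeServiceClient client;
    FakeRequest request{"hello"};

    // Callback-based submission; the last handler parameter is the optional AsyncCallerContext.
    client.SubmitAsync(&FakeServiceClient::Echo, request,
        [](const FakeServiceClient*, const FakeRequest&, const std::string& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
        {
            std::cout << outcome << "\n";
        });

    // Future-based submission.
    auto future = client.SubmitCallable(&FakeServiceClient::Echo, request);
    std::cout << future.get() << "\n";
    return 0;
}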
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSError.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSError.h
index 39f033c3fc..e9df0be8a2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSError.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSError.h
@@ -12,6 +12,13 @@
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/core/utils/StringUtils.h>
+// TODO: temporary fix for naming conflicts on Windows.
+#ifdef _WIN32
+#ifdef GetMessage
+#undef GetMessage
+#endif
+#endif
+
namespace Aws
{
namespace Client
@@ -41,12 +48,17 @@ namespace Aws
/**
* Initializes AWSError object as empty with the error not being retryable.
*/
- AWSError() : m_responseCode(Aws::Http::HttpResponseCode::REQUEST_NOT_MADE), m_isRetryable(false) {}
+ AWSError()
+ : m_errorType(),
+ m_responseCode(Aws::Http::HttpResponseCode::REQUEST_NOT_MADE),
+ m_isRetryable(false),
+ m_errorPayloadType(ErrorPayloadType::NOT_SET)
+ {}
/**
* Initializes AWSError object with errorType, exceptionName, message, and retryable flag.
*/
- AWSError(ERROR_TYPE errorType, Aws::String exceptionName, const Aws::String message, bool isRetryable) :
- m_errorType(errorType), m_exceptionName(exceptionName), m_message(message),
+ AWSError(ERROR_TYPE errorType, Aws::String exceptionName, Aws::String message, bool isRetryable)
+ : m_errorType(errorType), m_exceptionName(std::move(exceptionName)), m_message(std::move(message)),
m_responseCode(Aws::Http::HttpResponseCode::REQUEST_NOT_MADE), m_isRetryable(isRetryable),
m_errorPayloadType(ErrorPayloadType::NOT_SET) {}
/**
@@ -85,7 +97,7 @@ namespace Aws
/**
* Move assignment operator
*/
- AWSError& operator=(AWSError<ERROR_TYPE>&& other) = default;
+ AWSError& operator=(AWSError<ERROR_TYPE>&&) = default;
/**
* Gets underlying errorType.
@@ -191,10 +203,10 @@ namespace Aws
Aws::String m_remoteHostIpAddress;
Aws::String m_requestId;
Aws::Http::HeaderValueCollection m_responseHeaders;
- Aws::Http::HttpResponseCode m_responseCode;
- bool m_isRetryable;
+ Aws::Http::HttpResponseCode m_responseCode = Aws::Http::HttpResponseCode::REQUEST_NOT_MADE;
+ bool m_isRetryable = false;
- ErrorPayloadType m_errorPayloadType;
+ ErrorPayloadType m_errorPayloadType = ErrorPayloadType::NOT_SET;
Aws::Utils::Xml::XmlDocument m_xmlPayload;
Aws::Utils::Json::JsonValue m_jsonPayload;
};
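The switch to in-class initializers above means a default-constructed error is fully defined even before any response is attached; a tiny sketch of the resulting behavior:

#include <aws/core/client/AWSError.h>
#include <aws/core/client/CoreErrors.h>
#include <cassert>

void DefaultConstructedErrorIsInert()
{
    Aws::Client::AWSError<Aws::Client::CoreErrors> error;

    // No request was made and nothing is retryable by default.
    assert(error.GetResponseCode() == Aws::Http::HttpResponseCode::REQUEST_NOT_MADE);
    assert(!error.ShouldRetry());
}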
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSJsonClient.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSJsonClient.h
new file mode 100644
index 0000000000..877db0aec5
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSJsonClient.h
@@ -0,0 +1,110 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#if !defined(AWS_JSON_CLIENT_H)
+#define AWS_JSON_CLIENT_H
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/client/AWSClient.h>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Json
+ {
+ class JsonValue;
+ } // namespace Json
+ } // namespace Utils
+
+ namespace Client
+ {
+ typedef Utils::Outcome<AmazonWebServiceResult<Utils::Json::JsonValue>, AWSError<CoreErrors>> JsonOutcome;
+ /**
+ * AWSClient that handles marshalling json response bodies. You would inherit from this class
+ * to create a client that uses Json as its payload format.
+ */
+ class AWS_CORE_API AWSJsonClient : public AWSClient
+ {
+ public:
+ typedef AWSClient BASECLASS;
+
+ /**
+ * Simply calls AWSClient constructor.
+ */
+ AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
+
+ /**
+ * Simply calls AWSClient constructor.
+ */
+ AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
+
+ virtual ~AWSJsonClient() = default;
+
+ protected:
+ /**
+ * Converts/Parses an http response into a meaningful AWSError object using the json message structure.
+ */
+ virtual AWSError<CoreErrors> BuildAWSError(const std::shared_ptr<Aws::Http::HttpResponse>& response) const override;
+
+ /**
+             * Returns a Json document or an error from the request. Does some marshalling of json and raw streams,
+ * then just calls AttemptExhaustively.
+ *
+ * method defaults to POST
+ */
+ JsonOutcome MakeRequest(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ JsonOutcome MakeRequest(const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ /**
+             * Returns a Json document or an error from the request. Does some marshalling of json and raw streams,
+ * then just calls AttemptExhaustively.
+ *
+ * method defaults to POST
+ */
+ JsonOutcome MakeRequest(const Aws::Http::URI& uri,
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ /**
+             * Returns a Json document or an error from the request. Does some marshalling of json and raw streams,
+ * then just calls AttemptExhaustively.
+ *
+ * requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
+ * name.
+ *
+ * method defaults to POST
+ */
+ JsonOutcome MakeRequest(const Aws::Http::URI& uri,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* requestName = "",
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ JsonOutcome MakeEventStreamRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const;
+ };
+ } // namespace Client
+} // namespace Aws
+
+#endif // !defined(AWS_JSON_CLIENT_H)
\ No newline at end of file
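Since MakeRequest is protected, the intended consumer is a service client derived from AWSJsonClient. Below is a hedged sketch of such a subclass wiring the new AWSEndpoint-based overload, assuming the usual core pieces (DefaultAWSCredentialsProviderChain, AWSAuthV4Signer, JsonErrorMarshaller); the "execute-api" service name and class name are illustrative only.

#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/auth/AWSCredentialsProviderChain.h>
#include <aws/core/client/AWSErrorMarshaller.h>
#include <aws/core/client/AWSJsonClient.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/endpoint/AWSEndpoint.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>

class MyJsonServiceClient : public Aws::Client::AWSJsonClient
{
public:
    explicit MyJsonServiceClient(const Aws::Client::ClientConfiguration& config)
        : AWSJsonClient(config,
                        Aws::MakeShared<Aws::Client::AWSAuthV4Signer>("MyJsonServiceClient",
                            Aws::MakeShared<Aws::Auth::DefaultAWSCredentialsProviderChain>("MyJsonServiceClient"),
                            "execute-api", config.region),
                        Aws::MakeShared<Aws::Client::JsonErrorMarshaller>("MyJsonServiceClient"))
    {}

    // Thin public wrapper around the protected endpoint-based overload declared above.
    Aws::Client::JsonOutcome CallEndpoint(const Aws::Endpoint::AWSEndpoint& endpoint) const
    {
        return MakeRequest(endpoint, Aws::Http::HttpMethod::HTTP_POST);
    }
};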
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSUrlPresigner.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSUrlPresigner.h
new file mode 100644
index 0000000000..01601c1eb1
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSUrlPresigner.h
@@ -0,0 +1,176 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/AmazonWebServiceRequest.h>
+#include <aws/core/endpoint/AWSEndpoint.h>
+
+#include <aws/core/utils/memory/stl/AWSString.h>
+
+namespace Aws
+{
+ namespace Client
+ {
+ class AWSClient;
+
+ /**
+ * Helper class to generate pre-signed AWS URLs.
+ */
+ class AWS_CORE_API AWSUrlPresigner
+ {
+ public:
+ AWSUrlPresigner(const AWSClient& client);
+
+ virtual ~AWSUrlPresigner() {};
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri and http method. expirationInSeconds defaults
+ * to 0 which is the default 7 days. The implication of this function is using auth signer v4 to sign it.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, and customized headers. expirationInSeconds defaults
+ * to 0 which is the default 7 days. The implication of this function is using auth signer v4 to sign it.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const Aws::Http::HeaderValueCollection &customizedHeaders,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, and region. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, and customized headers. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const Aws::Http::HeaderValueCollection &customizedHeaders,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, region, and service name. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const char *serviceNameOverride,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, and customized headers. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const char *serviceNameOverride,
+ const Aws::Http::HeaderValueCollection &customizedHeaders,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, region, service name, and signer name. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const char *serviceNameOverride,
+ const char *signerName,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri using the injected signer, for the supplied uri, http method, region, service name, signer name, and customized headers. expirationInSeconds defaults
+ * to 0 which is the default 7 days.
+ *
+ * This is a real method for uri pre-signing, the rest are just overloads.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const char *serviceNameOverride,
+ const char *signerName,
+ const Aws::Http::HeaderValueCollection &customizedHeaders,
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri for a supplied AWSEndpoint.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::Endpoint::AWSEndpoint &endpoint,
+ Aws::Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const Aws::Http::HeaderValueCollection &customizedHeaders = {},
+ uint64_t expirationInSeconds = 0,
+ const char *signerName = Aws::Auth::SIGV4_SIGNER,
+ const char *signerRegionOverride = nullptr,
+ const char *signerServiceNameOverride = nullptr) const;
+
+ /**
+ * Generates a signed Uri for a supplied request and uri.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest &request,
+ const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const Aws::Http::QueryStringParameterCollection &extraParams = Aws::Http::QueryStringParameterCollection(),
+ long long expirationInSeconds = 0) const;
+
+ /**
+             * Generates a signed Uri using the injected signer, for the supplied request object, uri, http method, region, service name, signer name, and customized headers.
+ * expirationInSeconds defaults to 0 which is the default 7 days.
+ *
+ * This is a real method for request+uri pre-signing, the rest are just overloads.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest &request,
+ const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const char *serviceNameOverride,
+ const char *signerName,
+ const Aws::Http::QueryStringParameterCollection &extraParams = Aws::Http::QueryStringParameterCollection(),
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri for a supplied request and uri.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest &request,
+ const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const char *serviceNameOverride,
+ const Aws::Http::QueryStringParameterCollection &extraParams = Aws::Http::QueryStringParameterCollection(),
+ long long expirationInSeconds = 0) const;
+
+ /**
+ * Generates a signed Uri for a supplied request and uri.
+ */
+ Aws::String GeneratePresignedUrl(const Aws::AmazonWebServiceRequest &request,
+ const Aws::Http::URI &uri,
+ Aws::Http::HttpMethod method,
+ const char *regionOverride,
+ const Aws::Http::QueryStringParameterCollection &extraParams = Aws::Http::QueryStringParameterCollection(),
+ long long expirationInSeconds = 0) const;
+
+ protected:
+ const AWSClient& m_awsClient;
+
+            friend class AWSClient; // allow AWSClient to access GetSignerByName below
+ Aws::Client::AWSAuthSigner* GetSignerByName(const char* name) const;
+ }; // class AWSUrlPresigner
+
+ } // namespace Client
+} // namespace Aws
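A minimal usage sketch: every concrete service client derives from Aws::Client::AWSClient, so any of them can be handed to the presigner (this assumes the SDK is already initialized; the URL and 300-second expiry are illustrative, and 0 would keep the 7-day default).

#include <aws/core/client/AWSUrlPresigner.h>
#include <aws/core/http/URI.h>

Aws::String PresignGetUrl(const Aws::Client::AWSClient& serviceClient, const char* url)
{
    Aws::Client::AWSUrlPresigner presigner(serviceClient);

    // Region, service, and signer default to whatever the wrapped client was configured with.
    return presigner.GeneratePresignedUrl(Aws::Http::URI(url),
                                          Aws::Http::HttpMethod::HTTP_GET,
                                          300LL);
}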
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSXmlClient.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSXmlClient.h
new file mode 100644
index 0000000000..1d3426e6e8
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AWSXmlClient.h
@@ -0,0 +1,135 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#if !defined(AWS_XML_CLIENT_H)
+#define AWS_XML_CLIENT_H
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/client/AWSClient.h>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Xml
+ {
+ class XmlDocument;
+ } // namespace Xml
+ } // namespace Utils
+
+ namespace Client
+ {
+ typedef Utils::Outcome<AmazonWebServiceResult<Utils::Xml::XmlDocument>, AWSError<CoreErrors>> XmlOutcome;
+
+ /**
+ * AWSClient that handles marshalling xml response bodies. You would inherit from this class
+ * to create a client that uses Xml as its payload format.
+ */
+ class AWS_CORE_API AWSXMLClient : public AWSClient
+ {
+ public:
+ typedef AWSClient BASECLASS;
+
+ AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
+
+ AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller);
+
+ virtual ~AWSXMLClient() = default;
+
+ protected:
+ /**
+ * Converts/Parses an http response into a meaningful AWSError object. Using the XML message structure.
+ */
+ virtual AWSError<CoreErrors> BuildAWSError(const std::shared_ptr<Aws::Http::HttpResponse>& response) const override;
+
+ /**
+             * Returns an xml document or an error from the request. Does some marshalling of xml and raw streams,
+ * then just calls AttemptExhaustively.
+ *
+ * method defaults to POST
+ */
+ XmlOutcome MakeRequest(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ XmlOutcome MakeRequest(const Aws::Endpoint::AWSEndpoint& endpoint,
+ const char* requestName = "",
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ /**
+             * Returns an xml document or an error from the request. Does some marshalling of xml and raw streams,
+ * then just calls AttemptExhaustively.
+ *
+ * method defaults to POST
+ */
+ XmlOutcome MakeRequest(const Aws::Http::URI& uri,
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+
+ /**
+             * Returns an xml document or an error from the request. Does some marshalling of xml and raw streams,
+ * then just calls AttemptExhaustively.
+ *
+ * requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
+ * name.
+ *
+ * method defaults to POST
+ */
+ XmlOutcome MakeRequest(const Aws::Http::URI& uri,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* requestName = "",
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ /**
+ * This is used for event stream response.
+ */
+ XmlOutcome MakeRequestWithEventStream(const Aws::Http::URI& uri,
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+                                                  const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ XmlOutcome MakeRequestWithEventStream(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+
+ /**
+ * This is used for event stream response.
+ * requestName is used for metrics and defaults to empty string, to avoid empty names in metrics provide a valid
+ * name.
+ */
+ XmlOutcome MakeRequestWithEventStream(const Aws::Http::URI& uri,
+ Http::HttpMethod method = Http::HttpMethod::HTTP_POST,
+ const char* signerName = Aws::Auth::SIGV4_SIGNER,
+ const char* requestName = "",
+ const char* signerRegionOverride = nullptr,
+ const char* signerServiceNameOverride = nullptr) const;
+ };
+
+ } // namespace Client
+} // namespace Aws
+
+#endif // !defined(AWS_XML_CLIENT_H)
\ No newline at end of file
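A minimal usage sketch, not part of the upstream patch, assuming only the declarations above: a concrete service client derives from AWSXMLClient and calls the protected MakeRequest overloads from its own operations. The class name below is an illustrative assumption.

#include <aws/core/client/AWSXmlClient.h>
#include <aws/core/client/AWSErrorMarshaller.h>
#include <aws/core/client/ClientConfiguration.h>
#include <memory>

// Hypothetical XML-based service client; AWSXMLClient already overrides
// BuildAWSError to parse XML error payloads, so the subclass only forwards
// its dependencies to the base class.
class ExampleXmlServiceClient : public Aws::Client::AWSXMLClient
{
public:
    ExampleXmlServiceClient(const Aws::Client::ClientConfiguration& config,
                            const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
                            const std::shared_ptr<Aws::Client::AWSErrorMarshaller>& errorMarshaller)
        : AWSXMLClient(config, signer, errorMarshaller)
    {}
};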
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AdaptiveRetryStrategy.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AdaptiveRetryStrategy.h
new file mode 100644
index 0000000000..b0a51126cd
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AdaptiveRetryStrategy.h
@@ -0,0 +1,154 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/client/RetryStrategy.h>
+#include <aws/core/utils/DateTime.h>
+
+namespace Aws
+{
+namespace Client
+{
+
+/**
+ * A helper class of the AdaptiveRetryStrategy
+ * representing a (send) token bucket with a dynamically changing fill rate and capacity.
+ */
+class AWS_CORE_API RetryTokenBucket
+{
+public:
+ /**
+ * C-tor
+ */
+ RetryTokenBucket() = default;
+
+ /**
+ * Acquire tokens from the bucket. If the bucket contains enough capacity
+ * to satisfy the request, this method will return immediately, otherwise
+ * the method will block the calling thread until enough tokens are refilled
+ * unless fast fail is provided as an argument.
+ */
+ bool Acquire(size_t amount = 1, bool fastFail = false);
+
+ /**
+ * Update limiter's client sending rate during the request bookkeeping process
+ * based on a service response.
+ */
+ void UpdateClientSendingRate(bool throttlingResponse, const Aws::Utils::DateTime& now = Aws::Utils::DateTime::Now());
+
+protected:
+ /**
+ * Internal C-tor for unit testing
+ */
+ RetryTokenBucket(double fillRate, double maxCapacity, double currentCapacity,
+ const Aws::Utils::DateTime& lastTimestamp, double measuredTxRate, double lastTxRateBucket,
+ size_t requestCount, bool enabled, double lastMaxRate, const Aws::Utils::DateTime& lastThrottleTime);
+
+ /**
+ * Internal method to update the token bucket's fill rate when we receive a response from the service.
+ * The update amount will depend on whether or not a throttling response is received from a service.
+ * The request rate is measured using an exponentially smoothed average,
+ * with the rate being updated in half second buckets.
+ */
+ void UpdateMeasuredRate(const Aws::Utils::DateTime& now = Aws::Utils::DateTime::Now());
+
+ /**
+ * Internal method to enable rate limiting.
+ */
+ void Enable();
+
+ /**
+ * Internal method to refill and update refill rate with a new refill rate.
+ */
+ void UpdateRate(double newRps, const Aws::Utils::DateTime& now = Aws::Utils::DateTime::Now());
+
+ /**
+ * Internal method to refill send tokens based on a current fill rate.
+ */
+ void Refill(const Aws::Utils::DateTime& now = Aws::Utils::DateTime::Now());
+
+ /**
+ * Internal method to compute time window for a last max fill rate.
+ */
+ double CalculateTimeWindow() const;
+
+ /**
+ * Internal method with a modified CUBIC algorithm to compute new max sending rate for a successful response.
+ */
+ double CUBICSuccess(const Aws::Utils::DateTime& timestamp, const double timeWindow) const;
+
+ /**
+ * Internal method with a modified CUBIC algorithm to compute new max sending rate for a throttled response.
+ */
+ double CUBICThrottle(const double rateToUse) const;
+
+    // The rate at which tokens are replenished.
+ double m_fillRate = 0.0;
+ // The maximum capacity allowed in the token bucket.
+ double m_maxCapacity = 0.0;
+ // The current capacity of the token bucket.
+ double m_currentCapacity = 0.0;
+ // The last time the token bucket was refilled.
+ Aws::Utils::DateTime m_lastTimestamp;
+    // The smoothed rate at which tokens are being retrieved.
+ double m_measuredTxRate = 0.0;
+ // The last half second time bucket used.
+ double m_lastTxRateBucket = 0.0;
+ // The number of requests seen within the current time bucket.
+ size_t m_requestCount = 0;
+ // Boolean indicating if the token bucket is enabled.
+ bool m_enabled = false;
+ // The maximum rate when the client was last throttled.
+ double m_lastMaxRate = 0.0;
+ // The last time when the client was throttled.
+ Aws::Utils::DateTime m_lastThrottleTime;
+
+ // TokenBucket's mutex to synchronize read/write operations
+ std::recursive_mutex m_mutex;
+};
+
+/**
+ * A retry strategy that builds on the standard strategy and introduces congestion control through
+ * client side rate limiting.
+ */
+class AWS_CORE_API AdaptiveRetryStrategy : public StandardRetryStrategy
+{
+public:
+ /**
+ * C-tors
+ */
+ AdaptiveRetryStrategy(long maxAttempts = 3);
+ AdaptiveRetryStrategy(std::shared_ptr<RetryQuotaContainer> retryQuotaContainer, long maxAttempts = 3);
+
+ /**
+ * Retrieve and consume a send token.
+ * Returns true if send token is available.
+ *
+ * If there is not sufficient capacity, HasSendToken() will either sleep a certain amount of time until the rate
+ * limiter can retrieve a token from its token bucket or return false indicating there is insufficient capacity.
+ */
+ virtual bool HasSendToken() override;
+
+ /**
+ * Update status, like the information of retry quota when receiving a response.
+ */
+ virtual void RequestBookkeeping(const HttpResponseOutcome& httpResponseOutcome) override;
+ virtual void RequestBookkeeping(const HttpResponseOutcome& httpResponseOutcome, const AWSError<CoreErrors>& lastError) override;
+
+protected:
+ RetryTokenBucket m_retryTokenBucket;
+ bool m_fastFail = false;
+
+private:
+ /**
+ * An internal helper function to check if a given service response is classified as a throttled one.
+ */
+ static bool IsThrottlingResponse(const HttpResponseOutcome& httpResponseOutcome);
+};
+
+} // namespace Client
+} // namespace Aws
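A hedged sketch, not part of the upstream patch: wiring the strategy declared above into a client configuration, assuming the standard Aws::MakeShared allocator helper; the allocation tag and attempt count are arbitrary.

#include <aws/core/client/AdaptiveRetryStrategy.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>

// AdaptiveRetryStrategy layers client-side rate limiting (RetryTokenBucket)
// on top of StandardRetryStrategy; clients pick it up through retryStrategy.
Aws::Client::ClientConfiguration MakeAdaptiveRetryConfig()
{
    Aws::Client::ClientConfiguration config;
    config.retryStrategy =
        Aws::MakeShared<Aws::Client::AdaptiveRetryStrategy>("AdaptiveRetryExample", /*maxAttempts*/ 5L);
    return config;
}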
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AsyncCallerContext.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AsyncCallerContext.h
index 6831791ffc..03ebca5151 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AsyncCallerContext.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/AsyncCallerContext.h
@@ -33,11 +33,11 @@ namespace Aws
* Initializes object with UUID
*/
AsyncCallerContext(const char* uuid) : m_uuid(uuid) {}
-
+
virtual ~AsyncCallerContext() {}
/**
- * Gets underlying UUID
+ * Gets underlying UUID
*/
inline const Aws::String& GetUUID() const { return m_uuid; }
@@ -56,4 +56,3 @@ namespace Aws
};
}
}
-
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/ClientConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/ClientConfiguration.h
index 69c2166a6e..b8e32b5fe4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/ClientConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/ClientConfiguration.h
@@ -11,6 +11,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/http/HttpTypes.h>
#include <aws/core/utils/Array.h>
+#include <aws/crt/Optional.h>
#include <memory>
namespace Aws
@@ -45,6 +46,20 @@ namespace Aws
};
/**
+ * This setting is an enumeration, not a boolean, to allow for future expansion.
+ */
+ enum class UseRequestCompression
+ {
+ DISABLE,
+ ENABLE,
+ };
+
+ struct RequestCompressionConfig {
+ UseRequestCompression useRequestCompression=UseRequestCompression::ENABLE;
+ size_t requestMinCompressionSizeBytes = 10240;
+ };
+
+ /**
* This mutable structure is used to configure any of the AWS clients.
* Default values can only be overwritten prior to passing to the client constructors.
*/
@@ -55,8 +70,18 @@ namespace Aws
/**
* Create a configuration based on settings in the aws configuration file for the given profile name.
* The configuration file location can be set via the environment variable AWS_CONFIG_FILE
+ * @param profileName the aws profile name.
+ * @param shouldDisableIMDS whether or not to disable IMDS calls.
*/
- ClientConfiguration(const char* profileName);
+ ClientConfiguration(const char* profileName, bool shouldDisableIMDS = false);
+
+ /**
+         * Create a configuration with predefined smart defaults
+ * @param useSmartDefaults, required to differentiate c-tors
+ * @param defaultMode, default mode to use
+ * @param shouldDisableIMDS whether or not to disable IMDS calls.
+ */
+ explicit ClientConfiguration(bool useSmartDefaults, const char* defaultMode = "legacy", bool shouldDisableIMDS = false);
/**
* User Agent string user for http calls. This is filled in for you in the constructor. Don't override this unless you have a really good reason.
@@ -73,47 +98,53 @@ namespace Aws
/**
* Use dual stack endpoint in the endpoint calculation. It is your responsibility to verify that the service supports ipv6 in the region you select.
*/
- bool useDualStack;
+ bool useDualStack = false;
+
+ /**
+ * Use FIPS endpoint in the endpoint calculation. Please check first that the service supports FIPS in a selected region.
+ */
+ bool useFIPS = false;
+
/**
* Max concurrent tcp connections for a single http client to use. Default 25.
*/
- unsigned maxConnections;
+ unsigned maxConnections = 25;
/**
* This is currently only applicable for Curl to set the http request level timeout, including possible dns lookup time, connection establish time, ssl handshake time and actual data transmission time.
* the corresponding Curl option is CURLOPT_TIMEOUT_MS
* defaults to 0, no http request level timeout.
*/
- long httpRequestTimeoutMs;
+ long httpRequestTimeoutMs = 0;
/**
- * Socket read timeouts for HTTP clients on Windows. Default 3000 ms. This should be more than adequate for most services. However, if you are transfering large amounts of data
+ * Socket read timeouts for HTTP clients on Windows. Default 3000 ms. This should be more than adequate for most services. However, if you are transferring large amounts of data
* or are worried about higher latencies, you should set to something that makes more sense for your use case.
* For Curl, it's the low speed time, which contains the time in number milliseconds that transfer speed should be below "lowSpeedLimit" for the library to consider it too slow and abort.
* Note that for Curl this config is converted to seconds by rounding down to the nearest whole second except when the value is greater than 0 and less than 1000. In this case it is set to one second. When it's 0, low speed limit check will be disabled.
* Note that for Windows when this config is 0, the behavior is not specified by Windows.
*/
- long requestTimeoutMs;
+ long requestTimeoutMs = 0;
/**
- * Socket connect timeout. Default 1000 ms. Unless you are very far away from your the data center you are talking to. 1000ms is more than sufficient.
+     * Socket connect timeout. Default 1000 ms. Unless you are very far away from the data center you are talking to, 1000ms is more than sufficient.
*/
- long connectTimeoutMs;
+ long connectTimeoutMs = 1000;
/**
* Enable TCP keep-alive. Default true;
* No-op for WinHTTP, WinINet and IXMLHTTPRequest2 client.
*/
- bool enableTcpKeepAlive;
+ bool enableTcpKeepAlive = true;
/**
* Interval to send a keep-alive packet over the connection. Default 30 seconds. Minimum 15 seconds.
* WinHTTP & libcurl support this option. Note that for Curl, this value will be rounded to an integer with second granularity.
* No-op for WinINet and IXMLHTTPRequest2 client.
*/
- unsigned long tcpKeepAliveIntervalMs;
+ unsigned long tcpKeepAliveIntervalMs = 30000;
/**
* Average transfer speed in bytes per second that the transfer should be below during the request timeout interval for it to be considered too slow and abort.
* Default 1 byte/second. Only for CURL client currently.
*/
- unsigned long lowSpeedLimit;
+ unsigned long lowSpeedLimit = 1;
/**
- * Strategy to use in case of failed requests. Default is DefaultRetryStrategy (e.g. exponential backoff)
+ * Strategy to use in case of failed requests. Default is DefaultRetryStrategy (i.e. exponential backoff)
*/
std::shared_ptr<RetryStrategy> retryStrategy;
/**
@@ -131,7 +162,7 @@ namespace Aws
/**
* If you have users going through a proxy, set the port here.
*/
- unsigned proxyPort;
+ unsigned proxyPort = 0;
/**
* If you have users going through a proxy, set the username here.
*/
@@ -175,9 +206,9 @@ namespace Aws
std::shared_ptr<Aws::Utils::Threading::Executor> executor;
/**
* If you need to test and want to get around TLS validation errors, do that here.
- * you probably shouldn't use this flag in a production scenario.
+ * You probably shouldn't use this flag in a production scenario.
*/
- bool verifySSL;
+ bool verifySSL = true;
/**
* If your Certificate Authority path is different from the default, you can tell
* clients that aren't using the default trust store where to find your CA trust store.
@@ -226,35 +257,73 @@ namespace Aws
* But be careful when Http request has large payload such S3 PutObject. You don't want to spend long time sending a large payload just getting a error response for server.
* The default value will be false.
*/
- bool disableExpectHeader;
+ bool disableExpectHeader = false;
/**
* If set to true clock skew will be adjusted after each http attempt, default to true.
*/
- bool enableClockSkewAdjustment;
+ bool enableClockSkewAdjustment = true;
/**
* Enable host prefix injection.
* For services whose endpoint is injectable. e.g. servicediscovery, you can modify the http host's prefix so as to add "data-" prefix for DiscoverInstances request.
* Default to true, enabled. You can disable it for testing purpose.
+ *
+ * Deprecated in API v. 1.10. Please set in service-specific client configuration.
*/
- bool enableHostPrefixInjection;
+ bool enableHostPrefixInjection = true;
/**
* Enable endpoint discovery
* For some services to dynamically set up their endpoints for different requests.
- * Defaults to false, it's an opt-in feature.
- * If disabled, regional or overriden endpoint will be used instead.
+ * By default, service clients will decide if endpoint discovery is enabled or not.
+ * If disabled, regional or overridden endpoint will be used instead.
* If a request requires endpoint discovery but you disabled it. The request will never succeed.
+     * A boolean value is either true or false; an Optional is used here so that an instance can hold no value,
+     * in which case the SDK will decide the default behavior as stated above.
+ *
+ * Deprecated in API v. 1.10. Please set in service-specific client configuration.
*/
- bool enableEndpointDiscovery;
+ Aws::Crt::Optional<bool> enableEndpointDiscovery;
/**
- * profileName in config file that will be used by this object to reslove more configurations.
+ * profileName in config file that will be used by this object to resolve more configurations.
*/
Aws::String profileName;
+ /**
+ * Request compression configuration
+ * To use this feature, the service needs to provide the support, and the compression
+ * algorithms needs to be available at SDK build time.
+ */
+ Aws::Client::RequestCompressionConfig requestCompressionConfig;
+
+ /**
+ * Disable all internal IMDS Calls
+ */
+ bool disableIMDS = false;
+
+ /**
+ * A helper function to read config value from env variable or aws profile config
+ */
+ static Aws::String LoadConfigFromEnvOrProfile(const Aws::String& envKey,
+ const Aws::String& profile,
+ const Aws::String& profileProperty,
+ const Aws::Vector<Aws::String>& allowedValues,
+ const Aws::String& defaultValue);
};
+ /**
+ * A helper function to initialize a retry strategy.
+ * Default is DefaultRetryStrategy (i.e. exponential backoff)
+ */
+ std::shared_ptr<RetryStrategy> InitRetryStrategy(Aws::String retryMode = "");
+
+ /**
+ * A helper function to compute a user agent
+ * @return Aws::String with a user-agent
+ */
+ AWS_CORE_API Aws::String ComputeUserAgentString();
+
} // namespace Client
} // namespace Aws
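A hedged sketch, not part of the upstream patch, exercising the configuration surface added above; the chosen mode and values are illustrative assumptions only.

#include <aws/core/client/ClientConfiguration.h>

Aws::Client::ClientConfiguration MakeUpdatedConfig()
{
    // New c-tor: pick the "standard" smart-defaults mode and skip internal IMDS calls.
    Aws::Client::ClientConfiguration config(/*useSmartDefaults*/ true, /*defaultMode*/ "standard",
                                            /*shouldDisableIMDS*/ true);

    config.useFIPS = false;  // new flag, off by default
    config.requestCompressionConfig.useRequestCompression = Aws::Client::UseRequestCompression::ENABLE;
    config.requestCompressionConfig.requestMinCompressionSizeBytes = 10240;

    // enableEndpointDiscovery is now an Aws::Crt::Optional<bool>: leaving it unset lets the
    // service client decide, while assigning a value forces the behavior.
    config.enableEndpointDiscovery = true;
    return config;
}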
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/CoreErrors.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/CoreErrors.h
index 52ebe51c82..08803b5016 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/CoreErrors.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/CoreErrors.h
@@ -4,7 +4,8 @@
*/
#pragma once
-#include "aws/core/Core_EXPORTS.h"
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
namespace Aws
{
@@ -51,9 +52,16 @@ namespace Aws
UNKNOWN = 100, // Unknown to the SDK
CLIENT_SIGNING_FAILURE = 101, // Client failed to sign the request
USER_CANCELLED = 102, // User cancelled the request
- SERVICE_EXTENSION_START_RANGE = 128
+ ENDPOINT_RESOLUTION_FAILURE = 103,
+ SERVICE_EXTENSION_START_RANGE = 128,
+ OK = -1 // No error set
};
+ /**
+ * Overload ostream operator<< for CoreErrors enum class for a prettier output such as "128"
+ */
+ AWS_CORE_API Aws::OStream& operator<< (Aws::OStream& oStream, CoreErrors code);
+
namespace CoreErrorsMapper
{
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/GenericClientConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/GenericClientConfiguration.h
new file mode 100644
index 0000000000..81dda32db9
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/GenericClientConfiguration.h
@@ -0,0 +1,86 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/client/ClientConfiguration.h>
+
+namespace Aws
+{
+ namespace Client
+ {
+ /**
+ * This mutable structure is used to configure a regular AWS client.
+ */
+ template<bool HasEndpointDiscovery = false>
+ struct AWS_CORE_API GenericClientConfiguration : public ClientConfiguration
+ {
+ static const bool EndpointDiscoverySupported = HasEndpointDiscovery;
+
+ GenericClientConfiguration()
+ : ClientConfiguration()
+ {}
+
+ /**
+ * Create a configuration based on settings in the aws configuration file for the given profile name.
+ * The configuration file location can be set via the environment variable AWS_CONFIG_FILE
+ * @param profileName the aws profile name.
+ * @param shouldDisableIMDS whether or not to disable IMDS calls.
+ */
+ GenericClientConfiguration(const char* inputProfileName, bool shouldDisableIMDS = false)
+ : ClientConfiguration(inputProfileName, shouldDisableIMDS)
+ {}
+
+ /**
+             * Create a configuration with predefined smart defaults
+ * @param useSmartDefaults, required to differentiate c-tors
+ * @param defaultMode, default mode to use
+ * @param shouldDisableIMDS whether or not to disable IMDS calls.
+ */
+ explicit GenericClientConfiguration(bool useSmartDefaults, const char* defaultMode = "legacy", bool shouldDisableIMDS = false)
+ : ClientConfiguration(useSmartDefaults, defaultMode, shouldDisableIMDS)
+ {}
+
+ GenericClientConfiguration(const ClientConfiguration& config)
+ : ClientConfiguration(config)
+ {}
+ };
+
+ /**
+ * This mutable structure is used to configure a regular AWS client that supports endpoint discovery.
+ */
+ template <> struct AWS_CORE_API GenericClientConfiguration<true> : public ClientConfiguration
+ {
+ static const bool EndpointDiscoverySupported = true;
+
+ GenericClientConfiguration();
+ GenericClientConfiguration(const char* profileName, bool shouldDisableIMDS = false);
+ explicit GenericClientConfiguration(bool useSmartDefaults, const char* defaultMode = "legacy", bool shouldDisableIMDS = false);
+ GenericClientConfiguration(const ClientConfiguration& config);
+ GenericClientConfiguration(const GenericClientConfiguration&);
+ GenericClientConfiguration& operator=(const GenericClientConfiguration&);
+
+
+ /**
+ * Enable host prefix injection.
+ * For services whose endpoint is injectable. e.g. servicediscovery, you can modify the http host's prefix so as to add "data-" prefix for DiscoverInstances request.
+ * Default to true, enabled. You can disable it for testing purpose.
+ */
+ bool& enableHostPrefixInjection;
+
+ /**
+ * Enable endpoint discovery
+ * For some services to dynamically set up their endpoints for different requests.
+ * By default, service clients will decide if endpoint discovery is enabled or not.
+ * If disabled, regional or overridden endpoint will be used instead.
+ * If a request requires endpoint discovery but you disabled it. The request will never succeed.
+             * A boolean value is either true or false; an Optional is used here so that an instance can hold no value,
+             * in which case the SDK will decide the default behavior as stated above.
+ */
+ Aws::Crt::Optional<bool>& enableEndpointDiscovery;
+ };
+ } // namespace Client
+} // namespace Aws
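A hedged sketch, not part of the upstream patch: the <true> specialization above exposes the discovery-related switches as references into the base configuration; the profile name is an assumption.

#include <aws/core/client/GenericClientConfiguration.h>

void ConfigureDiscoveryCapableClient()
{
    Aws::Client::GenericClientConfiguration<true> config("example-profile", /*shouldDisableIMDS*/ false);
    static_assert(Aws::Client::GenericClientConfiguration<true>::EndpointDiscoverySupported,
                  "the <true> specialization advertises endpoint discovery support");
    config.enableEndpointDiscovery = true;      // Aws::Crt::Optional<bool>&
    config.enableHostPrefixInjection = false;   // bool&
}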
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RequestCompression.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RequestCompression.h
new file mode 100644
index 0000000000..31c0bc98cc
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RequestCompression.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+
+using iostream_outcome =
+ Aws::Utils::Outcome<std::shared_ptr<Aws::IOStream>, bool>;
+
+namespace Aws {
+namespace Client {
+enum class CompressionAlgorithm { NONE, GZIP };
+
+/**
+ * Converts a compression Algorithms enum to String to be used as content-type
+ * header value when compressing a request.
+ * @param algorithm
+ * @return string with HTTP content type algorithm id
+ */
+Aws::String AWS_CORE_API
+GetCompressionAlgorithmId(const CompressionAlgorithm &algorithm);
+
+/**
+ * Request compression API
+ */
+class AWS_CORE_API RequestCompression final {
+public:
+ /**
+   * Select the best matching algorithm based on the proposed ones, config, length
+ * of content and the available algorithms.
+ * @param proposedAlgorithms
+ * @param config
+ * @param payloadLength
+ * @return selected compression algorithm
+ */
+ CompressionAlgorithm
+ selectAlgorithm(const Aws::Vector<CompressionAlgorithm> &proposedAlgorithms,
+ const Aws::Client::RequestCompressionConfig &config,
+ const size_t payloadLength);
+ /**
+   * Compress an IOStream input using the requested algorithm.
+ * @param input
+ * @param algorithm
+ * @return IOStream compressed
+ */
+ iostream_outcome compress(std::shared_ptr<Aws::IOStream> input,
+ const CompressionAlgorithm &algorithm) const;
+ /**
+   * Uncompress an IOStream input using the requested algorithm.
+ * @param input
+ * @param algorithm
+ * @return
+ */
+ iostream_outcome uncompress(std::shared_ptr<Aws::IOStream> input,
+ const CompressionAlgorithm &algorithm) const;
+};
+} // namespace Client
+} // namespace Aws
\ No newline at end of file
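A hedged sketch, not part of the upstream patch: selecting and applying a compression algorithm with the API declared above; the payload and its length are illustrative assumptions.

#include <aws/core/client/RequestCompression.h>
#include <memory>
#include <sstream>

void CompressExamplePayload(const Aws::Client::RequestCompressionConfig& config)
{
    Aws::Client::RequestCompression compressor;
    auto payload = std::make_shared<std::stringstream>("...payload at least requestMinCompressionSizeBytes long...");

    const auto algorithm = compressor.selectAlgorithm(
        {Aws::Client::CompressionAlgorithm::GZIP}, config, /*payloadLength*/ 16384);

    if (algorithm != Aws::Client::CompressionAlgorithm::NONE)
    {
        auto outcome = compressor.compress(payload, algorithm);  // iostream_outcome
        if (outcome.IsSuccess())
        {
            std::shared_ptr<Aws::IOStream> compressed = outcome.GetResult();
            (void)compressed;  // would become the HTTP request body
        }
    }
}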
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RetryStrategy.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RetryStrategy.h
index 930eaa581d..a0b5ea8131 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RetryStrategy.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/client/RetryStrategy.h
@@ -24,6 +24,8 @@ namespace Aws
namespace Client
{
+ static const int NO_RETRY_INCREMENT = 1;
+
enum class CoreErrors;
template<typename ERROR_TYPE>
class AWSError;
@@ -54,11 +56,20 @@ namespace Aws
virtual long GetMaxAttempts() const { return 0; }
/**
- * Retrives send tokens from the bucket.
+ * Retrieves send tokens from the bucket. Throws an exception if not available.
*/
virtual void GetSendToken() {}
/**
+     * Retrieves send tokens from the bucket. Returns true if a send token is retrieved.
+ */
+ virtual bool HasSendToken()
+ {
+ GetSendToken(); // first call old method for backward compatibility
+ return true;
+ }
+
+ /**
* Update status, like the information of retry quota when receiving a response.
*/
virtual void RequestBookkeeping(const HttpResponseOutcome& /* httpResponseOutcome */) {}
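A hedged sketch, not part of the upstream patch: the new HasSendToken() entry point lets callers probe for capacity without relying on the throwing GetSendToken(); older strategies inherit the default shown above.

#include <aws/core/client/RetryStrategy.h>
#include <memory>

bool CanSendRequestNow(const std::shared_ptr<Aws::Client::RetryStrategy>& strategy)
{
    // AdaptiveRetryStrategy consults its RetryTokenBucket here; legacy strategies
    // fall back to GetSendToken() through the default implementation and return true.
    return strategy && strategy->HasSendToken();
}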
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSConfigFileProfileConfigLoader.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSConfigFileProfileConfigLoader.h
new file mode 100644
index 0000000000..cc778241b6
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSConfigFileProfileConfigLoader.h
@@ -0,0 +1,51 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/config/AWSProfileConfigLoaderBase.h>
+
+#include <aws/core/utils/memory/stl/AWSString.h>
+
+namespace Aws
+{
+ namespace Config
+ {
+ /**
+         * Reads configuration from a config file (e.g. $HOME/.aws/config or $HOME/.aws/credentials)
+ */
+ class AWS_CORE_API AWSConfigFileProfileConfigLoader : public AWSProfileConfigLoader
+ {
+ public:
+ /**
+ * fileName - file to load config from
+ * useProfilePrefix - whether or not the profiles are prefixed with "profile", credentials file is not
+ * while the config file is. Defaults to off.
+ */
+ AWSConfigFileProfileConfigLoader(const Aws::String& fileName, bool useProfilePrefix = false);
+
+ virtual ~AWSConfigFileProfileConfigLoader() = default;
+
+ /**
+ * File path being used for the config loader.
+ */
+ const Aws::String& GetFileName() const { return m_fileName; }
+
+ /**
+ * Give loader the ability to change the file path to load config from.
+ * This can avoid creating new loader object if the file changed.
+ */
+ void SetFileName(const Aws::String& fileName) { m_fileName = fileName; }
+
+ protected:
+ virtual bool LoadInternal() override;
+ virtual bool PersistInternal(const Aws::Map<Aws::String, Aws::Config::Profile>&) override;
+
+ private:
+ Aws::String m_fileName;
+ bool m_useProfilePrefix;
+ };
+ }
+}
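A hedged sketch, not part of the upstream patch: loading profiles through the loader that now lives in its own header; the file path is an illustrative assumption.

#include <aws/core/config/AWSConfigFileProfileConfigLoader.h>

void EnumerateConfiguredProfiles()
{
    Aws::Config::AWSConfigFileProfileConfigLoader loader("/home/user/.aws/config", /*useProfilePrefix*/ true);
    if (loader.Load())
    {
        for (const auto& entry : loader.GetProfiles())
        {
            const Aws::Config::Profile& profile = entry.second;
            (void)profile.GetName();
            (void)profile.GetRegion();
        }
    }
}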
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfig.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfig.h
new file mode 100644
index 0000000000..4f08231d9c
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfig.h
@@ -0,0 +1,120 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/core/auth/AWSCredentials.h>
+
+namespace Aws
+{
+ namespace Config
+ {
+ /**
+ * Simple data container for a Profile.
+ */
+ class Profile
+ {
+ public:
+ /*
+ * Data container for a sso-session config entry.
+ * This is independent of the general profile configuration and used by a bearer auth token provider.
+ */
+ class SsoSession
+ {
+ public:
+ inline const Aws::String& GetName() const { return m_name; }
+ inline void SetName(const Aws::String& value) { m_name = value; }
+ inline const Aws::String& GetSsoRegion() const { return m_ssoRegion; }
+ inline void SetSsoRegion(const Aws::String& value) { m_ssoRegion = value; }
+ inline const Aws::String& GetSsoStartUrl() const { return m_ssoStartUrl; }
+ inline void SetSsoStartUrl(const Aws::String& value) { m_ssoStartUrl = value; }
+
+ inline void SetAllKeyValPairs(const Aws::Map<Aws::String, Aws::String>& map) { m_allKeyValPairs = map; }
+ inline const Aws::String GetValue(const Aws::String& key) const
+ {
+ auto iter = m_allKeyValPairs.find(key);
+ if (iter == m_allKeyValPairs.end()) return {};
+ return iter->second;
+ }
+
+ bool operator==(SsoSession const& other) const
+ {
+ return this->m_name == other.m_name &&
+ this->m_ssoRegion == other.m_ssoRegion &&
+ this->m_ssoStartUrl == other.m_ssoStartUrl &&
+ this->m_allKeyValPairs == other.m_allKeyValPairs;
+ }
+ bool operator!=(SsoSession const& other) const
+ {
+ return !operator==(other);
+ }
+ private:
+ // This is independent of the general configuration
+ Aws::String m_name;
+ Aws::String m_ssoRegion;
+ Aws::String m_ssoStartUrl;
+ Aws::Map<Aws::String, Aws::String> m_allKeyValPairs;
+ };
+
+ inline const Aws::String& GetName() const { return m_name; }
+ inline void SetName(const Aws::String& value) { m_name = value; }
+ inline const Aws::Auth::AWSCredentials& GetCredentials() const { return m_credentials; }
+ inline void SetCredentials(const Aws::Auth::AWSCredentials& value) { m_credentials = value; }
+ inline const Aws::String& GetRegion() const { return m_region; }
+ inline void SetRegion(const Aws::String& value) { m_region = value; }
+ inline const Aws::String& GetRoleArn() const { return m_roleArn; }
+ inline void SetRoleArn(const Aws::String& value) { m_roleArn = value; }
+ inline const Aws::String& GetExternalId() const { return m_externalId; }
+ inline void SetExternalId(const Aws::String& value) { m_externalId = value; }
+ inline const Aws::String& GetSsoStartUrl() const { return m_ssoStartUrl; }
+ inline void SetSsoStartUrl(const Aws::String& value) { m_ssoStartUrl = value; }
+ inline const Aws::String& GetSsoRegion() const { return m_ssoRegion; }
+ inline void SetSsoRegion(const Aws::String& value) { m_ssoRegion = value; }
+ inline const Aws::String& GetSsoAccountId() const { return m_ssoAccountId; }
+ inline void SetSsoAccountId(const Aws::String& value) { m_ssoAccountId = value; }
+ inline const Aws::String& GetSsoRoleName() const { return m_ssoRoleName; }
+ inline void SetSsoRoleName(const Aws::String& value) { m_ssoRoleName = value; }
+ inline const Aws::String& GetDefaultsMode() const { return m_defaultsMode; }
+ inline void SetDefaultsMode(const Aws::String& value) { m_defaultsMode = value; }
+ inline const Aws::String& GetSourceProfile() const { return m_sourceProfile; }
+ inline void SetSourceProfile(const Aws::String& value ) { m_sourceProfile = value; }
+ inline const Aws::String& GetCredentialProcess() const { return m_credentialProcess; }
+ inline void SetCredentialProcess(const Aws::String& value ) { m_credentialProcess = value; }
+ inline void SetAllKeyValPairs(const Aws::Map<Aws::String, Aws::String>& map) { m_allKeyValPairs = map; }
+ inline void SetAllKeyValPairs(Aws::Map<Aws::String, Aws::String>&& map) { m_allKeyValPairs = std::move(map); }
+ inline const Aws::String GetValue(const Aws::String& key) const
+ {
+ auto iter = m_allKeyValPairs.find(key);
+ if (iter == m_allKeyValPairs.end()) return {};
+ return iter->second;
+ }
+
+ inline bool IsSsoSessionSet() const { return m_ssoSessionSet; }
+ inline const SsoSession& GetSsoSession() const { return m_ssoSession; }
+ inline void SetSsoSession(const SsoSession& value) { m_ssoSessionSet = true; m_ssoSession = value; }
+ inline void SetSsoSession(SsoSession&& value) { m_ssoSessionSet = true; m_ssoSession = std::move(value); }
+
+ private:
+ Aws::String m_name;
+ Aws::String m_region;
+ Aws::Auth::AWSCredentials m_credentials;
+ Aws::String m_roleArn;
+ Aws::String m_externalId;
+ Aws::String m_sourceProfile;
+ Aws::String m_credentialProcess;
+ Aws::String m_ssoStartUrl;
+ Aws::String m_ssoRegion;
+ Aws::String m_ssoAccountId;
+ Aws::String m_ssoRoleName;
+ Aws::String m_defaultsMode;
+ Aws::Map<Aws::String, Aws::String> m_allKeyValPairs;
+
+ bool m_ssoSessionSet = false;
+ SsoSession m_ssoSession;
+ };
+ }
+}
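A hedged sketch, not part of the upstream patch: a token provider can prefer the new sso-session block when a profile carries one, falling back to the legacy per-profile SSO keys.

#include <aws/core/config/AWSProfileConfig.h>

Aws::String ResolveSsoStartUrl(const Aws::Config::Profile& profile)
{
    if (profile.IsSsoSessionSet())
    {
        return profile.GetSsoSession().GetSsoStartUrl();
    }
    return profile.GetSsoStartUrl();
}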
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoader.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoader.h
index ee467c5640..b9b4bd9e30 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoader.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoader.h
@@ -1,261 +1,14 @@
 /**
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
#pragma once
-#include <aws/core/utils/memory/stl/AWSString.h>
-#include <aws/core/utils/memory/stl/AWSMap.h>
-#include <aws/core/auth/AWSCredentials.h>
-#include <aws/core/utils/DateTime.h>
-#include <aws/core/utils/threading/ReaderWriterLock.h>
+#include <aws/core/config/AWSProfileConfig.h>
+#include <aws/core/config/AWSProfileConfigLoaderBase.h>
+#include <aws/core/config/AWSConfigFileProfileConfigLoader.h>
+#include <aws/core/config/EC2InstanceProfileConfigLoader.h>
+#include <aws/core/config/ConfigAndCredentialsCacheManager.h>
-namespace Aws
-{
- namespace Internal
- {
- class EC2MetadataClient;
- }
-
- namespace Config
- {
- /**
- * Simple data container for a Profile.
- */
- class Profile
- {
- public:
- inline const Aws::String& GetName() const { return m_name; }
- inline void SetName(const Aws::String& value) { m_name = value; }
- inline const Aws::Auth::AWSCredentials& GetCredentials() const { return m_credentials; }
- inline void SetCredentials(const Aws::Auth::AWSCredentials& value) { m_credentials = value; }
- inline const Aws::String& GetRegion() const { return m_region; }
- inline void SetRegion(const Aws::String& value) { m_region = value; }
- inline const Aws::String& GetRoleArn() const { return m_roleArn; }
- inline void SetRoleArn(const Aws::String& value) { m_roleArn = value; }
- inline const Aws::String& GetExternalId() const { return m_externalId; }
- inline void SetExternalId(const Aws::String& value) { m_externalId = value; }
- inline const Aws::String& GetSsoStartUrl() const { return m_ssoStartUrl; }
- inline void SetSsoStartUrl(const Aws::String& value) { m_ssoStartUrl = value; }
- inline const Aws::String& GetSsoRegion() const { return m_ssoRegion; }
- inline void SetSsoRegion(const Aws::String& value) { m_ssoRegion = value; }
- inline const Aws::String& GetSsoAccountId() const { return m_ssoAccountId; }
- inline void SetSsoAccountId(const Aws::String& value) { m_ssoAccountId = value; }
- inline const Aws::String& GetSsoRoleName() const { return m_ssoRoleName; }
- inline void SetSsoRoleName(const Aws::String& value) { m_ssoRoleName = value; }
- inline const Aws::String& GetSourceProfile() const { return m_sourceProfile; }
- inline void SetSourceProfile(const Aws::String& value ) { m_sourceProfile = value; }
- inline const Aws::String& GetCredentialProcess() const { return m_credentialProcess; }
- inline void SetCredentialProcess(const Aws::String& value ) { m_credentialProcess = value; }
- inline void SetAllKeyValPairs(const Aws::Map<Aws::String, Aws::String>& map) { m_allKeyValPairs = map; }
- inline const Aws::String GetValue(const Aws::String& key) const
- {
- auto iter = m_allKeyValPairs.find(key);
- if (iter == m_allKeyValPairs.end()) return {};
- return iter->second;
- }
-
- private:
- Aws::String m_name;
- Aws::String m_region;
- Aws::Auth::AWSCredentials m_credentials;
- Aws::String m_roleArn;
- Aws::String m_externalId;
- Aws::String m_sourceProfile;
- Aws::String m_credentialProcess;
- Aws::String m_ssoStartUrl;
- Aws::String m_ssoRegion;
- Aws::String m_ssoAccountId;
- Aws::String m_ssoRoleName;
- Aws::Map<Aws::String, Aws::String> m_allKeyValPairs;
- };
-
- /**
- * Loads Configuration such as .aws/config, .aws/credentials or ec2 metadata service.
- */
- class AWS_CORE_API AWSProfileConfigLoader
- {
- public:
- virtual ~AWSProfileConfigLoader() = default;
-
- /**
- * Load the configuration
- */
- bool Load();
-
- /**
- * Over writes the entire config source with the newly configured profile data.
- */
- bool PersistProfiles(const Aws::Map<Aws::String, Aws::Config::Profile>& profiles);
-
- /**
- * Gets all profiles from the configuration file.
- */
- inline const Aws::Map<Aws::String, Aws::Config::Profile>& GetProfiles() const { return m_profiles; };
-
- /**
- * the timestamp from the last time the profile information was loaded from file.
- */
- inline const Aws::Utils::DateTime& LastLoadTime() const { return m_lastLoadTime; }
-
- using ProfilesContainer = Aws::Map<Aws::String, Aws::Config::Profile>;
-
- protected:
- /**
- * Subclasses override this method to implement fetching the profiles.
- */
- virtual bool LoadInternal() = 0;
-
- /**
- * Subclasses override this method to implement persisting the profiles. Default returns false.
- */
- virtual bool PersistInternal(const Aws::Map<Aws::String, Aws::Config::Profile>&) { return false; }
-
- ProfilesContainer m_profiles;
- Aws::Utils::DateTime m_lastLoadTime;
- };
-
- /**
- * Reads configuration from a config file (e.g. $HOME/.aws/config or $HOME/.aws/credentials
- */
- class AWS_CORE_API AWSConfigFileProfileConfigLoader : public AWSProfileConfigLoader
- {
- public:
- /**
- * fileName - file to load config from
- * useProfilePrefix - whether or not the profiles are prefixed with "profile", credentials file is not
- * while the config file is. Defaults to off.
- */
- AWSConfigFileProfileConfigLoader(const Aws::String& fileName, bool useProfilePrefix = false);
-
- virtual ~AWSConfigFileProfileConfigLoader() = default;
-
- /**
- * File path being used for the config loader.
- */
- const Aws::String& GetFileName() const { return m_fileName; }
-
- /**
- * Give loader the ability to change the file path to load config from.
- * This can avoid creating new loader object if the file changed.
- */
- void SetFileName(const Aws::String& fileName) { m_fileName = fileName; }
-
- protected:
- virtual bool LoadInternal() override;
- virtual bool PersistInternal(const Aws::Map<Aws::String, Aws::Config::Profile>&) override;
-
- private:
- Aws::String m_fileName;
- bool m_useProfilePrefix;
- };
-
- static const char* const INSTANCE_PROFILE_KEY = "InstanceProfile";
-
- /**
- * Loads configuration from the EC2 Metadata Service
- */
- class AWS_CORE_API EC2InstanceProfileConfigLoader : public AWSProfileConfigLoader
- {
- public:
- /**
- * If client is nullptr, the default EC2MetadataClient will be created.
- */
- EC2InstanceProfileConfigLoader(const std::shared_ptr<Aws::Internal::EC2MetadataClient>& = nullptr);
-
- virtual ~EC2InstanceProfileConfigLoader() = default;
-
- protected:
- virtual bool LoadInternal() override;
- private:
- std::shared_ptr<Aws::Internal::EC2MetadataClient> m_ec2metadataClient;
- };
-
- /**
- * Stores the contents of config file and credentials file to avoid multiple file readings.
- * At the same time provides the flexibility to reload from file.
- */
- class AWS_CORE_API ConfigAndCredentialsCacheManager
- {
- public:
- ConfigAndCredentialsCacheManager();
-
- void ReloadConfigFile();
-
- void ReloadCredentialsFile();
-
- bool HasConfigProfile(const Aws::String& profileName) const;
-
- /**
- * Returns cached config profile with the specified profile name.
- * Using copy instead of const reference to avoid reading bad contents due to thread contention.
- */
- Aws::Config::Profile GetConfigProfile(const Aws::String& profileName) const;
-
- /**
- * Returns cached config profiles
- * Using copy instead of const reference to avoid reading bad contents due to thread contention.
- */
- Aws::Map<Aws::String, Aws::Config::Profile> GetConfigProfiles() const;
-
- /**
- * Returns cached config value with the specified profile name and key.
- * Using copy instead of const reference to avoid reading bad contents due to thread contention.
- */
- Aws::String GetConfig(const Aws::String& profileName, const Aws::String& key) const;
-
- bool HasCredentialsProfile(const Aws::String& profileName) const;
- /**
- * Returns cached credentials profile with the specified profile name.
- * Using copy instead of const reference to avoid reading bad contents due to thread contention.
- */
- Aws::Config::Profile GetCredentialsProfile(const Aws::String& profileName) const;
-
- /**
- * Returns cached credentials profiles.
- * Using copy instead of const reference to avoid reading bad contents due to thread contention.
- */
- Aws::Map<Aws::String, Aws::Config::Profile> GetCredentialsProfiles() const;
-
- /**
- * Returns cached credentials with the specified profile name.
- * Using copy instead of const reference to avoid reading bad contents due to thread contention.
- */
- Aws::Auth::AWSCredentials GetCredentials(const Aws::String& profileName) const;
-
- private:
- mutable Aws::Utils::Threading::ReaderWriterLock m_credentialsLock;
- Aws::Config::AWSConfigFileProfileConfigLoader m_credentialsFileLoader;
- mutable Aws::Utils::Threading::ReaderWriterLock m_configLock;
- Aws::Config::AWSConfigFileProfileConfigLoader m_configFileLoader;
- };
-
- AWS_CORE_API void InitConfigAndCredentialsCacheManager();
-
- AWS_CORE_API void CleanupConfigAndCredentialsCacheManager();
-
- AWS_CORE_API void ReloadCachedConfigFile();
-
- AWS_CORE_API void ReloadCachedCredentialsFile();
-
- AWS_CORE_API bool HasCachedConfigProfile(const Aws::String& profileName);
-
- AWS_CORE_API Aws::Config::Profile GetCachedConfigProfile(const Aws::String& profileName);
-
- AWS_CORE_API Aws::Map<Aws::String, Aws::Config::Profile> GetCachedConfigProfiles();
-
- AWS_CORE_API Aws::String GetCachedConfigValue(const Aws::String& profileName, const Aws::String& key);
-
- AWS_CORE_API Aws::String GetCachedConfigValue(const Aws::String& key);
-
- AWS_CORE_API bool HasCachedCredentialsProfile(const Aws::String &profileName);
-
- AWS_CORE_API Aws::Config::Profile GetCachedCredentialsProfile(const Aws::String& profileName);
-
- AWS_CORE_API Aws::Auth::AWSCredentials GetCachedCredentials(const Aws::String& profileName);
-
- AWS_CORE_API Aws::Map<Aws::String, Aws::Config::Profile> GetCachedCredentialsProfiles();
-
- }
-}
+// This header preserves the old legacy all-in-one include to maintain backward compatibility
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoaderBase.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoaderBase.h
new file mode 100644
index 0000000000..8403f8fed0
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/AWSProfileConfigLoaderBase.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/config/AWSProfileConfig.h>
+
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/threading/ReaderWriterLock.h>
+
+namespace Aws
+{
+ namespace Config {
+ /**
+ * Loads Configuration such as .aws/config, .aws/credentials or ec2 metadata service.
+ */
+ class AWS_CORE_API AWSProfileConfigLoader
+ {
+ public:
+ virtual ~AWSProfileConfigLoader() = default;
+
+ /**
+ * Load the configuration
+ */
+ bool Load();
+
+ /**
+             * Overwrites the entire config source with the newly configured profile data.
+ */
+ bool PersistProfiles(const Aws::Map<Aws::String, Aws::Config::Profile> &profiles);
+
+ /**
+ * Gets all profiles from the configuration file.
+ */
+ inline const Aws::Map<Aws::String, Aws::Config::Profile> &GetProfiles() const { return m_profiles; };
+
+ /**
+ * the timestamp from the last time the profile information was loaded from file.
+ */
+ inline const Aws::Utils::DateTime &LastLoadTime() const { return m_lastLoadTime; }
+
+ using ProfilesContainer = Aws::Map<Aws::String, Aws::Config::Profile>;
+
+            // Default c-tor; copy c-tor and assignment operator are deleted
+ AWSProfileConfigLoader() = default;
+
+ AWSProfileConfigLoader(const AWSProfileConfigLoader &) = delete;
+
+ const AWSProfileConfigLoader &operator=(AWSProfileConfigLoader &) = delete;
+
+ protected:
+ /**
+ * Subclasses override this method to implement fetching the profiles.
+ */
+ virtual bool LoadInternal() = 0;
+
+ /**
+ * Subclasses override this method to implement persisting the profiles. Default returns false.
+ */
+ virtual bool PersistInternal(const Aws::Map<Aws::String, Aws::Config::Profile> &) { return false; }
+
+ ProfilesContainer m_profiles;
+ Aws::Utils::DateTime m_lastLoadTime;
+ };
+ }
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/ConfigAndCredentialsCacheManager.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/ConfigAndCredentialsCacheManager.h
new file mode 100644
index 0000000000..12470b4087
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/ConfigAndCredentialsCacheManager.h
@@ -0,0 +1,104 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/config/AWSProfileConfigLoader.h>
+
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/core/auth/AWSCredentials.h>
+#include <aws/core/utils/threading/ReaderWriterLock.h>
+
+namespace Aws
+{
+ namespace Config
+ {
+ /**
+ * Stores the contents of config file and credentials file to avoid multiple file readings.
+ * At the same time provides the flexibility to reload from file.
+ */
+ class AWS_CORE_API ConfigAndCredentialsCacheManager
+ {
+ public:
+ ConfigAndCredentialsCacheManager();
+
+ void ReloadConfigFile();
+
+ void ReloadCredentialsFile();
+
+ bool HasConfigProfile(const Aws::String& profileName) const;
+
+ /**
+ * Returns cached config profile with the specified profile name.
+ * Using copy instead of const reference to avoid reading bad contents due to thread contention.
+ */
+ Aws::Config::Profile GetConfigProfile(const Aws::String& profileName) const;
+
+ /**
+ * Returns cached config profiles
+ * Using copy instead of const reference to avoid reading bad contents due to thread contention.
+ */
+ Aws::Map<Aws::String, Aws::Config::Profile> GetConfigProfiles() const;
+
+ /**
+ * Returns cached config value with the specified profile name and key.
+ * Using copy instead of const reference to avoid reading bad contents due to thread contention.
+ */
+ Aws::String GetConfig(const Aws::String& profileName, const Aws::String& key) const;
+
+ bool HasCredentialsProfile(const Aws::String& profileName) const;
+ /**
+ * Returns cached credentials profile with the specified profile name.
+ * Using copy instead of const reference to avoid reading bad contents due to thread contention.
+ */
+ Aws::Config::Profile GetCredentialsProfile(const Aws::String& profileName) const;
+
+ /**
+ * Returns cached credentials profiles.
+ * Using copy instead of const reference to avoid reading bad contents due to thread contention.
+ */
+ Aws::Map<Aws::String, Aws::Config::Profile> GetCredentialsProfiles() const;
+
+ /**
+ * Returns cached credentials with the specified profile name.
+ * Using copy instead of const reference to avoid reading bad contents due to thread contention.
+ */
+ Aws::Auth::AWSCredentials GetCredentials(const Aws::String& profileName) const;
+
+ private:
+ mutable Aws::Utils::Threading::ReaderWriterLock m_credentialsLock;
+ Aws::Config::AWSConfigFileProfileConfigLoader m_credentialsFileLoader;
+ mutable Aws::Utils::Threading::ReaderWriterLock m_configLock;
+ Aws::Config::AWSConfigFileProfileConfigLoader m_configFileLoader;
+ };
+
+ AWS_CORE_API void InitConfigAndCredentialsCacheManager();
+
+ AWS_CORE_API void CleanupConfigAndCredentialsCacheManager();
+
+ AWS_CORE_API void ReloadCachedConfigFile();
+
+ AWS_CORE_API void ReloadCachedCredentialsFile();
+
+ AWS_CORE_API bool HasCachedConfigProfile(const Aws::String& profileName);
+
+ AWS_CORE_API Aws::Config::Profile GetCachedConfigProfile(const Aws::String& profileName);
+
+ AWS_CORE_API Aws::Map<Aws::String, Aws::Config::Profile> GetCachedConfigProfiles();
+
+ AWS_CORE_API Aws::String GetCachedConfigValue(const Aws::String& profileName, const Aws::String& key);
+
+ AWS_CORE_API Aws::String GetCachedConfigValue(const Aws::String& key);
+
+ AWS_CORE_API bool HasCachedCredentialsProfile(const Aws::String &profileName);
+
+ AWS_CORE_API Aws::Config::Profile GetCachedCredentialsProfile(const Aws::String& profileName);
+
+ AWS_CORE_API Aws::Auth::AWSCredentials GetCachedCredentials(const Aws::String& profileName);
+
+ AWS_CORE_API Aws::Map<Aws::String, Aws::Config::Profile> GetCachedCredentialsProfiles();
+ }
+}
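A hedged sketch, not part of the upstream patch: the cached accessors keep their previous signatures after moving into this header, so existing call sites only need the new include; the raw "region" key is an assumption for illustration.

#include <aws/core/config/ConfigAndCredentialsCacheManager.h>

Aws::String ResolveRegionForProfile(const Aws::String& profileName)
{
    if (Aws::Config::HasCachedConfigProfile(profileName))
    {
        return Aws::Config::GetCachedConfigProfile(profileName).GetRegion();
    }
    return Aws::Config::GetCachedConfigValue(profileName, "region");
}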
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/EC2InstanceProfileConfigLoader.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/EC2InstanceProfileConfigLoader.h
new file mode 100644
index 0000000000..ae3a743fc3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/EC2InstanceProfileConfigLoader.h
@@ -0,0 +1,47 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/config/AWSProfileConfigLoaderBase.h>
+
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/threading/ReaderWriterLock.h>
+
+namespace Aws
+{
+ namespace Internal
+ {
+ class EC2MetadataClient;
+ }
+
+ namespace Config
+ {
+ static const char* const INSTANCE_PROFILE_KEY = "InstanceProfile";
+
+ /**
+ * Loads configuration from the EC2 Metadata Service
+ */
+ class AWS_CORE_API EC2InstanceProfileConfigLoader : public AWSProfileConfigLoader
+ {
+ public:
+ /**
+ * If client is nullptr, the default EC2MetadataClient will be created.
+ */
+ EC2InstanceProfileConfigLoader(const std::shared_ptr<Aws::Internal::EC2MetadataClient>& = nullptr);
+
+ virtual ~EC2InstanceProfileConfigLoader() = default;
+
+ protected:
+ virtual bool LoadInternal() override;
+ private:
+ std::shared_ptr<Aws::Internal::EC2MetadataClient> m_ec2metadataClient;
+ int64_t credentialsValidUntilMillis = 0;
+ int64_t calculateRetryTime() const;
+ };
+ }
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/defaults/ClientConfigurationDefaults.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/defaults/ClientConfigurationDefaults.h
new file mode 100644
index 0000000000..cbea1ce997
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/config/defaults/ClientConfigurationDefaults.h
@@ -0,0 +1,124 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * Please note that this file is autogenerated.
+ * The backwards compatibility of the default values provided by new client configuration defaults is not guaranteed;
+ * the values might change over time.
+ */
+
+#pragma once
+
+#include <aws/core/client/ClientConfiguration.h>
+
+namespace Aws
+{
+ namespace Config
+ {
+ namespace Defaults
+ {
+ /**
+ * Set default client configuration parameters per provided default mode
+ *
+ * @param clientConfig, a ClientConfiguration to update
+ * @param defaultMode, requested default mode name
+ * @param hasEc2MetadataRegion, if ec2 metadata region has been already queried
+ * @param ec2MetadataRegion, a region resolved by EC2 Instance Metadata service
+ */
+ AWS_CORE_API void SetSmartDefaultsConfigurationParameters(Aws::Client::ClientConfiguration& clientConfig,
+ const Aws::String& defaultMode,
+ bool hasEc2MetadataRegion,
+ const Aws::String& ec2MetadataRegion);
+
+ /**
+ * Resolve the name of an actual mode for a default mode "auto"
+ *
+ * The AUTO mode is an experimental mode that builds on the standard mode. The SDK
+ * will attempt to discover the execution environment to determine the appropriate
+ * settings automatically.
+ *
+ * Note that the auto detection is heuristics-based and does not guarantee 100%
+ * accuracy. STANDARD mode will be used if the execution environment cannot be
+ * determined. The auto detection might query EC2 Instance Metadata service, which
+ * might introduce latency. Therefore we recommend choosing an explicit
+ * defaults_mode instead if startup latency is critical to your application.
+ */
+ AWS_CORE_API const char* ResolveAutoClientConfiguration(const Aws::Client::ClientConfiguration& clientConfig,
+ const Aws::String& ec2MetadataRegion);
+
+ /**
+ * Default mode "legacy"
+ *
+ * The LEGACY mode provides default settings that vary per SDK and were used prior
+ * to establishment of defaults_mode.
+ */
+ AWS_CORE_API void SetLegacyClientConfiguration(Aws::Client::ClientConfiguration& clientConfig);
+
+ /**
+ * Default mode "standard"
+ *
+ * The STANDARD mode provides the latest recommended default values that should be
+ * safe to run in most scenarios.
+ *
+ * Note that the default values vended from this mode might change as best
+ * practices may evolve. As a result, it is encouraged to perform tests when
+ * upgrading the SDK.
+ */
+ AWS_CORE_API void SetStandardClientConfiguration(Aws::Client::ClientConfiguration& clientConfig);
+
+ /**
+ * Default mode "in-region"
+ *
+ * The IN_REGION mode builds on the standard mode and includes optimization
+ * tailored for applications which call AWS services from within the same AWS
+ * region.
+ *
+ * Note that the default values vended from this mode might change as best
+ * practices may evolve. As a result, it is encouraged to perform tests when
+ * upgrading the SDK.
+ */
+ AWS_CORE_API void SetInRegionClientConfiguration(Aws::Client::ClientConfiguration& clientConfig);
+
+ /**
+ * Default mode "cross-region"
+ *
+ * The CROSS_REGION mode builds on the standard mode and includes optimization
+ * tailored for applications which call AWS services in a different region.
+ *
+ * Note that the default values vended from this mode might change as best
+ * practices may evolve. As a result, it is encouraged to perform tests when
+ * upgrading the SDK.
+ */
+ AWS_CORE_API void SetCrossRegionClientConfiguration(Aws::Client::ClientConfiguration& clientConfig);
+
+ /**
+ * Default mode "mobile"
+ *
+ * The MOBILE mode builds on the standard mode and includes optimization tailored
+ * for mobile applications.
+ *
+ * Note that the default values vended from this mode might change as best
+ * practices may evolve. As a result, it is encouraged to perform tests when
+ * upgrading the SDK.
+ */
+ AWS_CORE_API void SetMobileClientConfiguration(Aws::Client::ClientConfiguration& clientConfig);
+
+ /**
+ * Internal helper function to resolve smart defaults mode if not provided
+ *
+ * @param clientConfig, a ClientConfiguration to update
+ * @param requestedDefaultMode, requested default mode name
+ * @param configFileDefaultMode, default mode specified in a config file
+ * @param hasEc2MetadataRegion, if ec2 metadata region has been already queried
+ * @param ec2MetadataRegion, a region resolved by EC2 Instance Metadata service
+ */
+ AWS_CORE_API Aws::String ResolveDefaultModeName(const Aws::Client::ClientConfiguration& clientConfig,
+ Aws::String requestedDefaultMode,
+ const Aws::String& configFileDefaultMode,
+ bool hasEc2MetadataRegion,
+ Aws::String ec2MetadataRegion);
+ } //namespace Defaults
+ } //namespace Config
+} //namespace Aws
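A hedged sketch, not part of the upstream patch: applying a defaults mode by hand through the helpers above, roughly what ClientConfiguration(useSmartDefaults, "standard") does internally; the empty EC2 metadata region arguments are an assumption.

#include <aws/core/config/defaults/ClientConfigurationDefaults.h>

void ApplyStandardDefaults(Aws::Client::ClientConfiguration& clientConfig)
{
    Aws::Config::Defaults::SetSmartDefaultsConfigurationParameters(
        clientConfig, "standard", /*hasEc2MetadataRegion*/ false, /*ec2MetadataRegion*/ "");
}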
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSEndpoint.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSEndpoint.h
new file mode 100644
index 0000000000..311ac71426
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSEndpoint.h
@@ -0,0 +1,71 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+
+#include <aws/core/endpoint/internal/AWSEndpointAttribute.h>
+
+namespace Aws
+{
+ namespace Endpoint
+ {
+ /**
+ * A public type that encapsulates the information about an endpoint
+ */
+ class AWS_CORE_API AWSEndpoint
+ {
+ public:
+ using EndpointAttributes = Internal::Endpoint::EndpointAttributes;
+
+ virtual ~AWSEndpoint()
+ {};
+
+ Aws::String GetURL() const;
+ void SetURL(Aws::String url);
+
+ const Aws::Http::URI& GetURI() const;
+ void SetURI(Aws::Http::URI uri);
+
+ template<typename T>
+ inline void AddPathSegment(T&& pathSegment)
+ {
+ m_uri.AddPathSegment(std::forward<T>(pathSegment));
+ }
+
+ template<typename T>
+ inline void AddPathSegments(T&& pathSegments)
+ {
+ m_uri.AddPathSegments(std::forward<T>(pathSegments));
+ }
+
+ using OptionalError = Crt::Optional<Aws::Client::AWSError<Aws::Client::CoreErrors>>;
+ OptionalError AddPrefixIfMissing(const Aws::String& prefix);
+
+ void SetQueryString(const Aws::String& queryString);
+
+ const Crt::Optional<EndpointAttributes>& GetAttributes() const;
+ Crt::Optional<EndpointAttributes>& AccessAttributes();
+ void SetAttributes(EndpointAttributes&& attributes);
+
+ const Aws::UnorderedMap<Aws::String, Aws::String>& GetHeaders() const;
+ void SetHeaders(Aws::UnorderedMap<Aws::String, Aws::String> headers);
+
+ protected:
+ // A URI containing at minimum the scheme and host. May optionally include a port and a path.
+ Aws::Http::URI m_uri;
+
+ // A grab bag property map of endpoint attributes. The values here are considered unstable.
+ Crt::Optional<EndpointAttributes> m_attributes;
+
+ // A map of additional headers to be set when calling the endpoint.
+ // Note: the values in these maps are Lists to support multi-value headers.
+ Aws::UnorderedMap<Aws::String, Aws::String> m_headers;
+ };
+ }
+}
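A minimal sketch of how the AWSEndpoint accessors compose (standalone use outside the SDK's own request pipeline; the URL is illustrative):

#include <aws/core/endpoint/AWSEndpoint.h>

void BuildEndpointSketch()
{
    Aws::Endpoint::AWSEndpoint endpoint;
    endpoint.SetURL("https://service.us-east-1.amazonaws.com");
    endpoint.AddPathSegment("v1");                 // surrounding '/' are trimmed
    endpoint.AddPathSegments("buckets/my-bucket"); // split on '/'
    // GetURL() re-serializes the underlying URI, including the appended path.
    Aws::String url = endpoint.GetURL();
    (void)url;
}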
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSPartitions.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSPartitions.h
new file mode 100644
index 0000000000..fe08b9f4b6
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/AWSPartitions.h
@@ -0,0 +1,23 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <cstddef>
+#include <aws/core/Core_EXPORTS.h>
+
+namespace Aws
+{
+namespace Endpoint
+{
+ struct AWS_CORE_API AWSPartitions
+ {
+ public:
+ static const size_t PartitionsBlobStrLen;
+ static const size_t PartitionsBlobSize;
+
+ static const char* GetPartitionsBlob();
+ };
+} // namespace Endpoint
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/BuiltInParameters.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/BuiltInParameters.h
new file mode 100644
index 0000000000..c55dac969f
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/BuiltInParameters.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/endpoint/EndpointParameter.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/client/GenericClientConfiguration.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+
+namespace Aws
+{
+ namespace Endpoint
+ {
+ class AWS_CORE_API BuiltInParameters
+ {
+ public:
+ using EndpointParameter = Aws::Endpoint::EndpointParameter;
+
+ BuiltInParameters() = default;
+ BuiltInParameters(const BuiltInParameters&) = delete; // avoid accidental copy
+ virtual ~BuiltInParameters(){};
+
+ virtual void SetFromClientConfiguration(const Client::ClientConfiguration& config);
+ virtual void SetFromClientConfiguration(const Client::GenericClientConfiguration<false>& config);
+ virtual void SetFromClientConfiguration(const Client::GenericClientConfiguration<true>& config);
+
+ virtual void OverrideEndpoint(const Aws::String& endpoint, const Aws::Http::Scheme& scheme = Aws::Http::Scheme::HTTPS);
+
+ const EndpointParameter& GetParameter(const Aws::String& name) const;
+ void SetParameter(EndpointParameter param);
+ void SetStringParameter(Aws::String name, Aws::String value);
+ void SetBooleanParameter(Aws::String name, bool value);
+
+ const Aws::Vector<EndpointParameter>& GetAllParameters() const;
+
+ protected:
+ Aws::Vector<EndpointParameter> m_params;
+ };
+ } // namespace Endpoint
+} // namespace Aws
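A minimal sketch of populating built-in parameters the way a generated service client would (parameter names follow the AWS::* convention used by the endpoint rules; the override URL is illustrative):

#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/endpoint/BuiltInParameters.h>

void BuiltInParametersSketch(const Aws::Client::ClientConfiguration& config)
{
    Aws::Endpoint::BuiltInParameters builtIns;
    builtIns.SetFromClientConfiguration(config);          // picks up region, FIPS, etc.
    builtIns.SetBooleanParameter("AWS::UseDualStack", true);
    builtIns.OverrideEndpoint("https://localhost:4566");  // sets built-in parameter "AWS::Endpoint"
    for (const auto& param : builtIns.GetAllParameters())
    {
        (void)param.GetName();                            // name, type and origin are inspectable
    }
}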
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/ClientContextParameters.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/ClientContextParameters.h
new file mode 100644
index 0000000000..e39eeed2e6
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/ClientContextParameters.h
@@ -0,0 +1,37 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/endpoint/EndpointParameter.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+
+namespace Aws
+{
+ namespace Endpoint
+ {
+ class AWS_CORE_API ClientContextParameters
+ {
+ public:
+ using EndpointParameter = Aws::Endpoint::EndpointParameter;
+
+ ClientContextParameters() = default;
+ // avoid accidental copy from endpointProvider::AccessClientContextParameters()
+ ClientContextParameters(const ClientContextParameters&) = delete;
+
+ virtual ~ClientContextParameters(){};
+
+ const EndpointParameter& GetParameter(const Aws::String& name) const;
+ void SetParameter(EndpointParameter param);
+ void SetStringParameter(Aws::String name, Aws::String value);
+ void SetBooleanParameter(Aws::String name, bool value);
+
+ const Aws::Vector<EndpointParameter>& GetAllParameters() const;
+ protected:
+ Aws::Vector<EndpointParameter> m_params;
+ };
+ } // namespace Endpoint
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/DefaultEndpointProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/DefaultEndpointProvider.h
new file mode 100644
index 0000000000..d4d5cdd941
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/DefaultEndpointProvider.h
@@ -0,0 +1,116 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/endpoint/AWSPartitions.h>
+#include <aws/core/endpoint/EndpointProviderBase.h>
+#include <aws/core/endpoint/EndpointParameter.h>
+#include <aws/core/endpoint/ClientContextParameters.h>
+#include <aws/core/endpoint/BuiltInParameters.h>
+#include <aws/core/utils/memory/stl/AWSArray.h>
+
+#include <aws/crt/endpoints/RuleEngine.h>
+
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/client/AWSError.h>
+#include <aws/core/client/CoreErrors.h>
+#include "aws/core/utils/logging/LogMacros.h"
+
+namespace Aws
+{
+ namespace Endpoint
+ {
+ static const char DEFAULT_ENDPOINT_PROVIDER_TAG[] = "Aws::Endpoint::DefaultEndpointProvider";
+
+ /**
+ * Default template implementation for endpoint resolution
+ * @param ruleEngine
+ * @param builtInParameters
+ * @param clientContextParameters
+ * @param endpointParameters
+ * @return
+ */
+ AWS_CORE_API ResolveEndpointOutcome
+ ResolveEndpointDefaultImpl(const Aws::Crt::Endpoints::RuleEngine& ruleEngine,
+ const EndpointParameters& builtInParameters,
+ const EndpointParameters& clientContextParameters,
+ const EndpointParameters& endpointParameters);
+
+ /**
+ * Default endpoint provider template used in this SDK.
+ */
+ template<typename ClientConfigurationT = Aws::Client::GenericClientConfiguration<false>,
+ typename BuiltInParametersT = Aws::Endpoint::BuiltInParameters,
+ typename ClientContextParametersT = Aws::Endpoint::ClientContextParameters>
+ class AWS_CORE_API DefaultEndpointProvider : public EndpointProviderBase<ClientConfigurationT, BuiltInParametersT, ClientContextParametersT>
+ {
+ public:
+ DefaultEndpointProvider(const char* endpointRulesBlob, const size_t endpointRulesBlobSz)
+ : m_crtRuleEngine(Aws::Crt::ByteCursorFromArray((const uint8_t*) endpointRulesBlob, endpointRulesBlobSz),
+ Aws::Crt::ByteCursorFromArray((const uint8_t*) AWSPartitions::GetPartitionsBlob(), AWSPartitions::PartitionsBlobSize))
+ {
+ if(!m_crtRuleEngine) {
+ AWS_LOGSTREAM_FATAL(DEFAULT_ENDPOINT_PROVIDER_TAG, "Invalid CRT Rule Engine state");
+ }
+ }
+
+ virtual ~DefaultEndpointProvider()
+ {
+ }
+
+ void InitBuiltInParameters(const ClientConfigurationT& config) override
+ {
+ m_builtInParameters.SetFromClientConfiguration(config);
+ }
+
+ /**
+ * Default implementation of the ResolveEndpoint
+ */
+ ResolveEndpointOutcome ResolveEndpoint(const EndpointParameters& endpointParameters) const override
+ {
+ auto ResolveEndpointDefaultImpl = Aws::Endpoint::ResolveEndpointDefaultImpl;
+ return ResolveEndpointDefaultImpl(m_crtRuleEngine, m_builtInParameters.GetAllParameters(), m_clientContextParameters.GetAllParameters(), endpointParameters);
+ };
+
+ const ClientContextParametersT& GetClientContextParameters() const override
+ {
+ return m_clientContextParameters;
+ }
+ ClientContextParametersT& AccessClientContextParameters() override
+ {
+ return m_clientContextParameters;
+ }
+
+ const BuiltInParametersT& GetBuiltInParameters() const
+ {
+ return m_builtInParameters;
+ }
+ BuiltInParametersT& AccessBuiltInParameters()
+ {
+ return m_builtInParameters;
+ }
+
+ void OverrideEndpoint(const Aws::String& endpoint) override
+ {
+ m_builtInParameters.OverrideEndpoint(endpoint);
+ }
+
+ protected:
+ /* Crt RuleEngine evaluator built using the service's Rule engine */
+ Aws::Crt::Endpoints::RuleEngine m_crtRuleEngine;
+
+ /* Also known as configurable parameters defined by the AWS service in its c2j/smithy model definition */
+ ClientContextParametersT m_clientContextParameters;
+
+ /* Also known as parameters on the ClientConfiguration in this SDK */
+ BuiltInParametersT m_builtInParameters;
+ };
+
+ // Export symbol from the DLL:
+ template class AWS_CORE_API DefaultEndpointProvider<Aws::Client::GenericClientConfiguration</*HasEndpointDiscovery*/ true> >;
+ } // namespace Endpoint
+} // namespace Aws
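A minimal sketch of driving the provider directly. The rules blob here is a placeholder; real blobs are generated from the service model, and an empty one fails rule-engine initialization, so this is illustrative only:

#include <aws/core/endpoint/DefaultEndpointProvider.h>

static const char PLACEHOLDER_RULES_BLOB[] = "{}"; // generated per service in practice

void ResolveEndpointSketch(const Aws::Client::GenericClientConfiguration<false>& config)
{
    Aws::Endpoint::DefaultEndpointProvider<> provider(PLACEHOLDER_RULES_BLOB,
                                                      sizeof(PLACEHOLDER_RULES_BLOB) - 1);
    provider.InitBuiltInParameters(config);

    Aws::Endpoint::EndpointParameters params;
    params.emplace_back("Region", "us-east-1");
    auto outcome = provider.ResolveEndpoint(params);
    if (outcome.IsSuccess())
    {
        (void)outcome.GetResult().GetURL();
    }
}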
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointParameter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointParameter.h
new file mode 100644
index 0000000000..85911eb94a
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointParameter.h
@@ -0,0 +1,139 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/client/AWSError.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+
+namespace Aws
+{
+ namespace Endpoint
+ {
+ class AWS_CORE_API EndpointParameter
+ {
+ public:
+ enum class ParameterType
+ {
+ BOOLEAN,
+ STRING
+ };
+ enum class ParameterOrigin
+ {
+ STATIC_CONTEXT,
+ OPERATION_CONTEXT,
+ CLIENT_CONTEXT,
+ BUILT_IN,
+ NOT_SET = -1
+ };
+
+ EndpointParameter(Aws::String name, bool initialValue, ParameterOrigin parameterOrigin = ParameterOrigin::NOT_SET)
+ : m_storedType(ParameterType::BOOLEAN),
+ m_parameterOrigin(parameterOrigin),
+ m_name(std::move(name)),
+ m_boolValue(initialValue)
+ {}
+
+ EndpointParameter(Aws::String name, Aws::String initialValue, ParameterOrigin parameterOrigin = ParameterOrigin::NOT_SET)
+ : m_storedType(ParameterType::STRING),
+ m_parameterOrigin(parameterOrigin),
+ m_name(std::move(name)),
+ m_stringValue(std::move(initialValue))
+ {}
+
+ EndpointParameter(Aws::String name, const char* initialValue, ParameterOrigin parameterOrigin = ParameterOrigin::NOT_SET)
+ : m_storedType(ParameterType::STRING),
+ m_parameterOrigin(parameterOrigin),
+ m_name(std::move(name)),
+ m_stringValue(initialValue)
+ {}
+
+ EndpointParameter(ParameterType storedType, ParameterOrigin parameterOrigin, Aws::String name)
+ : m_storedType(storedType),
+ m_parameterOrigin(parameterOrigin),
+ m_name(std::move(name))
+ {}
+
+ EndpointParameter(const EndpointParameter&) = default;
+ EndpointParameter(EndpointParameter&&) = default;
+ EndpointParameter& operator=(const EndpointParameter&) = default;
+ EndpointParameter& operator=(EndpointParameter&&) = default;
+
+ inline ParameterType GetStoredType() const
+ {
+ return m_storedType;
+ }
+
+ inline ParameterOrigin GetParameterOrigin() const
+ {
+ return m_parameterOrigin;
+ }
+
+ inline const Aws::String& GetName() const
+ {
+ return m_name;
+ }
+
+ enum class GetSetResult
+ {
+ SUCCESS,
+ ERROR_WRONG_TYPE
+ };
+
+ inline GetSetResult GetBool(bool& ioValue) const
+ {
+ if(m_storedType != ParameterType::BOOLEAN)
+ return GetSetResult::ERROR_WRONG_TYPE;
+ ioValue = m_boolValue;
+ return GetSetResult::SUCCESS;
+ }
+
+ inline GetSetResult GetString(Aws::String& ioValue) const
+ {
+ // disabled RTTI...
+ if(m_storedType != ParameterType::STRING)
+ return GetSetResult::ERROR_WRONG_TYPE;
+ ioValue = m_stringValue;
+ return GetSetResult::SUCCESS;
+ }
+
+ inline GetSetResult SetBool(bool iValue)
+ {
+ if(m_storedType != ParameterType::BOOLEAN)
+ return GetSetResult::ERROR_WRONG_TYPE;
+ m_boolValue = iValue;
+ return GetSetResult::SUCCESS;
+ }
+
+ inline GetSetResult SetString(Aws::String iValue)
+ {
+ if(m_storedType != ParameterType::STRING)
+ return GetSetResult::ERROR_WRONG_TYPE;
+ m_stringValue = std::move(iValue);
+ return GetSetResult::SUCCESS;
+ }
+
+ bool GetBoolValueNoCheck() const
+ {
+ return m_boolValue;
+ }
+ const Aws::String& GetStrValueNoCheck() const
+ {
+ return m_stringValue;
+ }
+
+ protected:
+ ParameterType m_storedType;
+ ParameterOrigin m_parameterOrigin;
+ Aws::String m_name;
+
+ bool m_boolValue = false;
+ Aws::String m_stringValue;
+ };
+
+ using EndpointParameters = Aws::Vector<Aws::Endpoint::EndpointParameter>;
+ } // namespace Endpoint
+} // namespace Aws
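A minimal sketch of the type-checked accessors (the parameter name is illustrative):

#include <aws/core/endpoint/EndpointParameter.h>

void EndpointParameterSketch()
{
    using Aws::Endpoint::EndpointParameter;
    EndpointParameter useFips("AWS::UseFIPS", false, EndpointParameter::ParameterOrigin::BUILT_IN);

    bool value = false;
    if (useFips.GetBool(value) == EndpointParameter::GetSetResult::SUCCESS)
    {
        // value now holds the stored boolean
    }

    Aws::String str;
    // Requesting the wrong type is reported explicitly instead of relying on RTTI.
    auto result = useFips.GetString(str); // GetSetResult::ERROR_WRONG_TYPE
    (void)result;
}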
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointProviderBase.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointProviderBase.h
new file mode 100644
index 0000000000..29b67626e3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/EndpointProviderBase.h
@@ -0,0 +1,76 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/endpoint/AWSEndpoint.h>
+#include <aws/core/client/AWSError.h>
+#include <aws/core/endpoint/EndpointParameter.h>
+#include <aws/core/endpoint/BuiltInParameters.h>
+#include <aws/core/endpoint/ClientContextParameters.h>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ template< typename R, typename E> class Outcome;
+ } // namespace Utils
+ namespace Client
+ {
+ enum class CoreErrors;
+ } // namespace Client
+
+ namespace Endpoint
+ {
+ using EndpointParameters = Aws::Vector<EndpointParameter>;
+ using ResolveEndpointOutcome = Aws::Utils::Outcome<AWSEndpoint, Aws::Client::AWSError<Aws::Client::CoreErrors> >;
+
+ /**
+ * EndpointProviderBase is an interface definition that resolves the provided
+ * EndpointParameters to either an Endpoint or an error.
+ * This base class represents the minimal interface that must be implemented to override an endpoint provider.
+ */
+ template<typename ClientConfigurationT = Aws::Client::GenericClientConfiguration<false>,
+ typename BuiltInParametersT = Aws::Endpoint::BuiltInParameters,
+ typename ClientContextParametersT = Aws::Endpoint::ClientContextParameters>
+ class AWS_CORE_API EndpointProviderBase
+ {
+ public:
+ using BuiltInParameters = BuiltInParametersT;
+ using ClientContextParameters = ClientContextParametersT;
+
+ virtual ~EndpointProviderBase() = default;
+
+ /**
+ * Initialize built-in parameters from a ClientConfiguration
+ */
+ virtual void InitBuiltInParameters(const ClientConfigurationT& config) = 0;
+
+ /**
+ * Function to override endpoint, i.e. to set built-in parameter "AWS::Endpoint"
+ */
+ virtual void OverrideEndpoint(const Aws::String& endpoint) = 0;
+
+ /**
+ * Method for write access to Client Context Parameters (i.e. configurable service-specific parameters)
+ */
+ virtual ClientContextParametersT& AccessClientContextParameters() = 0;
+
+ /**
+ * Method for read-only access to Client Context Parameters (i.e. configurable service-specific parameters)
+ */
+ virtual const ClientContextParametersT& GetClientContextParameters() const = 0;
+
+ /**
+ * The core of the endpoint provider interface.
+ */
+ virtual ResolveEndpointOutcome ResolveEndpoint(const EndpointParameters& endpointParameters) const = 0;
+ };
+
+ // Export symbol from the DLL:
+ template class AWS_CORE_API EndpointProviderBase<Aws::Client::GenericClientConfiguration</*HasEndpointDiscovery*/ true> >;
+ } // namespace Endpoint
+} // namespace Aws
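A minimal sketch of satisfying this interface by hand, e.g. to pin every request to a fixed endpoint in tests (the class and URL are hypothetical):

#include <aws/core/endpoint/EndpointProviderBase.h>
#include <aws/core/utils/Outcome.h>

class FixedEndpointProvider : public Aws::Endpoint::EndpointProviderBase<>
{
public:
    void InitBuiltInParameters(const Aws::Client::GenericClientConfiguration<false>&) override {}
    void OverrideEndpoint(const Aws::String& endpoint) override { m_url = endpoint; }
    Aws::Endpoint::ClientContextParameters& AccessClientContextParameters() override { return m_params; }
    const Aws::Endpoint::ClientContextParameters& GetClientContextParameters() const override { return m_params; }

    Aws::Endpoint::ResolveEndpointOutcome ResolveEndpoint(const Aws::Endpoint::EndpointParameters&) const override
    {
        Aws::Endpoint::AWSEndpoint endpoint;
        endpoint.SetURL(m_url);
        return Aws::Endpoint::ResolveEndpointOutcome(endpoint); // always succeeds with the fixed URL
    }

private:
    Aws::String m_url{"https://localhost:4566"};
    Aws::Endpoint::ClientContextParameters m_params;
};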
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/internal/AWSEndpointAttribute.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/internal/AWSEndpointAttribute.h
new file mode 100644
index 0000000000..c4dfd07eb3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/endpoint/internal/AWSEndpointAttribute.h
@@ -0,0 +1,89 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#pragma once
+
+#include <aws/core/client/AWSError.h>
+#include <aws/crt/Optional.h>
+
+namespace Aws
+{
+ namespace Internal
+ {
+ namespace Endpoint
+ {
+ class AWS_CORE_API EndpointAuthScheme
+ {
+ public:
+ virtual ~EndpointAuthScheme(){};
+
+ inline const Aws::String& GetName() const
+ {
+ return m_name;
+ }
+ inline void SetName(Aws::String name)
+ {
+ m_name = std::move(name);
+ }
+
+ inline const Crt::Optional<Aws::String>& GetSigningName() const
+ {
+ return m_signingName;
+ }
+ inline void SetSigningName(Aws::String signingName)
+ {
+ m_signingName = std::move(signingName);
+ }
+
+ inline const Crt::Optional<Aws::String>& GetSigningRegion() const
+ {
+ return m_signingRegion;
+ }
+ inline void SetSigningRegion(Aws::String signingRegion)
+ {
+ m_signingRegion = std::move(signingRegion);
+ }
+
+ inline const Crt::Optional<Aws::String>& GetSigningRegionSet() const
+ {
+ return m_signingRegionSet;
+ }
+ inline void SetSigningRegionSet(Aws::String signingRegionSet)
+ {
+ m_signingRegionSet = std::move(signingRegionSet);
+ }
+
+ inline const Crt::Optional<bool>& GetDisableDoubleEncoding() const
+ {
+ return m_disableDoubleEncoding;
+ }
+ inline void SetDisableDoubleEncoding(bool disableDoubleEncoding)
+ {
+ m_disableDoubleEncoding = disableDoubleEncoding;
+ }
+
+ private:
+ Aws::String m_name;
+
+ Crt::Optional<Aws::String> m_signingName;
+ Crt::Optional<Aws::String> m_signingRegion;
+ Crt::Optional<Aws::String> m_signingRegionSet;
+ Crt::Optional<bool> m_disableDoubleEncoding;
+ };
+
+ /**
+ * A grab bag property map of endpoint attributes. The values here are considered unstable.
+ * The C++ SDK currently supports only a single endpoint attribute: "AuthScheme"
+ */
+ struct AWS_CORE_API EndpointAttributes
+ {
+ Aws::Internal::Endpoint::EndpointAuthScheme authScheme;
+
+ static EndpointAttributes BuildEndpointAttributesFromJson(const Aws::String& iJsonStr);
+ };
+ } // namespace Endpoint
+ } // namespace Internal
+} // namespace Aws
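A minimal sketch of the attribute accessors (values are illustrative; Crt::Optional is assumed to mirror std::optional's has_value()/value()):

#include <aws/core/endpoint/internal/AWSEndpointAttribute.h>

void EndpointAttributesSketch()
{
    Aws::Internal::Endpoint::EndpointAttributes attributes;
    attributes.authScheme.SetName("sigv4");
    attributes.authScheme.SetSigningName("s3");
    attributes.authScheme.SetSigningRegion("us-east-1");
    attributes.authScheme.SetDisableDoubleEncoding(true);

    if (attributes.authScheme.GetSigningRegion().has_value())
    {
        Aws::String region = attributes.authScheme.GetSigningRegion().value();
        (void)region;
    }
}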
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/external/cjson/cJSON.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/external/cjson/cJSON.h
index 92ccec786c..a069f3c772 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/external/cjson/cJSON.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/external/cjson/cJSON.h
@@ -183,8 +183,10 @@ CJSON_AS4CPP_PUBLIC(cJSON *) cJSON_AS4CPP_GetArrayItem(const cJSON *array, int i
CJSON_AS4CPP_PUBLIC(cJSON *) cJSON_AS4CPP_GetObjectItem(const cJSON * const object, const char * const string);
CJSON_AS4CPP_PUBLIC(cJSON *) cJSON_AS4CPP_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
CJSON_AS4CPP_PUBLIC(cJSON_AS4CPP_bool) cJSON_AS4CPP_HasObjectItem(const cJSON *object, const char *string);
-/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_AS4CPP_Parse() returns 0. 0 when cJSON_AS4CPP_Parse() succeeds. */
+/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_AS4CPP_Parse() returns 0. 0 when cJSON_AS4CPP_Parse() succeeds.
+ * NOTE: disabled, since this method is not thread-safe. See comments in source/external/cjson/cJSON.cpp.
CJSON_AS4CPP_PUBLIC(const char *) cJSON_AS4CPP_GetErrorPtr(void);
+ */
/* Check item type and return its value */
CJSON_AS4CPP_PUBLIC(char *) cJSON_AS4CPP_GetStringValue(const cJSON * const item);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpRequest.h
index ab71a3a29b..129bd3bd36 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpRequest.h
@@ -18,6 +18,20 @@
namespace Aws
{
+ namespace Crt
+ {
+ namespace Http
+ {
+ class HttpRequest;
+ }
+ }
+ namespace Utils
+ {
+ namespace Crypto
+ {
+ class Hash;
+ }
+ }
namespace Http
{
extern AWS_CORE_API const char DATE_HEADER[];
@@ -29,8 +43,10 @@ namespace Aws
extern AWS_CORE_API const char AUTHORIZATION_HEADER[];
extern AWS_CORE_API const char AWS_AUTHORIZATION_HEADER[];
extern AWS_CORE_API const char COOKIE_HEADER[];
+ extern AWS_CORE_API const char DECODED_CONTENT_LENGTH_HEADER[];
extern AWS_CORE_API const char CONTENT_LENGTH_HEADER[];
extern AWS_CORE_API const char CONTENT_TYPE_HEADER[];
+ extern AWS_CORE_API const char CONTENT_ENCODING_HEADER[];
extern AWS_CORE_API const char TRANSFER_ENCODING_HEADER[];
extern AWS_CORE_API const char USER_AGENT_HEADER[];
extern AWS_CORE_API const char VIA_HEADER[];
@@ -39,9 +55,13 @@ namespace Aws
extern AWS_CORE_API const char X_AMZ_EXPIRES_HEADER[];
extern AWS_CORE_API const char CONTENT_MD5_HEADER[];
extern AWS_CORE_API const char API_VERSION_HEADER[];
+ extern AWS_CORE_API const char AWS_TRAILER_HEADER[];
extern AWS_CORE_API const char SDK_INVOCATION_ID_HEADER[];
extern AWS_CORE_API const char SDK_REQUEST_HEADER[];
+ extern AWS_CORE_API const char X_AMZN_TRACE_ID_HEADER[];
extern AWS_CORE_API const char CHUNKED_VALUE[];
+ extern AWS_CORE_API const char AWS_CHUNKED_VALUE[];
+ extern AWS_CORE_API const char X_AMZN_ERROR_TYPE[];
class HttpRequest;
class HttpResponse;
@@ -107,7 +127,7 @@ namespace Aws
*/
virtual bool HasHeader(const char* name) const = 0;
/**
- * Get size in bytes of the request when as it will be going accross the wire.
+ * Get size in bytes of the request when as it will be going across the wire.
*/
virtual int64_t GetSize() const = 0;
/**
@@ -525,6 +545,21 @@ namespace Aws
bool IsEventStreamRequest() { return m_isEvenStreamRequest; }
void SetEventStreamRequest(bool eventStreamRequest) { m_isEvenStreamRequest = eventStreamRequest; }
+
+ virtual std::shared_ptr<Aws::Crt::Http::HttpRequest> ToCrtHttpRequest();
+
+ void SetRequestHash(const Aws::String& algorithmName, const std::shared_ptr<Aws::Utils::Crypto::Hash>& hash)
+ {
+ m_requestHash = std::make_pair(algorithmName, hash);
+ }
+ const std::pair<Aws::String, std::shared_ptr<Aws::Utils::Crypto::Hash>>& GetRequestHash() { return m_requestHash; }
+
+ void AddResponseValidationHash(const Aws::String& algorithmName, const std::shared_ptr<Aws::Utils::Crypto::Hash>& hash)
+ {
+ m_responseValidationHashes.emplace_back(algorithmName, hash);
+ }
+ const Aws::Vector<std::pair<Aws::String, std::shared_ptr<Aws::Utils::Crypto::Hash>>>& GetResponseValidationHashes() const { return m_responseValidationHashes; }
+
private:
URI m_uri;
HttpMethod m_method;
@@ -536,6 +571,8 @@ namespace Aws
Aws::String m_signingAccessKey;
Aws::String m_resolvedRemoteHost;
Aws::Monitoring::HttpClientMetricsCollection m_httpRequestMetrics;
+ std::pair<Aws::String, std::shared_ptr<Aws::Utils::Crypto::Hash>> m_requestHash;
+ Aws::Vector<std::pair<Aws::String, std::shared_ptr<Aws::Utils::Crypto::Hash>>> m_responseValidationHashes;
};
} // namespace Http
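A minimal sketch of the new checksum hooks on HttpRequest, paired with the CRC32 hash added elsewhere in this change (the algorithm label "crc32" and the URL are illustrative):

#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/http/HttpRequest.h>
#include <aws/core/utils/crypto/CRC32.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>
#include <aws/core/utils/stream/ResponseStream.h>

void ChecksumHooksSketch()
{
    auto request = Aws::Http::CreateHttpRequest(
        Aws::String("https://example.amazonaws.com/object"),
        Aws::Http::HttpMethod::HTTP_PUT,
        Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);

    // Hash to compute over the outgoing request body.
    request->SetRequestHash("crc32", Aws::MakeShared<Aws::Utils::Crypto::CRC32>("ChecksumHooksSketch"));
    // Hash(es) to validate against the response.
    request->AddResponseValidationHash("crc32", Aws::MakeShared<Aws::Utils::Crypto::CRC32>("ChecksumHooksSketch"));
}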
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpResponse.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpResponse.h
index 1db30d1730..8082547a71 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpResponse.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/HttpResponse.h
@@ -105,6 +105,11 @@ namespace Aws
NETWORK_CONNECT_TIMEOUT = 599
};
+ /**
+ * Overload ostream operator<< for HttpResponseCode enum class for a prettier output such as "200"
+ */
+ AWS_CORE_API Aws::OStream& operator<< (Aws::OStream& oStream, HttpResponseCode code);
+
inline bool IsRetryableHttpResponseCode(HttpResponseCode responseCode)
{
switch (responseCode)
@@ -137,7 +142,8 @@ namespace Aws
HttpResponse(const std::shared_ptr<const HttpRequest>& originatingRequest) :
m_httpRequest(originatingRequest),
m_responseCode(HttpResponseCode::REQUEST_NOT_MADE),
- m_hasClientError(false)
+ m_hasClientError(false),
+ m_clientErrorType(Aws::Client::CoreErrors::OK)
{}
virtual ~HttpResponse() = default;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/URI.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/URI.h
index d72e96b863..724c24379f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/URI.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/URI.h
@@ -9,7 +9,7 @@
#include <aws/core/http/Scheme.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
-#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/StringUtils.h>
#include <stdint.h>
@@ -21,6 +21,9 @@ namespace Aws
static const uint16_t HTTP_DEFAULT_PORT = 80;
static const uint16_t HTTPS_DEFAULT_PORT = 443;
+ extern bool s_compliantRfc3986Encoding;
+ AWS_CORE_API void SetCompliantRfc3986Encoding(bool compliant);
+
//per https://tools.ietf.org/html/rfc3986#section-3.4 there is nothing preventing servers from allowing
//multiple values for the same key. So use a multimap instead of a map.
typedef Aws::MultiMap<Aws::String, Aws::String> QueryStringParameterCollection;
@@ -89,12 +92,17 @@ namespace Aws
* Gets the path portion of the uri e.g. the portion after the first slash after the authority and prior to the
* query string. This is not url encoded.
*/
- inline const Aws::String& GetPath() const { return m_path; }
+ Aws::String GetPath() const;
/**
* Gets the path portion of the uri, url encodes it and returns it
*/
- inline Aws::String GetURLEncodedPath() const { return URLEncodePath(m_path); }
+ Aws::String GetURLEncodedPath() const;
+
+ /**
+ * Gets the path portion of the uri, url encodes it according to RFC3986 and returns it.
+ */
+ Aws::String GetURLEncodedPathRFC3986() const;
/**
* Sets the path portion of the uri. URL encodes it if needed
@@ -102,6 +110,39 @@ namespace Aws
void SetPath(const Aws::String& value);
/**
+ * Add a path segment to the uri.
+ * Leading slashes and trailing slashes will be removed.
+ * Use AddPathSegments() to enable trailing slashes.
+ */
+ template<typename T>
+ inline void AddPathSegment(T pathSegment)
+ {
+ Aws::StringStream ss;
+ ss << pathSegment;
+ Aws::String segment = ss.str();
+ segment.erase(0, segment.find_first_not_of('/'));
+ segment.erase(segment.find_last_not_of('/') + 1);
+ m_pathSegments.push_back(segment);
+ m_pathHasTrailingSlash = false;
+ }
+
+ /**
+ * Add path segments to the uri.
+ */
+ template<typename T>
+ inline void AddPathSegments(T pathSegments)
+ {
+ Aws::StringStream ss;
+ ss << pathSegments;
+ Aws::String segments = ss.str();
+ for (const auto& segment : Aws::Utils::StringUtils::Split(segments, '/'))
+ {
+ m_pathSegments.push_back(segment);
+ }
+ m_pathHasTrailingSlash = (!segments.empty() && segments.back() == '/');
+ }
+
+ /**
* Gets the raw query string including the ?
*/
inline const Aws::String& GetQueryString() const { return m_queryString; }
@@ -159,10 +200,11 @@ namespace Aws
void ExtractAndSetQueryString(const Aws::String& uri);
bool CompareURIParts(const URI& other) const;
- Scheme m_scheme;
+ Scheme m_scheme = Scheme::HTTP;
Aws::String m_authority;
- uint16_t m_port;
- Aws::String m_path;
+ uint16_t m_port = HTTP_DEFAULT_PORT;
+ Aws::Vector<Aws::String> m_pathSegments;
+ bool m_pathHasTrailingSlash = false;
Aws::String m_queryString;
};
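A minimal sketch of the new path-segment helpers on Aws::Http::URI (the host is illustrative):

#include <aws/core/http/URI.h>

void UriPathSketch()
{
    Aws::Http::URI uri("https://example.amazonaws.com");
    uri.AddPathSegment("my-bucket");   // leading/trailing '/' are trimmed
    uri.AddPathSegments("a/b/c/");     // split on '/', trailing slash remembered
    Aws::String rfc3986Path = uri.GetURLEncodedPathRFC3986();
    (void)rfc3986Path;
}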
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/curl/CurlHandleContainer.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/curl/CurlHandleContainer.h
index c2745753eb..15cfdbf10a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/curl/CurlHandleContainer.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/curl/CurlHandleContainer.h
@@ -43,7 +43,7 @@ public:
void ReleaseCurlHandle(CURL* handle);
/**
- * When the handle has bad DNS entries, problematic live connections, we need to destory the handle from pool.
+ * When the handle has bad DNS entries, problematic live connections, we need to destroy the handle from pool.
*/
void DestroyCurlHandle(CURL* handle);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/standard/StandardHttpRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/standard/StandardHttpRequest.h
index c9c0016ef5..adf10fd182 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/standard/StandardHttpRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/http/standard/StandardHttpRequest.h
@@ -63,7 +63,7 @@ namespace Aws
*/
virtual bool HasHeader(const char*) const override;
/**
- * Get size in bytes of the request when as it will be going accross the wire.
+ * Get size in bytes of the request when as it will be going across the wire.
*/
virtual int64_t GetSize() const override;
/**
@@ -79,7 +79,6 @@ namespace Aws
HeaderValueCollection headerMap;
std::shared_ptr<Aws::IOStream> bodyStream;
Aws::IOStreamFactory m_responseStreamFactory;
- Aws::String m_emptyHeader;
};
} // namespace Standard
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/internal/AWSHttpResourceClient.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/internal/AWSHttpResourceClient.h
index bc28cd8861..ba03f6f39a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/internal/AWSHttpResourceClient.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/internal/AWSHttpResourceClient.h
@@ -118,12 +118,13 @@ namespace Aws
*/
virtual Aws::String GetResource(const char* resourcePath) const;
+#if !defined(DISABLE_IMDSV1)
/**
* Connects to the Amazon EC2 Instance Metadata Service to retrieve the
* default credential information (if any).
*/
virtual Aws::String GetDefaultCredentials() const;
-
+#endif
/**
* Connects to the Amazon EC2 Instance Metadata Service to retrieve the
* credential information (if any) in a more secure way.
@@ -136,8 +137,19 @@ namespace Aws
*/
virtual Aws::String GetCurrentRegion() const;
+ /**
+ * Sets the endpoint used to connect to the EC2 Instance Metadata Service
+ */
+ virtual void SetEndpoint(const Aws::String& endpoint);
+
+ /**
+ * Gets the endpoint used to connect to the EC2 Instance Metadata Service
+ */
+ virtual Aws::String GetEndpoint() const;
+
private:
Aws::String m_endpoint;
+ bool m_disableIMDS;
mutable std::recursive_mutex m_tokenMutex;
mutable Aws::String m_token;
mutable bool m_tokenRequired;
@@ -249,8 +261,31 @@ namespace Aws
SSOGetRoleCredentialsResult GetSSOCredentials(const SSOGetRoleCredentialsRequest& request);
+ struct SSOCreateTokenRequest
+ {
+ Aws::String clientId;
+ Aws::String clientSecret;
+ Aws::String grantType;
+ Aws::String refreshToken;
+ };
+
+ struct SSOCreateTokenResult
+ {
+ Aws::String accessToken;
+ size_t expiresIn = 0; //seconds
+ Aws::String idToken;
+ Aws::String refreshToken;
+ Aws::String clientId;
+ Aws::String tokenType;
+ };
+
+ SSOCreateTokenResult CreateToken(const SSOCreateTokenRequest& request);
private:
+ Aws::String buildEndpoint(const Aws::Client::ClientConfiguration& clientConfiguration,
+ const Aws::String& domain,
+ const Aws::String& endpoint);
Aws::String m_endpoint;
+ Aws::String m_oidcEndpoint;
};
} // namespace Internal
} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringInterface.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringInterface.h
index ebe50722c7..68a308a092 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringInterface.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringInterface.h
@@ -26,7 +26,7 @@ namespace Aws
/**
* @brief This function lets you do preparation work when a http attempt(request) starts. It returns a pointer to an implementation defined context which will be
* passed down with the other facilities that completes the request's lifetime. This context can be used to track the lifetime of the request and record metrics
- * specific to this particular request. You are responsible for deleteing the context during your OnFinish call.
+ * specific to this particular request. You are responsible for deleting the context during your OnFinish call.
* @param serviceName, the service client who initiates this http attempt. like "s3", "ec2", etc.
* @param requestName, the operation or API name of this http attempt, like "GetObject" in s3.
* @param request, the actual Http Request.
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringManager.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringManager.h
index aaa5bca5d1..87b04fcbbf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringManager.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/monitoring/MonitoringManager.h
@@ -57,7 +57,8 @@ namespace Aws
AWS_CORE_API void InitMonitoring(const std::vector<MonitoringFactoryCreateFunction>& monitoringFactoryCreateFunctions);
/**
- * Clean up monitoring related global variables
+ * Clean up monitoring related global variables. This should be done first at shutdown, to avoid a race condition in
+ * testing whether the global Monitoring instance has been destructed.
*/
AWS_CORE_API void CleanupMonitoring();
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/net/SimpleUDP.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/net/SimpleUDP.h
index 2e96b509fb..a9f2f4f59f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/net/SimpleUDP.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/net/SimpleUDP.h
@@ -26,7 +26,7 @@ namespace Aws
* @brief Constructor of SimpleUDP
* @param addressFamily, AF_INET for IPV4 or AF_INET6 for IPV6
* @param sendBufSize, if nonzero, try set socket's send buffer size to this value.
- * @param receieveBufSize, if nonzero, try set socket's receive buffer size to this value.
+ * @param receiveBufSize, if nonzero, try set socket's receive buffer size to this value.
* @param nonBlocking, if it is true, implementation will try to create a non-blocking underlying UDP socket.
* Implementation should create and set the underlying udp socket.
*/
@@ -36,7 +36,7 @@ namespace Aws
* @brief An easy constructor of an IPV4 or IPV6 SimpleUDP
* @param addressFamily, either AF_INET for IPV4 or AF_INET6 for IPV6
* @param sendBufSize, if nonzero, try set socket's send buffer size to this value.
- * @param receieveBufSize, if nonzero, try set socket's receive buffer size to this value.
+ * @param receiveBufSize, if nonzero, try set socket's receive buffer size to this value.
* @param nonBlocking, if it is true, implementation will try to create a non-blocking underlying UDP socket.
* Implementation should create and set the underlying udp socket.
*/
@@ -48,7 +48,7 @@ namespace Aws
* Note that "localhost" is not necessarily bind to 127.0.0.1, it could bind to ipv6 address ::1, or other type of ip addresses. If you pass localhost here, we will go through getaddrinfo procedure on Linux and Windows.
* @param port, the port number that the host listens on.
* @param sendBufSize, if nonzero, try set socket's send buffer size to this value.
- * @param receieveBufSize, if nonzero, try set socket's receive buffer size to this value.
+ * @param receiveBufSize, if nonzero, try set socket's receive buffer size to this value.
* @param nonBlocking, if it is true, implementation will try to create a non-blocking underlying UDP socket.
* Implementation should create and set the underlying udp socket.
*/
@@ -131,7 +131,7 @@ namespace Aws
/**
* @brief Receive data from network.
* @param address, if not null and underlying implementation supply the incoming data's source address, this will be filled with source address info.
- * @param addressLength, the size of source adddress, should not be null.
+ * @param addressLength, the size of source address, should not be null.
* @param buffer, the memory address where you want to store received data.
* @param bufferLen, the size of data buffer.
* @return -1 on failure, check errno for detailed error information, on success, returns the actual bytes of data received.
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/DateTime.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/DateTime.h
index a410279011..878fc1ac63 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/DateTime.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/DateTime.h
@@ -49,7 +49,7 @@ namespace Aws
};
/**
- * Wrapper for all the weird crap we need to do with timestamps.
+ * Wrapper for timestamp functionality.
*/
class AWS_CORE_API DateTime
{
@@ -145,6 +145,11 @@ namespace Aws
double SecondsWithMSPrecision() const;
/**
+ * Get the seconds without millisecond precision.
+ */
+ int64_t Seconds() const;
+
+ /**
* Milliseconds since epoch of this datetime.
*/
int64_t Millis() const;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/Document.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/Document.h
new file mode 100644
index 0000000000..2c062b8ca6
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/Document.h
@@ -0,0 +1,370 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+
+#include <aws/core/utils/Array.h>
+#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/core/external/cjson/cJSON.h>
+
+#include <utility>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Json
+ {
+ class JsonValue;
+ class JsonView;
+ }
+
+ class DocumentView;
+ /**
+ * A Document type represents open content that is serialized using the same format as its surroundings and requires no additional encoding or escaping.
+ * Document types provide a JSON "view" of data regardless of the underlying protocol. This normalized JSON interface makes document types protocol-agnostic.
+ * Clients can use the data stored in a document without prior knowledge of the protocol by interacting with the normalized JSON view of the data.
+ * Document types are initially supported only in JSON-based protocols, so they are currently identical to Aws::Utils::Json::JsonValue and Aws::Utils::Json::JsonView.
+ */
+ class AWS_CORE_API Document
+ {
+ public:
+ /**
+ * Constructs empty Document.
+ */
+ Document();
+
+ /**
+ * Constructs a Document by parsing the input string.
+ */
+ Document(const Aws::String& value);
+
+ /**
+ * Constructs a Document by parsing the text in the input stream.
+ */
+ Document(Aws::IStream& istream);
+
+ /**
+ * Performs a deep copy of the Document parameter.
+ * Prefer using a @ref DocumentView if copying is not needed.
+ */
+ Document(const Document& value);
+
+ /**
+ * Moves the ownership of the internal Document.
+ * No copying is performed.
+ */
+ Document(Document&& value);
+
+ /**
+ * Performs a deep copy of the JsonView parameter.
+ */
+ Document(const Json::JsonView& view);
+
+ ~Document();
+
+ /**
+ * Performs a deep copy of the Document parameter.
+ */
+ Document& operator=(const Document& other);
+
+ /**
+ * Performs a deep copy of the JsonView parameter.
+ */
+ Document& operator=(const Json::JsonView& view);
+
+ /**
+ * Moves the ownership of the internal Document of the parameter to the current object.
+ * No copying is performed.
+ * A Document currently owned by the object will be freed prior to copying.
+ * @warning This will result in invalidating any outstanding views of the current Document. However, views
+ * of the moved-from Document remain valid.
+ */
+ Document& operator=(Document&& other);
+
+ bool operator==(const Document& other) const;
+ bool operator!=(const Document& other) const;
+
+ /**
+ * Adds a string to the top level of this Document with key.
+ */
+ Document& WithString(const Aws::String& key, const Aws::String& value);
+ Document& WithString(const char* key, const Aws::String& value);
+ /**
+ * Converts the current Document to a string.
+ */
+ Document& AsString(const Aws::String& value);
+
+ /**
+ * Adds a bool value with key to the top level of this Document.
+ */
+ Document& WithBool(const Aws::String& key, bool value);
+ Document& WithBool(const char* key, bool value);
+ /**
+ * Converts the current Document to a bool.
+ */
+ Document& AsBool(bool value);
+
+ /**
+ * Adds an integer value at key at the top level of this Document.
+ */
+ Document& WithInteger(const Aws::String& key, int value);
+ Document& WithInteger(const char* key, int value);
+ /**
+ * Converts the current Document to an integer.
+ */
+ Document& AsInteger(int value);
+
+ /**
+ * Adds a 64-bit integer value at key to the top level of this Document.
+ */
+ Document& WithInt64(const Aws::String& key, long long value);
+ Document& WithInt64(const char* key, long long value);
+ /**
+ * Converts the current Document to a 64-bit integer.
+ */
+ Document& AsInt64(long long value);
+
+ /**
+ * Adds a double value at key at the top level of this Document.
+ */
+ Document& WithDouble(const Aws::String& key, double value);
+ Document& WithDouble(const char* key, double value);
+ /**
+ * Converts the current Document to a double.
+ */
+ Document& AsDouble(double value);
+
+ /**
+ * Adds an array of strings to the top level of this Document at key.
+ */
+ Document& WithArray(const Aws::String& key, const Array<Aws::String>& array);
+ Document& WithArray(const char* key, const Array<Aws::String>& array);
+ /**
+ * Adds an array of arbitrary Document objects to the top level of this Document at key.
+ * The values in the array parameter will be deep-copied.
+ */
+ Document& WithArray(const Aws::String& key, const Array<Document>& array);
+ /**
+ * Adds an array of arbitrary Document objects to the top level of this Document at key.
+ * The values in the array parameter will be moved-from.
+ */
+ Document& WithArray(const Aws::String& key, Array<Document>&& array);
+ /**
+ * Converts the current Document to an array whose values are deep-copied from the array parameter.
+ */
+ Document& AsArray(const Array<Document>& array);
+ /**
+ * Converts the current Document to an array whose values are moved from the array parameter.
+ */
+ Document& AsArray(Array<Document>&& array);
+
+ /**
+ * Adds a Document object to the top level of this Document at key.
+ * The object parameter is deep-copied.
+ */
+ Document& WithObject(const Aws::String& key, const Document& value);
+ Document& WithObject(const char* key, const Document& value);
+ /**
+ * Adds a Document object to the top level of this Document at key.
+ */
+ Document& WithObject(const Aws::String& key, Document&& value);
+ Document& WithObject(const char* key, Document&& value);
+ /**
+ * Converts the current Document to a Document object by deep-copying the parameter.
+ */
+ Document& AsObject(const Document& value);
+ /**
+ * Converts the current Document to a Document object by moving from the parameter.
+ */
+ Document& AsObject(Document&& value);
+
+ /**
+ * Returns true if the last parse request was successful. If this returns false,
+ * you can call GetErrorMessage() to find the cause.
+ */
+ inline bool WasParseSuccessful() const
+ {
+ return m_wasParseSuccessful;
+ }
+
+ /**
+ * Returns the last error message from a failed parse attempt. Returns empty string if no error.
+ */
+ inline const Aws::String& GetErrorMessage() const
+ {
+ return m_errorMessage;
+ }
+
+ /**
+ * Creates a view from the current Document.
+ */
+ DocumentView View() const;
+
+ private:
+ void Destroy();
+ Document(cJSON* value);
+ cJSON* m_json;
+ bool m_wasParseSuccessful;
+ Aws::String m_errorMessage;
+ friend DocumentView;
+ };
+
+ /**
+ * Provides read-only view to an existing Document. This allows lightweight copying without making deep
+ * copies of the Document.
+ * Note: This class does not extend the lifetime of the given Document. It's your responsibility to ensure
+ * the lifetime of the Document is extended beyond the lifetime of its view.
+ */
+ class AWS_CORE_API DocumentView
+ {
+ public:
+ /* constructors */
+ DocumentView();
+ DocumentView(const Document& value);
+ DocumentView& operator=(const Document& value);
+
+ /**
+ * Gets a string from this Document by its key.
+ */
+ Aws::String GetString(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as a string.
+ */
+ Aws::String AsString() const;
+ /**
+ * Tests whether the current value is a string.
+ */
+ bool IsString() const;
+
+ /**
+ * Gets a boolean value from this Document by its key.
+ */
+ bool GetBool(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as a boolean.
+ */
+ bool AsBool() const;
+ /**
+ * Tests whether the current value is a boolean.
+ */
+ bool IsBool() const;
+
+ /**
+ * Gets an integer value from this Document by its key.
+ * The integer is of the same size as an int on the machine.
+ */
+ int GetInteger(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as an int.
+ */
+ int AsInteger() const;
+ /**
+ * Tests whether the current value is an int or int64_t.
+ * Returns false if the value is floating-point.
+ */
+ bool IsIntegerType() const;
+
+ /**
+ * Converts the current Document to a 64-bit integer.
+ */
+ Document& AsInt64(long long value);
+ /**
+ * Gets a 64-bit integer value from this Document by its key.
+ * The value is 64-bit regardless of the platform/machine.
+ */
+ int64_t GetInt64(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as 64-bit integer.
+ */
+ int64_t AsInt64() const;
+
+ /**
+ * Gets a double precision floating-point value from this Document by its key.
+ */
+ double GetDouble(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as a double precision floating-point.
+ */
+ double AsDouble() const;
+ /**
+ * Tests whether the current value is a floating-point.
+ */
+ bool IsFloatingPointType() const;
+
+ /**
+ * Gets an array of DocumentView objects from this Document by its key.
+ */
+ Array<DocumentView> GetArray(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as an array of DocumentView objects.
+ */
+ Array<DocumentView> AsArray() const;
+ /**
+ * Tests whether the current value is a Document array.
+ */
+ bool IsListType() const;
+
+ /**
+ * Gets a DocumentView object from this Document by its key.
+ */
+ DocumentView GetObject(const Aws::String& key) const;
+ /**
+ * Returns the value of this Document as a DocumentView object.
+ */
+ DocumentView AsObject() const;
+ /**
+ * Reads all Document objects at the top level of this Document (does not traverse the tree any further)
+ * along with their keys.
+ */
+ Aws::Map<Aws::String, DocumentView> GetAllObjects() const;
+ /**
+ * Tests whether the current value is a Document object.
+ */
+ bool IsObject() const;
+
+ /**
+ * Tests whether the current value is NULL.
+ */
+ bool IsNull() const;
+
+ /**
+ * Tests whether a value exists at the current Document level for the given key.
+ * Returns true if a value has been found and its value is not null, false otherwise.
+ */
+ bool ValueExists(const Aws::String& key) const;
+
+ /**
+ * Tests whether a key exists at the current Document level.
+ */
+ bool KeyExists(const Aws::String& key) const;
+
+ /**
+ * Writes the current Document view without whitespace characters starting at the current level to a string.
+ */
+ Aws::String WriteCompact() const;
+
+ /**
+ * Writes the current Document view to a string in a human friendly format.
+ */
+ Aws::String WriteReadable() const;
+
+ /**
+ * Creates a deep copy of the JSON value rooted in the current JSON view.
+ */
+ Document Materialize() const;
+ private:
+ DocumentView(cJSON* value);
+ DocumentView& operator=(cJSON* value);
+ cJSON* m_json;
+ friend Aws::Utils::Json::JsonValue;
+ };
+
+ } // namespace Utils
+} // namespace Aws
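A minimal sketch of building a Document and reading it back through a DocumentView (run inside the usual Aws::InitAPI/ShutdownAPI scaffolding):

#include <aws/core/utils/Document.h>

void DocumentSketch()
{
    Aws::Utils::Document doc;
    doc.WithString("name", "example")
       .WithBool("enabled", true)
       .WithInteger("count", 3);

    Aws::Utils::DocumentView view = doc.View();
    if (view.IsObject() && view.KeyExists("count"))
    {
        int count = view.GetInteger("count");
        Aws::String compact = view.WriteCompact(); // {"name":"example","enabled":true,"count":3}
        (void)count;
        (void)compact;
    }
}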
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/FileSystemUtils.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/FileSystemUtils.h
index d62ff4c094..430e7ae441 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/FileSystemUtils.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/FileSystemUtils.h
@@ -45,7 +45,7 @@ namespace Aws
*/
TempFile(const char* prefix, std::ios_base::openmode openFlags);
/**
- * Creates a temporary file with a randome string for the name.
+ * Creates a temporary file with a random string for the name.
*/
TempFile(std::ios_base::openmode openFlags);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/HashingUtils.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/HashingUtils.h
index 465c9827b2..d09fe5c94f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/HashingUtils.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/HashingUtils.h
@@ -87,6 +87,26 @@ namespace Aws
*/
static ByteBuffer CalculateMD5(Aws::IOStream& stream);
+ /**
+ * Calculates a CRC32 Hash value
+ */
+ static ByteBuffer CalculateCRC32(const Aws::String& str);
+
+ /**
+ * Calculates a CRC32 Hash value
+ */
+ static ByteBuffer CalculateCRC32(Aws::IOStream& stream);
+
+ /**
+ * Calculates a CRC32C Hash value
+ */
+ static ByteBuffer CalculateCRC32C(const Aws::String& str);
+
+ /**
+ * Calculates a CRC32C Hash value
+ */
+ static ByteBuffer CalculateCRC32C(Aws::IOStream& stream);
+
static int HashString(const char* strToHash);
};
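A minimal sketch of the new checksum helpers (Base64 is shown only as an illustrative encoding of the raw digest bytes):

#include <aws/core/utils/HashingUtils.h>

void Crc32HelpersSketch()
{
    const Aws::String payload = "hello world";
    Aws::Utils::ByteBuffer crc32  = Aws::Utils::HashingUtils::CalculateCRC32(payload);
    Aws::Utils::ByteBuffer crc32c = Aws::Utils::HashingUtils::CalculateCRC32C(payload);
    Aws::String encoded = Aws::Utils::HashingUtils::Base64Encode(crc32);
    (void)crc32c;
    (void)encoded;
}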
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/ResourceManager.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/ResourceManager.h
index 517f65d0fa..c564e58650 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/ResourceManager.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/ResourceManager.h
@@ -96,7 +96,6 @@ namespace Aws
*/
Aws::Vector<RESOURCE_TYPE> ShutdownAndWait(size_t resourceCount)
{
- Aws::Vector<RESOURCE_TYPE> resources;
std::unique_lock<std::mutex> locker(m_queueLock);
m_shutdown = true;
@@ -106,8 +105,7 @@ namespace Aws
m_semaphore.wait(locker, [&]() { return m_resources.size() == resourceCount; });
}
- resources = m_resources;
- m_resources.clear();
+ Aws::Vector<RESOURCE_TYPE> resources{std::move(m_resources)};
return resources;
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/StringUtils.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/StringUtils.h
index 312342b86a..0281a8fe06 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/StringUtils.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/StringUtils.h
@@ -10,7 +10,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
-
+#include <aws/common/byte_buf.h>
namespace Aws
@@ -82,14 +82,14 @@ namespace Aws
/**
* @brief Splits a string on a delimiter (empty items are excluded).
* @param toSplit, the original string to split
- * @param splitOn, the delemiter you want to use.
+ * @param splitOn, the delimiter you want to use.
*/
static Aws::Vector<Aws::String> Split(const Aws::String& toSplit, char splitOn);
/**
* @brief Splits a string on a delimiter.
* @param toSplit, the original string to split
- * @param splitOn, the delemiter you want to use.
+ * @param splitOn, the delimiter you want to use.
* @param option, if INCLUDE_EMPTY_ENTRIES, includes empty entries in the result, otherwise removes empty entries.
*/
static Aws::Vector<Aws::String> Split(const Aws::String& toSplit, char splitOn, SplitOptions option);
@@ -97,7 +97,7 @@ namespace Aws
/**
* @brief Splits a string on a delimiter (empty items are excluded).
* @param toSplit, the original string to split
- * @param splitOn, the delemiter you want to use.
+ * @param splitOn, the delimiter you want to use.
* @param numOfTargetParts, how many target parts you want to get, if it is 0, as many as possible.
*/
static Aws::Vector<Aws::String> Split(const Aws::String& toSplit, char splitOn, size_t numOfTargetParts);
@@ -105,7 +105,7 @@ namespace Aws
/**
* @brief Splits a string on a delimiter.
* @param toSplit, the original string to split
- * @param splitOn, the delemiter you want to use.
+ * @param splitOn, the delimiter you want to use.
* @param numOfTargetParts, how many target parts you want to get, if it is 0, as many as possible.
* @param option, if INCLUDE_EMPTY_ENTRIES, includes empty entries in the result, otherwise removes empty entries.
*/
@@ -212,8 +212,13 @@ namespace Aws
std::reverse(s.begin(), s.end());
return s;
}
+
+ static Aws::String FromByteCursor(aws_byte_cursor cursor)
+ {
+ return Aws::String(reinterpret_cast<char *>(cursor.ptr), cursor.len);
+ }
};
} // namespace Utils
-} // namespace Aws \ No newline at end of file
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/CRC32.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/CRC32.h
new file mode 100644
index 0000000000..75fc30f0d5
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/CRC32.h
@@ -0,0 +1,147 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/*
+* Interface for CRC32 and CRC32C
+*/
+#pragma once
+
+#ifdef __APPLE__
+
+#ifdef __clang__
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif // __clang__
+
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#endif // __GNUC__
+
+#endif // __APPLE__
+
+#include <aws/core/Core_EXPORTS.h>
+
+#include <aws/core/utils/crypto/Hash.h>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Crypto
+ {
+ /**
+ * CRC32 hash implementation.
+ */
+ class AWS_CORE_API CRC32 : public Hash
+ {
+ public:
+ /**
+ * Initializes platform crypto libs for crc32.
+ */
+ CRC32();
+ virtual ~CRC32();
+
+ /**
+ * Calculates a CRC32 Hash digest
+ */
+ virtual HashResult Calculate(const Aws::String& str) override;
+
+ /**
+ * Calculates a CRC32 Hash digest on a stream (the entire stream is read)
+ */
+ virtual HashResult Calculate(Aws::IStream& stream) override;
+
+ /**
+ * Updates a Hash digest
+ */
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ /**
+ * Get the result in the current value
+ */
+ virtual HashResult GetHash() override;
+ private:
+
+ std::shared_ptr< Hash > m_hashImpl;
+ };
+
+ /**
+ * CRC32C hash implementation.
+ */
+ class AWS_CORE_API CRC32C : public Hash
+ {
+ public:
+ /**
+ * Initializes platform crypto libs for crc32c.
+ */
+ CRC32C();
+ virtual ~CRC32C();
+
+ /**
+ * Calculates a CRC32C Hash digest
+ */
+ virtual HashResult Calculate(const Aws::String& str) override;
+
+ /**
+ * Calculates a CRC32C Hash digest on a stream (the entire stream is read)
+ */
+ virtual HashResult Calculate(Aws::IStream& stream) override;
+
+ /**
+ * Updates a Hash digest
+ */
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ /**
+ * Get the result in the current value
+ */
+ virtual HashResult GetHash() override;
+
+ private:
+
+ std::shared_ptr< Hash > m_hashImpl;
+ };
+
+ class AWS_CORE_API CRC32Impl : public Hash
+ {
+ public:
+
+ CRC32Impl();
+ virtual ~CRC32Impl() {}
+
+ virtual HashResult Calculate(const Aws::String& str) override;
+
+ virtual HashResult Calculate(Aws::IStream& stream) override;
+
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ virtual HashResult GetHash() override;
+
+ private:
+ int m_runningCrc32;
+ };
+
+ class AWS_CORE_API CRC32CImpl : public Hash
+ {
+ public:
+
+ CRC32CImpl();
+ virtual ~CRC32CImpl() {}
+
+ virtual HashResult Calculate(const Aws::String& str) override;
+
+ virtual HashResult Calculate(Aws::IStream& stream) override;
+
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ virtual HashResult GetHash() override;
+
+ private:
+ int m_runningCrc32c;
+ };
+
+ } // namespace Crypto
+ } // namespace Utils
+} // namespace Aws
+
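
An illustrative sketch (not taken from the SDK sources) of driving the CRC32 class declared above through the new streaming Update()/GetHash() interface; the payload bytes are arbitrary.

    #include <aws/core/Aws.h>
    #include <aws/core/utils/crypto/CRC32.h>
    #include <aws/core/utils/HashingUtils.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options); // sets up the crypto factories used behind CRC32/CRC32C
        {
            Aws::Utils::Crypto::CRC32 crc;

            unsigned char part1[] = { 'h', 'e', 'l' };
            unsigned char part2[] = { 'l', 'o' };

            // Feed the payload incrementally, then read the accumulated digest.
            crc.Update(part1, sizeof(part1));
            crc.Update(part2, sizeof(part2));

            auto result = crc.GetHash();
            if (result.IsSuccess())
            {
                // HexEncode turns the raw digest bytes into a printable string.
                std::cout << Aws::Utils::HashingUtils::HexEncode(result.GetResult()) << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
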
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Factories.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Factories.h
index a219d3eae5..12f610d4de 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Factories.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Factories.h
@@ -44,6 +44,14 @@ namespace Aws
*/
AWS_CORE_API std::shared_ptr<Hash> CreateMD5Implementation();
/**
+ * Create a CRC32 Hash provider
+ */
+ AWS_CORE_API std::shared_ptr<Hash> CreateCRC32Implementation();
+ /**
+ * Create a CRC32C Hash provider
+ */
+ AWS_CORE_API std::shared_ptr<Hash> CreateCRC32CImplementation();
+ /**
* Create a Sha1 Hash provider
*/
AWS_CORE_API std::shared_ptr<Hash> CreateSha1Implementation();
@@ -106,18 +114,26 @@ namespace Aws
/**
* Create AES in Key Wrap mode off of a 256 bit key.
*/
- AWS_CORE_API std::shared_ptr<SymmetricCipher> CreateAES_KeyWrapImplementation(const CryptoBuffer& key);
+ AWS_CORE_API std::shared_ptr<SymmetricCipher> CreateAES_KeyWrapImplementation(const CryptoBuffer& key);
/**
* Create SecureRandomBytes instance
*/
AWS_CORE_API std::shared_ptr<SecureRandomBytes> CreateSecureRandomBytesImplementation();
-
+
/**
* Set the global factory for MD5 Hash providers
*/
AWS_CORE_API void SetMD5Factory(const std::shared_ptr<HashFactory>& factory);
/**
+ * Set the global factory for CRC32 Hash providers
+ */
+ AWS_CORE_API void SetCRC32Factory(const std::shared_ptr<HashFactory>& factory);
+ /**
+ * Set the global factory for CRC32C Hash providers
+ */
+ AWS_CORE_API void SetCRC32CFactory(const std::shared_ptr<HashFactory>& factory);
+ /**
* Set the global factory for Sha1 Hash providers
*/
AWS_CORE_API void SetSha1Factory(const std::shared_ptr<HashFactory>& factory);
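
A short sketch of consuming the new factory hooks above; it assumes Aws::InitAPI() has already run, since that is what initializes the global crypto factories.

    #include <aws/core/utils/crypto/Factories.h>
    #include <aws/core/utils/HashingUtils.h>
    #include <aws/core/utils/memory/stl/AWSString.h>
    #include <iostream>

    // Assumes Aws::InitAPI() has already been called elsewhere.
    void PrintChecksums(const Aws::String& payload)
    {
        // Returns whichever implementation is currently configured; the platform
        // default unless SetCRC32Factory()/SetCRC32CFactory() swapped it out.
        auto crc32 = Aws::Utils::Crypto::CreateCRC32Implementation();
        auto crc32c = Aws::Utils::Crypto::CreateCRC32CImplementation();

        auto a = crc32->Calculate(payload);
        auto b = crc32c->Calculate(payload);
        if (a.IsSuccess() && b.IsSuccess())
        {
            std::cout << "crc32:  " << Aws::Utils::HashingUtils::HexEncode(a.GetResult()) << "\n"
                      << "crc32c: " << Aws::Utils::HashingUtils::HexEncode(b.GetResult()) << "\n";
        }
    }
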
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Hash.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Hash.h
index 8ebbe009fc..c39f012f4c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Hash.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Hash.h
@@ -36,6 +36,16 @@ namespace Aws
*/
virtual HashResult Calculate(Aws::IStream& stream) = 0;
+ /**
+ * Updates a Hash digest
+ */
+ virtual void Update(unsigned char*, size_t bufferSize) = 0;
+
+ /**
+ * Get the result in the current value
+ */
+ virtual HashResult GetHash() = 0;
+
// when hashing streams, this is the size of our internal buffer we read the stream into
static const uint32_t INTERNAL_HASH_STREAM_BUFFER_SIZE = 8192;
};
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/MD5.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/MD5.h
index 59304a4a88..6cc7178630 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/MD5.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/MD5.h
@@ -55,6 +55,16 @@ namespace Aws
*/
virtual HashResult Calculate(Aws::IStream& stream) override;
+ /**
+ * Updates a Hash digest
+ */
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ /**
+ * Get the result in the current value
+ */
+ virtual HashResult GetHash() override;
+
private:
std::shared_ptr<Hash> m_hashImpl;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha1.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha1.h
index 5d9d44f867..547ca65555 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha1.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha1.h
@@ -54,6 +54,15 @@ namespace Aws
*/
virtual HashResult Calculate(Aws::IStream& stream) override;
+ /**
+ * Updates a Hash digest
+ */
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ /**
+ * Get the result in the current value
+ */
+ virtual HashResult GetHash() override;
private:
std::shared_ptr< Hash > m_hashImpl;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha256.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha256.h
index 441752b0a6..40782cd58e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha256.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/Sha256.h
@@ -54,6 +54,16 @@ namespace Aws
*/
virtual HashResult Calculate(Aws::IStream& stream) override;
+ /**
+ * Updates a Hash digest
+ */
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ /**
+ * Get the result in the current value
+ */
+ virtual HashResult GetHash() override;
+
private:
std::shared_ptr< Hash > m_hashImpl;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/openssl/CryptoImpl.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/openssl/CryptoImpl.h
index 39f5e5fcd4..aa31d3602f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/openssl/CryptoImpl.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/crypto/openssl/CryptoImpl.h
@@ -39,7 +39,7 @@ namespace Aws
* OpenSSL implementation for SecureRandomBytes.
* Incidentally, this implementation is thread safe, though it is not
* on other platforms. You should treat an instance of SecureRandomBytes
- * as needed to be memory fenced if you will be using accross multiple threads
+ * as needing to be memory fenced if you will be using it across multiple threads
*/
class SecureRandomBytes_OpenSSLImpl : public SecureRandomBytes
{
@@ -59,40 +59,59 @@ namespace Aws
{
public:
- MD5OpenSSLImpl()
- { }
+ MD5OpenSSLImpl();
- virtual ~MD5OpenSSLImpl() = default;
+ virtual ~MD5OpenSSLImpl();
virtual HashResult Calculate(const Aws::String& str) override;
virtual HashResult Calculate(Aws::IStream& stream) override;
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ virtual HashResult GetHash() override;
+
+ private:
+ EVP_MD_CTX *m_ctx;
};
class Sha1OpenSSLImpl : public Hash
{
public:
- Sha1OpenSSLImpl() {}
- virtual ~Sha1OpenSSLImpl() = default;
+ Sha1OpenSSLImpl();
+
+ virtual ~Sha1OpenSSLImpl();
virtual HashResult Calculate(const Aws::String& str) override;
virtual HashResult Calculate(Aws::IStream& stream) override;
+
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ virtual HashResult GetHash() override;
+
+ private:
+ EVP_MD_CTX *m_ctx;
};
class Sha256OpenSSLImpl : public Hash
{
public:
- Sha256OpenSSLImpl()
- { }
+ Sha256OpenSSLImpl();
- virtual ~Sha256OpenSSLImpl() = default;
+ virtual ~Sha256OpenSSLImpl();
virtual HashResult Calculate(const Aws::String& str) override;
virtual HashResult Calculate(Aws::IStream& stream) override;
+
+ virtual void Update(unsigned char* buffer, size_t bufferSize) override;
+
+ virtual HashResult GetHash() override;
+
+ private:
+ EVP_MD_CTX *m_ctx;
};
class Sha256HMACOpenSSLImpl : public HMAC
@@ -187,7 +206,7 @@ namespace Aws
void Cleanup();
/* openssl has bug finalize decryption of an empty string */
- bool m_emptyPlaintext;
+ bool m_emptyPlaintext = false;
};
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventHeader.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventHeader.h
index c60dda0435..ae58ad5def 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventHeader.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventHeader.h
@@ -44,10 +44,11 @@ namespace Aws
UNKNOWN
};
- EventHeaderValue() = default;
+ EventHeaderValue() : m_eventHeaderType(EventHeaderType::UNKNOWN), m_eventHeaderStaticValue({0}) {}
EventHeaderValue(aws_event_stream_header_value_pair* header) :
- m_eventHeaderType(static_cast<EventHeaderType>(header->header_value_type))
+ m_eventHeaderType(static_cast<EventHeaderType>(header->header_value_type)),
+ m_eventHeaderStaticValue({0})
{
switch (m_eventHeaderType)
{
@@ -88,49 +89,57 @@ namespace Aws
EventHeaderValue(const Aws::String& s) :
m_eventHeaderType(EventHeaderType::STRING),
- m_eventHeaderVariableLengthValue(reinterpret_cast<const uint8_t*>(s.data()), s.length())
+ m_eventHeaderVariableLengthValue(reinterpret_cast<const uint8_t*>(s.data()), s.length()),
+ m_eventHeaderStaticValue({0})
{
}
EventHeaderValue(const ByteBuffer& bb) :
m_eventHeaderType(EventHeaderType::BYTE_BUF),
- m_eventHeaderVariableLengthValue(bb)
+ m_eventHeaderVariableLengthValue(bb),
+ m_eventHeaderStaticValue({0})
{
}
EventHeaderValue(ByteBuffer&& bb) :
m_eventHeaderType(EventHeaderType::BYTE_BUF),
- m_eventHeaderVariableLengthValue(std::move(bb))
+ m_eventHeaderVariableLengthValue(std::move(bb)),
+ m_eventHeaderStaticValue({0})
{
}
explicit EventHeaderValue(unsigned char byte) :
- m_eventHeaderType(EventHeaderType::BYTE)
+ m_eventHeaderType(EventHeaderType::BYTE),
+ m_eventHeaderStaticValue({0})
{
m_eventHeaderStaticValue.byteValue = byte;
}
explicit EventHeaderValue(bool b) :
- m_eventHeaderType(b ? EventHeaderType::BOOL_TRUE : EventHeaderType::BOOL_FALSE)
+ m_eventHeaderType(b ? EventHeaderType::BOOL_TRUE : EventHeaderType::BOOL_FALSE),
+ m_eventHeaderStaticValue({0})
{
m_eventHeaderStaticValue.boolValue = b;
}
explicit EventHeaderValue(int16_t n) :
- m_eventHeaderType(EventHeaderType::INT16)
+ m_eventHeaderType(EventHeaderType::INT16),
+ m_eventHeaderStaticValue({0})
{
m_eventHeaderStaticValue.int16Value = n;
}
explicit EventHeaderValue(int32_t n) :
- m_eventHeaderType(EventHeaderType::INT32)
+ m_eventHeaderType(EventHeaderType::INT32),
+ m_eventHeaderStaticValue({0})
{
m_eventHeaderStaticValue.int32Value = n;
}
explicit EventHeaderValue(int64_t n, EventHeaderType type = EventHeaderType::INT64) :
- m_eventHeaderType(type)
+ m_eventHeaderType(type),
+ m_eventHeaderStaticValue({0})
{
if (type == EventHeaderType::TIMESTAMP)
{
@@ -295,12 +304,12 @@ namespace Aws
ByteBuffer m_eventHeaderVariableLengthValue;
union
{
- bool boolValue;
- uint8_t byteValue;
- int16_t int16Value;
- int32_t int32Value;
- int64_t int64Value;
int64_t timestampValue;
+ int64_t int64Value;
+ int32_t int32Value;
+ int16_t int16Value;
+ uint8_t byteValue;
+ bool boolValue;
} m_eventHeaderStaticValue;
};
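
A small standalone illustration, not SDK code, of why the constructors above pair m_eventHeaderStaticValue({0}) with the reordered union: brace-initializing a union with {0} only initializes its first member, so listing a widest 64-bit member first appears to be what guarantees the whole value starts out zeroed.

    #include <cstdint>
    #include <cassert>

    union Wide   { int64_t int64Value; bool boolValue; };   // widest member first, as in the patched header
    union Narrow { bool boolValue; int64_t int64Value; };   // narrowest member first, as before

    int main()
    {
        Wide w{0};    // {0} initializes int64Value, so all 8 bytes of the value are zero
        Narrow n{0};  // {0} initializes only boolValue; the remaining storage is unspecified

        assert(w.int64Value == 0);
        (void)n; // reading n.int64Value here would not be guaranteed to yield zero
        return 0;
    }
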
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventMessage.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventMessage.h
index 4540fbe79c..48e13c0865 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventMessage.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventMessage.h
@@ -109,9 +109,9 @@ namespace Aws
inline Aws::String GetEventPayloadAsString() { return Aws::String(m_eventPayload.begin(), m_eventPayload.end()); }
private:
- size_t m_totalLength;
- size_t m_headersLength;
- size_t m_payloadLength;
+ size_t m_totalLength = 0;
+ size_t m_headersLength = 0;
+ size_t m_payloadLength = 0;
Aws::Utils::Event::EventHeaderValueCollection m_eventHeaders;
Aws::Vector<unsigned char> m_eventPayload;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamEncoder.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamEncoder.h
index 3cd06a9df2..8b9c123241 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamEncoder.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamEncoder.h
@@ -41,8 +41,20 @@ namespace Aws
*/
Aws::Vector<unsigned char> EncodeAndSign(const Aws::Utils::Event::Message& msg);
private:
- aws_event_stream_message Encode(const Aws::Utils::Event::Message& msg);
- aws_event_stream_message Sign(aws_event_stream_message* msg);
+ /**
+ * Initialize C struct based on C++ object.
+ * Returns true if successful.
+ * A successfully initialized struct must be cleaned up when you're done with it.
+ */
+ bool InitEncodedStruct(const Aws::Utils::Event::Message& msg, aws_event_stream_message* encoded);
+
+ /**
+ * Initialize signed C struct based on unsigned C struct.
+ * Returns true if successful.
+ * A successfully initialized struct must be cleaned up when you're done with it.
+ */
+ bool InitSignedStruct(const aws_event_stream_message* msg, aws_event_stream_message* signedmsg);
+
Aws::Client::AWSAuthSigner* m_signer;
Aws::String m_signatureSeed;
};
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamHandler.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamHandler.h
index 257bfd8288..ac13c8ee28 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamHandler.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/event/EventStreamHandler.h
@@ -29,7 +29,7 @@ namespace Aws
{
public:
EventStreamHandler() :
- m_failure(false), m_internalError(EventStreamErrors::EVENT_STREAM_NO_ERROR), m_headersBytesReceived(0), m_payloadBytesReceived(0)
+ m_failure(false), m_internalError(EventStreamErrors::EVENT_STREAM_NO_ERROR), m_headersBytesReceived(0), m_payloadBytesReceived(0), m_message()
{}
virtual ~EventStreamHandler() = default;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/json/JsonSerializer.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/json/JsonSerializer.h
index 657ee6eff4..8b2ccc139b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/json/JsonSerializer.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/json/JsonSerializer.h
@@ -19,6 +19,9 @@ namespace Aws
{
namespace Utils
{
+ class Document;
+ class DocumentView;
+
namespace Json
{
class JsonView;
@@ -56,6 +59,11 @@ namespace Aws
*/
JsonValue(JsonValue&& value);
+ /**
+ * Performs a deep copy of the Document parameter.
+ */
+ JsonValue(const Aws::Utils::DocumentView& value);
+
~JsonValue();
/**
@@ -72,6 +80,11 @@ namespace Aws
*/
JsonValue& operator=(JsonValue&& other);
+ /**
+ * Performs a deep copy of the Document parameter.
+ */
+ JsonValue& operator=(const Aws::Utils::DocumentView& value);
+
bool operator==(const JsonValue& other) const;
bool operator!=(const JsonValue& other) const;
@@ -373,6 +386,7 @@ namespace Aws
JsonView(cJSON* val);
JsonView& operator=(cJSON* val);
cJSON* m_value;
+ friend class Aws::Utils::Document;
};
} // namespace Json
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogSystem.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogSystem.h
new file mode 100644
index 0000000000..204f974bbf
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogSystem.h
@@ -0,0 +1,67 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <aws/core/utils/logging/LogLevel.h>
+#include <aws/common/logging.h>
+
+#include <atomic>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Logging
+ {
+ enum class LogLevel : int;
+
+ /**
+ * Interface for CRT (common runtime libraries) logging implementations.
+ * A wrapper on top of aws_logger, the logging interface used by the common runtime libraries.
+ */
+ class AWS_CORE_API CRTLogSystemInterface
+ {
+ public:
+ virtual ~CRTLogSystemInterface() = default;
+ };
+
+ /**
+ * The default CRT log system simply redirects all logs from the common runtime libraries to the C++ SDK logging system.
+ * You can override the virtual function Log() in a subclass to change this default behavior.
+ */
+ class AWS_CORE_API DefaultCRTLogSystem : public CRTLogSystemInterface
+ {
+ public:
+ DefaultCRTLogSystem(LogLevel logLevel);
+ virtual ~DefaultCRTLogSystem();
+
+ /**
+ * Gets the currently configured log level.
+ */
+ LogLevel GetLogLevel() const { return m_logLevel; }
+ /**
+ * Set a new log level. This has the immediate effect of changing the log output to the new level.
+ */
+ void SetLogLevel(LogLevel logLevel) { m_logLevel.store(logLevel); }
+
+ /**
+ * Handle the logging information from common runtime libraries.
+ * Redirect them to C++ SDK logging system by default.
+ */
+ virtual void Log(LogLevel logLevel, const char* subjectName, const char* formatStr, va_list args);
+
+ protected:
+ std::atomic<LogLevel> m_logLevel;
+ /**
+ * Underlying logging interface used by common runtime libraries.
+ */
+ aws_logger m_logger;
+ };
+
+ } // namespace Logging
+ } // namespace Utils
+} // namespace Aws
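
A hedged sketch of the kind of subclass the comment above invites; QuietCRTLogSystem is a hypothetical type that forwards only warnings and more severe CRT messages to the default redirection and drops the rest.

    #include <aws/core/utils/logging/CRTLogSystem.h>
    #include <aws/core/utils/logging/LogLevel.h>
    #include <cstdarg>

    // Hypothetical subclass: forwards only Warn and more severe messages to the default handler.
    class QuietCRTLogSystem : public Aws::Utils::Logging::DefaultCRTLogSystem
    {
    public:
        explicit QuietCRTLogSystem(Aws::Utils::Logging::LogLevel level)
            : DefaultCRTLogSystem(level) {}

        void Log(Aws::Utils::Logging::LogLevel logLevel, const char* subjectName,
                 const char* formatStr, va_list args) override
        {
            // Lower enum values are more severe (Fatal < Error < Warn < Info ...).
            if (logLevel <= Aws::Utils::Logging::LogLevel::Warn)
            {
                // Delegate to the default behavior: redirection into the SDK log system.
                DefaultCRTLogSystem::Log(logLevel, subjectName, formatStr, args);
            }
            // Otherwise drop the message.
        }
    };
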
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogging.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogging.h
new file mode 100644
index 0000000000..5b0e6ac580
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/CRTLogging.h
@@ -0,0 +1,31 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+#include <memory>
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Logging
+ {
+ class CRTLogSystemInterface;
+
+ /**
+ * Initialize the CRT (common runtime libraries) log system to handle logging from the common runtime libraries, such as aws-c-auth, aws-c-http, and aws-c-event-stream.
+ */
+ AWS_CORE_API void InitializeCRTLogging(const std::shared_ptr<CRTLogSystemInterface>& crtLogSystem);
+
+ /**
+ * Shutdown CRT (common runtime libraries) log system.
+ */
+ AWS_CORE_API void ShutdownCRTLogging();
+
+ } // namespace Logging
+ } // namespace Utils
+} // namespace Aws
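
A minimal sketch of calling these two entry points directly; in practice Aws::InitAPI() performs the same wiring (see the Aws.cpp hunk later in this diff), so doing it by hand is only needed outside the normal InitAPI/ShutdownAPI lifecycle.

    #include <aws/core/utils/logging/CRTLogging.h>
    #include <aws/core/utils/logging/CRTLogSystem.h>
    #include <aws/core/utils/logging/LogLevel.h>
    #include <aws/core/utils/memory/AWSMemory.h>

    static const char ALLOC_TAG[] = "CRTLoggingExample";

    void EnableCrtLogging()
    {
        // Route common-runtime-library logs into the SDK at Info verbosity.
        Aws::Utils::Logging::InitializeCRTLogging(
            Aws::MakeShared<Aws::Utils::Logging::DefaultCRTLogSystem>(
                ALLOC_TAG, Aws::Utils::Logging::LogLevel::Info));
    }

    void DisableCrtLogging()
    {
        Aws::Utils::Logging::ShutdownCRTLogging();
    }
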
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/ErrorMacros.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/ErrorMacros.h
new file mode 100644
index 0000000000..8783ba9c0a
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/logging/ErrorMacros.h
@@ -0,0 +1,57 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/core/Core_EXPORTS.h>
+
+#include <aws/core/utils/logging/LogLevel.h>
+#include <aws/core/utils/logging/AWSLogging.h>
+#include <aws/core/utils/logging/LogMacros.h>
+
+#define AWS_OPERATION_CHECK_PTR(PTR, OPERATION, ERROR_TYPE, ERROR) \
+do { \
+ if (PTR == nullptr) \
+ { \
+ AWS_LOGSTREAM_FATAL(#OPERATION, "Unexpected nullptr: " #PTR); \
+ return OPERATION##Outcome(Aws::Client::AWSError<ERROR_TYPE>(ERROR, #ERROR, "Unexpected nullptr: " #PTR, false)); \
+ } \
+} while (0)
+
+#define AWS_CHECK(LOG_TAG, CONDITION, ERROR_MESSAGE, RETURN) \
+do { \
+ if (!(CONDITION)) \
+ { \
+ AWS_LOGSTREAM_ERROR(LOG_TAG, ERROR_MESSAGE); \
+ return RETURN; \
+ } \
+} while (0)
+
+#define AWS_CHECK_PTR(LOG_TAG, PTR) \
+do { \
+ if (PTR == nullptr) \
+ { \
+ AWS_LOGSTREAM_FATAL(LOG_TAG, "Unexpected nullptr: " #PTR); \
+ return; \
+ } \
+} while (0)
+
+#define AWS_OPERATION_CHECK_SUCCESS(OUTCOME, OPERATION, ERROR_TYPE, ERROR, ERROR_MESSAGE) \
+do { \
+ if (!OUTCOME.IsSuccess()) \
+ { \
+ AWS_LOGSTREAM_ERROR(#OPERATION, ERROR_MESSAGE); \
+ return OPERATION##Outcome(Aws::Client::AWSError<ERROR_TYPE>(ERROR, #ERROR, ERROR_MESSAGE, false)); \
+ } \
+} while (0)
+
+#define AWS_OPERATION_CHECK_PARAMETER_PRESENT(REQUEST, FIELD, OPERATION, CLIENT_NAMESPACE) \
+do { \
+ if (!REQUEST##.##FIELD##HasBeenSet()) \
+ { \
+ AWS_LOGSTREAM_ERROR(#OPERATION, "Required field: "#FIELD" is not set"); \
+ return OPERATION##Outcome(Aws::Client::AWSError<CLIENT_NAMESPACE##Errors>(CLIENT_NAMESPACE##Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field ["#FIELD"]", false)); \
+ } \
+} while (0)
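
A sketch of how the two simpler macros above read in ordinary code; LoadConfig, Describe, and the log tag are hypothetical names used only for illustration.

    #include <aws/core/utils/logging/ErrorMacros.h>
    #include <aws/core/utils/memory/stl/AWSString.h>

    static const char LOG_TAG[] = "ErrorMacrosExample";

    // Hypothetical helper: logs an ERROR line and returns false when the path is empty.
    bool LoadConfig(const Aws::String& path)
    {
        AWS_CHECK(LOG_TAG, !path.empty(), "config path must not be empty", false);
        // ... parse the file here ...
        return true;
    }

    // Hypothetical helper: logs a FATAL line and bails out on a null pointer.
    void Describe(const Aws::String* config)
    {
        AWS_CHECK_PTR(LOG_TAG, config);
        // safe to dereference config from here on
    }
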
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/AWSMemory.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/AWSMemory.h
index 5b1221917a..125b4e05a5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/AWSMemory.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/AWSMemory.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/UnreferencedParam.h>
#include <aws/core/utils/memory/MemorySystemInterface.h>
+#include <assert.h>
#include <memory>
#include <cstdlib>
#include <algorithm>
@@ -246,7 +247,50 @@ namespace Aws
}
};
- template< typename T > using UniquePtr = std::unique_ptr< T, Deleter< T > >;
+ template< typename T, typename D = Deleter< T > > using UniquePtr = std::unique_ptr< T, D >;
+
+ /*
+ * A UniquePtr that ensures that the underlying pointer is set to null on destruction.
+ * ...thanks to the legacy design, UniquePtr is used as a static global variable that may be destructed twice.
+ */
+ template< typename T, typename D = Deleter< T > >
+ class UniquePtrSafeDeleted : public UniquePtr< T, D >
+ {
+ public:
+ using UniquePtr<T,D>::UniquePtr;
+ UniquePtrSafeDeleted(const UniquePtrSafeDeleted&) noexcept = delete;
+ UniquePtrSafeDeleted(UniquePtrSafeDeleted&&) noexcept = default;
+ UniquePtrSafeDeleted& operator=( const UniquePtrSafeDeleted<T,D>& r ) noexcept = delete;
+ UniquePtrSafeDeleted& operator=( UniquePtrSafeDeleted<T,D>&& r ) noexcept
+ {
+ if(&r != this) {
+ UniquePtr<T, D>::operator=(std::move(r));
+ r.forceReset();
+ }
+ return *this;
+ }
+ UniquePtrSafeDeleted& operator=( std::nullptr_t ) noexcept
+ {
+ forceReset();
+ return *this;
+ }
+
+ void forceReset()
+ {
+ if(!this->get())
+ return;
+ this->reset(nullptr);
+ T volatile* newVal = this->get(); // volatile to prohibit optimizing out setting ptr to null
+ AWS_UNREFERENCED_PARAM(newVal);
+ // issue happens in Release where asserts are not enabled, so the next statement is for you, my dear reader
+ assert(newVal == nullptr && this->get() == nullptr);
+ }
+
+ ~UniquePtrSafeDeleted()
+ {
+ forceReset();
+ }
+ };
/**
* ::new, ::delete, ::malloc, ::free, std::make_shared, and std::make_unique should not be used in SDK code
@@ -255,9 +299,20 @@ namespace Aws
template<typename T, typename ...ArgTypes>
UniquePtr<T> MakeUnique(const char* allocationTag, ArgTypes&&... args)
{
+ static_assert(!std::is_array<T>::value || std::is_trivial<T>::value,
+ "This wrapper/function is not designed to support non-trivial arrays.");
return UniquePtr<T>(Aws::New<T>(allocationTag, std::forward<ArgTypes>(args)...));
}
+ template<typename T, typename D = Deleter<T>, typename ...ArgTypes>
+ UniquePtrSafeDeleted<T, D> MakeUniqueSafeDeleted(const char* allocationTag, ArgTypes&&... args)
+ {
+ static_assert(!std::is_array<T>::value || std::is_trivial<T>::value,
+ "This wrapper/function is not designed to support non-trivial arrays.");
+
+ return UniquePtrSafeDeleted<T, D>(Aws::New<T>(allocationTag, std::forward<ArgTypes>(args)...), D());
+ }
+
template<typename T>
struct ArrayDeleter
{
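
An illustrative sketch of the new safe-deleted smart pointer and its MakeUniqueSafeDeleted helper; Widget is a hypothetical type, and the point is that assignment from nullptr and destruction both go through forceReset().

    #include <aws/core/utils/memory/AWSMemory.h>

    static const char ALLOC_TAG[] = "SafeDeletedExample";

    struct Widget
    {
        int value = 0;
    };

    void Demo()
    {
        // Allocated through the SDK memory system, deleted with Aws::Deleter<Widget>.
        Aws::UniquePtrSafeDeleted<Widget> widget =
            Aws::MakeUniqueSafeDeleted<Widget>(ALLOC_TAG);
        widget->value = 42;

        // Unlike a plain UniquePtr, assigning nullptr goes through forceReset(),
        // which leaves the stored pointer observably null afterwards.
        widget = nullptr;
    }
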
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSAllocator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSAllocator.h
index 932408b761..0e680cb9b1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSAllocator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSAllocator.h
@@ -10,6 +10,7 @@
#include <aws/core/SDKConfig.h>
#include <aws/core/utils/memory/AWSMemory.h>
#include <aws/core/utils/memory/MemorySystemInterface.h>
+#include <aws/crt/StlAllocator.h>
#include <memory>
#include <cstdlib>
@@ -17,54 +18,8 @@
namespace Aws
{
#ifdef USE_AWS_MEMORY_MANAGEMENT
- /**
- * Std allocator interface that is used for all STL types in the event that Custom Memory Management is being used.
- */
- template <typename T>
- class Allocator : public std::allocator<T>
- {
- public:
-
- typedef std::allocator<T> Base;
-
- Allocator() throw() :
- Base()
- {}
-
- Allocator(const Allocator<T>& a) throw() :
- Base(a)
- {}
-
- template <class U>
- Allocator(const Allocator<U>& a) throw() :
- Base(a)
- {}
-
- ~Allocator() throw() {}
- typedef std::size_t size_type;
-
- template<typename U>
- struct rebind
- {
- typedef Allocator<U> other;
- };
-
- typename Base::pointer allocate(size_type n, const void *hint = nullptr)
- {
- AWS_UNREFERENCED_PARAM(hint);
-
- return reinterpret_cast<typename Base::pointer>(Malloc("AWSSTL", n * sizeof(T)));
- }
-
- void deallocate(typename Base::pointer p, size_type n)
- {
- AWS_UNREFERENCED_PARAM(n);
-
- Free(p);
- }
-
- };
+ template< typename T > using Allocator = Aws::Crt::StlAllocator<T>;
#ifdef __ANDROID__
#if _GLIBCXX_FULLY_DYNAMIC_STRING == 0
@@ -80,7 +35,7 @@ namespace Aws
#endif // __ANDROID__
#else
-
+
template< typename T > using Allocator = std::allocator<T>;
#endif // USE_AWS_MEMORY_MANAGEMENT
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSArray.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSArray.h
new file mode 100644
index 0000000000..f80be9e4a5
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSArray.h
@@ -0,0 +1,12 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <array>
+
+namespace Aws
+{
+ template< typename T, std::size_t N > using Array = std::array< T, N >;
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSSet.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSSet.h
index 6aee204f39..e3ee9a0c6d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSSet.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSSet.h
@@ -10,10 +10,12 @@
#include <aws/core/utils/memory/stl/AWSAllocator.h>
#include <set>
+#include <unordered_set>
namespace Aws
{
template< typename T > using Set = std::set< T, std::less< T >, Aws::Allocator< T > >;
+template< typename T > using UnorderedSet = std::unordered_set< T, std::hash< T >, std::equal_to< T >, Aws::Allocator< T > >;
} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSString.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSString.h
index 3cd013e3f8..8b6b691b84 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSString.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/memory/stl/AWSString.h
@@ -104,5 +104,18 @@ using WString = std::basic_string< wchar_t, std::char_traits< wchar_t >, Aws::Al
} // namespace Aws
+#ifdef USE_AWS_MEMORY_MANAGEMENT
+#include <aws/crt/StringUtils.h>
-
+/* Inject hash method for an Aws::String with a custom allocator to work around an original C++ defect:
+ * "hash support for std::basic_string with customized allocators was not enabled"
+ * see LWG 3705: https://en.cppreference.com/w/cpp/string/basic_string/hash */
+namespace std
+{
+ template<>
+ struct hash<Aws::String>
+ {
+ size_t operator()(const Aws::String& t) const { return Aws::Crt::HashString(t.c_str()); }
+ };
+}
+#endif
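
A quick sketch tying this change to the new Aws::UnorderedSet alias in AWSSet.h above: with the std::hash<Aws::String> specialization in place, an unordered container keyed by Aws::String compiles under custom memory management.

    #include <aws/core/utils/memory/stl/AWSSet.h>
    #include <aws/core/utils/memory/stl/AWSString.h>
    #include <iostream>

    int main()
    {
        // std::unordered_set needs std::hash<Aws::String>, which the specialization
        // above provides (delegating to Aws::Crt::HashString).
        Aws::UnorderedSet<Aws::String> regions;
        regions.insert("us-east-1");
        regions.insert("eu-west-1");
        regions.insert("us-east-1"); // duplicate, ignored

        std::cout << regions.size() << "\n"; // 2
        return 0;
    }
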
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/stream/ResponseStream.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/stream/ResponseStream.h
index e82e6448cd..b4488a7636 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/stream/ResponseStream.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/stream/ResponseStream.h
@@ -49,14 +49,22 @@ namespace Aws
/**
* Gives access to underlying stream, but keep in mind that this changes state of the stream
*/
- inline Aws::IOStream& GetUnderlyingStream() const { return *m_underlyingStream; }
+ Aws::IOStream& GetUnderlyingStream() const;
private:
void ReleaseStream();
+ void RegisterStream();
+ void DeregisterStream();
- Aws::IOStream* m_underlyingStream;
+ Aws::IOStream* m_underlyingStream = nullptr;
+
+ static const int xindex;
+ static void StreamCallback(Aws::IOStream::event evt, std::ios_base& str, int idx);
};
+ /**
+ * A default IOStream for ResponseStream.
+ */
class AWS_CORE_API DefaultUnderlyingStream : public Aws::IOStream
{
public:
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/threading/Executor.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/threading/Executor.h
index 36975af513..376abf480a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/threading/Executor.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/threading/Executor.h
@@ -42,6 +42,12 @@ namespace Aws
return SubmitToThread(std::move(callable));
}
+ /* explicit _overload_ of the template function above to avoid template bloat */
+ bool Submit(std::function<void()>&& callable)
+ {
+ return SubmitToThread(std::move(callable));
+ }
+
protected:
/**
* To implement your own executor implementation, then simply subclass Executor and implement this method.
@@ -71,7 +77,7 @@ namespace Aws
enum class OverflowPolicy
{
- QUEUE_TASKS_EVENLY_ACCROSS_THREADS,
+ QUEUE_TASKS_EVENLY_ACROSS_THREADS,
REJECT_IMMEDIATELY
};
@@ -81,7 +87,7 @@ namespace Aws
class AWS_CORE_API PooledThreadExecutor : public Executor
{
public:
- PooledThreadExecutor(size_t poolSize, OverflowPolicy overflowPolicy = OverflowPolicy::QUEUE_TASKS_EVENLY_ACCROSS_THREADS);
+ PooledThreadExecutor(size_t poolSize, OverflowPolicy overflowPolicy = OverflowPolicy::QUEUE_TASKS_EVENLY_ACROSS_THREADS);
~PooledThreadExecutor();
/**
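
A short sketch of the executor surface touched above, exercising both the renamed OverflowPolicy value and the new non-template Submit(std::function<void()>&&) overload.

    #include <aws/core/utils/threading/Executor.h>
    #include <functional>
    #include <iostream>

    int main()
    {
        using namespace Aws::Utils::Threading;

        // Four worker threads; extra tasks are queued across threads rather than rejected.
        PooledThreadExecutor executor(4, OverflowPolicy::QUEUE_TASKS_EVENLY_ACROSS_THREADS);

        std::function<void()> task = [] { std::cout << "hello from the pool\n"; };

        // Hits the std::function overload added in this change (no extra template instantiation);
        // the task runs asynchronously on one of the pool threads.
        executor.Submit(std::move(task));

        return 0; // the pool is torn down in ~PooledThreadExecutor()
    }
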
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/xml/XmlSerializer.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/xml/XmlSerializer.h
index d1ca79ffab..bb6fe2ca58 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/xml/XmlSerializer.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/include/aws/core/utils/xml/XmlSerializer.h
@@ -160,6 +160,7 @@ namespace Aws
*/
XmlDocument(const XmlDocument& other);
XmlDocument(XmlDocument&& doc);
+ XmlDocument();
XmlDocument& operator=(const XmlDocument& other);
XmlDocument& operator=(XmlDocument&& other);
@@ -197,7 +198,6 @@ namespace Aws
static XmlDocument CreateWithRootNode(const Aws::String&);
private:
- XmlDocument();
void InitDoc();
Aws::External::tinyxml2::XMLDocument* m_doc;
@@ -215,4 +215,3 @@ namespace Aws
} // namespace Xml
} // namespace Utils
} // namespace Aws
-
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonWebServiceRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonWebServiceRequest.cpp
index a6b0406683..c0f89adad0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonWebServiceRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/AmazonWebServiceRequest.cpp
@@ -18,3 +18,17 @@ AmazonWebServiceRequest::AmazonWebServiceRequest() :
{
}
+AmazonWebServiceRequest::EndpointParameters AmazonWebServiceRequest::GetEndpointContextParams() const
+{
+ return AmazonWebServiceRequest::EndpointParameters();
+}
+
+const Aws::Http::HeaderValueCollection& AmazonWebServiceRequest::GetAdditionalCustomHeaders() const
+{
+ return m_additionalCustomHeaders;
+}
+
+void AmazonWebServiceRequest::SetAdditionalCustomHeaderValue(const Aws::String& headerName, const Aws::String& headerValue)
+{
+ m_additionalCustomHeaders[Utils::StringUtils::ToLower(headerName.c_str())] = Utils::StringUtils::Trim(headerValue.c_str());
+}
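
A hedged sketch of the custom-header hooks implemented above; any concrete service request derives from AmazonWebServiceRequest, so the helper below simply takes the base class by reference, and the header name and value are made up.

    #include <aws/core/AmazonWebServiceRequest.h>
    #include <iostream>

    void TagRequest(Aws::AmazonWebServiceRequest& request)
    {
        // Header names are lower-cased and values trimmed by the implementation above.
        request.SetAdditionalCustomHeaderValue("X-Custom-Trace", "  abc123  ");

        for (const auto& header : request.GetAdditionalCustomHeaders())
        {
            std::cout << header.first << ": " << header.second << "\n"; // x-custom-trace: abc123
        }
    }
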
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Aws.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Aws.cpp
index 0711fd69d6..4fd97618f3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Aws.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Aws.cpp
@@ -7,6 +7,7 @@
#include <aws/core/Aws.h>
#include <aws/core/client/CoreErrors.h>
#include <aws/core/utils/logging/AWSLogging.h>
+#include <aws/core/utils/logging/CRTLogging.h>
#include <aws/core/utils/logging/DefaultLogSystem.h>
#include <aws/core/Globals.h>
#include <aws/core/external/cjson/cJSON.h>
@@ -27,6 +28,7 @@ namespace Aws
Aws::Utils::Memory::InitializeAWSMemorySystem(*options.memoryManagementOptions.memoryManager);
}
#endif // USE_AWS_MEMORY_MANAGEMENT
+ Aws::InitializeCrt();
Aws::Client::CoreErrorsMapper::InitCoreErrorsMapper();
if(options.loggingOptions.logLevel != Aws::Utils::Logging::LogLevel::Off)
{
@@ -39,12 +41,46 @@ namespace Aws
Aws::Utils::Logging::InitializeAWSLogging(
Aws::MakeShared<Aws::Utils::Logging::DefaultLogSystem>(ALLOCATION_TAG, options.loggingOptions.logLevel, options.loggingOptions.defaultLogPrefix));
}
+ if(options.loggingOptions.crt_logger_create_fn)
+ {
+ Aws::Utils::Logging::InitializeCRTLogging(options.loggingOptions.crt_logger_create_fn());
+ }
+ else
+ {
+ Aws::Utils::Logging::InitializeCRTLogging(
+ Aws::MakeShared<Aws::Utils::Logging::DefaultCRTLogSystem>(ALLOCATION_TAG, options.loggingOptions.logLevel));
+ }
// For users to better debugging in case multiple versions of SDK installed
AWS_LOGSTREAM_INFO(ALLOCATION_TAG, "Initiate AWS SDK for C++ with Version:" << Aws::String(Aws::Version::GetVersionString()));
}
Aws::Config::InitConfigAndCredentialsCacheManager();
+ if (options.ioOptions.clientBootstrap_create_fn)
+ {
+ Aws::SetDefaultClientBootstrap(options.ioOptions.clientBootstrap_create_fn());
+ }
+ else
+ {
+ Aws::Crt::Io::EventLoopGroup eventLoopGroup;
+ Aws::Crt::Io::DefaultHostResolver defaultHostResolver(eventLoopGroup, 8, 30);
+ auto clientBootstrap = Aws::MakeShared<Aws::Crt::Io::ClientBootstrap>(ALLOCATION_TAG, eventLoopGroup, defaultHostResolver);
+ clientBootstrap->EnableBlockingShutdown();
+ Aws::SetDefaultClientBootstrap(clientBootstrap);
+ }
+
+ if (options.ioOptions.tlsConnectionOptions_create_fn)
+ {
+ Aws::SetDefaultTlsConnectionOptions(options.ioOptions.tlsConnectionOptions_create_fn());
+ }
+ else
+ {
+ Aws::Crt::Io::TlsContextOptions tlsCtxOptions = Aws::Crt::Io::TlsContextOptions::InitDefaultClient();
+ Aws::Crt::Io::TlsContext tlsContext(tlsCtxOptions, Aws::Crt::Io::TlsMode::CLIENT);
+ auto tlsConnectionOptions = Aws::MakeShared<Aws::Crt::Io::TlsConnectionOptions>(ALLOCATION_TAG, tlsContext.NewConnectionOptions());
+ Aws::SetDefaultTlsConnectionOptions(tlsConnectionOptions);
+ }
+
if (options.cryptoOptions.aes_CBCFactory_create_fn)
{
Aws::Utils::Crypto::SetAES_CBCFactory(options.cryptoOptions.aes_CBCFactory_create_fn());
@@ -100,6 +136,7 @@ namespace Aws
Aws::Http::SetInitCleanupCurlFlag(options.httpOptions.initAndCleanupCurl);
Aws::Http::SetInstallSigPipeHandlerFlag(options.httpOptions.installSigPipeHandler);
+ Aws::Http::SetCompliantRfc3986Encoding(options.httpOptions.compliantRfc3986Encoding);
Aws::Http::InitHttp();
Aws::InitializeEnumOverflowContainer();
cJSON_AS4CPP_Hooks hooks;
@@ -122,13 +159,13 @@ namespace Aws
Aws::Config::CleanupConfigAndCredentialsCacheManager();
- if(options.loggingOptions.logLevel != Aws::Utils::Logging::LogLevel::Off)
+ Aws::Client::CoreErrorsMapper::CleanupCoreErrorsMapper();
+ Aws::CleanupCrt();
+ if (options.loggingOptions.logLevel != Aws::Utils::Logging::LogLevel::Off)
{
+ Aws::Utils::Logging::ShutdownCRTLogging();
Aws::Utils::Logging::ShutdownAWSLogging();
}
-
- Aws::Client::CoreErrorsMapper::CleanupCoreErrorsMapper();
-
#ifdef USE_AWS_MEMORY_MANAGEMENT
if(options.memoryManagementOptions.memoryManager)
{
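
A sketch of driving the new InitAPI branches above from user code: the option field names come straight from this hunk, the bootstrap body mirrors the default path shown above, and the aws-crt-cpp include paths plus the 8-entry/30-second resolver numbers are assumptions carried over from it.

    #include <aws/core/Aws.h>
    #include <aws/core/utils/logging/LogLevel.h>
    #include <aws/core/utils/memory/AWSMemory.h>
    #include <aws/crt/io/Bootstrap.h>
    #include <aws/crt/io/EventLoopGroup.h>
    #include <aws/crt/io/HostResolver.h>

    static const char ALLOC_TAG[] = "InitApiExample";

    int main()
    {
        Aws::SDKOptions options;
        options.loggingOptions.logLevel = Aws::Utils::Logging::LogLevel::Info;

        // Supply our own client bootstrap instead of letting InitAPI build the default one.
        options.ioOptions.clientBootstrap_create_fn = []() {
            Aws::Crt::Io::EventLoopGroup eventLoopGroup;
            Aws::Crt::Io::DefaultHostResolver resolver(eventLoopGroup, 8, 30);
            auto bootstrap = Aws::MakeShared<Aws::Crt::Io::ClientBootstrap>(
                ALLOC_TAG, eventLoopGroup, resolver);
            bootstrap->EnableBlockingShutdown();
            return bootstrap;
        };

        Aws::InitAPI(options);
        // ... use SDK clients here ...
        Aws::ShutdownAPI(options);
        return 0;
    }
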
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
index 55f2ee9220..8c26d2389d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Globals.cpp
@@ -2,13 +2,61 @@
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
+
+#include <aws/crt/Api.h>
+#include <aws/crt/io/TlsOptions.h>
+#include <aws/crt/io/Bootstrap.h>
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
#include <aws/core/utils/memory/AWSMemory.h>
+#include <aws/auth/auth.h>
namespace Aws
{
static const char TAG[] = "GlobalEnumOverflowContainer";
+
+ static Aws::Crt::ApiHandle* g_apiHandle;
+ static std::shared_ptr<Aws::Crt::Io::ClientBootstrap> g_defaultClientBootstrap(nullptr);
+ static std::shared_ptr<Aws::Crt::Io::TlsConnectionOptions> g_defaultTlsConnectionOptions(nullptr);
+
+ Aws::Crt::ApiHandle* GetApiHandle()
+ {
+ return g_apiHandle;
+ }
+
+ void SetDefaultClientBootstrap(const std::shared_ptr<Aws::Crt::Io::ClientBootstrap>& clientBootstrap)
+ {
+ g_defaultClientBootstrap = clientBootstrap;
+ }
+
+ Aws::Crt::Io::ClientBootstrap* GetDefaultClientBootstrap()
+ {
+ return g_defaultClientBootstrap.get();
+ }
+
+ void SetDefaultTlsConnectionOptions(const std::shared_ptr<Aws::Crt::Io::TlsConnectionOptions>& tlsConnectionOptions)
+ {
+ g_defaultTlsConnectionOptions = tlsConnectionOptions;
+ }
+
+ Aws::Crt::Io::TlsConnectionOptions* GetDefaultTlsConnectionOptions()
+ {
+ return g_defaultTlsConnectionOptions.get();
+ }
+
+ void InitializeCrt()
+ {
+ g_apiHandle = Aws::New<Aws::Crt::ApiHandle>(TAG, Aws::get_aws_allocator());
+ }
+
+ void CleanupCrt()
+ {
+ Aws::SetDefaultClientBootstrap(nullptr);
+ Aws::SetDefaultTlsConnectionOptions(nullptr);
+ Aws::Delete(g_apiHandle);
+ g_apiHandle = nullptr;
+ }
+
static Utils::EnumParseOverflowContainer* g_enumOverflow;
Utils::EnumParseOverflowContainer* GetEnumOverflowContainer()
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
index 4b18bf2a2a..620923d6cd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/Region.cpp
@@ -15,6 +15,10 @@ namespace Aws
{
return Aws::Region::US_EAST_1;
}
+ else if (region == "fips-aws-global")
+ {
+ return Aws::Region::US_EAST_1;
+ }
else if (region == "s3-external-1")
{
return Aws::Region::US_EAST_1;
@@ -32,5 +36,18 @@ namespace Aws
return region;
}
}
+
+ bool IsFipsRegion(const Aws::String& region)
+ {
+ if (region.size() >= 5 && region.compare(0, 5, "fips-") == 0)
+ {
+ return true;
+ }
+ else if (region.size() >= 5 && region.compare(region.size() - 5, 5, "-fips") == 0)
+ {
+ return true;
+ }
+ return false;
+ }
}
} \ No newline at end of file
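
A tiny usage sketch for the new helper above, assuming it is reachable as Aws::Region::IsFipsRegion (the enclosing inner namespace is not visible in this hunk).

    #include <aws/core/Region.h>
    #include <cassert>

    int main()
    {
        // Matches both the "fips-" prefix and the "-fips" suffix cases handled above.
        assert(Aws::Region::IsFipsRegion("fips-us-gov-west-1"));
        assert(Aws::Region::IsFipsRegion("us-east-1-fips"));
        assert(!Aws::Region::IsFipsRegion("us-east-1"));
        return 0;
    }
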
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
deleted file mode 100644
index 0baa00058f..0000000000
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSigner.cpp
+++ /dev/null
@@ -1,806 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/core/auth/AWSAuthSigner.h>
-
-#include <aws/core/auth/AWSCredentialsProvider.h>
-#include <aws/core/client/ClientConfiguration.h>
-#include <aws/core/http/HttpRequest.h>
-#include <aws/core/http/HttpResponse.h>
-#include <aws/core/utils/DateTime.h>
-#include <aws/core/utils/HashingUtils.h>
-#include <aws/core/utils/Outcome.h>
-#include <aws/core/utils/StringUtils.h>
-#include <aws/core/utils/logging/LogMacros.h>
-#include <aws/core/utils/memory/AWSMemory.h>
-#include <aws/core/utils/crypto/Sha256.h>
-#include <aws/core/utils/crypto/Sha256HMAC.h>
-#include <aws/core/utils/stream/PreallocatedStreamBuf.h>
-#include <aws/core/utils/event/EventMessage.h>
-#include <aws/core/utils/event/EventHeader.h>
-
-#include <cstdio>
-#include <iomanip>
-#include <math.h>
-#include <cstring>
-
-using namespace Aws;
-using namespace Aws::Client;
-using namespace Aws::Auth;
-using namespace Aws::Http;
-using namespace Aws::Utils;
-using namespace Aws::Utils::Logging;
-
-static const char* EQ = "=";
-static const char* AWS_HMAC_SHA256 = "AWS4-HMAC-SHA256";
-static const char* EVENT_STREAM_CONTENT_SHA256 = "STREAMING-AWS4-HMAC-SHA256-EVENTS";
-static const char* EVENT_STREAM_PAYLOAD = "AWS4-HMAC-SHA256-PAYLOAD";
-static const char* AWS4_REQUEST = "aws4_request";
-static const char* SIGNED_HEADERS = "SignedHeaders";
-static const char* CREDENTIAL = "Credential";
-static const char* NEWLINE = "\n";
-static const char* X_AMZ_SIGNED_HEADERS = "X-Amz-SignedHeaders";
-static const char* X_AMZ_ALGORITHM = "X-Amz-Algorithm";
-static const char* X_AMZ_CREDENTIAL = "X-Amz-Credential";
-static const char* UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD";
-static const char* X_AMZ_SIGNATURE = "X-Amz-Signature";
-static const char* X_AMZN_TRACE_ID = "x-amzn-trace-id";
-static const char* X_AMZ_CONTENT_SHA256 = "x-amz-content-sha256";
-static const char* USER_AGENT = "user-agent";
-static const char* SIGNING_KEY = "AWS4";
-static const char* SIMPLE_DATE_FORMAT_STR = "%Y%m%d";
-static const char* EMPTY_STRING_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
-
-static const char v4LogTag[] = "AWSAuthV4Signer";
-static const char v4StreamingLogTag[] = "AWSAuthEventStreamV4Signer";
-
-namespace Aws
-{
- namespace Auth
- {
- const char SIGNATURE[] = "Signature";
- const char SIGV4_SIGNER[] = "SignatureV4";
- const char EVENTSTREAM_SIGV4_SIGNER[] = "EventStreamSignatureV4";
- const char EVENTSTREAM_SIGNATURE_HEADER[] = ":chunk-signature";
- const char EVENTSTREAM_DATE_HEADER[] = ":date";
- const char NULL_SIGNER[] = "NullSigner";
- }
-}
-
-static Aws::String CanonicalizeRequestSigningString(HttpRequest& request, bool urlEscapePath)
-{
- request.CanonicalizeRequest();
- Aws::StringStream signingStringStream;
- signingStringStream << HttpMethodMapper::GetNameForHttpMethod(request.GetMethod());
-
- URI uriCpy = request.GetUri();
- // Many AWS services do not decode the URL before calculating SignatureV4 on their end.
- // This results in the signature getting calculated with a double encoded URL.
- // That means we have to double encode it here for the signature to match on the service side.
- if(urlEscapePath)
- {
- // RFC3986 is how we encode the URL before sending it on the wire.
- auto rfc3986EncodedPath = URI::URLEncodePathRFC3986(uriCpy.GetPath());
- uriCpy.SetPath(rfc3986EncodedPath);
- // However, SignatureV4 uses this URL encoding scheme
- signingStringStream << NEWLINE << uriCpy.GetURLEncodedPath() << NEWLINE;
- }
- else
- {
- // For the services that DO decode the URL first; we don't need to double encode it.
- uriCpy.SetPath(uriCpy.GetURLEncodedPath());
- signingStringStream << NEWLINE << uriCpy.GetPath() << NEWLINE;
- }
-
- if (request.GetQueryString().find('=') != std::string::npos)
- {
- signingStringStream << request.GetQueryString().substr(1) << NEWLINE;
- }
- else if (request.GetQueryString().size() > 1)
- {
- signingStringStream << request.GetQueryString().substr(1) << "=" << NEWLINE;
- }
- else
- {
- signingStringStream << NEWLINE;
- }
-
- return signingStringStream.str();
-}
-
-static Http::HeaderValueCollection CanonicalizeHeaders(Http::HeaderValueCollection&& headers)
-{
- Http::HeaderValueCollection canonicalHeaders;
- for (const auto& header : headers)
- {
- auto trimmedHeaderName = StringUtils::Trim(header.first.c_str());
- auto trimmedHeaderValue = StringUtils::Trim(header.second.c_str());
-
- //multiline gets converted to line1,line2,etc...
- auto headerMultiLine = StringUtils::SplitOnLine(trimmedHeaderValue);
- Aws::String headerValue = headerMultiLine.size() == 0 ? "" : headerMultiLine[0];
-
- if (headerMultiLine.size() > 1)
- {
- for(size_t i = 1; i < headerMultiLine.size(); ++i)
- {
- headerValue += ",";
- headerValue += StringUtils::Trim(headerMultiLine[i].c_str());
- }
- }
-
- //duplicate spaces need to be converted to one.
- Aws::String::iterator new_end =
- std::unique(headerValue.begin(), headerValue.end(),
- [=](char lhs, char rhs) { return (lhs == rhs) && (lhs == ' '); }
- );
- headerValue.erase(new_end, headerValue.end());
-
- canonicalHeaders[trimmedHeaderName] = headerValue;
- }
-
- return canonicalHeaders;
-}
-
-AWSAuthV4Signer::AWSAuthV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>& credentialsProvider,
- const char* serviceName, const Aws::String& region, PayloadSigningPolicy signingPolicy, bool urlEscapePath) :
- m_includeSha256HashHeader(true),
- m_credentialsProvider(credentialsProvider),
- m_serviceName(serviceName),
- m_region(region),
- m_hash(Aws::MakeUnique<Aws::Utils::Crypto::Sha256>(v4LogTag)),
- m_HMAC(Aws::MakeUnique<Aws::Utils::Crypto::Sha256HMAC>(v4LogTag)),
- m_unsignedHeaders({USER_AGENT, X_AMZN_TRACE_ID}),
- m_payloadSigningPolicy(signingPolicy),
- m_urlEscapePath(urlEscapePath)
-{
- //go ahead and warm up the signing cache.
- ComputeHash(credentialsProvider->GetAWSCredentials().GetAWSSecretKey(), DateTime::CalculateGmtTimestampAsString(SIMPLE_DATE_FORMAT_STR), region, m_serviceName);
-}
-
-AWSAuthV4Signer::~AWSAuthV4Signer()
-{
- // empty destructor in .cpp file to keep from needing the implementation of (AWSCredentialsProvider, Sha256, Sha256HMAC) in the header file
-}
-
-
-bool AWSAuthV4Signer::ShouldSignHeader(const Aws::String& header) const
-{
- return m_unsignedHeaders.find(Aws::Utils::StringUtils::ToLower(header.c_str())) == m_unsignedHeaders.cend();
-}
-
-bool AWSAuthV4Signer::SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const
-{
- AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
-
- //don't sign anonymous requests
- if (credentials.GetAWSAccessKeyId().empty() || credentials.GetAWSSecretKey().empty())
- {
- return true;
- }
-
- if (!credentials.GetSessionToken().empty())
- {
- request.SetAwsSessionToken(credentials.GetSessionToken());
- }
-
- Aws::String payloadHash(UNSIGNED_PAYLOAD);
- switch(m_payloadSigningPolicy)
- {
- case PayloadSigningPolicy::Always:
- signBody = true;
- break;
- case PayloadSigningPolicy::Never:
- signBody = false;
- break;
- case PayloadSigningPolicy::RequestDependent:
- // respect the request setting
- default:
- break;
- }
-
- if(signBody || request.GetUri().GetScheme() != Http::Scheme::HTTPS)
- {
- payloadHash = ComputePayloadHash(request);
- if (payloadHash.empty())
- {
- return false;
- }
- }
- else
- {
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Note: Http payloads are not being signed. signPayloads=" << signBody
- << " http scheme=" << Http::SchemeMapper::ToString(request.GetUri().GetScheme()));
- }
-
- if(m_includeSha256HashHeader)
- {
- request.SetHeaderValue(X_AMZ_CONTENT_SHA256, payloadHash);
- }
-
- //calculate date header to use in internal signature (this also goes into date header).
- DateTime now = GetSigningTimestamp();
- Aws::String dateHeaderValue = now.ToGmtString(DateFormat::ISO_8601_BASIC);
- request.SetHeaderValue(AWS_DATE_HEADER, dateHeaderValue);
-
- Aws::StringStream headersStream;
- Aws::StringStream signedHeadersStream;
-
- for (const auto& header : CanonicalizeHeaders(request.GetHeaders()))
- {
- if(ShouldSignHeader(header.first))
- {
- headersStream << header.first.c_str() << ":" << header.second.c_str() << NEWLINE;
- signedHeadersStream << header.first.c_str() << ";";
- }
- }
-
- Aws::String canonicalHeadersString = headersStream.str();
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Header String: " << canonicalHeadersString);
-
- //calculate signed headers parameter
- Aws::String signedHeadersValue = signedHeadersStream.str();
- //remove that last semi-colon
- if (!signedHeadersValue.empty())
- {
- signedHeadersValue.pop_back();
- }
-
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Signed Headers value:" << signedHeadersValue);
-
- //generate generalized canonicalized request string.
- Aws::String canonicalRequestString = CanonicalizeRequestSigningString(request, m_urlEscapePath);
-
- //append v4 stuff to the canonical request string.
- canonicalRequestString.append(canonicalHeadersString);
- canonicalRequestString.append(NEWLINE);
- canonicalRequestString.append(signedHeadersValue);
- canonicalRequestString.append(NEWLINE);
- canonicalRequestString.append(payloadHash);
-
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Request String: " << canonicalRequestString);
-
- //now compute sha256 on that request string
- auto hashResult = m_hash->Calculate(canonicalRequestString);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to hash (sha256) request string");
- AWS_LOGSTREAM_DEBUG(v4LogTag, "The request string is: \"" << canonicalRequestString << "\"");
- return false;
- }
-
- auto sha256Digest = hashResult.GetResult();
- Aws::String canonicalRequestHash = HashingUtils::HexEncode(sha256Digest);
- Aws::String simpleDate = now.ToGmtString(SIMPLE_DATE_FORMAT_STR);
-
- Aws::String signingRegion = region ? region : m_region;
- Aws::String signingServiceName = serviceName ? serviceName : m_serviceName;
- Aws::String stringToSign = GenerateStringToSign(dateHeaderValue, simpleDate, canonicalRequestHash, signingRegion, signingServiceName);
- auto finalSignature = GenerateSignature(credentials, stringToSign, simpleDate, signingRegion, signingServiceName);
-
- Aws::StringStream ss;
- ss << AWS_HMAC_SHA256 << " " << CREDENTIAL << EQ << credentials.GetAWSAccessKeyId() << "/" << simpleDate
- << "/" << signingRegion << "/" << signingServiceName << "/" << AWS4_REQUEST << ", " << SIGNED_HEADERS << EQ
- << signedHeadersValue << ", " << SIGNATURE << EQ << finalSignature;
-
- auto awsAuthString = ss.str();
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Signing request with: " << awsAuthString);
- request.SetAwsAuthorization(awsAuthString);
- request.SetSigningAccessKey(credentials.GetAWSAccessKeyId());
- request.SetSigningRegion(signingRegion);
- return true;
-}
-
-bool AWSAuthV4Signer::PresignRequest(Aws::Http::HttpRequest& request, long long expirationTimeInSeconds) const
-{
- return PresignRequest(request, m_region.c_str(), expirationTimeInSeconds);
-}
-
-bool AWSAuthV4Signer::PresignRequest(Aws::Http::HttpRequest& request, const char* region, long long expirationInSeconds) const
-{
- return PresignRequest(request, region, m_serviceName.c_str(), expirationInSeconds);
-}
-
-bool AWSAuthV4Signer::PresignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, long long expirationTimeInSeconds) const
-{
- AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
-
- //don't sign anonymous requests
- if (credentials.GetAWSAccessKeyId().empty() || credentials.GetAWSSecretKey().empty())
- {
- return true;
- }
-
- Aws::StringStream intConversionStream;
- intConversionStream << expirationTimeInSeconds;
- request.AddQueryStringParameter(Http::X_AMZ_EXPIRES_HEADER, intConversionStream.str());
-
- if (!credentials.GetSessionToken().empty())
- {
- request.AddQueryStringParameter(Http::AWS_SECURITY_TOKEN, credentials.GetSessionToken());
- }
-
- //calculate date header to use in internal signature (this also goes into date header).
- DateTime now = GetSigningTimestamp();
- Aws::String dateQueryValue = now.ToGmtString(DateFormat::ISO_8601_BASIC);
- request.AddQueryStringParameter(Http::AWS_DATE_HEADER, dateQueryValue);
-
- Aws::StringStream headersStream;
- Aws::StringStream signedHeadersStream;
- for (const auto& header : CanonicalizeHeaders(request.GetHeaders()))
- {
- if(ShouldSignHeader(header.first))
- {
- headersStream << header.first.c_str() << ":" << header.second.c_str() << NEWLINE;
- signedHeadersStream << header.first.c_str() << ";";
- }
- }
-
- Aws::String canonicalHeadersString = headersStream.str();
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Header String: " << canonicalHeadersString);
-
- //calculate signed headers parameter
- Aws::String signedHeadersValue(signedHeadersStream.str());
- //remove that last semi-colon
- if (!signedHeadersValue.empty())
- {
- signedHeadersValue.pop_back();
- }
-
- request.AddQueryStringParameter(X_AMZ_SIGNED_HEADERS, signedHeadersValue);
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Signed Headers value: " << signedHeadersValue);
-
- Aws::StringStream ss;
- Aws::String signingRegion = region ? region : m_region;
- Aws::String signingServiceName = serviceName ? serviceName : m_serviceName;
- Aws::String simpleDate = now.ToGmtString(SIMPLE_DATE_FORMAT_STR);
- ss << credentials.GetAWSAccessKeyId() << "/" << simpleDate
- << "/" << signingRegion << "/" << signingServiceName << "/" << AWS4_REQUEST;
-
- request.AddQueryStringParameter(X_AMZ_ALGORITHM, AWS_HMAC_SHA256);
- request.AddQueryStringParameter(X_AMZ_CREDENTIAL, ss.str());
- ss.str("");
-
- request.SetSigningAccessKey(credentials.GetAWSAccessKeyId());
- request.SetSigningRegion(signingRegion);
-
- //generate generalized canonicalized request string.
- Aws::String canonicalRequestString = CanonicalizeRequestSigningString(request, m_urlEscapePath);
-
- //append v4 stuff to the canonical request string.
- canonicalRequestString.append(canonicalHeadersString);
- canonicalRequestString.append(NEWLINE);
- canonicalRequestString.append(signedHeadersValue);
- canonicalRequestString.append(NEWLINE);
- if (ServiceRequireUnsignedPayload(signingServiceName))
- {
- canonicalRequestString.append(UNSIGNED_PAYLOAD);
- }
- else
- {
- canonicalRequestString.append(EMPTY_STRING_SHA256);
- }
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Request String: " << canonicalRequestString);
-
- //now compute sha256 on that request string
- auto hashResult = m_hash->Calculate(canonicalRequestString);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to hash (sha256) request string");
- AWS_LOGSTREAM_DEBUG(v4LogTag, "The request string is: \"" << canonicalRequestString << "\"");
- return false;
- }
-
- auto sha256Digest = hashResult.GetResult();
- auto canonicalRequestHash = HashingUtils::HexEncode(sha256Digest);
-
- auto stringToSign = GenerateStringToSign(dateQueryValue, simpleDate, canonicalRequestHash, signingRegion, signingServiceName);
- auto finalSigningHash = GenerateSignature(credentials, stringToSign, simpleDate, signingRegion, signingServiceName);
- if (finalSigningHash.empty())
- {
- return false;
- }
-
- //add that the signature to the query string
- request.AddQueryStringParameter(X_AMZ_SIGNATURE, finalSigningHash);
-
- return true;
-}
-
-bool AWSAuthV4Signer::ServiceRequireUnsignedPayload(const Aws::String& serviceName) const
-{
- // S3 uses a magic string (instead of the empty string) for its body hash for presigned URLs as outlined here:
- // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
- // this is true for PUT, POST, GET, DELETE and HEAD operations.
- // However, other services (for example RDS) implement the specification as outlined here:
- // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
- // which states that body-less requests should use the empty-string SHA256 hash.
- return "s3" == serviceName || "s3-object-lambda" == serviceName;
-}
-
-Aws::String AWSAuthV4Signer::GenerateSignature(const AWSCredentials& credentials, const Aws::String& stringToSign,
- const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
-{
- auto key = ComputeHash(credentials.GetAWSSecretKey(), simpleDate, region, serviceName);
- return GenerateSignature(stringToSign, key);
-}
-
-Aws::String AWSAuthV4Signer::GenerateSignature(const Aws::String& stringToSign, const ByteBuffer& key) const
-{
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Final String to sign: " << stringToSign);
-
- Aws::StringStream ss;
-
- auto hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)stringToSign.c_str(), stringToSign.length()), key);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Unable to hmac (sha256) final string");
- AWS_LOGSTREAM_DEBUG(v4LogTag, "The final string is: \"" << stringToSign << "\"");
- return {};
- }
-
- //now we finally sign our request string with our hex encoded derived hash.
- auto finalSigningDigest = hashResult.GetResult();
-
- auto finalSigningHash = HashingUtils::HexEncode(finalSigningDigest);
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Final computed signing hash: " << finalSigningHash);
-
- return finalSigningHash;
-}
-
-Aws::String AWSAuthV4Signer::ComputePayloadHash(Aws::Http::HttpRequest& request) const
-{
- if (!request.GetContentBody())
- {
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Using cached empty string sha256 " << EMPTY_STRING_SHA256 << " because payload is empty.");
- return EMPTY_STRING_SHA256;
- }
-
- //compute hash on payload if it exists.
- auto hashResult = m_hash->Calculate(*request.GetContentBody());
-
- if(request.GetContentBody())
- {
- request.GetContentBody()->clear();
- request.GetContentBody()->seekg(0);
- }
-
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Unable to hash (sha256) request body");
- return {};
- }
-
- auto sha256Digest = hashResult.GetResult();
-
- Aws::String payloadHash(HashingUtils::HexEncode(sha256Digest));
- AWS_LOGSTREAM_DEBUG(v4LogTag, "Calculated sha256 " << payloadHash << " for payload.");
- return payloadHash;
-}
-
-Aws::String AWSAuthV4Signer::GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
- const Aws::String& canonicalRequestHash, const Aws::String& region, const Aws::String& serviceName) const
-{
- //generate the actual string we will use in signing the final request.
- Aws::StringStream ss;
-
- ss << AWS_HMAC_SHA256 << NEWLINE << dateValue << NEWLINE << simpleDate << "/" << region << "/"
- << serviceName << "/" << AWS4_REQUEST << NEWLINE << canonicalRequestHash;
-
- return ss.str();
-}
-
-Aws::Utils::ByteBuffer AWSAuthV4Signer::ComputeHash(const Aws::String& secretKey,
- const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
-{
- Aws::String signingKey(SIGNING_KEY);
- signingKey.append(secretKey);
- auto hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)simpleDate.c_str(), simpleDate.length()),
- ByteBuffer((unsigned char*)signingKey.c_str(), signingKey.length()));
-
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to HMAC (SHA256) date string \"" << simpleDate << "\"");
- return {};
- }
-
- auto kDate = hashResult.GetResult();
- hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)region.c_str(), region.length()), kDate);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to HMAC (SHA256) region string \"" << region << "\"");
- return {};
- }
-
- auto kRegion = hashResult.GetResult();
- hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)serviceName.c_str(), serviceName.length()), kRegion);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to HMAC (SHA256) service string \"" << m_serviceName << "\"");
- return {};
- }
-
- auto kService = hashResult.GetResult();
- hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)AWS4_REQUEST, strlen(AWS4_REQUEST)), kService);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4LogTag, "Unable to HMAC (SHA256) request string");
- AWS_LOGSTREAM_DEBUG(v4LogTag, "The request string is: \"" << AWS4_REQUEST << "\"");
- return {};
- }
- return hashResult.GetResult();
-}
-
-AWSAuthEventStreamV4Signer::AWSAuthEventStreamV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>&
- credentialsProvider, const char* serviceName, const Aws::String& region) :
- m_serviceName(serviceName),
- m_region(region),
- m_credentialsProvider(credentialsProvider)
-{
-
- m_unsignedHeaders.emplace_back(X_AMZN_TRACE_ID);
- m_unsignedHeaders.emplace_back(USER_AGENT_HEADER);
-}
-
-bool AWSAuthEventStreamV4Signer::SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool /* signBody */) const
-{
- AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
-
- //don't sign anonymous requests
- if (credentials.GetAWSAccessKeyId().empty() || credentials.GetAWSSecretKey().empty())
- {
- return true;
- }
-
- if (!credentials.GetSessionToken().empty())
- {
- request.SetAwsSessionToken(credentials.GetSessionToken());
- }
-
- request.SetHeaderValue(X_AMZ_CONTENT_SHA256, EVENT_STREAM_CONTENT_SHA256);
-
- //calculate date header to use in internal signature (this also goes into date header).
- DateTime now = GetSigningTimestamp();
- Aws::String dateHeaderValue = now.ToGmtString(DateFormat::ISO_8601_BASIC);
- request.SetHeaderValue(AWS_DATE_HEADER, dateHeaderValue);
-
- Aws::StringStream headersStream;
- Aws::StringStream signedHeadersStream;
-
- for (const auto& header : CanonicalizeHeaders(request.GetHeaders()))
- {
- if(ShouldSignHeader(header.first))
- {
- headersStream << header.first.c_str() << ":" << header.second.c_str() << NEWLINE;
- signedHeadersStream << header.first.c_str() << ";";
- }
- }
-
- Aws::String canonicalHeadersString = headersStream.str();
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Canonical Header String: " << canonicalHeadersString);
-
- //calculate signed headers parameter
- Aws::String signedHeadersValue = signedHeadersStream.str();
- //remove that last semi-colon
- if (!signedHeadersValue.empty())
- {
- signedHeadersValue.pop_back();
- }
-
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Signed Headers value:" << signedHeadersValue);
-
- //generate generalized canonicalized request string.
- Aws::String canonicalRequestString = CanonicalizeRequestSigningString(request, true/* m_urlEscapePath */);
-
- //append v4 stuff to the canonical request string.
- canonicalRequestString.append(canonicalHeadersString);
- canonicalRequestString.append(NEWLINE);
- canonicalRequestString.append(signedHeadersValue);
- canonicalRequestString.append(NEWLINE);
- canonicalRequestString.append(EVENT_STREAM_CONTENT_SHA256);
-
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Canonical Request String: " << canonicalRequestString);
-
- //now compute sha256 on that request string
- auto hashResult = m_hash.Calculate(canonicalRequestString);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to hash (sha256) request string");
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "The request string is: \"" << canonicalRequestString << "\"");
- return false;
- }
-
- auto sha256Digest = hashResult.GetResult();
- Aws::String canonicalRequestHash = HashingUtils::HexEncode(sha256Digest);
- Aws::String simpleDate = now.ToGmtString(SIMPLE_DATE_FORMAT_STR);
-
- Aws::String signingRegion = region ? region : m_region;
- Aws::String signingServiceName = serviceName ? serviceName : m_serviceName;
- Aws::String stringToSign = GenerateStringToSign(dateHeaderValue, simpleDate, canonicalRequestHash, signingRegion, signingServiceName);
- auto finalSignature = GenerateSignature(credentials, stringToSign, simpleDate, signingRegion, signingServiceName);
-
- Aws::StringStream ss;
- ss << AWS_HMAC_SHA256 << " " << CREDENTIAL << EQ << credentials.GetAWSAccessKeyId() << "/" << simpleDate
- << "/" << signingRegion << "/" << signingServiceName << "/" << AWS4_REQUEST << ", " << SIGNED_HEADERS << EQ
- << signedHeadersValue << ", " << SIGNATURE << EQ << HashingUtils::HexEncode(finalSignature);
-
- auto awsAuthString = ss.str();
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Signing request with: " << awsAuthString);
- request.SetAwsAuthorization(awsAuthString);
- request.SetSigningAccessKey(credentials.GetAWSAccessKeyId());
- request.SetSigningRegion(signingRegion);
- return true;
-}
-
-// this works regardless of whether the current machine is big- or little-endian
-static void WriteBigEndian(Aws::String& str, uint64_t n)
-{
- int shift = 56;
- while(shift >= 0)
- {
- str.push_back((n >> shift) & 0xFF);
- shift -= 8;
- }
-}
-
-bool AWSAuthEventStreamV4Signer::SignEventMessage(Event::Message& message, Aws::String& priorSignature) const
-{
- using Event::EventHeaderValue;
-
- Aws::StringStream stringToSign;
- stringToSign << EVENT_STREAM_PAYLOAD << NEWLINE;
- const DateTime now = GetSigningTimestamp();
- const auto simpleDate = now.ToGmtString(SIMPLE_DATE_FORMAT_STR);
- stringToSign << now.ToGmtString(DateFormat::ISO_8601_BASIC) << NEWLINE
- << simpleDate << "/" << m_region << "/"
- << m_serviceName << "/aws4_request" << NEWLINE << priorSignature << NEWLINE;
-
-
- Aws::String nonSignatureHeaders;
- nonSignatureHeaders.push_back(char(sizeof(EVENTSTREAM_DATE_HEADER) - 1)); // length of the string
- nonSignatureHeaders += EVENTSTREAM_DATE_HEADER;
- nonSignatureHeaders.push_back(static_cast<char>(EventHeaderValue::EventHeaderType::TIMESTAMP)); // type of the value
- WriteBigEndian(nonSignatureHeaders, static_cast<uint64_t>(now.Millis())); // the value of the timestamp in big-endian
-
- auto hashOutcome = m_hash.Calculate(nonSignatureHeaders);
- if (!hashOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to hash (sha256) non-signature headers.");
- return false;
- }
-
- const auto nonSignatureHeadersHash = hashOutcome.GetResult();
- stringToSign << HashingUtils::HexEncode(nonSignatureHeadersHash) << NEWLINE;
-
- if (message.GetEventPayload().empty())
- {
- AWS_LOGSTREAM_WARN(v4StreamingLogTag, "Attempting to sign an empty message (no payload and no headers). "
- "It is unlikely that this is the intended behavior.");
- }
- else
- {
- // use a preallocatedStreamBuf to avoid making a copy.
- // The Hashing API requires either Aws::String or IStream as input.
-        // TODO: the hashing API should accept 'unsigned char*' as input.
- Utils::Stream::PreallocatedStreamBuf streamBuf(message.GetEventPayload().data(), message.GetEventPayload().size());
- Aws::IOStream payload(&streamBuf);
- hashOutcome = m_hash.Calculate(payload);
-
- if (!hashOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to hash (sha256) non-signature headers.");
- return false;
- }
- const auto payloadHash = hashOutcome.GetResult();
- stringToSign << HashingUtils::HexEncode(payloadHash);
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Payload hash - " << HashingUtils::HexEncode(payloadHash));
- }
-
- Utils::ByteBuffer finalSignatureDigest = GenerateSignature(m_credentialsProvider->GetAWSCredentials(), stringToSign.str(), simpleDate, m_region, m_serviceName);
- const auto finalSignature = HashingUtils::HexEncode(finalSignatureDigest);
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Final computed signing hash: " << finalSignature);
- priorSignature = finalSignature;
-
- message.InsertEventHeader(EVENTSTREAM_DATE_HEADER, EventHeaderValue(now.Millis(), EventHeaderValue::EventHeaderType::TIMESTAMP));
- message.InsertEventHeader(EVENTSTREAM_SIGNATURE_HEADER, std::move(finalSignatureDigest));
-
- AWS_LOGSTREAM_INFO(v4StreamingLogTag, "Event chunk final signature - " << finalSignature);
- return true;
-}
-
-bool AWSAuthEventStreamV4Signer::ShouldSignHeader(const Aws::String& header) const
-{
- return std::find(m_unsignedHeaders.cbegin(), m_unsignedHeaders.cend(), Aws::Utils::StringUtils::ToLower(header.c_str())) == m_unsignedHeaders.cend();
-}
-
-Utils::ByteBuffer AWSAuthEventStreamV4Signer::GenerateSignature(const AWSCredentials& credentials, const Aws::String& stringToSign,
- const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
-{
- Utils::Threading::ReaderLockGuard guard(m_derivedKeyLock);
- const auto& secretKey = credentials.GetAWSSecretKey();
- if (secretKey != m_currentSecretKey || simpleDate != m_currentDateStr)
- {
- guard.UpgradeToWriterLock();
- // double-checked lock to prevent updating twice
- if (m_currentDateStr != simpleDate || m_currentSecretKey != secretKey)
- {
- m_currentSecretKey = secretKey;
- m_currentDateStr = simpleDate;
- m_derivedKey = ComputeHash(m_currentSecretKey, m_currentDateStr, region, serviceName);
- }
-
- }
- return GenerateSignature(stringToSign, m_derivedKey);
-}
-
-Utils::ByteBuffer AWSAuthEventStreamV4Signer::GenerateSignature(const Aws::String& stringToSign, const ByteBuffer& key) const
-{
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Final String to sign: " << stringToSign);
-
- Aws::StringStream ss;
-
- auto hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)stringToSign.c_str(), stringToSign.length()), key);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Unable to hmac (sha256) final string");
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "The final string is: \"" << stringToSign << "\"");
- return {};
- }
-
- return hashResult.GetResult();
-}
-
-Aws::String AWSAuthEventStreamV4Signer::GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
- const Aws::String& canonicalRequestHash, const Aws::String& region, const Aws::String& serviceName) const
-{
- //generate the actual string we will use in signing the final request.
- Aws::StringStream ss;
-
- ss << AWS_HMAC_SHA256 << NEWLINE << dateValue << NEWLINE << simpleDate << "/" << region << "/"
- << serviceName << "/" << AWS4_REQUEST << NEWLINE << canonicalRequestHash;
-
- return ss.str();
-}
-
-Aws::Utils::ByteBuffer AWSAuthEventStreamV4Signer::ComputeHash(const Aws::String& secretKey,
- const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
-{
- Aws::String signingKey(SIGNING_KEY);
- signingKey.append(secretKey);
- auto hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)simpleDate.c_str(), simpleDate.length()),
- ByteBuffer((unsigned char*)signingKey.c_str(), signingKey.length()));
-
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to HMAC (SHA256) date string \"" << simpleDate << "\"");
- return {};
- }
-
- auto kDate = hashResult.GetResult();
- hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)region.c_str(), region.length()), kDate);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to HMAC (SHA256) region string \"" << region << "\"");
- return {};
- }
-
- auto kRegion = hashResult.GetResult();
- hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)serviceName.c_str(), serviceName.length()), kRegion);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to HMAC (SHA256) service string \"" << m_serviceName << "\"");
- return {};
- }
-
- auto kService = hashResult.GetResult();
- hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)AWS4_REQUEST, strlen(AWS4_REQUEST)), kService);
- if (!hashResult.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Unable to HMAC (SHA256) request string");
- AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "The request string is: \"" << AWS4_REQUEST << "\"");
- return {};
- }
- return hashResult.GetResult();
-}
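
Note: the ComputeHash() chain removed above (and re-added below under source/auth/signer/) is the standard SigV4 key derivation: HMAC-SHA256 applied in sequence to the date, region, service name and the literal "aws4_request", seeded with "AWS4" + secret key; the request signature is then the hex-encoded HMAC of the string to sign under that derived key. A minimal standalone sketch follows, using OpenSSL's HMAC() directly instead of the SDK's Sha256HMAC wrapper; the helper names (HmacSha256, DeriveSigningKey) are illustrative, not SDK API.

    #include <openssl/evp.h>
    #include <openssl/hmac.h>
    #include <string>
    #include <vector>

    // HMAC-SHA256 helper: 'key' is raw bytes, 'data' is the message to authenticate.
    static std::vector<unsigned char> HmacSha256(const std::vector<unsigned char>& key,
                                                 const std::string& data)
    {
        std::vector<unsigned char> out(EVP_MAX_MD_SIZE);
        unsigned int outLen = 0;
        HMAC(EVP_sha256(), key.data(), static_cast<int>(key.size()),
             reinterpret_cast<const unsigned char*>(data.data()), data.size(),
             out.data(), &outLen);
        out.resize(outLen);
        return out;
    }

    // SigV4 signing-key derivation, mirroring ComputeHash(): seed with "AWS4" + secret,
    // then chain HMACs over the date, region, service and the literal "aws4_request".
    std::vector<unsigned char> DeriveSigningKey(const std::string& secretKey,
                                                const std::string& simpleDate,   // "YYYYMMDD"
                                                const std::string& region,
                                                const std::string& service)
    {
        const std::string seed = "AWS4" + secretKey;                 // SIGNING_KEY prefix
        std::vector<unsigned char> kSecret(seed.begin(), seed.end());
        auto kDate    = HmacSha256(kSecret, simpleDate);
        auto kRegion  = HmacSha256(kDate, region);
        auto kService = HmacSha256(kRegion, service);
        return HmacSha256(kService, "aws4_request");                 // AWS4_REQUEST
    }
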
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
index bf20ede35e..084e4bca6e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProvider.cpp
@@ -48,7 +48,7 @@ static const char DEFAULT_CREDENTIALS_FILE[] = "credentials";
extern const char DEFAULT_CONFIG_FILE[] = "config";
-static const int EXPIRATION_GRACE_PERIOD = 5 * 1000;
+static const int AWS_CREDENTIAL_PROVIDER_EXPIRATION_GRACE_PERIOD = 5 * 1000;
void AWSCredentialsProvider::Reload()
{
@@ -183,9 +183,10 @@ AWSCredentials ProfileConfigFileAWSCredentialsProvider::GetAWSCredentials()
{
RefreshIfExpired();
ReaderLockGuard guard(m_reloadLock);
- auto credsFileProfileIter = m_credentialsFileLoader.GetProfiles().find(m_profileToUse);
+ const Aws::Map<Aws::String, Aws::Config::Profile>& profiles = m_credentialsFileLoader.GetProfiles();
+ auto credsFileProfileIter = profiles.find(m_profileToUse);
- if(credsFileProfileIter != m_credentialsFileLoader.GetProfiles().end())
+ if(credsFileProfileIter != profiles.end())
{
return credsFileProfileIter->second.GetCredentials();
}
@@ -239,37 +240,71 @@ AWSCredentials InstanceProfileCredentialsProvider::GetAWSCredentials()
{
RefreshIfExpired();
ReaderLockGuard guard(m_reloadLock);
+ if (m_ec2MetadataConfigLoader)
+ {
+ const Aws::Map<Aws::String, Aws::Config::Profile> &profiles = m_ec2MetadataConfigLoader->GetProfiles();
+ auto profileIter = profiles.find(Aws::Config::INSTANCE_PROFILE_KEY);
+
+ if (profileIter != profiles.end()) {
+ return profileIter->second.GetCredentials();
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(INSTANCE_LOG_TAG, "EC2 Metadata config loader is a nullptr");
+ }
+
+ return AWSCredentials();
+}
+
+bool InstanceProfileCredentialsProvider::ExpiresSoon() const
+{
+ ReaderLockGuard guard(m_reloadLock);
auto profileIter = m_ec2MetadataConfigLoader->GetProfiles().find(Aws::Config::INSTANCE_PROFILE_KEY);
+ AWSCredentials credentials;
if(profileIter != m_ec2MetadataConfigLoader->GetProfiles().end())
{
- return profileIter->second.GetCredentials();
+ credentials = profileIter->second.GetCredentials();
}
- return AWSCredentials();
+ return ((credentials.GetExpiration() - Aws::Utils::DateTime::Now()).count() < AWS_CREDENTIAL_PROVIDER_EXPIRATION_GRACE_PERIOD);
}
void InstanceProfileCredentialsProvider::Reload()
{
- AWS_LOGSTREAM_INFO(INSTANCE_LOG_TAG, "Credentials have expired attempting to repull from EC2 Metadata Service.");
- m_ec2MetadataConfigLoader->Load();
- AWSCredentialsProvider::Reload();
+ AWS_LOGSTREAM_INFO(INSTANCE_LOG_TAG, "Credentials have expired attempting to re-pull from EC2 Metadata Service.");
+ if (m_ec2MetadataConfigLoader) {
+ m_ec2MetadataConfigLoader->Load();
+ AWSCredentialsProvider::Reload();
+ } else {
+ AWS_LOGSTREAM_ERROR(INSTANCE_LOG_TAG, "EC2 Metadata config loader is a nullptr");
+ }
}
void InstanceProfileCredentialsProvider::RefreshIfExpired()
{
AWS_LOGSTREAM_DEBUG(INSTANCE_LOG_TAG, "Checking if latest credential pull has expired.");
ReaderLockGuard guard(m_reloadLock);
- if (!IsTimeToRefresh(m_loadFrequencyMs))
- {
- return;
- }
+ auto profileIter = m_ec2MetadataConfigLoader->GetProfiles().find(Aws::Config::INSTANCE_PROFILE_KEY);
+ AWSCredentials credentials;
- guard.UpgradeToWriterLock();
- if (!IsTimeToRefresh(m_loadFrequencyMs)) // double-checked lock to avoid refreshing twice
+ if(profileIter != m_ec2MetadataConfigLoader->GetProfiles().end())
{
- return;
+ credentials = profileIter->second.GetCredentials();
+
+ if (!credentials.IsEmpty() && !IsTimeToRefresh(m_loadFrequencyMs) && !ExpiresSoon())
+ {
+ return;
+ }
+
+ guard.UpgradeToWriterLock();
+ if (!credentials.IsEmpty() && !IsTimeToRefresh(m_loadFrequencyMs) && !ExpiresSoon()) // double-checked lock to avoid refreshing twice
+ {
+ return;
+ }
}
+
Reload();
}
@@ -306,12 +341,17 @@ AWSCredentials TaskRoleCredentialsProvider::GetAWSCredentials()
bool TaskRoleCredentialsProvider::ExpiresSoon() const
{
- return ((m_credentials.GetExpiration() - Aws::Utils::DateTime::Now()).count() < EXPIRATION_GRACE_PERIOD);
+ return ((m_credentials.GetExpiration() - Aws::Utils::DateTime::Now()).count() < AWS_CREDENTIAL_PROVIDER_EXPIRATION_GRACE_PERIOD);
}
void TaskRoleCredentialsProvider::Reload()
{
- AWS_LOGSTREAM_INFO(TASK_ROLE_LOG_TAG, "Credentials have expired or will expire, attempting to repull from ECS IAM Service.");
+ AWS_LOGSTREAM_INFO(TASK_ROLE_LOG_TAG, "Credentials have expired or will expire, attempting to re-pull from ECS IAM Service.");
+ if (!m_ecsCredentialsClient)
+ {
+ AWS_LOGSTREAM_ERROR(INSTANCE_LOG_TAG, "ECS Credentials client is a nullptr");
+ return;
+ }
auto credentialsStr = m_ecsCredentialsClient->GetECSCredentials();
if (credentialsStr.empty()) return;
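
Note: the change above makes the instance-profile provider reload not only on the periodic load interval but also whenever the cached credentials are empty or fall inside the renamed AWS_CREDENTIAL_PROVIDER_EXPIRATION_GRACE_PERIOD (5 * 1000 in the code, i.e. a 5-second grace period in milliseconds). A minimal std::chrono sketch of that check; the type and function names are illustrative, not the SDK's.

    #include <chrono>

    struct CachedCredentials {
        std::chrono::system_clock::time_point expiration;
        bool empty = true;
    };

    constexpr auto kExpirationGracePeriod = std::chrono::milliseconds(5 * 1000);

    bool ExpiresSoon(const CachedCredentials& creds)
    {
        // Treat credentials as stale once less than the grace period remains.
        return (creds.expiration - std::chrono::system_clock::now()) < kExpirationGracePeriod;
    }

    bool NeedsRefresh(const CachedCredentials& creds, bool loadIntervalElapsed)
    {
        // Mirrors the new RefreshIfExpired() condition: reload when the cache is empty,
        // the periodic load interval has elapsed, or the credentials are about to expire.
        return creds.empty || loadIntervalElapsed || ExpiresSoon(creds);
    }
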
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
index 8b019a1664..403bd380c4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSCredentialsProviderChain.cpp
@@ -77,3 +77,9 @@ DefaultAWSCredentialsProviderChain::DefaultAWSCredentialsProviderChain() : AWSCr
AWS_LOGSTREAM_INFO(DefaultCredentialsProviderChainTag, "Added EC2 metadata service credentials provider to the provider chain.");
}
}
+
+DefaultAWSCredentialsProviderChain::DefaultAWSCredentialsProviderChain(const DefaultAWSCredentialsProviderChain& chain) {
+ for (const auto& provider: chain.GetProviders()) {
+ AddProvider(provider);
+ }
+}
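
Note: the new copy constructor above simply re-adds every provider from the source chain. For context, a credentials chain resolves by walking its providers in order and returning the first non-empty result; a rough sketch under assumed names (Provider, Resolve), not the SDK's actual classes.

    #include <memory>
    #include <string>
    #include <vector>

    struct Credentials {
        std::string accessKeyId, secretKey;
        bool IsEmpty() const { return accessKeyId.empty() || secretKey.empty(); }
    };

    struct Provider {
        virtual ~Provider() = default;
        virtual Credentials GetCredentials() = 0;
    };

    Credentials Resolve(const std::vector<std::shared_ptr<Provider>>& chain)
    {
        for (const auto& provider : chain) {
            if (!provider) continue;                // ignore unexpected null entries
            Credentials creds = provider->GetCredentials();
            if (!creds.IsEmpty()) return creds;     // first provider that yields credentials wins
        }
        return Credentials{};                       // fall back to empty (anonymous) credentials
    }
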
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
index e8f780762e..9576e9d999 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/SSOCredentialsProvider.cpp
@@ -33,7 +33,8 @@ SSOCredentialsProvider::SSOCredentialsProvider() : m_profileToUse(GetConfigProfi
AWS_LOGSTREAM_INFO(SSO_CREDENTIALS_PROVIDER_LOG_TAG, "Setting sso credentials provider to read config from " << m_profileToUse);
}
-SSOCredentialsProvider::SSOCredentialsProvider(const Aws::String& profile) : m_profileToUse(profile)
+SSOCredentialsProvider::SSOCredentialsProvider(const Aws::String& profile) : m_profileToUse(profile),
+ m_bearerTokenProvider(profile)
{
AWS_LOGSTREAM_INFO(SSO_CREDENTIALS_PROVIDER_LOG_TAG, "Setting sso credentials provider to read config from " << m_profileToUse);
}
@@ -48,15 +49,24 @@ AWSCredentials SSOCredentialsProvider::GetAWSCredentials()
void SSOCredentialsProvider::Reload()
{
auto profile = Aws::Config::GetCachedConfigProfile(m_profileToUse);
-
- Aws::String hashedStartUrl = Aws::Utils::HashingUtils::HexEncode(Aws::Utils::HashingUtils::CalculateSHA1(profile.GetSsoStartUrl()));
- auto profileDirectory = ProfileConfigFileAWSCredentialsProvider::GetProfileDirectory();
- Aws::StringStream ssToken;
- ssToken << profileDirectory;
- ssToken << PATH_DELIM << "sso" << PATH_DELIM << "cache" << PATH_DELIM << hashedStartUrl << ".json";
- auto ssoTokenPath = ssToken.str();
- AWS_LOGSTREAM_DEBUG(SSO_CREDENTIALS_PROVIDER_LOG_TAG, "Loading token from: " << ssoTokenPath)
- Aws::String accessToken = LoadAccessTokenFile(ssoTokenPath);
+ const auto accessToken = [&]() -> Aws::String {
+ // If we have an SSO Session set, use the refreshed token.
+ if (profile.IsSsoSessionSet()) {
+ m_ssoRegion = profile.GetSsoSession().GetSsoRegion();
+ auto token = m_bearerTokenProvider.GetAWSBearerToken();
+ m_expiresAt = token.GetExpiration();
+ return token.GetToken();
+ }
+ Aws::String hashedStartUrl = Aws::Utils::HashingUtils::HexEncode(Aws::Utils::HashingUtils::CalculateSHA1(profile.GetSsoStartUrl()));
+ auto profileDirectory = ProfileConfigFileAWSCredentialsProvider::GetProfileDirectory();
+ Aws::StringStream ssToken;
+ ssToken << profileDirectory;
+ ssToken << PATH_DELIM << "sso" << PATH_DELIM << "cache" << PATH_DELIM << hashedStartUrl << ".json";
+ auto ssoTokenPath = ssToken.str();
+ AWS_LOGSTREAM_DEBUG(SSO_CREDENTIALS_PROVIDER_LOG_TAG, "Loading token from: " << ssoTokenPath)
+ m_ssoRegion = profile.GetSsoRegion();
+ return LoadAccessTokenFile(ssoTokenPath);
+ }();
if (accessToken.empty()) {
AWS_LOGSTREAM_TRACE(SSO_CREDENTIALS_PROVIDER_LOG_TAG, "Access token for SSO not available");
return;
@@ -72,7 +82,7 @@ void SSOCredentialsProvider::Reload()
Aws::Client::ClientConfiguration config;
config.scheme = Aws::Http::Scheme::HTTPS;
- config.region = profile.GetSsoRegion();
+ config.region = m_ssoRegion;
AWS_LOGSTREAM_DEBUG(SSO_CREDENTIALS_PROVIDER_LOG_TAG, "Passing config to client for region: " << m_ssoRegion);
Aws::Vector<Aws::String> retryableErrors;
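
Note: when no sso-session is configured, the provider still falls back to the legacy cached-token lookup shown above: the token is read from a JSON file whose name is the hex-encoded SHA1 of the SSO start URL, under <profile directory>/sso/cache/. A small sketch of that path derivation using OpenSSL's SHA1(), with PATH_DELIM simplified to '/'; Sha1Hex and SsoTokenCachePath are illustrative helpers, not SDK functions.

    #include <openssl/sha.h>
    #include <cstdio>
    #include <string>

    // Lowercase hex-encoded SHA1, as used for the cache file name.
    static std::string Sha1Hex(const std::string& input)
    {
        unsigned char digest[SHA_DIGEST_LENGTH];
        SHA1(reinterpret_cast<const unsigned char*>(input.data()), input.size(), digest);
        char buf[2 * SHA_DIGEST_LENGTH + 1];
        for (int i = 0; i < SHA_DIGEST_LENGTH; ++i)
            std::snprintf(buf + 2 * i, 3, "%02x", digest[i]);
        return std::string(buf, 2 * SHA_DIGEST_LENGTH);
    }

    // e.g. "<profile dir>/sso/cache/<sha1-of-start-url>.json"
    std::string SsoTokenCachePath(const std::string& profileDirectory,
                                  const std::string& startUrlOrSessionName)
    {
        return profileDirectory + "/sso/cache/" + Sha1Hex(startUrlOrSessionName) + ".json";
    }
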
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
index 3f48c9e0c7..b861e6132b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/STSCredentialsProvider.cpp
@@ -30,6 +30,8 @@ using Aws::Utils::Threading::ReaderLockGuard;
using Aws::Utils::Threading::WriterLockGuard;
static const char STS_ASSUME_ROLE_WEB_IDENTITY_LOG_TAG[] = "STSAssumeRoleWithWebIdentityCredentialsProvider";
+static const int STS_CREDENTIAL_PROVIDER_EXPIRATION_GRACE_PERIOD = 5 * 1000;
+
STSAssumeRoleWebIdentityCredentialsProvider::STSAssumeRoleWebIdentityCredentialsProvider() :
m_initialized(false)
{
@@ -145,16 +147,21 @@ void STSAssumeRoleWebIdentityCredentialsProvider::Reload()
m_credentials = result.creds;
}
+bool STSAssumeRoleWebIdentityCredentialsProvider::ExpiresSoon() const
+{
+ return ((m_credentials.GetExpiration() - Aws::Utils::DateTime::Now()).count() < STS_CREDENTIAL_PROVIDER_EXPIRATION_GRACE_PERIOD);
+}
+
void STSAssumeRoleWebIdentityCredentialsProvider::RefreshIfExpired()
{
ReaderLockGuard guard(m_reloadLock);
- if (!m_credentials.IsExpiredOrEmpty())
+ if (!m_credentials.IsEmpty() && !ExpiresSoon())
{
return;
}
guard.UpgradeToWriterLock();
- if (!m_credentials.IsExpiredOrEmpty()) // double-checked lock to avoid refreshing twice
+ if (!m_credentials.IsExpiredOrEmpty() && !ExpiresSoon()) // double-checked lock to avoid refreshing twice
{
return;
}
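
Note: the RefreshIfExpired() changes above keep the usual double-checked pattern: test under a reader lock, upgrade to the writer lock, and test again so only one thread reloads. A compact sketch of the same idea with std::shared_mutex (which has no in-place upgrade, so the sketch releases and reacquires instead of UpgradeToWriterLock()); the class and members are hypothetical.

    #include <mutex>
    #include <shared_mutex>

    class RefreshingProvider {
    public:
        void RefreshIfExpired()
        {
            {
                std::shared_lock<std::shared_mutex> readLock(m_lock);
                if (!NeedsRefresh()) return;              // fast path: credentials still valid
            }
            std::unique_lock<std::shared_mutex> writeLock(m_lock);
            if (!NeedsRefresh()) return;                  // double-check: another thread already reloaded
            Reload();
        }

    private:
        bool NeedsRefresh() const { return m_empty || m_expiresSoon; }  // e.g. IsEmpty() || ExpiresSoon()
        void Reload() { m_empty = false; m_expiresSoon = false; }       // stand-in for the real reload
        bool m_empty = true;
        bool m_expiresSoon = false;
        mutable std::shared_mutex m_lock;
    };
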
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
new file mode 100644
index 0000000000..16b301cd67
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
@@ -0,0 +1,35 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/bearer-token-provider/DefaultBearerTokenProviderChain.h>
+#include <aws/core/auth/AWSBearerToken.h>
+#include <aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h>
+#include <aws/core/utils/logging/LogMacros.h>
+
+
+static const char SSO_DEFAULT_BEARER_TOKEN_PROVIDER_CHAIN_LOG_TAG[] = "SSOBearerTokenProvider";
+
+Aws::Auth::AWSBearerToken Aws::Auth::DefaultBearerTokenProviderChain::GetAWSBearerToken()
+{
+ for (auto&& bearerTokenProvider : m_providerChain)
+ {
+ if(!bearerTokenProvider) {
+ AWS_LOGSTREAM_FATAL(SSO_DEFAULT_BEARER_TOKEN_PROVIDER_CHAIN_LOG_TAG,
+ "Unexpected nullptr in DefaultBearerTokenProviderChain::m_providerChain");
+ break;
+ }
+ AWSBearerToken bearerToken = bearerTokenProvider->GetAWSBearerToken();
+ if(!bearerToken.IsExpiredOrEmpty())
+ {
+ return bearerToken;
+ }
+ }
+ return AWSBearerToken("", Aws::Utils::DateTime(0.0));
+}
+
+Aws::Auth::DefaultBearerTokenProviderChain::DefaultBearerTokenProviderChain()
+{
+ AddProvider(Aws::MakeShared<Aws::Auth::SSOBearerTokenProvider>(SSO_DEFAULT_BEARER_TOKEN_PROVIDER_CHAIN_LOG_TAG));
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
new file mode 100644
index 0000000000..b55131e340
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
@@ -0,0 +1,244 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#include <aws/core/auth/bearer-token-provider/SSOBearerTokenProvider.h>
+#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/config/AWSProfileConfigLoader.h>
+#include <aws/core/internal/AWSHttpResourceClient.h>
+#include <aws/core/platform/Environment.h>
+#include <aws/core/platform/FileSystem.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/FileSystemUtils.h>
+#include <aws/core/client/SpecifiedRetryableErrorsRetryStrategy.h>
+#include <aws/core/utils/HashingUtils.h>
+#include <aws/core/utils/json/JsonSerializer.h>
+
+using namespace Aws::Auth;
+
+using Aws::Utils::Threading::ReaderLockGuard;
+
+
+static const char SSO_BEARER_TOKEN_PROVIDER_LOG_TAG[] = "SSOBearerTokenProvider";
+static const char SSO_GRANT_TYPE[] = "refresh_token";
+
+const size_t SSOBearerTokenProvider::REFRESH_WINDOW_BEFORE_EXPIRATION_S = 600;
+const size_t SSOBearerTokenProvider::REFRESH_ATTEMPT_INTERVAL_S = 30;
+
+SSOBearerTokenProvider::SSOBearerTokenProvider()
+ : m_profileToUse(Aws::Auth::GetConfigProfileName()),
+ m_lastUpdateAttempt((int64_t) 0)
+{
+ AWS_LOGSTREAM_INFO(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Setting sso bearerToken provider to read config from " << m_profileToUse);
+}
+
+SSOBearerTokenProvider::SSOBearerTokenProvider(const Aws::String& awsProfile)
+ : m_profileToUse(awsProfile),
+ m_lastUpdateAttempt((int64_t) 0)
+{
+ AWS_LOGSTREAM_INFO(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Setting sso bearerToken provider to read config from " << m_profileToUse);
+}
+
+AWSBearerToken SSOBearerTokenProvider::GetAWSBearerToken()
+{
+ Aws::Utils::Threading::ReaderLockGuard guard(m_reloadLock);
+ if(m_token.IsEmpty())
+ {
+ Reload();
+ }
+ if(!m_token.IsEmpty())
+ {
+ const Aws::Utils::DateTime now = Aws::Utils::DateTime::Now();
+ if (now >= m_token.GetExpiration() - std::chrono::seconds(REFRESH_WINDOW_BEFORE_EXPIRATION_S) &&
+ m_lastUpdateAttempt + std::chrono::seconds(REFRESH_ATTEMPT_INTERVAL_S) < now)
+ {
+ guard.UpgradeToWriterLock();
+ RefreshFromSso();
+ }
+ }
+
+ if(m_token.IsExpiredOrEmpty())
+ {
+ /* If a loaded token has expired and has insufficient metadata to perform a refresh the SSO token
+ provider must raise an exception that the token has expired and cannot be refreshed.
+ Error logging and returning an empty object instead because of disabled exceptions and poor legacy API design. */
+ AWS_LOGSTREAM_ERROR(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "SSOBearerTokenProvider is unable to provide a token");
+ return Aws::Auth::AWSBearerToken("", Aws::Utils::DateTime(0.0));
+ }
+ return m_token;
+}
+
+void SSOBearerTokenProvider::Reload()
+{
+ CachedSsoToken cachedSsoToken = LoadAccessTokenFile();
+ if(cachedSsoToken.accessToken.empty()) {
+ AWS_LOGSTREAM_TRACE(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Access token for SSO not available");
+ return;
+ }
+ const Aws::Utils::DateTime now = Aws::Utils::DateTime::Now();
+ if(cachedSsoToken.expiresAt < now) {
+ AWS_LOGSTREAM_ERROR(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Cached Token is already expired at " << cachedSsoToken.expiresAt.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
+ return;
+ }
+
+ m_token.SetToken(cachedSsoToken.accessToken);
+ m_token.SetExpiration(cachedSsoToken.expiresAt);
+}
+
+void SSOBearerTokenProvider::RefreshFromSso()
+{
+ CachedSsoToken cachedSsoToken = LoadAccessTokenFile();
+
+ if(!m_client)
+ {
+ Aws::Client::ClientConfiguration config;
+ config.scheme = Aws::Http::Scheme::HTTPS;
+ /* The SSO token provider must not resolve if any SSO configuration values are present directly on the profile
+ * instead of an `sso-session` section. The SSO token provider must ignore these configuration values if these
+ * values are present directly on the profile instead of an `sso-session` section. */
+ // config.region = m_profile.GetSsoRegion(); // <- intentionally not used per comment above
+ config.region = cachedSsoToken.region;
+ m_client = Aws::MakeUnique<Aws::Internal::SSOCredentialsClient>(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, config);
+ }
+
+ Aws::Internal::SSOCredentialsClient::SSOCreateTokenRequest ssoCreateTokenRequest;
+ ssoCreateTokenRequest.clientId = cachedSsoToken.clientId;
+ ssoCreateTokenRequest.clientSecret = cachedSsoToken.clientSecret;
+ ssoCreateTokenRequest.grantType = SSO_GRANT_TYPE;
+ ssoCreateTokenRequest.refreshToken = cachedSsoToken.refreshToken;
+
+ if(!m_client) {
+ AWS_LOGSTREAM_FATAL(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Unexpected nullptr in SSOBearerTokenProvider::m_client");
+ return;
+ }
+ Aws::Internal::SSOCredentialsClient::SSOCreateTokenResult result = m_client->CreateToken(ssoCreateTokenRequest);
+ if(!result.accessToken.empty())
+ {
+ cachedSsoToken.accessToken = result.accessToken;
+ cachedSsoToken.expiresAt = Aws::Utils::DateTime::Now() + std::chrono::seconds(result.expiresIn);
+ if(!result.refreshToken.empty()) {
+ cachedSsoToken.refreshToken = result.refreshToken;
+ }
+ if(!result.clientId.empty()) {
+ cachedSsoToken.clientId = result.clientId;
+ }
+ }
+
+ if(WriteAccessTokenFile(cachedSsoToken))
+ {
+ m_token.SetToken(cachedSsoToken.accessToken);
+ m_token.SetExpiration(cachedSsoToken.expiresAt);
+ }
+
+}
+
+SSOBearerTokenProvider::CachedSsoToken SSOBearerTokenProvider::LoadAccessTokenFile() const
+{
+ SSOBearerTokenProvider::CachedSsoToken retValue;
+
+ const Aws::Config::Profile& profile = Aws::Config::GetCachedConfigProfile(m_profileToUse);
+ if(!profile.IsSsoSessionSet()) {
+ AWS_LOGSTREAM_ERROR(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "SSOBearerTokenProvider set to use a profile " << m_profileToUse << " without a sso_session. Unable to load cached token.");
+ return retValue;
+ }
+
+ Aws::String hashedStartUrl = Aws::Utils::HashingUtils::HexEncode(Aws::Utils::HashingUtils::CalculateSHA1(profile.GetSsoSession().GetName()));
+ Aws::String profileDirectory = ProfileConfigFileAWSCredentialsProvider::GetProfileDirectory();
+ Aws::StringStream ssToken;
+ ssToken << profileDirectory;
+ ssToken << Aws::FileSystem::PATH_DELIM << "sso" << Aws::FileSystem::PATH_DELIM << "cache" << Aws::FileSystem::PATH_DELIM << hashedStartUrl << ".json";
+ auto ssoAccessTokenPath = ssToken.str();
+ AWS_LOGSTREAM_DEBUG(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Preparing to load token from: " << ssoAccessTokenPath);
+
+ Aws::IFStream inputFile(ssoAccessTokenPath.c_str());
+ if(inputFile)
+ {
+ AWS_LOGSTREAM_DEBUG(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Reading content from token file: " << ssoAccessTokenPath);
+
+ Aws::Utils::Json::JsonValue tokenDoc(inputFile);
+ if (!tokenDoc.WasParseSuccessful())
+ {
+ AWS_LOGSTREAM_ERROR(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Failed to parse token file: " << ssoAccessTokenPath);
+ return retValue;
+ }
+ Utils::Json::JsonView tokenView(tokenDoc);
+
+ retValue.accessToken = tokenView.GetString("accessToken");
+ retValue.expiresAt = Aws::Utils::DateTime(tokenView.GetString("expiresAt"), Aws::Utils::DateFormat::ISO_8601);
+ retValue.refreshToken = tokenView.GetString("refreshToken");
+ retValue.clientId = tokenView.GetString("clientId");
+ retValue.clientSecret = tokenView.GetString("clientSecret");
+ retValue.registrationExpiresAt = Aws::Utils::DateTime(tokenView.GetString("registrationExpiresAt"), Aws::Utils::DateFormat::ISO_8601);
+ retValue.region = tokenView.GetString("region");
+ retValue.startUrl = tokenView.GetString("startUrl");
+
+ return retValue;
+ }
+ else
+ {
+ AWS_LOGSTREAM_INFO(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Unable to open token file on path: " << ssoAccessTokenPath);
+ return retValue;
+ }
+}
+
+bool SSOBearerTokenProvider::WriteAccessTokenFile(const CachedSsoToken& token) const
+{
+ const Aws::Config::Profile& profile = Aws::Config::GetCachedConfigProfile(m_profileToUse);
+ if(!profile.IsSsoSessionSet()) {
+ AWS_LOGSTREAM_ERROR(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "SSOBearerTokenProvider set to use a profile "
+ << m_profileToUse << " without a sso_session. Unable to write a cached token.");
+ return false;
+ }
+
+ Aws::String hashedStartUrl = Aws::Utils::HashingUtils::HexEncode(Aws::Utils::HashingUtils::CalculateSHA1(profile.GetSsoSession().GetName()));
+ Aws::String profileDirectory = ProfileConfigFileAWSCredentialsProvider::GetProfileDirectory();
+ Aws::StringStream ssToken;
+ ssToken << profileDirectory;
+ ssToken << Aws::FileSystem::PATH_DELIM << "sso" << Aws::FileSystem::PATH_DELIM << "cache" << Aws::FileSystem::PATH_DELIM << hashedStartUrl << ".json";
+ auto ssoAccessTokenPath = ssToken.str();
+ AWS_LOGSTREAM_DEBUG(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Preparing to write token to: " << ssoAccessTokenPath);
+
+ Aws::OFStream outputFileStream(ssoAccessTokenPath.c_str(), std::ios_base::out | std::ios_base::trunc);
+ if(outputFileStream && outputFileStream.good())
+ {
+ AWS_LOGSTREAM_DEBUG(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Writing content to token file: " << ssoAccessTokenPath);
+
+ Aws::Utils::Json::JsonValue cachedTokenDoc;
+ if(!token.accessToken.empty()) {
+ cachedTokenDoc.WithString("accessToken", token.accessToken);
+ }
+ if(token.expiresAt != 0.0) {
+ cachedTokenDoc.WithString("expiresAt", token.expiresAt.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
+ }
+ if(!token.refreshToken.empty()) {
+ cachedTokenDoc.WithString("refreshToken", token.refreshToken);
+ }
+ if(!token.clientId.empty()) {
+ cachedTokenDoc.WithString("clientId", token.clientId);
+ }
+ if(!token.clientSecret.empty()) {
+ cachedTokenDoc.WithString("clientSecret", token.clientSecret);
+ }
+ if(token.registrationExpiresAt != 0.0) {
+ cachedTokenDoc.WithString("registrationExpiresAt", token.registrationExpiresAt.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
+ }
+ if(!token.region.empty()) {
+ cachedTokenDoc.WithString("region", token.region);
+ }
+ if(!token.startUrl.empty()) {
+ cachedTokenDoc.WithString("startUrl", token.startUrl);
+ }
+
+        const Aws::String& resultingJsonStr = cachedTokenDoc.View().WriteReadable();
+ outputFileStream << resultingJsonStr;
+
+ return outputFileStream.good();
+ }
+ else
+ {
+ AWS_LOGSTREAM_INFO(SSO_BEARER_TOKEN_PROVIDER_LOG_TAG, "Unable to open token file on path for writing: " << ssoAccessTokenPath);
+ return false;
+ }
+}
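
Note: GetAWSBearerToken() above only attempts a network refresh inside the last REFRESH_WINDOW_BEFORE_EXPIRATION_S (600 s) before the token expires, and no more often than every REFRESH_ATTEMPT_INTERVAL_S (30 s). A minimal sketch of that gate; ShouldAttemptRefresh is an illustrative name, not part of the SDK.

    #include <chrono>

    using Clock = std::chrono::system_clock;

    constexpr auto kRefreshWindowBeforeExpiration = std::chrono::seconds(600);  // REFRESH_WINDOW_BEFORE_EXPIRATION_S
    constexpr auto kRefreshAttemptInterval        = std::chrono::seconds(30);   // REFRESH_ATTEMPT_INTERVAL_S

    bool ShouldAttemptRefresh(Clock::time_point now,
                              Clock::time_point tokenExpiration,
                              Clock::time_point lastAttempt)
    {
        // Refresh only inside the final window before expiration, and back off
        // between attempts so a failing refresh endpoint is not hammered.
        return now >= tokenExpiration - kRefreshWindowBeforeExpiration &&
               lastAttempt + kRefreshAttemptInterval < now;
    }
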
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
new file mode 100644
index 0000000000..9bb9c5edae
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
@@ -0,0 +1,46 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer-provider/BearerTokenAuthSignerProvider.h>
+
+#include <aws/core/auth/signer/AWSNullSigner.h>
+
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/utils/memory/stl/AWSAllocator.h>
+
+const char BEARER_TOKEN_AUTH_SIGNER_PROVIDER_ALLOC_TAG[] = "BearerTokenAuthSignerProvider";
+
+using namespace Aws::Auth;
+
+BearerTokenAuthSignerProvider::BearerTokenAuthSignerProvider(const std::shared_ptr<Aws::Auth::AWSBearerTokenProviderBase> bearerTokenProvider)
+{
+ m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSAuthBearerSigner>(BEARER_TOKEN_AUTH_SIGNER_PROVIDER_ALLOC_TAG, bearerTokenProvider));
+ m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSNullSigner>(BEARER_TOKEN_AUTH_SIGNER_PROVIDER_ALLOC_TAG));
+}
+
+std::shared_ptr<Aws::Client::AWSAuthSigner> BearerTokenAuthSignerProvider::GetSigner(const Aws::String& signerName) const
+{
+ for(const auto& signer : m_signers)
+ {
+ if(!signer) {
+ AWS_LOGSTREAM_FATAL(BEARER_TOKEN_AUTH_SIGNER_PROVIDER_ALLOC_TAG, "Unexpected nullptr in BearerTokenAuthSignerProvider::m_signers");
+ break;
+ }
+ if(signer->GetName() == signerName)
+ {
+ return signer;
+ }
+ }
+ AWS_LOGSTREAM_ERROR(BEARER_TOKEN_AUTH_SIGNER_PROVIDER_ALLOC_TAG, "Request's signer: '" << signerName << "' is not found in the signer's map.");
+ assert(false);
+ return nullptr;
+}
+
+void BearerTokenAuthSignerProvider::AddSigner(std::shared_ptr<Aws::Client::AWSAuthSigner>& signer)
+{
+ assert(signer);
+ m_signers.emplace_back(signer);
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
index 31fd6c006b..fb7e0cfa40 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/AWSAuthSignerProvider.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer-provider/DefaultAuthSignerProvider.cpp
@@ -2,10 +2,14 @@
* Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
* SPDX-License-Identifier: Apache-2.0.
*/
+
+#include <aws/core/auth/signer-provider/DefaultAuthSignerProvider.h>
+
+#include <aws/core/auth/signer/AWSAuthEventStreamV4Signer.h>
+#include <aws/core/auth/signer/AWSNullSigner.h>
+
#include <aws/core/utils/logging/LogMacros.h>
-#include <aws/core/auth/AWSAuthSignerProvider.h>
-#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>
@@ -14,9 +18,10 @@ const char CLASS_TAG[] = "AuthSignerProvider";
using namespace Aws::Auth;
DefaultAuthSignerProvider::DefaultAuthSignerProvider(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
- const Aws::String& serviceName, const Aws::String& region)
+ const Aws::String& serviceName, const Aws::String& region, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signingPolicy, bool urlEscapePath)
{
- m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSAuthV4Signer>(CLASS_TAG, credentialsProvider, serviceName.c_str(), region));
+ m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSAuthV4Signer>(CLASS_TAG, credentialsProvider, serviceName.c_str(), region, signingPolicy, urlEscapePath, AWSSigningAlgorithm::SIGV4));
+ m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSAuthV4Signer>(CLASS_TAG, credentialsProvider, serviceName.c_str(), region, signingPolicy, urlEscapePath, AWSSigningAlgorithm::ASYMMETRIC_SIGV4));
m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSAuthEventStreamV4Signer>(CLASS_TAG, credentialsProvider, serviceName.c_str(), region));
m_signers.emplace_back(Aws::MakeShared<Aws::Client::AWSNullSigner>(CLASS_TAG));
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
new file mode 100644
index 0000000000..ff14c8a371
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthBearerSigner.cpp
@@ -0,0 +1,50 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer/AWSAuthBearerSigner.h>
+#include <aws/core/auth/bearer-token-provider/AWSBearerTokenProviderBase.h>
+
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/http/HttpRequest.h>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ const char BEARER_SIGNER[] = "Bearer";
+ }
+
+ namespace Client
+ {
+ static const char LOGGING_TAG[] = "AWSAuthBearerSigner";
+ static const char AUTHORIZATION_HEADER[] = "authorization";
+
+ bool AWSAuthBearerSigner::SignRequest(Aws::Http::HttpRequest& ioRequest) const
+ {
+ if(Aws::Http::Scheme::HTTPS != ioRequest.GetUri().GetScheme())
+ {
+ // Clients MUST always use TLS (https) or equivalent transport security
+ // when making requests with bearer tokens.
+ // https://datatracker.ietf.org/doc/html/rfc6750
+ AWS_LOGSTREAM_ERROR(LOGGING_TAG, "HTTPS scheme must be used with a bearer token authorization");
+ return false;
+ }
+ if(!m_bearerTokenProvider)
+ {
+ AWS_LOGSTREAM_FATAL(LOGGING_TAG, "Unexpected nullptr AWSAuthBearerSigner::m_bearerTokenProvider");
+ return false;
+ }
+ const Aws::Auth::AWSBearerToken& token = m_bearerTokenProvider->GetAWSBearerToken();
+ if(token.IsExpiredOrEmpty())
+ {
+ AWS_LOGSTREAM_ERROR(LOGGING_TAG, "Invalid bearer token to use: expired or empty");
+ return false;
+ }
+
+ ioRequest.SetHeaderValue(AUTHORIZATION_HEADER, "Bearer " + token.GetToken());
+ return true;
+ }
+ }
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
new file mode 100644
index 0000000000..195e83a751
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthEventStreamV4Signer.cpp
@@ -0,0 +1,320 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer/AWSAuthEventStreamV4Signer.h>
+#include <aws/core/auth/signer/AWSAuthSignerCommon.h>
+#include <aws/core/auth/signer/AWSAuthSignerHelper.h>
+
+#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/http/HttpRequest.h>
+#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/HashingUtils.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/memory/AWSMemory.h>
+#include <aws/core/utils/crypto/Sha256HMAC.h>
+#include <aws/core/utils/stream/PreallocatedStreamBuf.h>
+#include <aws/core/utils/event/EventMessage.h>
+#include <aws/core/utils/event/EventHeader.h>
+
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+
+#include <iomanip>
+#include <cstring>
+
+using namespace Aws;
+using namespace Aws::Client;
+using namespace Aws::Auth;
+using namespace Aws::Http;
+using namespace Aws::Utils;
+using namespace Aws::Utils::Logging;
+
+static const char* EVENT_STREAM_CONTENT_SHA256 = "STREAMING-AWS4-HMAC-SHA256-EVENTS";
+static const char* EVENT_STREAM_PAYLOAD = "AWS4-HMAC-SHA256-PAYLOAD";
+static const char* v4StreamingLogTag = "AWSAuthEventStreamV4Signer";
+
+namespace Aws
+{
+ namespace Auth
+ {
+ const char EVENTSTREAM_SIGV4_SIGNER[] = "EventStreamSignatureV4";
+ const char EVENTSTREAM_SIGNATURE_HEADER[] = ":chunk-signature";
+ const char EVENTSTREAM_DATE_HEADER[] = ":date";
+ }
+}
+
+AWSAuthEventStreamV4Signer::AWSAuthEventStreamV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>&
+ credentialsProvider, const char* serviceName, const Aws::String& region) :
+ m_serviceName(serviceName),
+ m_region(region),
+ m_credentialsProvider(credentialsProvider)
+{
+
+ m_unsignedHeaders.emplace_back(Aws::Auth::AWSAuthHelper::X_AMZN_TRACE_ID);
+ m_unsignedHeaders.emplace_back(USER_AGENT_HEADER);
+}
+
+bool AWSAuthEventStreamV4Signer::SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool /* signBody */) const
+{
+ AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
+
+ //don't sign anonymous requests
+ if (credentials.GetAWSAccessKeyId().empty() || credentials.GetAWSSecretKey().empty())
+ {
+ return true;
+ }
+
+ if (!credentials.GetSessionToken().empty())
+ {
+ request.SetAwsSessionToken(credentials.GetSessionToken());
+ }
+
+ request.SetHeaderValue(Aws::Auth::AWSAuthHelper::X_AMZ_CONTENT_SHA256, EVENT_STREAM_CONTENT_SHA256);
+
+ //calculate date header to use in internal signature (this also goes into date header).
+ DateTime now = GetSigningTimestamp();
+ Aws::String dateHeaderValue = now.ToGmtString(DateFormat::ISO_8601_BASIC);
+ request.SetHeaderValue(AWS_DATE_HEADER, dateHeaderValue);
+
+ Aws::StringStream headersStream;
+ Aws::StringStream signedHeadersStream;
+
+ for (const auto& header : Aws::Auth::AWSAuthHelper::CanonicalizeHeaders(request.GetHeaders()))
+ {
+ if(ShouldSignHeader(header.first))
+ {
+ headersStream << header.first.c_str() << ":" << header.second.c_str() << Aws::Auth::AWSAuthHelper::NEWLINE;
+ signedHeadersStream << header.first.c_str() << ";";
+ }
+ }
+
+ Aws::String canonicalHeadersString = headersStream.str();
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Canonical Header String: " << canonicalHeadersString);
+
+ //calculate signed headers parameter
+ Aws::String signedHeadersValue = signedHeadersStream.str();
+ //remove that last semi-colon
+ if (!signedHeadersValue.empty())
+ {
+ signedHeadersValue.pop_back();
+ }
+
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Signed Headers value:" << signedHeadersValue);
+
+ //generate generalized canonicalized request string.
+ Aws::String canonicalRequestString = Aws::Auth::AWSAuthHelper::CanonicalizeRequestSigningString(request, true/* m_urlEscapePath */);
+
+ //append v4 stuff to the canonical request string.
+ canonicalRequestString.append(canonicalHeadersString);
+ canonicalRequestString.append(Aws::Auth::AWSAuthHelper::NEWLINE);
+ canonicalRequestString.append(signedHeadersValue);
+ canonicalRequestString.append(Aws::Auth::AWSAuthHelper::NEWLINE);
+ canonicalRequestString.append(EVENT_STREAM_CONTENT_SHA256);
+
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Canonical Request String: " << canonicalRequestString);
+
+ //now compute sha256 on that request string
+ auto hashResult = m_hash.Calculate(canonicalRequestString);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to hash (sha256) request string");
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "The request string is: \"" << canonicalRequestString << "\"");
+ return false;
+ }
+
+ auto sha256Digest = hashResult.GetResult();
+ Aws::String canonicalRequestHash = HashingUtils::HexEncode(sha256Digest);
+ Aws::String simpleDate = now.ToGmtString(Aws::Auth::AWSAuthHelper::SIMPLE_DATE_FORMAT_STR);
+
+ Aws::String signingRegion = region ? region : m_region;
+ Aws::String signingServiceName = serviceName ? serviceName : m_serviceName;
+ Aws::String stringToSign = GenerateStringToSign(dateHeaderValue, simpleDate, canonicalRequestHash, signingRegion, signingServiceName);
+ auto finalSignature = GenerateSignature(credentials, stringToSign, simpleDate, signingRegion, signingServiceName);
+
+ Aws::StringStream ss;
+ ss << Aws::Auth::AWSAuthHelper::AWS_HMAC_SHA256 << " " << Aws::Auth::AWSAuthHelper::CREDENTIAL << Aws::Auth::AWSAuthHelper::EQ << credentials.GetAWSAccessKeyId() << "/" << simpleDate
+ << "/" << signingRegion << "/" << signingServiceName << "/" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST << ", " << Aws::Auth::AWSAuthHelper::SIGNED_HEADERS << Aws::Auth::AWSAuthHelper::EQ
+ << signedHeadersValue << ", " << SIGNATURE << Aws::Auth::AWSAuthHelper::EQ << HashingUtils::HexEncode(finalSignature);
+
+ auto awsAuthString = ss.str();
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Signing request with: " << awsAuthString);
+ request.SetAwsAuthorization(awsAuthString);
+ request.SetSigningAccessKey(credentials.GetAWSAccessKeyId());
+ request.SetSigningRegion(signingRegion);
+ return true;
+}
+
+// this works regardless of whether the current machine is big- or little-endian
+static void WriteBigEndian(Aws::String& str, uint64_t n)
+{
+ int shift = 56;
+ while(shift >= 0)
+ {
+ str.push_back((n >> shift) & 0xFF);
+ shift -= 8;
+ }
+}
+
+bool AWSAuthEventStreamV4Signer::SignEventMessage(Event::Message& message, Aws::String& priorSignature) const
+{
+ using Event::EventHeaderValue;
+
+ Aws::StringStream stringToSign;
+ stringToSign << EVENT_STREAM_PAYLOAD << Aws::Auth::AWSAuthHelper::NEWLINE;
+ const DateTime now = GetSigningTimestamp();
+ const auto simpleDate = now.ToGmtString(Aws::Auth::AWSAuthHelper::SIMPLE_DATE_FORMAT_STR);
+ stringToSign << now.ToGmtString(DateFormat::ISO_8601_BASIC) << Aws::Auth::AWSAuthHelper::NEWLINE
+ << simpleDate << "/" << m_region << "/"
+ << m_serviceName << "/aws4_request" << Aws::Auth::AWSAuthHelper::NEWLINE << priorSignature << Aws::Auth::AWSAuthHelper::NEWLINE;
+
+
+ Aws::String nonSignatureHeaders;
+ nonSignatureHeaders.push_back(char(sizeof(EVENTSTREAM_DATE_HEADER) - 1)); // length of the string
+ nonSignatureHeaders += EVENTSTREAM_DATE_HEADER;
+ nonSignatureHeaders.push_back(static_cast<char>(EventHeaderValue::EventHeaderType::TIMESTAMP)); // type of the value
+ WriteBigEndian(nonSignatureHeaders, static_cast<uint64_t>(now.Millis())); // the value of the timestamp in big-endian
+
+ auto hashOutcome = m_hash.Calculate(nonSignatureHeaders);
+ if (!hashOutcome.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to hash (sha256) non-signature headers.");
+ return false;
+ }
+
+ const auto nonSignatureHeadersHash = hashOutcome.GetResult();
+ stringToSign << HashingUtils::HexEncode(nonSignatureHeadersHash) << Aws::Auth::AWSAuthHelper::NEWLINE;
+
+ if (message.GetEventPayload().empty())
+ {
+ AWS_LOGSTREAM_WARN(v4StreamingLogTag, "Attempting to sign an empty message (no payload and no headers). "
+ "It is unlikely that this is the intended behavior.");
+ }
+ else
+ {
+ // use a preallocatedStreamBuf to avoid making a copy.
+ // The Hashing API requires either Aws::String or IStream as input.
+        // TODO: the hashing API should accept 'unsigned char*' as input.
+ Utils::Stream::PreallocatedStreamBuf streamBuf(message.GetEventPayload().data(), message.GetEventPayload().size());
+ Aws::IOStream payload(&streamBuf);
+ hashOutcome = m_hash.Calculate(payload);
+
+ if (!hashOutcome.IsSuccess())
+ {
+            AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to hash (sha256) event payload.");
+ return false;
+ }
+ const auto payloadHash = hashOutcome.GetResult();
+ stringToSign << HashingUtils::HexEncode(payloadHash);
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Payload hash - " << HashingUtils::HexEncode(payloadHash));
+ }
+
+ Aws::Utils::ByteBuffer finalSignatureDigest = GenerateSignature(m_credentialsProvider->GetAWSCredentials(), stringToSign.str(), simpleDate, m_region, m_serviceName);
+ const auto finalSignature = HashingUtils::HexEncode(finalSignatureDigest);
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Final computed signing hash: " << finalSignature);
+ priorSignature = finalSignature;
+
+ message.InsertEventHeader(EVENTSTREAM_DATE_HEADER, EventHeaderValue(now.Millis(), EventHeaderValue::EventHeaderType::TIMESTAMP));
+ message.InsertEventHeader(EVENTSTREAM_SIGNATURE_HEADER, std::move(finalSignatureDigest));
+
+ AWS_LOGSTREAM_INFO(v4StreamingLogTag, "Event chunk final signature - " << finalSignature);
+ return true;
+}
+
+bool AWSAuthEventStreamV4Signer::ShouldSignHeader(const Aws::String& header) const
+{
+ return std::find(m_unsignedHeaders.cbegin(), m_unsignedHeaders.cend(), Aws::Utils::StringUtils::ToLower(header.c_str())) == m_unsignedHeaders.cend();
+}
+
+Aws::Utils::ByteBuffer AWSAuthEventStreamV4Signer::GenerateSignature(const AWSCredentials& credentials, const Aws::String& stringToSign,
+ const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
+{
+ Utils::Threading::ReaderLockGuard guard(m_derivedKeyLock);
+ const auto& secretKey = credentials.GetAWSSecretKey();
+ if (secretKey != m_currentSecretKey || simpleDate != m_currentDateStr)
+ {
+ guard.UpgradeToWriterLock();
+ // double-checked lock to prevent updating twice
+ if (m_currentDateStr != simpleDate || m_currentSecretKey != secretKey)
+ {
+ m_currentSecretKey = secretKey;
+ m_currentDateStr = simpleDate;
+ m_derivedKey = ComputeHash(m_currentSecretKey, m_currentDateStr, region, serviceName);
+ }
+
+ }
+ return GenerateSignature(stringToSign, m_derivedKey);
+}
+
+Aws::Utils::ByteBuffer AWSAuthEventStreamV4Signer::GenerateSignature(const Aws::String& stringToSign, const ByteBuffer& key) const
+{
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "Final String to sign: " << stringToSign);
+
+ Aws::StringStream ss;
+
+ auto hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)stringToSign.c_str(), stringToSign.length()), key);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Unable to hmac (sha256) final string");
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "The final string is: \"" << stringToSign << "\"");
+ return {};
+ }
+
+ return hashResult.GetResult();
+}
+
+Aws::String AWSAuthEventStreamV4Signer::GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
+ const Aws::String& canonicalRequestHash, const Aws::String& region, const Aws::String& serviceName) const
+{
+ //generate the actual string we will use in signing the final request.
+ Aws::StringStream ss;
+
+ ss << Aws::Auth::AWSAuthHelper::AWS_HMAC_SHA256 << Aws::Auth::AWSAuthHelper::NEWLINE << dateValue << Aws::Auth::AWSAuthHelper::NEWLINE << simpleDate << "/" << region << "/"
+ << serviceName << "/" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST << Aws::Auth::AWSAuthHelper::NEWLINE << canonicalRequestHash;
+
+ return ss.str();
+}
+
+Aws::Utils::ByteBuffer AWSAuthEventStreamV4Signer::ComputeHash(const Aws::String& secretKey,
+ const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
+{
+ Aws::String signingKey(Aws::Auth::AWSAuthHelper::SIGNING_KEY);
+ signingKey.append(secretKey);
+ auto hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)simpleDate.c_str(), simpleDate.length()),
+ ByteBuffer((unsigned char*)signingKey.c_str(), signingKey.length()));
+
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to HMAC (SHA256) date string \"" << simpleDate << "\"");
+ return {};
+ }
+
+ auto kDate = hashResult.GetResult();
+ hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)region.c_str(), region.length()), kDate);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to HMAC (SHA256) region string \"" << region << "\"");
+ return {};
+ }
+
+ auto kRegion = hashResult.GetResult();
+ hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)serviceName.c_str(), serviceName.length()), kRegion);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Failed to HMAC (SHA256) service string \"" << m_serviceName << "\"");
+ return {};
+ }
+
+ auto kService = hashResult.GetResult();
+ hashResult = m_HMAC.Calculate(ByteBuffer((unsigned char*)Aws::Auth::AWSAuthHelper::AWS4_REQUEST, strlen(Aws::Auth::AWSAuthHelper::AWS4_REQUEST)), kService);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4StreamingLogTag, "Unable to HMAC (SHA256) request string");
+ AWS_LOGSTREAM_DEBUG(v4StreamingLogTag, "The request string is: \"" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST << "\"");
+ return {};
+ }
+ return hashResult.GetResult();
+}
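ComputeHash above is the standard SigV4 signing-key derivation: HMAC-SHA256 is chained over "AWS4" + secret key, then the date, the region, the service name, and finally the literal "aws4_request". A compact sketch of that chain with the HMAC primitive passed in as a callable; the callable is an assumption standing in for the SDK's Sha256HMAC, so this only illustrates the order of operations:

#include <functional>
#include <string>
#include <vector>

using Bytes = std::vector<unsigned char>;
// hmac(key, data) -> HMAC-SHA256 digest; supplied by whatever crypto backend is in use.
using HmacFn = std::function<Bytes(const Bytes& key, const std::string& data)>;

// kSigning = HMAC(HMAC(HMAC(HMAC("AWS4" + secret, date), region), service), "aws4_request")
Bytes DeriveSigV4SigningKey(const HmacFn& hmac, const std::string& secret,
                            const std::string& date, const std::string& region,
                            const std::string& service)
{
    const std::string seed = "AWS4" + secret;
    Bytes kDate    = hmac(Bytes(seed.begin(), seed.end()), date);
    Bytes kRegion  = hmac(kDate, region);
    Bytes kService = hmac(kRegion, service);
    return hmac(kService, "aws4_request");
}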
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
new file mode 100644
index 0000000000..d26f41e6b3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerCommon.cpp
@@ -0,0 +1,14 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer/AWSAuthSignerCommon.h>
+
+namespace Aws
+{
+namespace Auth
+{
+const char SIGNATURE[] = "Signature";
+} // namespace Auth
+} // namespace Aws \ No newline at end of file
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
new file mode 100644
index 0000000000..5f7005d1da
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthSignerHelper.cpp
@@ -0,0 +1,103 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer/AWSAuthSignerHelper.h>
+#include <aws/core/http/HttpTypes.h>
+
+#include <aws/core/http/HttpRequest.h>
+#include <aws/core/http/URI.h>
+#include <aws/core/utils/StringUtils.h>
+
+namespace Aws
+{
+namespace Auth
+{
+
+const char* AWSAuthHelper::EQ = "=";
+const char* AWSAuthHelper::AWS_HMAC_SHA256 = "AWS4-HMAC-SHA256";
+const char* AWSAuthHelper::AWS4_REQUEST = "aws4_request";
+const char* AWSAuthHelper::SIGNED_HEADERS = "SignedHeaders";
+const char* AWSAuthHelper::CREDENTIAL = "Credential";
+const char* AWSAuthHelper::NEWLINE = "\n";
+const char* AWSAuthHelper::X_AMZN_TRACE_ID = "x-amzn-trace-id";
+const char* AWSAuthHelper::X_AMZ_CONTENT_SHA256 = "x-amz-content-sha256";
+const char* AWSAuthHelper::SIGNING_KEY = "AWS4";
+const char* AWSAuthHelper::SIMPLE_DATE_FORMAT_STR = "%Y%m%d";
+
+Aws::String Aws::Auth::AWSAuthHelper::CanonicalizeRequestSigningString(Aws::Http::HttpRequest& request, bool urlEscapePath)
+{
+ request.CanonicalizeRequest();
+ Aws::StringStream signingStringStream;
+ signingStringStream << Aws::Http::HttpMethodMapper::GetNameForHttpMethod(request.GetMethod());
+
+ Aws::Http::URI uriCpy = request.GetUri();
+ // Many AWS services do not decode the URL before calculating SignatureV4 on their end.
+ // This results in the signature getting calculated with a double encoded URL.
+ // That means we have to double encode it here for the signature to match on the service side.
+ if(urlEscapePath)
+ {
+ // RFC3986 is how we encode the URL before sending it on the wire.
+ uriCpy.SetPath(uriCpy.GetURLEncodedPathRFC3986());
+ // However, SignatureV4 uses this URL encoding scheme
+ signingStringStream << AWSAuthHelper::NEWLINE << uriCpy.GetURLEncodedPath() << AWSAuthHelper::NEWLINE;
+ }
+ else
+ {
+ // For the services that DO decode the URL first; we don't need to double encode it.
+ signingStringStream << AWSAuthHelper::NEWLINE << uriCpy.GetURLEncodedPath() << AWSAuthHelper::NEWLINE;
+ }
+
+ if (request.GetQueryString().find('=') != std::string::npos)
+ {
+ signingStringStream << request.GetQueryString().substr(1) << AWSAuthHelper::NEWLINE;
+ }
+ else if (request.GetQueryString().size() > 1)
+ {
+ signingStringStream << request.GetQueryString().substr(1) << "=" << AWSAuthHelper::NEWLINE;
+ }
+ else
+ {
+ signingStringStream << AWSAuthHelper::NEWLINE;
+ }
+
+ return signingStringStream.str();
+}
+
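CanonicalizeRequestSigningString above emits the first three lines of the canonical request: the HTTP method, the (optionally double-encoded) path, and the normalized query string, where the leading '?' is dropped and a bare key with no '=' gets one appended. The query rule restated as a standalone helper, for illustration only:

#include <string>

// Same normalization as above: "?a=b" -> "a=b", "?acl" -> "acl=", "" or "?" -> "".
std::string CanonicalQueryLine(const std::string& rawQuery)
{
    if (rawQuery.find('=') != std::string::npos)
        return rawQuery.substr(1);
    if (rawQuery.size() > 1)
        return rawQuery.substr(1) + "=";
    return "";
}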
+Aws::Http::HeaderValueCollection Aws::Auth::AWSAuthHelper::CanonicalizeHeaders(Aws::Http::HeaderValueCollection&& headers)
+{
+ Aws::Http::HeaderValueCollection canonicalHeaders;
+ for (const auto& header : headers)
+ {
+ auto trimmedHeaderName = Aws::Utils::StringUtils::Trim(header.first.c_str());
+ auto trimmedHeaderValue = Aws::Utils::StringUtils::Trim(header.second.c_str());
+
+ //multi-line header values get folded onto a single, space-separated line
+ auto headerMultiLine = Aws::Utils::StringUtils::SplitOnLine(trimmedHeaderValue);
+ Aws::String headerValue = headerMultiLine.size() == 0 ? "" : headerMultiLine[0];
+
+ if (headerMultiLine.size() > 1)
+ {
+ for(size_t i = 1; i < headerMultiLine.size(); ++i)
+ {
+ headerValue += " ";
+ headerValue += Aws::Utils::StringUtils::Trim(headerMultiLine[i].c_str());
+ }
+ }
+
+ //duplicate spaces need to be converted to one.
+ Aws::String::iterator new_end =
+ std::unique(headerValue.begin(), headerValue.end(),
+ [=](char lhs, char rhs) { return (lhs == rhs) && (lhs == ' '); }
+ );
+ headerValue.erase(new_end, headerValue.end());
+
+ canonicalHeaders[trimmedHeaderName] = headerValue;
+ }
+
+ return canonicalHeaders;
+}
+
+} // namespace Auth
+} // namespace Aws \ No newline at end of file
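CanonicalizeHeaders trims each header name and value, folds multi-line values onto a single line, and squeezes runs of spaces down to one. A simplified standalone version of the value folding (it replaces newlines with spaces instead of trimming each continuation line, which is enough to show the squeeze step; the std::unique trick is the same one used above):

#include <algorithm>
#include <string>

// "multi\n   line   value" -> "multi line value"
std::string CollapseHeaderValue(std::string value)
{
    std::replace(value.begin(), value.end(), '\n', ' ');
    auto newEnd = std::unique(value.begin(), value.end(),
                              [](char lhs, char rhs) { return lhs == rhs && lhs == ' '; });
    value.erase(newEnd, value.end());
    return value;
}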
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
new file mode 100644
index 0000000000..f8bfdbf867
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSAuthV4Signer.cpp
@@ -0,0 +1,580 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer/AWSAuthV4Signer.h>
+#include <aws/core/auth/signer/AWSAuthSignerCommon.h>
+#include <aws/core/auth/signer/AWSAuthSignerHelper.h>
+
+#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/http/HttpRequest.h>
+#include <aws/core/http/URI.h>
+#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/HashingUtils.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/memory/AWSMemory.h>
+#include <aws/core/utils/crypto/Sha256.h>
+#include <aws/core/utils/crypto/Sha256HMAC.h>
+
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+
+#include <iomanip>
+#include <cstring>
+
+using namespace Aws;
+using namespace Aws::Client;
+using namespace Aws::Auth;
+using namespace Aws::Http;
+using namespace Aws::Utils;
+using namespace Aws::Utils::Logging;
+
+static const char* X_AMZ_SIGNED_HEADERS = "X-Amz-SignedHeaders";
+static const char* X_AMZ_ALGORITHM = "X-Amz-Algorithm";
+static const char* X_AMZ_CREDENTIAL = "X-Amz-Credential";
+static const char* UNSIGNED_PAYLOAD = "UNSIGNED-PAYLOAD";
+static const char* STREAMING_UNSIGNED_PAYLOAD_TRAILER = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";
+static const char* X_AMZ_SIGNATURE = "X-Amz-Signature";
+static const char* USER_AGENT = "user-agent";
+static const char* EMPTY_STRING_SHA256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+
+static const char v4LogTag[] = "AWSAuthV4Signer";
+static const char v4AsymmetricLogTag[] = "AWSAuthSymmetricV4Signer";
+
+namespace Aws
+{
+ namespace Auth
+ {
+ const char SIGV4_SIGNER[] = "SignatureV4";
+ const char ASYMMETRIC_SIGV4_SIGNER[] = "AsymmetricSignatureV4";
+ }
+}
+
+AWSAuthV4Signer::AWSAuthV4Signer(const std::shared_ptr<Auth::AWSCredentialsProvider>& credentialsProvider, const char* serviceName,
+ const Aws::String& region, PayloadSigningPolicy signingPolicy, bool urlEscapePath, AWSSigningAlgorithm signingAlgorithm) :
+ m_includeSha256HashHeader(true),
+ m_signingAlgorithm(signingAlgorithm),
+ m_credentialsProvider(credentialsProvider),
+ m_serviceName(serviceName),
+ m_region(region),
+ m_hash(Aws::MakeUnique<Aws::Utils::Crypto::Sha256>(v4LogTag)),
+ m_HMAC(Aws::MakeUnique<Aws::Utils::Crypto::Sha256HMAC>(v4LogTag)),
+ m_unsignedHeaders({USER_AGENT, Aws::Auth::AWSAuthHelper::X_AMZN_TRACE_ID}),
+ m_payloadSigningPolicy(signingPolicy),
+ m_urlEscapePath(urlEscapePath)
+{
+ //go ahead and warm up the signing cache.
+ ComputeHash(credentialsProvider->GetAWSCredentials().GetAWSSecretKey(), DateTime::CalculateGmtTimestampAsString(Aws::Auth::AWSAuthHelper::SIMPLE_DATE_FORMAT_STR), region, m_serviceName);
+}
+
+AWSAuthV4Signer::~AWSAuthV4Signer()
+{
+ // empty destructor in .cpp file to keep from needing the implementation of (AWSCredentialsProvider, Sha256, Sha256HMAC) in the header file
+}
+
+bool AWSAuthV4Signer::SignRequestWithSigV4a(Aws::Http::HttpRequest& request, const char* region, const char* serviceName,
+ bool signBody, long long expirationTimeInSeconds, Aws::Crt::Auth::SignatureType signatureType) const
+{
+ AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
+ auto crtCredentials = Aws::MakeShared<Aws::Crt::Auth::Credentials>(v4AsymmetricLogTag,
+ Aws::Crt::ByteCursorFromCString(credentials.GetAWSAccessKeyId().c_str()),
+ Aws::Crt::ByteCursorFromCString(credentials.GetAWSSecretKey().c_str()),
+ Aws::Crt::ByteCursorFromCString(credentials.GetSessionToken().c_str()),
+ credentials.GetExpiration().Seconds());
+
+ Aws::Crt::Auth::AwsSigningConfig awsSigningConfig;
+ awsSigningConfig.SetSigningAlgorithm(static_cast<Aws::Crt::Auth::SigningAlgorithm>(AWSSigningAlgorithm::ASYMMETRIC_SIGV4));
+ awsSigningConfig.SetSignatureType(signatureType);
+ awsSigningConfig.SetRegion(region);
+ awsSigningConfig.SetService(serviceName);
+ awsSigningConfig.SetSigningTimepoint(GetSigningTimestamp().UnderlyingTimestamp());
+ awsSigningConfig.SetUseDoubleUriEncode(m_urlEscapePath);
+ awsSigningConfig.SetShouldNormalizeUriPath(true);
+ awsSigningConfig.SetOmitSessionToken(false);
+ awsSigningConfig.SetShouldSignHeaderUserData(reinterpret_cast<void*>(const_cast<Aws::Set<Aws::String>*>(&m_unsignedHeaders)));
+ awsSigningConfig.SetShouldSignHeaderCallback([](const Aws::Crt::ByteCursor *name, void *user_data) {
+ Aws::Set<Aws::String>* unsignedHeaders = static_cast<Aws::Set<Aws::String>*>(user_data);
+ Aws::String headerKey(reinterpret_cast<const char*>(name->ptr), name->len);
+ return unsignedHeaders->find(Aws::Utils::StringUtils::ToLower(headerKey.c_str())) == unsignedHeaders->cend();
+ });
+ if (signatureType == Aws::Crt::Auth::SignatureType::HttpRequestViaHeaders)
+ {
+ Aws::String payloadHash(UNSIGNED_PAYLOAD);
+ if(signBody || request.GetUri().GetScheme() != Http::Scheme::HTTPS)
+ {
+ if (!request.GetContentBody())
+ {
+ AWS_LOGSTREAM_DEBUG(v4AsymmetricLogTag, "Using cached empty string sha256 " << EMPTY_STRING_SHA256 << " because payload is empty.");
+ payloadHash = EMPTY_STRING_SHA256;
+ }
+ else
+ {
+ // The hash will be calculated from the payload during signing.
+ payloadHash = {};
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_DEBUG(v4AsymmetricLogTag, "Note: Http payloads are not being signed. signPayloads=" << signBody
+ << " http scheme=" << Http::SchemeMapper::ToString(request.GetUri().GetScheme()));
+ }
+ awsSigningConfig.SetSignedBodyValue(payloadHash.c_str());
+ awsSigningConfig.SetSignedBodyHeader(m_includeSha256HashHeader ? Aws::Crt::Auth::SignedBodyHeaderType::XAmzContentSha256 : Aws::Crt::Auth::SignedBodyHeaderType::None);
+ }
+ else if (signatureType == Aws::Crt::Auth::SignatureType::HttpRequestViaQueryParams)
+ {
+ if (ServiceRequireUnsignedPayload(serviceName))
+ {
+ awsSigningConfig.SetSignedBodyValue(UNSIGNED_PAYLOAD);
+ }
+ else
+ {
+ awsSigningConfig.SetSignedBodyValue(EMPTY_STRING_SHA256);
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(v4AsymmetricLogTag, "The signature type should be either \"HttpRequestViaHeaders\" or \"HttpRequestViaQueryParams\"");
+ return false;
+ }
+ awsSigningConfig.SetExpirationInSeconds(static_cast<uint64_t>(expirationTimeInSeconds));
+ awsSigningConfig.SetCredentials(crtCredentials);
+
+ std::shared_ptr<Aws::Crt::Http::HttpRequest> crtHttpRequest = request.ToCrtHttpRequest();
+
+ auto sigv4HttpRequestSigner = Aws::MakeShared<Aws::Crt::Auth::Sigv4HttpRequestSigner>(v4AsymmetricLogTag);
+ bool success = true;
+ sigv4HttpRequestSigner->SignRequest(crtHttpRequest, awsSigningConfig,
+ [&request, &success, signatureType](const std::shared_ptr<Aws::Crt::Http::HttpRequest>& signedCrtHttpRequest, int errorCode) {
+ success = (errorCode == AWS_ERROR_SUCCESS);
+ if (success)
+ {
+ if (signatureType == Aws::Crt::Auth::SignatureType::HttpRequestViaHeaders)
+ {
+ for (size_t i = 0; i < signedCrtHttpRequest->GetHeaderCount(); i++)
+ {
+ Aws::Crt::Optional<Aws::Crt::Http::HttpHeader> httpHeader = signedCrtHttpRequest->GetHeader(i);
+ request.SetHeaderValue(Aws::String(reinterpret_cast<const char*>(httpHeader->name.ptr), httpHeader->name.len),
+ Aws::String(reinterpret_cast<const char*>(httpHeader->value.ptr), httpHeader->value.len));
+ }
+ }
+ else if (signatureType == Aws::Crt::Auth::SignatureType::HttpRequestViaQueryParams)
+ {
+ Aws::Http::URI newPath(reinterpret_cast<const char*>(signedCrtHttpRequest->GetPath()->ptr));
+ request.GetUri().SetQueryString(newPath.GetQueryString());
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(v4AsymmetricLogTag, "No action to take when signature type is neither \"HttpRequestViaHeaders\" nor \"HttpRequestViaQueryParams\"");
+ success = false;
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(v4AsymmetricLogTag, "Encountered internal error during signing process with AWS signature version 4 (Asymmetric):" << aws_error_str(errorCode));
+ }
+ }
+ );
+ return success;
+}
+
+bool AWSAuthV4Signer::ShouldSignHeader(const Aws::String& header) const
+{
+ return m_unsignedHeaders.find(Aws::Utils::StringUtils::ToLower(header.c_str())) == m_unsignedHeaders.cend();
+}
+
+bool AWSAuthV4Signer::SignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, bool signBody) const
+{
+ Aws::String signingRegion = region ? region : m_region;
+ Aws::String signingServiceName = serviceName ? serviceName : m_serviceName;
+ AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
+
+ //don't sign anonymous requests
+ if (credentials.GetAWSAccessKeyId().empty() || credentials.GetAWSSecretKey().empty())
+ {
+ return true;
+ }
+
+ request.SetSigningAccessKey(credentials.GetAWSAccessKeyId());
+ request.SetSigningRegion(signingRegion);
+
+ Aws::String payloadHash(UNSIGNED_PAYLOAD);
+ switch(m_payloadSigningPolicy)
+ {
+ case PayloadSigningPolicy::Always:
+ signBody = true;
+ break;
+ case PayloadSigningPolicy::Never:
+ signBody = false;
+ break;
+ case PayloadSigningPolicy::RequestDependent:
+ // respect the request setting
+ default:
+ break;
+ }
+
+ if (m_signingAlgorithm == AWSSigningAlgorithm::ASYMMETRIC_SIGV4)
+ {
+ // Replace m_serviceName with signingServiceName after rebasing on S3 outposts.
+ return SignRequestWithSigV4a(request, signingRegion.c_str(), m_serviceName.c_str(), signBody,
+ 0 /* expirationTimeInSeconds doesn't matter for HttpRequestViaHeaders */, Aws::Crt::Auth::SignatureType::HttpRequestViaHeaders);
+ }
+
+ if (!credentials.GetSessionToken().empty())
+ {
+ request.SetAwsSessionToken(credentials.GetSessionToken());
+ }
+
+ if(signBody || request.GetUri().GetScheme() != Http::Scheme::HTTPS)
+ {
+ payloadHash = ComputePayloadHash(request);
+ if (payloadHash.empty())
+ {
+ return false;
+ }
+ if (request.GetRequestHash().second != nullptr)
+ {
+ Aws::String checksumHeaderKey = Aws::String("x-amz-checksum-") + request.GetRequestHash().first;
+ Aws::String checksumHeaderValue = HashingUtils::Base64Encode(request.GetRequestHash().second->Calculate(*(request.GetContentBody())).GetResult());
+ request.SetHeaderValue(checksumHeaderKey, checksumHeaderValue);
+ request.SetRequestHash("", nullptr);
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Note: Http payloads are not being signed. signPayloads=" << signBody
+ << " http scheme=" << Http::SchemeMapper::ToString(request.GetUri().GetScheme()));
+ if (request.GetRequestHash().second != nullptr)
+ {
+ payloadHash = STREAMING_UNSIGNED_PAYLOAD_TRAILER;
+ Aws::String trailerHeaderValue = Aws::String("x-amz-checksum-") + request.GetRequestHash().first;
+ request.SetHeaderValue(Http::AWS_TRAILER_HEADER, trailerHeaderValue);
+ request.SetTransferEncoding(CHUNKED_VALUE);
+ request.SetHeaderValue(Http::CONTENT_ENCODING_HEADER, Http::AWS_CHUNKED_VALUE);
+ request.SetHeaderValue(Http::DECODED_CONTENT_LENGTH_HEADER, request.GetHeaderValue(Http::CONTENT_LENGTH_HEADER));
+ request.DeleteHeader(Http::CONTENT_LENGTH_HEADER);
+ }
+ }
+
+ if(m_includeSha256HashHeader)
+ {
+ request.SetHeaderValue(Aws::Auth::AWSAuthHelper::X_AMZ_CONTENT_SHA256, payloadHash);
+ }
+
+ //calculate date header to use in internal signature (this also goes into date header).
+ DateTime now = GetSigningTimestamp();
+ Aws::String dateHeaderValue = now.ToGmtString(DateFormat::ISO_8601_BASIC);
+ request.SetHeaderValue(AWS_DATE_HEADER, dateHeaderValue);
+
+ Aws::StringStream headersStream;
+ Aws::StringStream signedHeadersStream;
+
+ for (const auto& header : Aws::Auth::AWSAuthHelper::CanonicalizeHeaders(request.GetHeaders()))
+ {
+ if(ShouldSignHeader(header.first))
+ {
+ headersStream << header.first.c_str() << ":" << header.second.c_str() << Aws::Auth::AWSAuthHelper::NEWLINE;
+ signedHeadersStream << header.first.c_str() << ";";
+ }
+ }
+
+ Aws::String canonicalHeadersString = headersStream.str();
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Header String: " << canonicalHeadersString);
+
+ //calculate signed headers parameter
+ Aws::String signedHeadersValue = signedHeadersStream.str();
+ //remove that last semi-colon
+ if (!signedHeadersValue.empty())
+ {
+ signedHeadersValue.pop_back();
+ }
+
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Signed Headers value:" << signedHeadersValue);
+
+ //generate generalized canonicalized request string.
+ Aws::String canonicalRequestString = Aws::Auth::AWSAuthHelper::CanonicalizeRequestSigningString(request, m_urlEscapePath);
+
+ //append v4 stuff to the canonical request string.
+ canonicalRequestString.append(canonicalHeadersString);
+ canonicalRequestString.append(Aws::Auth::AWSAuthHelper::NEWLINE);
+ canonicalRequestString.append(signedHeadersValue);
+ canonicalRequestString.append(Aws::Auth::AWSAuthHelper::NEWLINE);
+ canonicalRequestString.append(payloadHash);
+
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Request String: " << canonicalRequestString);
+
+ //now compute sha256 on that request string
+ auto hashResult = m_hash->Calculate(canonicalRequestString);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to hash (sha256) request string");
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "The request string is: \"" << canonicalRequestString << "\"");
+ return false;
+ }
+
+ auto sha256Digest = hashResult.GetResult();
+ Aws::String canonicalRequestHash = HashingUtils::HexEncode(sha256Digest);
+ Aws::String simpleDate = now.ToGmtString(Aws::Auth::AWSAuthHelper::SIMPLE_DATE_FORMAT_STR);
+
+ Aws::String stringToSign = GenerateStringToSign(dateHeaderValue, simpleDate, canonicalRequestHash, signingRegion, signingServiceName);
+ auto finalSignature = GenerateSignature(credentials, stringToSign, simpleDate, signingRegion, signingServiceName);
+
+ Aws::StringStream ss;
+ ss << Aws::Auth::AWSAuthHelper::AWS_HMAC_SHA256 << " " << Aws::Auth::AWSAuthHelper::CREDENTIAL << Aws::Auth::AWSAuthHelper::EQ << credentials.GetAWSAccessKeyId() << "/" << simpleDate
+ << "/" << signingRegion << "/" << signingServiceName << "/" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST << ", " << Aws::Auth::AWSAuthHelper::SIGNED_HEADERS << Aws::Auth::AWSAuthHelper::EQ
+ << signedHeadersValue << ", " << SIGNATURE << Aws::Auth::AWSAuthHelper::EQ << finalSignature;
+
+ auto awsAuthString = ss.str();
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Signing request with: " << awsAuthString);
+ request.SetAwsAuthorization(awsAuthString);
+ return true;
+}
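The Authorization header assembled at the end of SignRequest has the fixed shape "AWS4-HMAC-SHA256 Credential=<access key>/<date>/<region>/<service>/aws4_request, SignedHeaders=<h1;h2;...>, Signature=<hex digest>". A small self-contained illustration with placeholder values (the values are invented for the example; in the signer they come from the credentials, the signing clock and GenerateSignature):

#include <iostream>
#include <sstream>
#include <string>

int main()
{
    // Placeholder inputs for illustration only.
    const std::string accessKey = "AKIDEXAMPLE";
    const std::string simpleDate = "20230101";
    const std::string region = "us-east-1";
    const std::string service = "s3";
    const std::string signedHeaders = "host;x-amz-content-sha256;x-amz-date";
    const std::string signature = "0123456789abcdef...";   // hex-encoded HMAC-SHA256

    std::ostringstream ss;
    ss << "AWS4-HMAC-SHA256 Credential=" << accessKey << "/" << simpleDate << "/"
       << region << "/" << service << "/aws4_request, SignedHeaders=" << signedHeaders
       << ", Signature=" << signature;
    std::cout << ss.str() << std::endl;
    return 0;
}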
+
+bool AWSAuthV4Signer::PresignRequest(Aws::Http::HttpRequest& request, long long expirationTimeInSeconds) const
+{
+ return PresignRequest(request, m_region.c_str(), expirationTimeInSeconds);
+}
+
+bool AWSAuthV4Signer::PresignRequest(Aws::Http::HttpRequest& request, const char* region, long long expirationInSeconds) const
+{
+ return PresignRequest(request, region, m_serviceName.c_str(), expirationInSeconds);
+}
+
+bool AWSAuthV4Signer::PresignRequest(Aws::Http::HttpRequest& request, const char* region, const char* serviceName, long long expirationTimeInSeconds) const
+{
+ Aws::String signingRegion = region ? region : m_region;
+ Aws::String signingServiceName = serviceName ? serviceName : m_serviceName;
+ AWSCredentials credentials = m_credentialsProvider->GetAWSCredentials();
+
+ //don't sign anonymous requests
+ if (credentials.GetAWSAccessKeyId().empty() || credentials.GetAWSSecretKey().empty())
+ {
+ return true;
+ }
+
+ if (m_signingAlgorithm == AWSSigningAlgorithm::ASYMMETRIC_SIGV4)
+ {
+ return SignRequestWithSigV4a(request, signingRegion.c_str(), signingServiceName.c_str(), false /* signBody doesn't matter for HttpRequestViaHeaders */,
+ expirationTimeInSeconds, Aws::Crt::Auth::SignatureType::HttpRequestViaQueryParams);
+ }
+
+ Aws::StringStream intConversionStream;
+ intConversionStream << expirationTimeInSeconds;
+ request.AddQueryStringParameter(Http::X_AMZ_EXPIRES_HEADER, intConversionStream.str());
+
+ if (!credentials.GetSessionToken().empty())
+ {
+ request.AddQueryStringParameter(Http::AWS_SECURITY_TOKEN, credentials.GetSessionToken());
+ }
+
+ //calculate date header to use in internal signature (this also goes into date header).
+ DateTime now = GetSigningTimestamp();
+ Aws::String dateQueryValue = now.ToGmtString(DateFormat::ISO_8601_BASIC);
+ request.AddQueryStringParameter(Http::AWS_DATE_HEADER, dateQueryValue);
+
+ Aws::StringStream headersStream;
+ Aws::StringStream signedHeadersStream;
+ for (const auto& header : Aws::Auth::AWSAuthHelper::CanonicalizeHeaders(request.GetHeaders()))
+ {
+ if(ShouldSignHeader(header.first))
+ {
+ headersStream << header.first.c_str() << ":" << header.second.c_str() << Aws::Auth::AWSAuthHelper::NEWLINE;
+ signedHeadersStream << header.first.c_str() << ";";
+ }
+ }
+
+ Aws::String canonicalHeadersString = headersStream.str();
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Header String: " << canonicalHeadersString);
+
+ //calculate signed headers parameter
+ Aws::String signedHeadersValue(signedHeadersStream.str());
+ //remove that last semi-colon
+ if (!signedHeadersValue.empty())
+ {
+ signedHeadersValue.pop_back();
+ }
+
+ request.AddQueryStringParameter(X_AMZ_SIGNED_HEADERS, signedHeadersValue);
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Signed Headers value: " << signedHeadersValue);
+
+ Aws::StringStream ss;
+ Aws::String simpleDate = now.ToGmtString(Aws::Auth::AWSAuthHelper::SIMPLE_DATE_FORMAT_STR);
+ ss << credentials.GetAWSAccessKeyId() << "/" << simpleDate
+ << "/" << signingRegion << "/" << signingServiceName << "/" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST;
+
+ request.AddQueryStringParameter(X_AMZ_ALGORITHM, Aws::Auth::AWSAuthHelper::AWS_HMAC_SHA256);
+ request.AddQueryStringParameter(X_AMZ_CREDENTIAL, ss.str());
+ ss.str("");
+
+ request.SetSigningAccessKey(credentials.GetAWSAccessKeyId());
+ request.SetSigningRegion(signingRegion);
+
+ //generate generalized canonicalized request string.
+ Aws::String canonicalRequestString = Aws::Auth::AWSAuthHelper::CanonicalizeRequestSigningString(request, m_urlEscapePath);
+
+ //append v4 stuff to the canonical request string.
+ canonicalRequestString.append(canonicalHeadersString);
+ canonicalRequestString.append(Aws::Auth::AWSAuthHelper::NEWLINE);
+ canonicalRequestString.append(signedHeadersValue);
+ canonicalRequestString.append(Aws::Auth::AWSAuthHelper::NEWLINE);
+ if (ServiceRequireUnsignedPayload(signingServiceName))
+ {
+ canonicalRequestString.append(UNSIGNED_PAYLOAD);
+ }
+ else
+ {
+ canonicalRequestString.append(EMPTY_STRING_SHA256);
+ }
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Canonical Request String: " << canonicalRequestString);
+
+ //now compute sha256 on that request string
+ auto hashResult = m_hash->Calculate(canonicalRequestString);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to hash (sha256) request string");
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "The request string is: \"" << canonicalRequestString << "\"");
+ return false;
+ }
+
+ auto sha256Digest = hashResult.GetResult();
+ auto canonicalRequestHash = HashingUtils::HexEncode(sha256Digest);
+
+ auto stringToSign = GenerateStringToSign(dateQueryValue, simpleDate, canonicalRequestHash, signingRegion, signingServiceName);
+ auto finalSigningHash = GenerateSignature(credentials, stringToSign, simpleDate, signingRegion, signingServiceName);
+ if (finalSigningHash.empty())
+ {
+ return false;
+ }
+
+ //add the signature to the query string
+ request.AddQueryStringParameter(X_AMZ_SIGNATURE, finalSigningHash);
+
+ return true;
+}
+
+bool AWSAuthV4Signer::ServiceRequireUnsignedPayload(const Aws::String& serviceName) const
+{
+ // S3 uses a magic string (instead of the empty string) for its body hash for presigned URLs as outlined here:
+ // https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html
+ // this is true for PUT, POST, GET, DELETE and HEAD operations.
+ // However, other services (for example RDS) implement the specification as outlined here:
+ // https://docs.aws.amazon.com/general/latest/gr/sigv4-create-canonical-request.html
+ // which states that body-less requests should use the empty-string SHA256 hash.
+ return "s3" == serviceName || "s3-object-lambda" == serviceName;
+}
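For the presigned-URL path this means the body-hash value in the canonical request is chosen per service: the S3 family gets the UNSIGNED-PAYLOAD sentinel, everything else the SHA-256 of an empty body. The rule restated as a helper, purely for illustration:

#include <string>

std::string PresignedBodyHash(const std::string& service)
{
    // SHA-256 of the empty string, as used for body-less requests.
    static const std::string kEmptySha256 =
        "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
    return (service == "s3" || service == "s3-object-lambda")
               ? std::string("UNSIGNED-PAYLOAD")
               : kEmptySha256;
}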
+
+Aws::String AWSAuthV4Signer::GenerateSignature(const AWSCredentials& credentials, const Aws::String& stringToSign,
+ const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
+{
+ auto key = ComputeHash(credentials.GetAWSSecretKey(), simpleDate, region, serviceName);
+ return GenerateSignature(stringToSign, key);
+}
+
+Aws::String AWSAuthV4Signer::GenerateSignature(const Aws::String& stringToSign, const ByteBuffer& key) const
+{
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Final String to sign: " << stringToSign);
+
+ Aws::StringStream ss;
+
+ auto hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)stringToSign.c_str(), stringToSign.length()), key);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Unable to hmac (sha256) final string");
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "The final string is: \"" << stringToSign << "\"");
+ return {};
+ }
+
+ //now we finally sign our request string with our hex encoded derived hash.
+ auto finalSigningDigest = hashResult.GetResult();
+
+ auto finalSigningHash = HashingUtils::HexEncode(finalSigningDigest);
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Final computed signing hash: " << finalSigningHash);
+
+ return finalSigningHash;
+}
+
+Aws::String AWSAuthV4Signer::ComputePayloadHash(Aws::Http::HttpRequest& request) const
+{
+ if (!request.GetContentBody())
+ {
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Using cached empty string sha256 " << EMPTY_STRING_SHA256 << " because payload is empty.");
+ return EMPTY_STRING_SHA256;
+ }
+
+ //compute hash on payload if it exists.
+ auto hashResult = m_hash->Calculate(*request.GetContentBody());
+
+ if(request.GetContentBody())
+ {
+ request.GetContentBody()->clear();
+ request.GetContentBody()->seekg(0);
+ }
+
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Unable to hash (sha256) request body");
+ return {};
+ }
+
+ auto sha256Digest = hashResult.GetResult();
+
+ Aws::String payloadHash(HashingUtils::HexEncode(sha256Digest));
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "Calculated sha256 " << payloadHash << " for payload.");
+ return payloadHash;
+}
+
+Aws::String AWSAuthV4Signer::GenerateStringToSign(const Aws::String& dateValue, const Aws::String& simpleDate,
+ const Aws::String& canonicalRequestHash, const Aws::String& region, const Aws::String& serviceName) const
+{
+ //generate the actual string we will use in signing the final request.
+ Aws::StringStream ss;
+
+ ss << Aws::Auth::AWSAuthHelper::AWS_HMAC_SHA256 << Aws::Auth::AWSAuthHelper::NEWLINE << dateValue << Aws::Auth::AWSAuthHelper::NEWLINE << simpleDate << "/" << region << "/"
+ << serviceName << "/" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST << Aws::Auth::AWSAuthHelper::NEWLINE << canonicalRequestHash;
+
+ return ss.str();
+}
+
+Aws::Utils::ByteBuffer AWSAuthV4Signer::ComputeHash(const Aws::String& secretKey,
+ const Aws::String& simpleDate, const Aws::String& region, const Aws::String& serviceName) const
+{
+ Aws::String signingKey(Aws::Auth::AWSAuthHelper::SIGNING_KEY);
+ signingKey.append(secretKey);
+ auto hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)simpleDate.c_str(), simpleDate.length()),
+ ByteBuffer((unsigned char*)signingKey.c_str(), signingKey.length()));
+
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to HMAC (SHA256) date string \"" << simpleDate << "\"");
+ return {};
+ }
+
+ auto kDate = hashResult.GetResult();
+ hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)region.c_str(), region.length()), kDate);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to HMAC (SHA256) region string \"" << region << "\"");
+ return {};
+ }
+
+ auto kRegion = hashResult.GetResult();
+ hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)serviceName.c_str(), serviceName.length()), kRegion);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Failed to HMAC (SHA256) service string \"" << m_serviceName << "\"");
+ return {};
+ }
+
+ auto kService = hashResult.GetResult();
+ hashResult = m_HMAC->Calculate(ByteBuffer((unsigned char*)Aws::Auth::AWSAuthHelper::AWS4_REQUEST, strlen(Aws::Auth::AWSAuthHelper::AWS4_REQUEST)), kService);
+ if (!hashResult.IsSuccess())
+ {
+ AWS_LOGSTREAM_ERROR(v4LogTag, "Unable to HMAC (SHA256) request string");
+ AWS_LOGSTREAM_DEBUG(v4LogTag, "The request string is: \"" << Aws::Auth::AWSAuthHelper::AWS4_REQUEST << "\"");
+ return {};
+ }
+ return hashResult.GetResult();
+}
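GenerateStringToSign, used by both the header-signing and presigning paths, always produces the same four newline-separated parts: the algorithm name, the ISO-8601 timestamp, the credential scope, and the hex-encoded SHA-256 of the canonical request. A standalone restatement for reference:

#include <sstream>
#include <string>

// algorithm \n timestamp \n <date>/<region>/<service>/aws4_request \n hex(sha256(canonical request))
std::string StringToSign(const std::string& isoTimestamp, const std::string& simpleDate,
                         const std::string& region, const std::string& service,
                         const std::string& canonicalRequestHashHex)
{
    std::ostringstream ss;
    ss << "AWS4-HMAC-SHA256" << "\n"
       << isoTimestamp << "\n"
       << simpleDate << "/" << region << "/" << service << "/aws4_request" << "\n"
       << canonicalRequestHashHex;
    return ss.str();
}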
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
new file mode 100644
index 0000000000..d94cb421f2
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/auth/signer/AWSNullSigner.cpp
@@ -0,0 +1,14 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/auth/signer/AWSNullSigner.h>
+
+namespace Aws
+{
+ namespace Auth
+ {
+ const char NULL_SIGNER[] = "NullSigner";
+ }
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
index 4b2a38b4e6..d1a6f262c9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSClient.cpp
@@ -7,11 +7,13 @@
#include <aws/core/AmazonWebServiceRequest.h>
#include <aws/core/auth/AWSAuthSigner.h>
#include <aws/core/auth/AWSAuthSignerProvider.h>
+#include <aws/core/client/AWSUrlPresigner.h>
#include <aws/core/client/AWSError.h>
#include <aws/core/client/AWSErrorMarshaller.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/CoreErrors.h>
#include <aws/core/client/RetryStrategy.h>
+#include <aws/core/client/RequestCompression.h>
#include <aws/core/http/HttpClient.h>
#include <aws/core/http/HttpClientFactory.h>
#include <aws/core/http/HttpResponse.h>
@@ -27,6 +29,9 @@
#include <aws/core/Globals.h>
#include <aws/core/utils/EnumParseOverflowContainer.h>
#include <aws/core/utils/crypto/MD5.h>
+#include <aws/core/utils/crypto/CRC32.h>
+#include <aws/core/utils/crypto/Sha256.h>
+#include <aws/core/utils/crypto/Sha1.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/core/utils/crypto/Factories.h>
#include <aws/core/utils/event/EventStream.h>
@@ -35,10 +40,12 @@
#include <aws/core/Region.h>
#include <aws/core/utils/DNS.h>
#include <aws/core/Version.h>
+#include <aws/core/platform/Environment.h>
#include <aws/core/platform/OSVersionInfo.h>
#include <cstring>
#include <cassert>
+#include <iomanip>
using namespace Aws;
using namespace Aws::Client;
@@ -51,12 +58,15 @@ static const int SUCCESS_RESPONSE_MIN = 200;
static const int SUCCESS_RESPONSE_MAX = 299;
static const char AWS_CLIENT_LOG_TAG[] = "AWSClient";
+static const char AWS_LAMBDA_FUNCTION_NAME[] = "AWS_LAMBDA_FUNCTION_NAME";
+static const char X_AMZN_TRACE_ID[] = "_X_AMZN_TRACE_ID";
+
//4 Minutes
static const std::chrono::milliseconds TIME_DIFF_MAX = std::chrono::minutes(4);
//-4 Minutes
static const std::chrono::milliseconds TIME_DIFF_MIN = std::chrono::minutes(-4);
-static CoreErrors GuessBodylessErrorType(Aws::Http::HttpResponseCode responseCode)
+CoreErrors AWSClient::GuessBodylessErrorType(Aws::Http::HttpResponseCode responseCode)
{
switch (responseCode)
{
@@ -70,6 +80,14 @@ static CoreErrors GuessBodylessErrorType(Aws::Http::HttpResponseCode responseCod
}
}
+bool AWSClient::DoesResponseGenerateError(const std::shared_ptr<HttpResponse>& response)
+{
+ if (response->HasClientError()) return true;
+
+ int responseCode = static_cast<int>(response->GetResponseCode());
+ return responseCode < SUCCESS_RESPONSE_MIN || responseCode > SUCCESS_RESPONSE_MAX;
+}
+
struct RequestInfo
{
Aws::Utils::DateTime ttl;
@@ -107,9 +125,10 @@ AWSClient::AWSClient(const Aws::Client::ClientConfiguration& configuration,
m_customizedUserAgent(!m_userAgent.empty()),
m_hash(Aws::Utils::Crypto::CreateMD5Implementation()),
m_requestTimeoutMs(configuration.requestTimeoutMs),
- m_enableClockSkewAdjustment(configuration.enableClockSkewAdjustment)
+ m_enableClockSkewAdjustment(configuration.enableClockSkewAdjustment),
+ m_requestCompressionConfig(configuration.requestCompressionConfig)
{
- SetServiceClientName("AWSBaseClient");
+ AWSClient::SetServiceClientName("AWSBaseClient");
}
AWSClient::AWSClient(const Aws::Client::ClientConfiguration& configuration,
@@ -126,9 +145,10 @@ AWSClient::AWSClient(const Aws::Client::ClientConfiguration& configuration,
m_customizedUserAgent(!m_userAgent.empty()),
m_hash(Aws::Utils::Crypto::CreateMD5Implementation()),
m_requestTimeoutMs(configuration.requestTimeoutMs),
- m_enableClockSkewAdjustment(configuration.enableClockSkewAdjustment)
+ m_enableClockSkewAdjustment(configuration.enableClockSkewAdjustment),
+ m_requestCompressionConfig(configuration.requestCompressionConfig)
{
- SetServiceClientName("AWSBaseClient");
+ AWSClient::SetServiceClientName("AWSBaseClient");
}
void AWSClient::SetServiceClientName(const Aws::String& name)
@@ -136,10 +156,7 @@ void AWSClient::SetServiceClientName(const Aws::String& name)
m_serviceName = name;
if (!m_customizedUserAgent)
{
- Aws::StringStream ss;
- ss << "aws-sdk-cpp/" << Version::GetVersionString() << " " << Aws::OSVersionInfo::ComputeOSVersionString()
- << " " << Version::GetCompilerVersionString();
- m_userAgent = ss.str();
+ m_userAgent = Aws::Client::ComputeUserAgentString();
}
}
@@ -232,16 +249,24 @@ HttpResponseOutcome AWSClient::AttemptExhaustively(const Aws::Http::URI& uri,
const char* signerRegion = signerRegionOverride;
Aws::String regionFromResponse;
- Aws::String invocationId = UUID::RandomUUID();
+ Aws::String invocationId = Aws::Utils::UUID::RandomUUID();
RequestInfo requestInfo;
requestInfo.attempt = 1;
requestInfo.maxAttempts = 0;
httpRequest->SetHeaderValue(Http::SDK_INVOCATION_ID_HEADER, invocationId);
httpRequest->SetHeaderValue(Http::SDK_REQUEST_HEADER, requestInfo);
+ AppendRecursionDetectionHeader(httpRequest);
for (long retries = 0;; retries++)
{
- m_retryStrategy->GetSendToken();
+ if(!m_retryStrategy->HasSendToken())
+ {
+ return HttpResponseOutcome(AWSError<CoreErrors>(CoreErrors::SLOW_DOWN,
+ "",
+ "Unable to acquire enough send tokens to execute request.",
+ false/*retryable*/));
+
+ }
httpRequest->SetEventStreamRequest(request.IsEventStreamRequest());
outcome = AttemptOneRequest(httpRequest, request, signerName, signerRegion, signerServiceNameOverride);
@@ -358,16 +383,24 @@ HttpResponseOutcome AWSClient::AttemptExhaustively(const Aws::Http::URI& uri,
const char* signerRegion = signerRegionOverride;
Aws::String regionFromResponse;
- Aws::String invocationId = UUID::RandomUUID();
+ Aws::String invocationId = Aws::Utils::UUID::RandomUUID();
RequestInfo requestInfo;
requestInfo.attempt = 1;
requestInfo.maxAttempts = 0;
httpRequest->SetHeaderValue(Http::SDK_INVOCATION_ID_HEADER, invocationId);
httpRequest->SetHeaderValue(Http::SDK_REQUEST_HEADER, requestInfo);
+ AppendRecursionDetectionHeader(httpRequest);
for (long retries = 0;; retries++)
{
- m_retryStrategy->GetSendToken();
+ if(!m_retryStrategy->HasSendToken())
+ {
+ return HttpResponseOutcome(AWSError<CoreErrors>(CoreErrors::SLOW_DOWN,
+ "",
+ "Unable to acquire enough send tokens to execute request.",
+ false/*retryable*/));
+
+ }
outcome = AttemptOneRequest(httpRequest, signerName, requestName, signerRegion, signerServiceNameOverride);
if (retries == 0)
{
@@ -452,15 +485,6 @@ HttpResponseOutcome AWSClient::AttemptExhaustively(const Aws::Http::URI& uri,
return outcome;
}
-static bool DoesResponseGenerateError(const std::shared_ptr<HttpResponse>& response)
-{
- if (response->HasClientError()) return true;
-
- int responseCode = static_cast<int>(response->GetResponseCode());
- return responseCode < SUCCESS_RESPONSE_MIN || responseCode > SUCCESS_RESPONSE_MAX;
-
-}
-
HttpResponseOutcome AWSClient::AttemptOneRequest(const std::shared_ptr<HttpRequest>& httpRequest, const Aws::AmazonWebServiceRequest& request,
const char* signerName, const char* signerRegionOverride, const char* signerServiceNameOverride) const
{
@@ -481,7 +505,31 @@ HttpResponseOutcome AWSClient::AttemptOneRequest(const std::shared_ptr<HttpReque
std::shared_ptr<HttpResponse> httpResponse(
m_httpClient->MakeRequest(httpRequest, m_readRateLimiter.get(), m_writeRateLimiter.get()));
- if (DoesResponseGenerateError(httpResponse))
+ if (request.ShouldValidateResponseChecksum())
+ {
+ for (const auto& hashIterator : httpRequest->GetResponseValidationHashes())
+ {
+ Aws::String checksumHeaderKey = Aws::String("x-amz-checksum-") + hashIterator.first;
+ // TODO: If checksum ends with -#, then skip
+ if (httpResponse->HasHeader(checksumHeaderKey.c_str()))
+ {
+ Aws::String checksumHeaderValue = httpResponse->GetHeader(checksumHeaderKey.c_str());
+ if (HashingUtils::Base64Encode(hashIterator.second->GetHash().GetResult()) != checksumHeaderValue)
+ {
+ AWSError<CoreErrors> error(CoreErrors::VALIDATION, "", "Response checksums mismatch", false/*retryable*/);
+ error.SetResponseHeaders(httpResponse->GetHeaders());
+ error.SetResponseCode(httpResponse->GetResponseCode());
+ error.SetRemoteHostIpAddress(httpResponse->GetOriginatingRequest().GetResolvedRemoteHost());
+ AWS_LOGSTREAM_ERROR(AWS_CLIENT_LOG_TAG, error);
+ return HttpResponseOutcome(error);
+ }
+ // Validate only a single checksum returned in an HTTP response
+ break;
+ }
+ }
+ }
+
+ if (DoesResponseGenerateError(httpResponse) || request.HasEmbeddedError(httpResponse->GetResponseBody(), httpResponse->GetHeaders()))
{
AWS_LOGSTREAM_DEBUG(AWS_CLIENT_LOG_TAG, "Request returned error. Attempting to generate appropriate error codes from response");
auto error = BuildAWSError(httpResponse);
@@ -560,6 +608,54 @@ StreamOutcome AWSClient::MakeRequestWithUnparsedResponse(const Aws::Http::URI& u
return StreamOutcome(std::move(httpResponseOutcome));
}
+StreamOutcome AWSClient::MakeRequestWithUnparsedResponse(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method,
+ const char* signerName,
+ const char* signerRegionOverride,
+ const char* signerServiceNameOverride) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+
+ return MakeRequestWithUnparsedResponse(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride);
+}
+
+XmlOutcome AWSXMLClient::MakeRequestWithEventStream(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method,
+ const char* signerName,
+ const char* signerRegionOverride,
+ const char* signerServiceNameOverride) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+
+ return MakeRequestWithEventStream(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride);
+}
+
XmlOutcome AWSXMLClient::MakeRequestWithEventStream(const Aws::Http::URI& uri,
const Aws::AmazonWebServiceRequest& request,
Http::HttpMethod method,
@@ -603,6 +699,119 @@ void AWSClient::AddHeadersToRequest(const std::shared_ptr<Aws::Http::HttpRequest
AddCommonHeaders(*httpRequest);
}
+void AWSClient::AppendHeaderValueToRequest(const std::shared_ptr<HttpRequest> &httpRequest, const String header, const String value) const
+{
+ if (!httpRequest->HasHeader(header.c_str()))
+ {
+ httpRequest->SetHeaderValue(header, value);
+ }
+ else
+ {
+ Aws::String contentEncoding = httpRequest->GetHeaderValue(header.c_str());
+ contentEncoding.append(",").append(value);
+ httpRequest->SetHeaderValue(header, contentEncoding);
+ }
+}
+
+void AWSClient::AddChecksumToRequest(const std::shared_ptr<Aws::Http::HttpRequest>& httpRequest,
+ const Aws::AmazonWebServiceRequest& request) const
+{
+ Aws::String checksumAlgorithmName = Aws::Utils::StringUtils::ToLower(request.GetChecksumAlgorithmName().c_str());
+
+ // Request checksums
+ if (!checksumAlgorithmName.empty())
+ {
+ // For non-streaming payload, the resolved checksum location is always header.
+ // For streaming payloads, the resolved checksum location depends on whether the payload is unsigned, so we let AwsAuthSigner decide it.
+ if (checksumAlgorithmName == "crc32")
+ {
+ if (request.IsStreaming())
+ {
+ httpRequest->SetRequestHash("crc32", Aws::MakeShared<Crypto::CRC32>(AWS_CLIENT_LOG_TAG));
+ }
+ else
+ {
+ httpRequest->SetHeaderValue("x-amz-checksum-crc32", HashingUtils::Base64Encode(HashingUtils::CalculateCRC32(*(GetBodyStream(request)))));
+ }
+ }
+ else if (checksumAlgorithmName == "crc32c")
+ {
+ if (request.IsStreaming())
+ {
+ httpRequest->SetRequestHash("crc32c", Aws::MakeShared<Crypto::CRC32C>(AWS_CLIENT_LOG_TAG));
+ }
+ else
+ {
+ httpRequest->SetHeaderValue("x-amz-checksum-crc32c", HashingUtils::Base64Encode(HashingUtils::CalculateCRC32C(*(GetBodyStream(request)))));
+ }
+ }
+ else if (checksumAlgorithmName == "sha256")
+ {
+ if (request.IsStreaming())
+ {
+ httpRequest->SetRequestHash("sha256", Aws::MakeShared<Crypto::Sha256>(AWS_CLIENT_LOG_TAG));
+ }
+ else
+ {
+ httpRequest->SetHeaderValue("x-amz-checksum-sha256", HashingUtils::Base64Encode(HashingUtils::CalculateSHA256(*(GetBodyStream(request)))));
+ }
+ }
+ else if (checksumAlgorithmName == "sha1")
+ {
+ if (request.IsStreaming())
+ {
+ httpRequest->SetRequestHash("sha1", Aws::MakeShared<Crypto::Sha1>(AWS_CLIENT_LOG_TAG));
+ }
+ else
+ {
+ httpRequest->SetHeaderValue("x-amz-checksum-sha1", HashingUtils::Base64Encode(HashingUtils::CalculateSHA1(*(GetBodyStream(request)))));
+ }
+ }
+ else if (checksumAlgorithmName == "md5")
+ {
+ httpRequest->SetHeaderValue(Http::CONTENT_MD5_HEADER, HashingUtils::Base64Encode(HashingUtils::CalculateMD5(*(GetBodyStream(request)))));
+ }
+ else
+ {
+ AWS_LOGSTREAM_WARN(AWS_CLIENT_LOG_TAG, "Checksum algorithm: " << checksumAlgorithmName << " is not supported by SDK.");
+ }
+ }
+
+ // Response checksums
+ if (request.ShouldValidateResponseChecksum())
+ {
+ for (const Aws::String& responseChecksumAlgorithmName : request.GetResponseChecksumAlgorithmNames())
+ {
+ checksumAlgorithmName = Aws::Utils::StringUtils::ToLower(responseChecksumAlgorithmName.c_str());
+
+ if (checksumAlgorithmName == "crc32c")
+ {
+ std::shared_ptr<Aws::Utils::Crypto::CRC32C> crc32c = Aws::MakeShared<Aws::Utils::Crypto::CRC32C>(AWS_CLIENT_LOG_TAG);
+ httpRequest->AddResponseValidationHash("crc32c", crc32c);
+ }
+ else if (checksumAlgorithmName == "crc32")
+ {
+ std::shared_ptr<Aws::Utils::Crypto::CRC32> crc32 = Aws::MakeShared<Aws::Utils::Crypto::CRC32>(AWS_CLIENT_LOG_TAG);
+ httpRequest->AddResponseValidationHash("crc", crc32);
+ }
+ else if (checksumAlgorithmName == "sha1")
+ {
+ std::shared_ptr<Aws::Utils::Crypto::Sha1> sha1 = Aws::MakeShared<Aws::Utils::Crypto::Sha1>(AWS_CLIENT_LOG_TAG);
+ httpRequest->AddResponseValidationHash("sha1", sha1);
+ }
+ else if (checksumAlgorithmName == "sha256")
+ {
+ std::shared_ptr<Aws::Utils::Crypto::Sha256> sha256 = Aws::MakeShared<Aws::Utils::Crypto::Sha256>(AWS_CLIENT_LOG_TAG);
+ httpRequest->AddResponseValidationHash("sha256", sha256);
+ }
+ else
+ {
+ AWS_LOGSTREAM_WARN(AWS_CLIENT_LOG_TAG, "Checksum algorithm: " << checksumAlgorithmName << " is not supported in validating response body yet.");
+ }
+ }
+ }
+}
+
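AddChecksumToRequest resolves the requested algorithm to either an x-amz-checksum-<algorithm> header computed up front (non-streaming bodies) or a request hash that the signer materializes later (streaming bodies), with md5 still going into the classic Content-MD5 header. The header-name mapping restated as a helper, hashing elided:

#include <string>

std::string ChecksumHeaderName(const std::string& algorithm)
{
    // md5 keeps its legacy header; every other algorithm uses the x-amz-checksum-* family.
    return algorithm == "md5" ? std::string("Content-MD5")
                              : "x-amz-checksum-" + algorithm;
}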
void AWSClient::AddContentBodyToRequest(const std::shared_ptr<Aws::Http::HttpRequest>& httpRequest,
const std::shared_ptr<Aws::IOStream>& body, bool needsContentMd5, bool isChunked) const
{
@@ -610,7 +819,7 @@ void AWSClient::AddContentBodyToRequest(const std::shared_ptr<Aws::Http::HttpReq
//If there is no body, we have a content length of 0
//note: we also used to remove content-type, but S3 actually needs content-type on InitiateMultipartUpload and it isn't
- //forbiden by the spec. If we start getting weird errors related to this, make sure it isn't caused by this removal.
+ //forbidden by the spec. If we start getting weird errors related to this, make sure it isn't caused by this removal.
if (!body)
{
AWS_LOGSTREAM_TRACE(AWS_CLIENT_LOG_TAG, "No content body, content-length headers");
@@ -682,11 +891,11 @@ Aws::String Aws::Client::GetAuthorizationHeader(const Aws::Http::HttpRequest& ht
return authHeader.substr(signaturePosition + strlen(Aws::Auth::SIGNATURE) + 1);
}
-void AWSClient::BuildHttpRequest(const Aws::AmazonWebServiceRequest& request,
- const std::shared_ptr<HttpRequest>& httpRequest) const
+void AWSClient::BuildHttpRequest(const Aws::AmazonWebServiceRequest& request, const std::shared_ptr<HttpRequest>& httpRequest) const
{
- //do headers first since the request likely will set content-length as it's own header.
+ //do headers first since the request likely will set content-length as its own header.
AddHeadersToRequest(httpRequest, request.GetHeaders());
+ AddHeadersToRequest(httpRequest, request.GetAdditionalCustomHeaders());
if (request.IsEventStreamRequest())
{
@@ -694,9 +903,31 @@ void AWSClient::BuildHttpRequest(const Aws::AmazonWebServiceRequest& request,
}
else
{
- AddContentBodyToRequest(httpRequest, request.GetBody(), request.ShouldComputeContentMd5(), request.IsStreaming() && request.IsChunked() && m_httpClient->SupportsChunkedTransferEncoding());
+ //Check if compression is required
+ CompressionAlgorithm selectedCompressionAlgorithm =
+ request.GetSelectedCompressionAlgorithm(m_requestCompressionConfig);
+ if (Aws::Client::CompressionAlgorithm::NONE != selectedCompressionAlgorithm) {
+ Aws::Client::RequestCompression rc;
+ auto compressOutcome = rc.compress(request.GetBody(), selectedCompressionAlgorithm);
+
+ if (compressOutcome.IsSuccess()) {
+ Aws::String compressionAlgorithmId = Aws::Client::GetCompressionAlgorithmId(selectedCompressionAlgorithm);
+ AppendHeaderValueToRequest(httpRequest, CONTENT_ENCODING_HEADER, compressionAlgorithmId);
+ AddContentBodyToRequest(
+ httpRequest, compressOutcome.GetResult(),
+ request.ShouldComputeContentMd5(),
+ request.IsStreaming() && request.IsChunked() &&
+ m_httpClient->SupportsChunkedTransferEncoding());
+ } else {
+ AWS_LOGSTREAM_ERROR(AWS_CLIENT_LOG_TAG, "Failed to compress request, submitting uncompressed");
+ AddContentBodyToRequest(httpRequest, request.GetBody(), request.ShouldComputeContentMd5(), request.IsStreaming() && request.IsChunked() && m_httpClient->SupportsChunkedTransferEncoding());
+ }
+ } else {
+ AddContentBodyToRequest(httpRequest, request.GetBody(), request.ShouldComputeContentMd5(), request.IsStreaming() && request.IsChunked() && m_httpClient->SupportsChunkedTransferEncoding());
+ }
}
+ AddChecksumToRequest(httpRequest, request);
// Pass along handlers for processing data sent/received in bytes
httpRequest->SetDataReceivedEventHandler(request.GetDataReceivedEventHandler());
httpRequest->SetDataSentEventHandler(request.GetDataSentEventHandler());
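The compression branch added to BuildHttpRequest follows a try-then-fall-back shape: compress the body when an algorithm was negotiated, append its id to Content-Encoding on success, and submit the original body (with an error log) when compression fails. A sketch of that control flow under the assumption of a generic compress function returning an optional result; compressBody is hypothetical and stands in for RequestCompression::compress:

#include <optional>
#include <string>

// Hypothetical stand-in for the SDK's compression step.
std::optional<std::string> compressBody(const std::string& body);

struct OutgoingBody
{
    std::string body;
    std::string contentEncoding;   // empty when the body is sent uncompressed
};

OutgoingBody PrepareBody(const std::string& body, bool compressionSelected)
{
    if (compressionSelected)
    {
        if (auto compressed = compressBody(body))
        {
            return { *compressed, "gzip" };    // assumed algorithm id, for illustration
        }
        // compression failed: fall through and submit the uncompressed body
    }
    return { body, "" };
}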
@@ -710,389 +941,132 @@ void AWSClient::AddCommonHeaders(HttpRequest& httpRequest) const
httpRequest.SetUserAgent(m_userAgent);
}
-Aws::String AWSClient::GeneratePresignedUrl(URI& uri, HttpMethod method, long long expirationInSeconds)
+Aws::String AWSClient::GeneratePresignedUrl(const URI& uri, HttpMethod method, long long expirationInSeconds)
{
- std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*request, expirationInSeconds))
- {
- return request->GetURIString();
- }
-
- return {};
-}
-
-Aws::String AWSClient::GeneratePresignedUrl(URI& uri, HttpMethod method, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
-{
- std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
- for (const auto& it: customizedHeaders)
- {
- request->SetHeaderValue(it.first.c_str(), it.second);
- }
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*request, expirationInSeconds))
- {
- return request->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(URI& uri, HttpMethod method, const char* region, long long expirationInSeconds) const
+Aws::String AWSClient::GeneratePresignedUrl(const URI& uri, HttpMethod method, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
{
- std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*request, region, expirationInSeconds))
- {
- return request->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, customizedHeaders, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(URI& uri, HttpMethod method, const char* region, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
+Aws::String AWSClient::GeneratePresignedUrl(const URI& uri, HttpMethod method, const char* region, long long expirationInSeconds) const
{
- std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
- for (const auto& it: customizedHeaders)
- {
- request->SetHeaderValue(it.first.c_str(), it.second);
- }
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*request, region, expirationInSeconds))
- {
- return request->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, region, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, long long expirationInSeconds) const
+Aws::String AWSClient::GeneratePresignedUrl(const URI& uri, HttpMethod method, const char* region, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
{
- std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*request, region, serviceName, expirationInSeconds))
- {
- return request->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, region, customizedHeaders, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, long long expirationInSeconds) const
{
- std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
- for (const auto& it: customizedHeaders)
- {
- request->SetHeaderValue(it.first.c_str(), it.second);
- }
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*request, region, serviceName, expirationInSeconds))
- {
- return request->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, region, serviceName, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region,
- const Aws::Http::QueryStringParameterCollection& extraParams, long long expirationInSeconds) const
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
{
- std::shared_ptr<HttpRequest> httpRequest =
- ConvertToRequestForPresigning(request, uri, method, extraParams);
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*httpRequest, region, expirationInSeconds))
- {
- return httpRequest->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, region, serviceName, customizedHeaders, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName,
-const Aws::Http::QueryStringParameterCollection& extraParams, long long expirationInSeconds) const
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const char* signerName, long long expirationInSeconds) const
{
- std::shared_ptr<HttpRequest> httpRequest =
- ConvertToRequestForPresigning(request, uri, method, extraParams);
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*httpRequest, region, serviceName, expirationInSeconds))
- {
- return httpRequest->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, region, serviceName, signerName, expirationInSeconds);
}
-Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri, Aws::Http::HttpMethod method,
- const Aws::Http::QueryStringParameterCollection& extraParams, long long expirationInSeconds) const
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName, const char* signerName, const Aws::Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
{
- std::shared_ptr<HttpRequest> httpRequest =
- ConvertToRequestForPresigning(request, uri, method, extraParams);
- auto signer = GetSignerByName(Aws::Auth::SIGV4_SIGNER);
- if (signer->PresignRequest(*httpRequest, expirationInSeconds))
- {
- return httpRequest->GetURIString();
- }
-
- return {};
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(uri, method, region, serviceName, signerName, customizedHeaders, expirationInSeconds);
}
-std::shared_ptr<Aws::Http::HttpRequest> AWSClient::ConvertToRequestForPresigning(const Aws::AmazonWebServiceRequest& request, Aws::Http::URI& uri,
- Aws::Http::HttpMethod method, const Aws::Http::QueryStringParameterCollection& extraParams) const
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::Endpoint::AWSEndpoint& endpoint,
+ Aws::Http::HttpMethod method /* = Http::HttpMethod::HTTP_POST */,
+ const Aws::Http::HeaderValueCollection& customizedHeaders /* = {} */,
+ uint64_t expirationInSeconds /* = 0 */,
+ const char* signerName /* = Aws::Auth::SIGV4_SIGNER */,
+ const char* signerRegionOverride /* = nullptr */,
+ const char* signerServiceNameOverride /* = nullptr */)
{
- request.PutToPresignedUrl(uri);
- std::shared_ptr<HttpRequest> httpRequest = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
-
- for (auto& param : extraParams)
- {
- httpRequest->AddQueryStringParameter(param.first.c_str(), param.second);
- }
-
- return httpRequest;
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(endpoint, method, customizedHeaders, expirationInSeconds, signerName, signerRegionOverride, signerServiceNameOverride);
}
-std::shared_ptr<Aws::Http::HttpResponse> AWSClient::MakeHttpRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region,
+ const Aws::Http::QueryStringParameterCollection& extraParams, long long expirationInSeconds) const
{
- return m_httpClient->MakeRequest(request, m_readRateLimiter.get(), m_writeRateLimiter.get());
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(request, uri, method, region, extraParams, expirationInSeconds);
}
-
-////////////////////////////////////////////////////////////////////////////
-AWSJsonClient::AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
- BASECLASS(configuration, signer, errorMarshaller)
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method, const char* region, const char* serviceName,
+ const Aws::Http::QueryStringParameterCollection& extraParams, long long expirationInSeconds) const
{
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(request, uri, method, region, serviceName, extraParams, expirationInSeconds);
}
-AWSJsonClient::AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
- BASECLASS(configuration, signerProvider, errorMarshaller)
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* region,
+ const char* serviceName,
+ const char* signerName,
+ const Aws::Http::QueryStringParameterCollection& extraParams,
+ long long expirationInSeconds) const
{
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(request, uri, method, region, serviceName, signerName, extraParams, expirationInSeconds);
}
-
-JsonOutcome AWSJsonClient::MakeRequest(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method,
- const char* signerName,
- const char* signerRegionOverride,
- const char* signerServiceNameOverride) const
+Aws::String AWSClient::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request, const Aws::Http::URI& uri, Aws::Http::HttpMethod method,
+ const Aws::Http::QueryStringParameterCollection& extraParams, long long expirationInSeconds) const
{
- HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride));
- if (!httpOutcome.IsSuccess())
- {
- return JsonOutcome(std::move(httpOutcome));
- }
-
- if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
- //this is stupid, but gcc doesn't pick up the covariant on the dereference so we have to give it a little hint.
- return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(httpOutcome.GetResult()->GetResponseBody()),
- httpOutcome.GetResult()->GetHeaders(),
- httpOutcome.GetResult()->GetResponseCode()));
-
- else
- return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(), httpOutcome.GetResult()->GetHeaders()));
+ return AWSUrlPresigner(*this).GeneratePresignedUrl(request, uri, method, extraParams, expirationInSeconds);
}
-JsonOutcome AWSJsonClient::MakeRequest(const Aws::Http::URI& uri,
- Http::HttpMethod method,
- const char* signerName,
- const char* requestName,
- const char* signerRegionOverride,
- const char* signerServiceNameOverride) const
-{
- HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, method, signerName, requestName, signerRegionOverride, signerServiceNameOverride));
- if (!httpOutcome.IsSuccess())
- {
- return JsonOutcome(std::move(httpOutcome));
- }
-
- if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
- {
- JsonValue jsonValue(httpOutcome.GetResult()->GetResponseBody());
- if (!jsonValue.WasParseSuccessful())
- {
- return JsonOutcome(AWSError<CoreErrors>(CoreErrors::UNKNOWN, "Json Parser Error", jsonValue.GetErrorMessage(), false));
- }
-
- //this is stupid, but gcc doesn't pick up the covariant on the dereference so we have to give it a little hint.
- return JsonOutcome(AmazonWebServiceResult<JsonValue>(std::move(jsonValue),
- httpOutcome.GetResult()->GetHeaders(),
- httpOutcome.GetResult()->GetResponseCode()));
+std::shared_ptr<Aws::IOStream> AWSClient::GetBodyStream(const Aws::AmazonWebServiceRequest& request) const {
+ if (request.GetBody() != nullptr) {
+ return request.GetBody();
}
-
- return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(), httpOutcome.GetResult()->GetHeaders()));
+ // Return an empty string stream for no body
+ return Aws::MakeShared<Aws::StringStream>(AWS_CLIENT_LOG_TAG, "");
}
-JsonOutcome AWSJsonClient::MakeEventStreamRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const
+std::shared_ptr<Aws::Http::HttpResponse> AWSClient::MakeHttpRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const
{
- // request is assumed to be signed
- std::shared_ptr<HttpResponse> httpResponse = MakeHttpRequest(request);
-
- if (DoesResponseGenerateError(httpResponse))
- {
- AWS_LOGSTREAM_DEBUG(AWS_CLIENT_LOG_TAG, "Request returned error. Attempting to generate appropriate error codes from response");
- auto error = BuildAWSError(httpResponse);
- return JsonOutcome(std::move(error));
- }
-
- AWS_LOGSTREAM_DEBUG(AWS_CLIENT_LOG_TAG, "Request returned successful response.");
-
- HttpResponseOutcome httpOutcome(std::move(httpResponse));
-
- if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
- {
- JsonValue jsonValue(httpOutcome.GetResult()->GetResponseBody());
- if (!jsonValue.WasParseSuccessful())
- {
- return JsonOutcome(AWSError<CoreErrors>(CoreErrors::UNKNOWN, "Json Parser Error", jsonValue.GetErrorMessage(), false));
- }
-
- //this is stupid, but gcc doesn't pick up the covariant on the dereference so we have to give it a little hint.
- return JsonOutcome(AmazonWebServiceResult<JsonValue>(std::move(jsonValue),
- httpOutcome.GetResult()->GetHeaders(),
- httpOutcome.GetResult()->GetResponseCode()));
- }
-
- return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(), httpOutcome.GetResult()->GetHeaders()));
+ return m_httpClient->MakeRequest(request, m_readRateLimiter.get(), m_writeRateLimiter.get());
}
-AWSError<CoreErrors> AWSJsonClient::BuildAWSError(
- const std::shared_ptr<Aws::Http::HttpResponse>& httpResponse) const
+void AWSClient::AppendRecursionDetectionHeader(std::shared_ptr<Aws::Http::HttpRequest> ioRequest)
{
- AWSError<CoreErrors> error;
- if (httpResponse->HasClientError())
- {
- bool retryable = httpResponse->GetClientErrorType() == CoreErrors::NETWORK_CONNECTION ? true : false;
- error = AWSError<CoreErrors>(httpResponse->GetClientErrorType(), "", httpResponse->GetClientErrorMessage(), retryable);
+ if(!ioRequest || ioRequest->HasHeader(Aws::Http::X_AMZN_TRACE_ID_HEADER)) {
+ return;
}
- else if (!httpResponse->GetResponseBody() || httpResponse->GetResponseBody().tellp() < 1)
- {
- auto responseCode = httpResponse->GetResponseCode();
- auto errorCode = GuessBodylessErrorType(responseCode);
-
- Aws::StringStream ss;
- ss << "No response body.";
- error = AWSError<CoreErrors>(errorCode, "", ss.str(),
- IsRetryableHttpResponseCode(responseCode));
- }
- else
- {
- assert(httpResponse->GetResponseCode() != HttpResponseCode::OK);
- error = GetErrorMarshaller()->Marshall(*httpResponse);
+ Aws::String awsLambdaFunctionName = Aws::Environment::GetEnv(AWS_LAMBDA_FUNCTION_NAME);
+ if(awsLambdaFunctionName.empty()) {
+ return;
}
-
- error.SetResponseHeaders(httpResponse->GetHeaders());
- error.SetResponseCode(httpResponse->GetResponseCode());
- error.SetRemoteHostIpAddress(httpResponse->GetOriginatingRequest().GetResolvedRemoteHost());
- AWS_LOGSTREAM_ERROR(AWS_CLIENT_LOG_TAG, error);
- return error;
-}
-
-/////////////////////////////////////////////////////////////////////////////////////////
-AWSXMLClient::AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
- BASECLASS(configuration, signer, errorMarshaller)
-{
-}
-
-AWSXMLClient::AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
- const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
- const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
- BASECLASS(configuration, signerProvider, errorMarshaller)
-{
-}
-
-XmlOutcome AWSXMLClient::MakeRequest(const Aws::Http::URI& uri,
- const Aws::AmazonWebServiceRequest& request,
- Http::HttpMethod method,
- const char* signerName,
- const char* signerRegionOverride,
- const char* signerServiceNameOverride) const
-{
- HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride));
- if (!httpOutcome.IsSuccess())
- {
- return XmlOutcome(std::move(httpOutcome));
+ Aws::String xAmznTraceIdVal = Aws::Environment::GetEnv(X_AMZN_TRACE_ID);
+ if(xAmznTraceIdVal.empty()) {
+ return;
}
- if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
+ // Escape all non-printable ASCII characters by percent encoding
+ Aws::OStringStream xAmznTraceIdValEncodedStr;
+ for(const char ch : xAmznTraceIdVal)
{
- XmlDocument xmlDoc = XmlDocument::CreateFromXmlStream(httpOutcome.GetResult()->GetResponseBody());
-
- if (!xmlDoc.WasParseSuccessful())
+ if (ch >= 0x20 && ch <= 0x7e) // ascii chars [32-126] or [' ' to '~'] are not escaped
{
- AWS_LOGSTREAM_ERROR(AWS_CLIENT_LOG_TAG, "Xml parsing for error failed with message " << xmlDoc.GetErrorMessage().c_str());
- return AWSError<CoreErrors>(CoreErrors::UNKNOWN, "Xml Parse Error", xmlDoc.GetErrorMessage(), false);
+ xAmznTraceIdValEncodedStr << ch;
}
-
- return XmlOutcome(AmazonWebServiceResult<XmlDocument>(std::move(xmlDoc),
- httpOutcome.GetResult()->GetHeaders(), httpOutcome.GetResult()->GetResponseCode()));
- }
-
- return XmlOutcome(AmazonWebServiceResult<XmlDocument>(XmlDocument(), httpOutcome.GetResult()->GetHeaders()));
-}
-
-XmlOutcome AWSXMLClient::MakeRequest(const Aws::Http::URI& uri,
- Http::HttpMethod method,
- const char* signerName,
- const char* requestName,
- const char* signerRegionOverride,
- const char* signerServiceNameOverride) const
-{
- HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, method, signerName, requestName, signerRegionOverride, signerServiceNameOverride));
- if (!httpOutcome.IsSuccess())
- {
- return XmlOutcome(std::move(httpOutcome));
- }
-
- if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
- {
- return XmlOutcome(AmazonWebServiceResult<XmlDocument>(
- XmlDocument::CreateFromXmlStream(httpOutcome.GetResult()->GetResponseBody()),
- httpOutcome.GetResult()->GetHeaders(), httpOutcome.GetResult()->GetResponseCode()));
- }
-
- return XmlOutcome(AmazonWebServiceResult<XmlDocument>(XmlDocument(), httpOutcome.GetResult()->GetHeaders()));
-}
-
-AWSError<CoreErrors> AWSXMLClient::BuildAWSError(const std::shared_ptr<Http::HttpResponse>& httpResponse) const
-{
- AWSError<CoreErrors> error;
- if (httpResponse->HasClientError())
- {
- bool retryable = httpResponse->GetClientErrorType() == CoreErrors::NETWORK_CONNECTION ? true : false;
- error = AWSError<CoreErrors>(httpResponse->GetClientErrorType(), "", httpResponse->GetClientErrorMessage(), retryable);
- }
- else if (!httpResponse->GetResponseBody() || httpResponse->GetResponseBody().tellp() < 1)
- {
- auto responseCode = httpResponse->GetResponseCode();
- auto errorCode = GuessBodylessErrorType(responseCode);
-
- Aws::StringStream ss;
- ss << "No response body.";
- error = AWSError<CoreErrors>(errorCode, "", ss.str(), IsRetryableHttpResponseCode(responseCode));
- }
- else
- {
- assert(httpResponse->GetResponseCode() != HttpResponseCode::OK);
-
- // When trying to build an AWS Error from a response which is an FStream, we need to rewind the
- // file pointer back to the beginning in order to correctly read the input using the XML string iterator
- if ((httpResponse->GetResponseBody().tellp() > 0)
- && (httpResponse->GetResponseBody().tellg() > 0))
+ else
{
- httpResponse->GetResponseBody().seekg(0);
+ // A percent-encoded octet is encoded as a character triplet
+ xAmznTraceIdValEncodedStr << '%' // consisting of the percent character "%"
+ << std::hex << std::setfill('0') << std::setw(2) << std::uppercase
+ << (size_t) ch //followed by the two hexadecimal digits representing that octet's numeric value
+ << std::dec << std::setfill(' ') << std::setw(0) << std::nouppercase;
}
-
- error = GetErrorMarshaller()->Marshall(*httpResponse);
}
+ xAmznTraceIdVal = xAmznTraceIdValEncodedStr.str();
- error.SetResponseHeaders(httpResponse->GetHeaders());
- error.SetResponseCode(httpResponse->GetResponseCode());
- error.SetRemoteHostIpAddress(httpResponse->GetOriginatingRequest().GetResolvedRemoteHost());
- AWS_LOGSTREAM_ERROR(AWS_CLIENT_LOG_TAG, error);
- return error;
+ ioRequest->SetHeaderValue(Aws::Http::X_AMZN_TRACE_ID_HEADER, xAmznTraceIdVal);
}
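
The new AppendRecursionDetectionHeader above forwards the X-Amzn-Trace-Id value from the Lambda environment and percent-encodes every byte outside printable ASCII. The following standalone sketch (editorial illustration, not part of the patch) reproduces that encoding rule with the standard library only; the function name EncodeTraceId and the sample input are invented for the example.

#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>

// Mirror of the escaping loop above: printable ASCII passes through,
// everything else becomes an uppercase %XX triplet.
static std::string EncodeTraceId(const std::string& value)
{
    std::ostringstream out;
    for (const char ch : value)
    {
        if (ch >= 0x20 && ch <= 0x7e)
        {
            out << ch;
        }
        else
        {
            out << '%' << std::hex << std::uppercase << std::setw(2) << std::setfill('0')
                << (static_cast<unsigned>(ch) & 0xFF)
                << std::dec << std::nouppercase << std::setfill(' ');
        }
    }
    return out.str();
}

int main()
{
    // The embedded newline is escaped as %0A; the rest is left untouched.
    std::cout << EncodeTraceId("Root=1-abc;Parent=def\n") << std::endl;
}
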
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
index f5fa676f98..a905dddb5c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSErrorMarshaller.cpp
@@ -23,6 +23,7 @@ AWS_CORE_API extern const char MESSAGE_LOWER_CASE[] = "message";
AWS_CORE_API extern const char MESSAGE_CAMEL_CASE[] = "Message";
AWS_CORE_API extern const char ERROR_TYPE_HEADER[] = "x-amzn-ErrorType";
AWS_CORE_API extern const char REQUEST_ID_HEADER[] = "x-amzn-RequestId";
+AWS_CORE_API extern const char QUERY_ERROR_HEADER[] = "x-amzn-query-error";
AWS_CORE_API extern const char TYPE[] = "__type";
AWSError<CoreErrors> JsonErrorMarshaller::Marshall(const Aws::Http::HttpResponse& httpResponse) const
@@ -50,6 +51,24 @@ AWSError<CoreErrors> JsonErrorMarshaller::Marshall(const Aws::Http::HttpResponse
error = FindErrorByHttpResponseCode(httpResponse.GetResponseCode());
error.SetMessage(message);
}
+
+ if (httpResponse.HasHeader(QUERY_ERROR_HEADER))
+ {
+ auto errorCodeString = httpResponse.GetHeader(QUERY_ERROR_HEADER);
+ auto locationOfSemicolon = errorCodeString.find_first_of(';');
+ Aws::String errorCode;
+
+ if (locationOfSemicolon != Aws::String::npos)
+ {
+ errorCode = errorCodeString.substr(0, locationOfSemicolon);
+ }
+ else
+ {
+ errorCode = errorCodeString;
+ }
+
+ error.SetExceptionName(errorCode);
+ }
}
else
{
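
The hunk above teaches JsonErrorMarshaller to read the x-amzn-query-error header, whose value is expected to carry an error code and an error type separated by a semicolon, keeping only the code as the exception name. A minimal standalone sketch of that parsing rule follows (editorial illustration; the helper name and the sample header values are invented):

#include <iostream>
#include <string>

// Keep everything before the first ';', or the whole value if there is none.
static std::string ExtractQueryErrorCode(const std::string& headerValue)
{
    const auto semicolon = headerValue.find_first_of(';');
    return semicolon == std::string::npos ? headerValue : headerValue.substr(0, semicolon);
}

int main()
{
    std::cout << ExtractQueryErrorCode("AWS.SimpleQueueService.NonExistentQueue;Sender") << "\n";  // prints the code only
    std::cout << ExtractQueryErrorCode("ThrottlingException") << "\n";                             // no semicolon: value kept as is
}
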
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
new file mode 100644
index 0000000000..b3e19d9977
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSJsonClient.cpp
@@ -0,0 +1,212 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/client/AWSJsonClient.h>
+#include <aws/core/AmazonWebServiceRequest.h>
+#include <aws/core/auth/AWSAuthSignerProvider.h>
+#include <aws/core/client/AWSError.h>
+#include <aws/core/client/AWSErrorMarshaller.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/client/CoreErrors.h>
+#include <aws/core/client/RetryStrategy.h>
+#include <aws/core/http/HttpClient.h>
+#include <aws/core/http/HttpResponse.h>
+#include <aws/core/http/URI.h>
+#include <aws/core/utils/json/JsonSerializer.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/event/EventStream.h>
+#include <aws/core/utils/UUID.h>
+#include <aws/core/monitoring/MonitoringManager.h>
+
+#include <cassert>
+
+
+using namespace Aws;
+using namespace Aws::Client;
+using namespace Aws::Http;
+using namespace Aws::Utils;
+using namespace Aws::Utils::Json;
+
+static const char AWS_JSON_CLIENT_LOG_TAG[] = "AWSJsonClient";
+
+AWSJsonClient::AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
+ BASECLASS(configuration, signer, errorMarshaller)
+{
+}
+
+AWSJsonClient::AWSJsonClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
+ BASECLASS(configuration, signerProvider, errorMarshaller)
+{
+}
+
+JsonOutcome AWSJsonClient::MakeRequest(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method /* = Http::HttpMethod::HTTP_POST */,
+ const char* signerName /* = Aws::Auth::NULL_SIGNER */,
+ const char* signerRegionOverride /* = nullptr */,
+ const char* signerServiceNameOverride /* = nullptr */) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+ return MakeRequest(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride);
+}
+
+JsonOutcome AWSJsonClient::MakeRequest(const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method /* = Http::HttpMethod::HTTP_POST */,
+ const char* signerName /* = Aws::Auth::NULL_SIGNER */,
+ const char* signerRegionOverride /* = nullptr */,
+ const char* signerServiceNameOverride /* = nullptr */) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+ return MakeRequest(uri, method, signerName, signerRegionOverride, signerServiceNameOverride);
+}
+
+JsonOutcome AWSJsonClient::MakeRequest(const Aws::Http::URI& uri,
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method,
+ const char* signerName,
+ const char* signerRegionOverride,
+ const char* signerServiceNameOverride) const
+{
+ HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride));
+ if (!httpOutcome.IsSuccess())
+ {
+ return JsonOutcome(std::move(httpOutcome));
+ }
+
+ if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
+        // gcc does not deduce the covariant return type through the dereference, so give it an explicit hint.
+ return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(httpOutcome.GetResult()->GetResponseBody()),
+ httpOutcome.GetResult()->GetHeaders(),
+ httpOutcome.GetResult()->GetResponseCode()));
+
+ else
+ return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(), httpOutcome.GetResult()->GetHeaders()));
+}
+
+JsonOutcome AWSJsonClient::MakeRequest(const Aws::Http::URI& uri,
+ Http::HttpMethod method,
+ const char* signerName,
+ const char* requestName,
+ const char* signerRegionOverride,
+ const char* signerServiceNameOverride) const
+{
+ HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, method, signerName, requestName, signerRegionOverride, signerServiceNameOverride));
+ if (!httpOutcome.IsSuccess())
+ {
+ return JsonOutcome(std::move(httpOutcome));
+ }
+
+ if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
+ {
+ JsonValue jsonValue(httpOutcome.GetResult()->GetResponseBody());
+ if (!jsonValue.WasParseSuccessful())
+ {
+ return JsonOutcome(AWSError<CoreErrors>(CoreErrors::UNKNOWN, "Json Parser Error", jsonValue.GetErrorMessage(), false));
+ }
+
+        // gcc does not deduce the covariant return type through the dereference, so give it an explicit hint.
+ return JsonOutcome(AmazonWebServiceResult<JsonValue>(std::move(jsonValue),
+ httpOutcome.GetResult()->GetHeaders(),
+ httpOutcome.GetResult()->GetResponseCode()));
+ }
+
+ return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(), httpOutcome.GetResult()->GetHeaders()));
+}
+
+JsonOutcome AWSJsonClient::MakeEventStreamRequest(std::shared_ptr<Aws::Http::HttpRequest>& request) const
+{
+ // request is assumed to be signed
+ std::shared_ptr<HttpResponse> httpResponse = MakeHttpRequest(request);
+
+ if (DoesResponseGenerateError(httpResponse))
+ {
+ AWS_LOGSTREAM_DEBUG(AWS_JSON_CLIENT_LOG_TAG, "Request returned error. Attempting to generate appropriate error codes from response");
+ auto error = BuildAWSError(httpResponse);
+ return JsonOutcome(std::move(error));
+ }
+
+ AWS_LOGSTREAM_DEBUG(AWS_JSON_CLIENT_LOG_TAG, "Request returned successful response.");
+
+ HttpResponseOutcome httpOutcome(std::move(httpResponse));
+
+ if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
+ {
+ JsonValue jsonValue(httpOutcome.GetResult()->GetResponseBody());
+ if (!jsonValue.WasParseSuccessful())
+ {
+ return JsonOutcome(AWSError<CoreErrors>(CoreErrors::UNKNOWN, "Json Parser Error", jsonValue.GetErrorMessage(), false));
+ }
+
+        // gcc does not deduce the covariant return type through the dereference, so give it an explicit hint.
+ return JsonOutcome(AmazonWebServiceResult<JsonValue>(std::move(jsonValue),
+ httpOutcome.GetResult()->GetHeaders(),
+ httpOutcome.GetResult()->GetResponseCode()));
+ }
+
+ return JsonOutcome(AmazonWebServiceResult<JsonValue>(JsonValue(), httpOutcome.GetResult()->GetHeaders()));
+}
+
+AWSError<CoreErrors> AWSJsonClient::BuildAWSError(
+ const std::shared_ptr<Aws::Http::HttpResponse>& httpResponse) const
+{
+ AWSError<CoreErrors> error;
+ if (httpResponse->HasClientError())
+ {
+ bool retryable = httpResponse->GetClientErrorType() == CoreErrors::NETWORK_CONNECTION ? true : false;
+ error = AWSError<CoreErrors>(httpResponse->GetClientErrorType(), "", httpResponse->GetClientErrorMessage(), retryable);
+ }
+ else if (!httpResponse->GetResponseBody() || httpResponse->GetResponseBody().tellp() < 1)
+ {
+ auto responseCode = httpResponse->GetResponseCode();
+ auto errorCode = AWSClient::GuessBodylessErrorType(responseCode);
+
+ Aws::StringStream ss;
+ ss << "No response body.";
+ error = AWSError<CoreErrors>(errorCode, "", ss.str(),
+ IsRetryableHttpResponseCode(responseCode));
+ }
+ else
+ {
+ assert(httpResponse->GetResponseCode() != HttpResponseCode::OK);
+ error = GetErrorMarshaller()->Marshall(*httpResponse);
+ }
+
+ error.SetResponseHeaders(httpResponse->GetHeaders());
+ error.SetResponseCode(httpResponse->GetResponseCode());
+ error.SetRemoteHostIpAddress(httpResponse->GetOriginatingRequest().GetResolvedRemoteHost());
+ AWS_LOGSTREAM_ERROR(AWS_JSON_CLIENT_LOG_TAG, error);
+ return error;
+}
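
Both MakeRequest(endpoint, ...) overloads in the new AWSJsonClient.cpp derive the signer name, signing region and signing service from the endpoint's auth-scheme attributes, and a signing region set (when present) overrides a plain signing region because it is applied last. The standalone sketch below (editorial illustration, not SDK code; FakeAuthScheme and the sample values are invented) shows that precedence order in isolation:

#include <iostream>
#include <string>

// Stand-in for the endpoint auth-scheme attributes used above.
struct FakeAuthScheme
{
    std::string name;
    const std::string* signingRegion;
    const std::string* signingRegionSet;
    const std::string* signingName;
};

static void ApplyOverrides(const FakeAuthScheme& scheme,
                           const char*& signerName,
                           const char*& signerRegion,
                           const char*& signerService)
{
    signerName = scheme.name.c_str();                                               // the endpoint always wins the signer name
    if (scheme.signingRegion)    signerRegion  = scheme.signingRegion->c_str();
    if (scheme.signingRegionSet) signerRegion  = scheme.signingRegionSet->c_str();  // applied last, so it wins
    if (scheme.signingName)      signerService = scheme.signingName->c_str();
}

int main()
{
    const std::string region = "eu-west-1", regionSet = "*", service = "service-name";
    FakeAuthScheme scheme{"sigv4a", &region, &regionSet, &service};
    const char* signerName = "SignatureV4";
    const char* signerRegion = nullptr;
    const char* signerService = nullptr;
    ApplyOverrides(scheme, signerName, signerRegion, signerService);
    std::cout << signerName << " " << signerRegion << " " << signerService << "\n";  // sigv4a * service-name
}
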
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
new file mode 100644
index 0000000000..a0bc808838
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSUrlPresigner.cpp
@@ -0,0 +1,236 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/client/AWSUrlPresigner.h>
+#include <aws/core/client/AWSClient.h>
+#include <aws/core/http/HttpClientFactory.h>
+
+namespace Aws
+{
+namespace Client
+{
+
+using HttpRequest = Http::HttpRequest;
+using HttpMethod = Http::HttpMethod;
+using URI = Http::URI;
+
+
+AWSUrlPresigner::AWSUrlPresigner(const AWSClient& client)
+ : m_awsClient(client)
+{}
+
+
+Aws::Client::AWSAuthSigner* AWSUrlPresigner::GetSignerByName(const char* name) const
+{
+ return m_awsClient.GetSignerByName(name);
+}
+
+std::shared_ptr<Aws::Http::HttpRequest>
+ConvertToRequestForPresigning(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const Aws::Http::QueryStringParameterCollection& extraParams)
+{
+ Aws::Http::URI uriCopy = uri;
+ request.PutToPresignedUrl(uriCopy);
+ std::shared_ptr<HttpRequest> httpRequest = CreateHttpRequest(uriCopy, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
+
+ for (auto& param : extraParams)
+ {
+ httpRequest->AddQueryStringParameter(param.first.c_str(), param.second);
+ }
+
+ return httpRequest;
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const URI& uri,
+ HttpMethod method,
+ long long expirationInSeconds) const
+{
+ const char* regionOverride = nullptr;
+ const char* serviceNameOverride = nullptr;
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const URI& uri,
+ HttpMethod method,
+ const Aws::Http::HeaderValueCollection& customizedHeaders,
+ long long expirationInSeconds) const
+{
+ const char* regionOverride = nullptr;
+ const char* serviceNameOverride = nullptr;
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, customizedHeaders, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const URI& uri,
+ HttpMethod method,
+ const char* regionOverride,
+ long long expirationInSeconds) const
+{
+ const char* serviceNameOverride = nullptr;
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const URI& uri,
+ HttpMethod method,
+ const char* regionOverride,
+ const Aws::Http::HeaderValueCollection& customizedHeaders,
+ long long expirationInSeconds) const
+{
+ const char* serviceNameOverride = nullptr;
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, customizedHeaders, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* regionOverride,
+ const char* serviceNameOverride,
+ long long expirationInSeconds) const
+{
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* regionOverride,
+ const char* serviceNameOverride,
+ const Aws::Http::HeaderValueCollection& customizedHeaders,
+ long long expirationInSeconds) const
+{
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, customizedHeaders, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* regionOverride,
+ const char* serviceNameOverride,
+ const char* signerName,
+ long long expirationInSeconds) const
+{
+ const Aws::Http::HeaderValueCollection& customizedHeaders = {};
+ return GeneratePresignedUrl(uri, method, regionOverride, serviceNameOverride, signerName, customizedHeaders, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* region,
+ const char* serviceName,
+ const char* signerName,
+ const Aws::Http::HeaderValueCollection& customizedHeaders,
+ long long expirationInSeconds) const
+{
+    /* the actual implementation; the overloads above forward here */
+ if (!signerName) {
+ signerName = Aws::Auth::SIGV4_SIGNER;
+ }
+ std::shared_ptr<HttpRequest> request = CreateHttpRequest(uri, method, Aws::Utils::Stream::DefaultResponseStreamFactoryMethod);
+ for (const auto& it: customizedHeaders)
+ {
+ request->SetHeaderValue(it.first.c_str(), it.second);
+ }
+ auto signer = GetSignerByName(signerName);
+ if (signer->PresignRequest(*request, region, serviceName, expirationInSeconds))
+ {
+ return request->GetURIString();
+ }
+
+ return {};
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::Endpoint::AWSEndpoint& endpoint,
+ Aws::Http::HttpMethod method /* = Http::HttpMethod::HTTP_POST */,
+ const Aws::Http::HeaderValueCollection& customizedHeaders /* = {} */,
+ uint64_t expirationInSeconds /* = 0 */,
+ const char* signerName /* = Aws::Auth::SIGV4_SIGNER */,
+ const char* signerRegionOverride /* = nullptr */,
+ const char* signerServiceNameOverride /* = nullptr */) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+
+ return GeneratePresignedUrl(uri, method, signerRegionOverride, signerServiceNameOverride, signerName, customizedHeaders, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* regionOverride,
+ const Aws::Http::QueryStringParameterCollection& extraParams,
+ long long expirationInSeconds) const
+{
+ const char* serviceNameOverride = nullptr;
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+
+ return GeneratePresignedUrl(request, uri, method, regionOverride, serviceNameOverride, signerName, extraParams, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* regionOverride,
+ const char* serviceNameOverride,
+ const char* signerName,
+ const Aws::Http::QueryStringParameterCollection& extraParams,
+ long long expirationInSeconds) const
+{
+    /* the actual implementation; the overloads above forward here */
+ if (!signerName) {
+ signerName = Aws::Auth::SIGV4_SIGNER;
+ }
+ std::shared_ptr<HttpRequest> httpRequest =
+ ConvertToRequestForPresigning(request, uri, method, extraParams);
+ auto signer = GetSignerByName(signerName);
+ if (signer->PresignRequest(*httpRequest, regionOverride, serviceNameOverride, expirationInSeconds))
+ {
+ return httpRequest->GetURIString();
+ }
+
+ return {};
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const char* regionOverride,
+ const char* serviceNameOverride,
+ const Aws::Http::QueryStringParameterCollection& extraParams,
+ long long expirationInSeconds) const
+{
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+ return GeneratePresignedUrl(request, uri, method, regionOverride, serviceNameOverride, signerName, extraParams, expirationInSeconds);
+}
+
+Aws::String AWSUrlPresigner::GeneratePresignedUrl(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Http::URI& uri,
+ Aws::Http::HttpMethod method,
+ const Aws::Http::QueryStringParameterCollection& extraParams,
+ long long expirationInSeconds) const
+{
+ const char* regionOverride = nullptr;
+ const char* serviceNameOverride = nullptr;
+ const char* signerName = Aws::Auth::SIGV4_SIGNER;
+
+ return GeneratePresignedUrl(request, uri, method, regionOverride, serviceNameOverride, signerName, extraParams, expirationInSeconds);
+}
+
+} // namespace Client
+} // namespace Aws
\ No newline at end of file
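
All AWSUrlPresigner overloads above funnel into one implementation that builds an HTTP request, applies any custom headers, and asks the named signer (SigV4 by default) to presign it. For callers nothing changes: service clients still expose the usual presigning helpers. The hedged sketch below assumes the S3 client shipped with this same SDK distribution and uses placeholder bucket and key names; credentials and region come from the default provider chain.

#include <aws/core/Aws.h>
#include <aws/core/http/HttpTypes.h>
#include <aws/s3/S3Client.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client s3;  // default configuration, credentials and region resolution
        // 15-minute GET URL for an illustrative object; bucket and key are placeholders.
        const Aws::String url =
            s3.GeneratePresignedUrl("my-bucket", "my-key", Aws::Http::HttpMethod::HTTP_GET, 900);
        std::cout << url << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}
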
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
new file mode 100644
index 0000000000..129595b917
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AWSXmlClient.cpp
@@ -0,0 +1,180 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/client/AWSXmlClient.h>
+#include <aws/core/AmazonWebServiceRequest.h>
+#include <aws/core/auth/AWSAuthSignerProvider.h>
+#include <aws/core/client/AWSError.h>
+#include <aws/core/client/AWSErrorMarshaller.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/client/CoreErrors.h>
+#include <aws/core/client/RetryStrategy.h>
+#include <aws/core/http/HttpClient.h>
+#include <aws/core/http/HttpResponse.h>
+#include <aws/core/http/URI.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/event/EventStream.h>
+#include <aws/core/utils/UUID.h>
+
+using namespace Aws;
+using namespace Aws::Client;
+using namespace Aws::Http;
+using namespace Aws::Utils;
+using namespace Aws::Utils::Xml;
+
+static const char AWS_XML_CLIENT_LOG_TAG[] = "AWSXmlClient";
+
+AWSXMLClient::AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Client::AWSAuthSigner>& signer,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
+ BASECLASS(configuration, signer, errorMarshaller)
+{
+}
+
+AWSXMLClient::AWSXMLClient(const Aws::Client::ClientConfiguration& configuration,
+ const std::shared_ptr<Aws::Auth::AWSAuthSignerProvider>& signerProvider,
+ const std::shared_ptr<AWSErrorMarshaller>& errorMarshaller) :
+ BASECLASS(configuration, signerProvider, errorMarshaller)
+{
+}
+
+XmlOutcome AWSXMLClient::MakeRequest(const Aws::AmazonWebServiceRequest& request,
+ const Aws::Endpoint::AWSEndpoint& endpoint,
+ Http::HttpMethod method /* = Http::HttpMethod::HTTP_POST */,
+ const char* signerName /* = Aws::Auth::NULL_SIGNER */,
+ const char* signerRegionOverride /* = nullptr */,
+ const char* signerServiceNameOverride /* = nullptr */) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+ return MakeRequest(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride);
+}
+
+XmlOutcome AWSXMLClient::MakeRequest(const Aws::Endpoint::AWSEndpoint& endpoint,
+ const char* requestName /* = "" */,
+ Http::HttpMethod method /* = Http::HttpMethod::HTTP_POST */,
+ const char* signerName /* = Aws::Auth::NULL_SIGNER */,
+ const char* signerRegionOverride /* = nullptr */,
+ const char* signerServiceNameOverride /* = nullptr */) const
+{
+ const Aws::Http::URI& uri = endpoint.GetURI();
+ if (endpoint.GetAttributes()) {
+ signerName = endpoint.GetAttributes()->authScheme.GetName().c_str();
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegion()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegion()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningRegionSet()) {
+ signerRegionOverride = endpoint.GetAttributes()->authScheme.GetSigningRegionSet()->c_str();
+ }
+ if (endpoint.GetAttributes()->authScheme.GetSigningName()) {
+ signerServiceNameOverride = endpoint.GetAttributes()->authScheme.GetSigningName()->c_str();
+ }
+ }
+ return MakeRequest(uri, method, signerName, requestName, signerRegionOverride, signerServiceNameOverride);
+}
+
+XmlOutcome AWSXMLClient::MakeRequest(const Aws::Http::URI& uri,
+ const Aws::AmazonWebServiceRequest& request,
+ Http::HttpMethod method,
+ const char* signerName,
+ const char* signerRegionOverride,
+ const char* signerServiceNameOverride) const
+{
+ HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, request, method, signerName, signerRegionOverride, signerServiceNameOverride));
+ if (!httpOutcome.IsSuccess())
+ {
+ return XmlOutcome(std::move(httpOutcome));
+ }
+
+ if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
+ {
+ XmlDocument xmlDoc = XmlDocument::CreateFromXmlStream(httpOutcome.GetResult()->GetResponseBody());
+
+ if (!xmlDoc.WasParseSuccessful())
+ {
+ AWS_LOGSTREAM_ERROR(AWS_XML_CLIENT_LOG_TAG, "Xml parsing for error failed with message " << xmlDoc.GetErrorMessage().c_str());
+ return AWSError<CoreErrors>(CoreErrors::UNKNOWN, "Xml Parse Error", xmlDoc.GetErrorMessage(), false);
+ }
+
+ return XmlOutcome(AmazonWebServiceResult<XmlDocument>(std::move(xmlDoc),
+ httpOutcome.GetResult()->GetHeaders(), httpOutcome.GetResult()->GetResponseCode()));
+ }
+
+ return XmlOutcome(AmazonWebServiceResult<XmlDocument>(XmlDocument(), httpOutcome.GetResult()->GetHeaders()));
+}
+
+XmlOutcome AWSXMLClient::MakeRequest(const Aws::Http::URI& uri,
+ Http::HttpMethod method,
+ const char* signerName,
+ const char* requestName,
+ const char* signerRegionOverride,
+ const char* signerServiceNameOverride) const
+{
+ HttpResponseOutcome httpOutcome(BASECLASS::AttemptExhaustively(uri, method, signerName, requestName, signerRegionOverride, signerServiceNameOverride));
+ if (!httpOutcome.IsSuccess())
+ {
+ return XmlOutcome(std::move(httpOutcome));
+ }
+
+ if (httpOutcome.GetResult()->GetResponseBody().tellp() > 0)
+ {
+ return XmlOutcome(AmazonWebServiceResult<XmlDocument>(
+ XmlDocument::CreateFromXmlStream(httpOutcome.GetResult()->GetResponseBody()),
+ httpOutcome.GetResult()->GetHeaders(), httpOutcome.GetResult()->GetResponseCode()));
+ }
+
+ return XmlOutcome(AmazonWebServiceResult<XmlDocument>(XmlDocument(), httpOutcome.GetResult()->GetHeaders()));
+}
+
+AWSError<CoreErrors> AWSXMLClient::BuildAWSError(const std::shared_ptr<Http::HttpResponse>& httpResponse) const
+{
+ AWSError<CoreErrors> error;
+ if (httpResponse->HasClientError())
+ {
+ bool retryable = httpResponse->GetClientErrorType() == CoreErrors::NETWORK_CONNECTION ? true : false;
+ error = AWSError<CoreErrors>(httpResponse->GetClientErrorType(), "", httpResponse->GetClientErrorMessage(), retryable);
+ }
+ else if (!httpResponse->GetResponseBody() || httpResponse->GetResponseBody().tellp() < 1)
+ {
+ auto responseCode = httpResponse->GetResponseCode();
+ auto errorCode = AWSClient::GuessBodylessErrorType(responseCode);
+
+ Aws::StringStream ss;
+ ss << "No response body.";
+ error = AWSError<CoreErrors>(errorCode, "", ss.str(), IsRetryableHttpResponseCode(responseCode));
+ }
+ else
+ {
+ // When trying to build an AWS Error from a response which is an FStream, we need to rewind the
+ // file pointer back to the beginning in order to correctly read the input using the XML string iterator
+ if ((httpResponse->GetResponseBody().tellp() > 0)
+ && (httpResponse->GetResponseBody().tellg() > 0))
+ {
+ httpResponse->GetResponseBody().seekg(0);
+ }
+
+ error = GetErrorMarshaller()->Marshall(*httpResponse);
+ }
+
+ error.SetResponseHeaders(httpResponse->GetHeaders());
+ error.SetResponseCode(httpResponse->GetResponseCode());
+ error.SetRemoteHostIpAddress(httpResponse->GetOriginatingRequest().GetResolvedRemoteHost());
+ AWS_LOGSTREAM_ERROR(AWS_XML_CLIENT_LOG_TAG, error);
+ return error;
+}
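
AWSXMLClient::BuildAWSError above rewinds the response stream before handing it to the error marshaller, because an error body that has already been partially read would otherwise be parsed from the wrong position. The standalone sketch below (editorial illustration, with plain iostreams standing in for the SDK response stream) shows the same guard and rewind:

#include <iostream>
#include <iterator>
#include <sstream>
#include <string>

int main()
{
    std::stringstream body;
    body << "<Error> <Code>NoSuchKey</Code> </Error>";  // body as written by the HTTP layer

    std::string firstToken;
    body >> firstToken;                                 // simulate an earlier partial read

    if (body.tellp() > 0 && body.tellg() > 0)           // same guard as in the function above
    {
        body.seekg(0);                                  // rewind so the whole body can be re-parsed
    }

    const std::string full((std::istreambuf_iterator<char>(body)),
                           std::istreambuf_iterator<char>());
    std::cout << full << std::endl;                     // the complete error document is readable again
}
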
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
new file mode 100644
index 0000000000..0907b81137
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/AdaptiveRetryStrategy.cpp
@@ -0,0 +1,228 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/client/AdaptiveRetryStrategy.h>
+
+#include <aws/core/client/AWSError.h>
+#include <aws/core/client/CoreErrors.h>
+#include <aws/core/utils/memory/stl/AWSSet.h>
+
+#include <cmath>
+#include <thread>
+
+using namespace Aws::Utils::Threading;
+
+namespace Aws
+{
+ namespace Client
+ {
+ static const double MIN_FILL_RATE = 0.5;
+ static const double MIN_CAPACITY = 1;
+
+ static const double SMOOTH = 0.8;
+ static const double BETA = 0.7;
+ static const double SCALE_CONSTANT = 0.4;
+
+ // A static list containing all service exception names classified as throttled.
+ static const char* THROTTLING_EXCEPTIONS[] {
+ "Throttling", "ThrottlingException", "ThrottledException", "RequestThrottledException",
+ "TooManyRequestsException", "ProvisionedThroughputExceededException", "TransactionInProgressException",
+ "RequestLimitExceeded", "BandwidthLimitExceeded", "LimitExceededException", "RequestThrottled",
+ "SlowDown", "PriorRequestNotComplete", "EC2ThrottledException"};
+ static const size_t THROTTLING_EXCEPTIONS_SZ = sizeof(THROTTLING_EXCEPTIONS) / sizeof(THROTTLING_EXCEPTIONS[0]);
+
+
+ // C-tor for unit testing
+ RetryTokenBucket::RetryTokenBucket(double fillRate, double maxCapacity, double currentCapacity,
+ const Aws::Utils::DateTime& lastTimestamp, double measuredTxRate, double lastTxRateBucket,
+ size_t requestCount, bool enabled, double lastMaxRate, const Aws::Utils::DateTime& lastThrottleTime)
+ :
+ m_fillRate(fillRate), m_maxCapacity(maxCapacity), m_currentCapacity(currentCapacity),
+ m_lastTimestamp(lastTimestamp), m_measuredTxRate(measuredTxRate),
+ m_lastTxRateBucket(lastTxRateBucket), m_requestCount(requestCount), m_enabled(enabled),
+ m_lastMaxRate(lastMaxRate), m_lastThrottleTime(lastThrottleTime)
+ {}
+
+ bool RetryTokenBucket::Acquire(size_t amount, bool fastFail)
+ {
+ std::lock_guard<std::recursive_mutex> locker(m_mutex);
+ if (!m_enabled)
+ {
+ return true;
+ }
+ Refill();
+ bool notEnough = amount > m_currentCapacity;
+ if (notEnough && fastFail) {
+ return false;
+ }
+ // If all the tokens couldn't be acquired immediately, wait enough
+ // time to fill the remainder.
+ if (notEnough) {
+ std::chrono::duration<double> waitTime((amount - m_currentCapacity) / m_fillRate);
+ std::this_thread::sleep_for(waitTime);
+ Refill();
+ }
+ m_currentCapacity -= amount;
+ return true;
+ }
+
+ void RetryTokenBucket::Refill(const Aws::Utils::DateTime& now)
+ {
+ std::lock_guard<std::recursive_mutex> locker(m_mutex);
+
+ if (0 == m_lastTimestamp.Millis()) {
+ m_lastTimestamp = now;
+ return;
+ }
+
+ double fillAmount = (std::abs(now.Millis() - m_lastTimestamp.Millis()))/1000.0 * m_fillRate;
+ m_currentCapacity = (std::min)(m_maxCapacity, m_currentCapacity + fillAmount);
+ m_lastTimestamp = now;
+ }
+
+ void RetryTokenBucket::UpdateRate(double newRps, const Aws::Utils::DateTime& now)
+ {
+ std::lock_guard<std::recursive_mutex> locker(m_mutex);
+
+ Refill(now);
+ m_fillRate = (std::max)(newRps, MIN_FILL_RATE);
+ m_maxCapacity = (std::max)(newRps, MIN_CAPACITY);
+ m_currentCapacity = (std::min)(m_currentCapacity, m_maxCapacity);
+ }
+
+ void RetryTokenBucket::UpdateMeasuredRate(const Aws::Utils::DateTime& now)
+ {
+ std::lock_guard<std::recursive_mutex> locker(m_mutex);
+
+ double t = now.Millis() / 1000.0;
+ double timeBucket = floor(t * 2.0) / 2.0;
+ m_requestCount += 1;
+ if (timeBucket > m_lastTxRateBucket) {
+ double currentRate = m_requestCount / (timeBucket - m_lastTxRateBucket);
+ m_measuredTxRate = (currentRate * SMOOTH) + (m_measuredTxRate * (1 - SMOOTH));
+ m_requestCount = 0;
+ m_lastTxRateBucket = timeBucket;
+ }
+ }
+
+ void RetryTokenBucket::UpdateClientSendingRate(bool isThrottlingResponse, const Aws::Utils::DateTime& now)
+ {
+ std::lock_guard<std::recursive_mutex> locker(m_mutex);
+
+ UpdateMeasuredRate(now);
+
+ double calculatedRate = 0.0;
+ if (isThrottlingResponse)
+ {
+ double rateToUse = m_measuredTxRate;
+ if (m_enabled)
+ rateToUse = (std::min)(rateToUse, m_fillRate);
+
+ m_lastMaxRate = rateToUse;
+ m_lastThrottleTime = now;
+
+ calculatedRate = CUBICThrottle(rateToUse);
+ Enable();
+ }
+ else
+ {
+ double timeWindow = CalculateTimeWindow();
+ calculatedRate = CUBICSuccess(now, timeWindow);
+ }
+
+ double newRate = (std::min)(calculatedRate, 2.0 * m_measuredTxRate);
+ UpdateRate(newRate, now);
+ }
+
+ void RetryTokenBucket::Enable()
+ {
+ std::lock_guard<std::recursive_mutex> locker(m_mutex);
+ m_enabled = true;
+ }
+
+ double RetryTokenBucket::CalculateTimeWindow() const
+ {
+ return pow(((m_lastMaxRate * (1.0 - BETA)) / SCALE_CONSTANT), (1.0 / 3));
+ }
+
+ double RetryTokenBucket::CUBICSuccess(const Aws::Utils::DateTime& timestamp, const double timeWindow) const
+ {
+ double dt = (timestamp.Millis() - m_lastThrottleTime.Millis()) / 1000.0;
+ double calculatedRate = SCALE_CONSTANT * pow(dt - timeWindow, 3.0) + m_lastMaxRate;
+ return calculatedRate;
+ }
+
+ double RetryTokenBucket::CUBICThrottle(const double rateToUse) const
+ {
+ double calculatedRate = rateToUse * BETA;
+ return calculatedRate;
+ }
+
+
+ AdaptiveRetryStrategy::AdaptiveRetryStrategy(long maxAttempts) :
+ StandardRetryStrategy(maxAttempts)
+ {}
+
+ AdaptiveRetryStrategy::AdaptiveRetryStrategy(std::shared_ptr<RetryQuotaContainer> retryQuotaContainer, long maxAttempts) :
+ StandardRetryStrategy(retryQuotaContainer, maxAttempts)
+ {}
+
+ bool AdaptiveRetryStrategy::HasSendToken()
+ {
+ return m_retryTokenBucket.Acquire(1, m_fastFail);
+ }
+
+ void AdaptiveRetryStrategy::RequestBookkeeping(const HttpResponseOutcome& httpResponseOutcome)
+ {
+ if (httpResponseOutcome.IsSuccess())
+ {
+ m_retryQuotaContainer->ReleaseRetryQuota(Aws::Client::NO_RETRY_INCREMENT);
+ m_retryTokenBucket.UpdateClientSendingRate(false);
+ }
+ else
+ {
+ m_retryTokenBucket.UpdateClientSendingRate(IsThrottlingResponse(httpResponseOutcome));
+ }
+ }
+
+ void AdaptiveRetryStrategy::RequestBookkeeping(const HttpResponseOutcome& httpResponseOutcome, const AWSError<CoreErrors>& lastError)
+ {
+ if (httpResponseOutcome.IsSuccess())
+ {
+ m_retryQuotaContainer->ReleaseRetryQuota(lastError);
+ m_retryTokenBucket.UpdateClientSendingRate(false);
+ }
+ else
+ {
+ m_retryTokenBucket.UpdateClientSendingRate(IsThrottlingResponse(httpResponseOutcome));
+ }
+ }
+
+ bool AdaptiveRetryStrategy::IsThrottlingResponse(const HttpResponseOutcome& httpResponseOutcome)
+ {
+ if(httpResponseOutcome.IsSuccess())
+ return false;
+
+ const AWSError<CoreErrors>& error = httpResponseOutcome.GetError();
+ const Aws::Client::CoreErrors enumValue = error.GetErrorType();
+ switch(enumValue)
+ {
+ case Aws::Client::CoreErrors::THROTTLING:
+ case Aws::Client::CoreErrors::SLOW_DOWN:
+ return true;
+ default:
+ break;
+ }
+
+ if(std::find(THROTTLING_EXCEPTIONS,
+ THROTTLING_EXCEPTIONS + THROTTLING_EXCEPTIONS_SZ, error.GetExceptionName()) != THROTTLING_EXCEPTIONS + THROTTLING_EXCEPTIONS_SZ)
+ {
+ return true;
+ }
+
+ return false;
+ }
+ }
+}
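
AdaptiveRetryStrategy drives its token bucket with a CUBIC-style rate controller: a throttling response cuts the target send rate to BETA times the rate in use, and subsequent successes let it grow back cubically from the last maximum (the SDK additionally caps the result at twice the measured send rate). The numeric sketch below (editorial illustration with the same constants as above, no SDK types) prints how the rate drops and then recovers over time:

#include <cmath>
#include <iostream>

static const double BETA = 0.7;
static const double SCALE_CONSTANT = 0.4;

static double CubicThrottle(double rateToUse) { return rateToUse * BETA; }

static double TimeWindow(double lastMaxRate)
{
    return std::pow((lastMaxRate * (1.0 - BETA)) / SCALE_CONSTANT, 1.0 / 3.0);
}

static double CubicSuccess(double secondsSinceThrottle, double timeWindow, double lastMaxRate)
{
    return SCALE_CONSTANT * std::pow(secondsSinceThrottle - timeWindow, 3.0) + lastMaxRate;
}

int main()
{
    const double lastMaxRate = 10.0;  // requests per second before the throttle
    std::cout << "after throttle: " << CubicThrottle(lastMaxRate) << " rps\n";

    const double window = TimeWindow(lastMaxRate);
    for (double t = 0.0; t <= 8.0; t += 2.0)  // seconds elapsed since the throttle
    {
        std::cout << "t=" << t << "s -> " << CubicSuccess(t, window, lastMaxRate) << " rps\n";
    }
}
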
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
index e517379a77..647c6e3f49 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/ClientConfiguration.cpp
@@ -4,8 +4,10 @@
*/
#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/config/defaults/ClientConfigurationDefaults.h>
#include <aws/core/auth/AWSCredentialsProvider.h>
#include <aws/core/client/DefaultRetryStrategy.h>
+#include <aws/core/client/AdaptiveRetryStrategy.h>
#include <aws/core/platform/Environment.h>
#include <aws/core/platform/OSVersionInfo.h>
#include <aws/core/utils/memory/AWSMemory.h>
@@ -26,43 +28,217 @@ namespace Client
{
static const char* CLIENT_CONFIG_TAG = "ClientConfiguration";
+static const char* USE_REQUEST_COMPRESSION_ENV_VAR = "USE_REQUEST_COMPRESSION";
+static const char* USE_REQUEST_COMPRESSION_CONFIG_VAR = "use_request_compression";
+static const char* REQUEST_MIN_COMPRESSION_SIZE_BYTES_ENV_VAR = "REQUEST_MIN_COMPRESSION_SIZE_BYTES";
+static const char* REQUEST_MIN_COMPRESSION_SIZE_BYTES_CONFIG_VAR = "request_min_compression_size_bytes";
-AWS_CORE_API Aws::String ComputeUserAgentString()
+Aws::String ComputeUserAgentString()
{
Aws::StringStream ss;
- ss << "aws-sdk-cpp/" << Version::GetVersionString() << " " << Aws::OSVersionInfo::ComputeOSVersionString()
- << " " << Version::GetCompilerVersionString();
+ ss << "aws-sdk-cpp/" << Version::GetVersionString() << " "
+#if defined(AWS_USER_AGENT_CUSTOMIZATION)
+#define XSTR(V) STR(V)
+#define STR(V) #V
+ << XSTR(AWS_USER_AGENT_CUSTOMIZATION) << " "
+#undef STR
+#undef XSTR
+#endif
+ << Aws::OSVersionInfo::ComputeOSVersionString() << " "
+ << Version::GetCompilerVersionString();
return ss.str();
}
-ClientConfiguration::ClientConfiguration() :
- scheme(Aws::Http::Scheme::HTTPS),
- useDualStack(false),
- maxConnections(25),
- httpRequestTimeoutMs(0),
- requestTimeoutMs(3000),
- connectTimeoutMs(1000),
- enableTcpKeepAlive(true),
- tcpKeepAliveIntervalMs(30000),
- lowSpeedLimit(1),
- proxyScheme(Aws::Http::Scheme::HTTP),
- proxyPort(0),
- executor(Aws::MakeShared<Aws::Utils::Threading::DefaultExecutor>(CLIENT_CONFIG_TAG)),
- verifySSL(true),
- writeRateLimiter(nullptr),
- readRateLimiter(nullptr),
- httpLibOverride(Aws::Http::TransferLibType::DEFAULT_CLIENT),
- followRedirects(FollowRedirectsPolicy::DEFAULT),
- disableExpectHeader(false),
- enableClockSkewAdjustment(true),
- enableHostPrefixInjection(true),
- enableEndpointDiscovery(false),
- profileName(Aws::Auth::GetConfigProfileName())
+void setLegacyClientConfigurationParameters(ClientConfiguration& clientConfig)
{
- AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG, "ClientConfiguration will use SDK Auto Resolved profile: [" << profileName << "] if not specified by users.");
+ clientConfig.scheme = Aws::Http::Scheme::HTTPS;
+ clientConfig.useDualStack = false;
+ clientConfig.useFIPS = false;
+ clientConfig.maxConnections = 25;
+ clientConfig.httpRequestTimeoutMs = 0;
+ clientConfig.requestTimeoutMs = 3000;
+ clientConfig.connectTimeoutMs = 1000;
+ clientConfig.enableTcpKeepAlive = true;
+ clientConfig.tcpKeepAliveIntervalMs = 30000;
+ clientConfig.lowSpeedLimit = 1;
+ clientConfig.proxyScheme = Aws::Http::Scheme::HTTP;
+ clientConfig.proxyPort = 0;
+ clientConfig.executor = Aws::MakeShared<Aws::Utils::Threading::DefaultExecutor>(CLIENT_CONFIG_TAG);
+ clientConfig.verifySSL = true;
+ clientConfig.writeRateLimiter = nullptr;
+ clientConfig.readRateLimiter = nullptr;
+ clientConfig.httpLibOverride = Aws::Http::TransferLibType::DEFAULT_CLIENT;
+ clientConfig.followRedirects = FollowRedirectsPolicy::DEFAULT;
+ clientConfig.disableExpectHeader = false;
+ clientConfig.enableClockSkewAdjustment = true;
+ clientConfig.enableHostPrefixInjection = true;
+ clientConfig.profileName = Aws::Auth::GetConfigProfileName();
- // Initialize Retry Strategy
- int maxAttempts;
+ Aws::String useCompressionConfig = clientConfig.LoadConfigFromEnvOrProfile(
+ USE_REQUEST_COMPRESSION_ENV_VAR,
+ Aws::Auth::GetConfigProfileName(),
+ USE_REQUEST_COMPRESSION_CONFIG_VAR,
+ {"ENABLE", "DISABLE", "enable", "disable"},
+ "ENABLE"
+ );
+
+ if (Aws::Utils::StringUtils::ToLower(useCompressionConfig.c_str()) == "disable") {
+ clientConfig.requestCompressionConfig.useRequestCompression = Aws::Client::UseRequestCompression::DISABLE;
+ AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG, "Request Compression disabled");
+ } else {
+      // Default to enabled for forward compatibility, in case a new config value is added but the SDK is not yet updated.
+ clientConfig.requestCompressionConfig.useRequestCompression = Aws::Client::UseRequestCompression::ENABLE;
+ AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG, "Request Compression enabled");
+ }
+
+ // Getting min request compression length
+ Aws::String minRequestCompressionString = Aws::Environment::GetEnv(REQUEST_MIN_COMPRESSION_SIZE_BYTES_ENV_VAR);
+ if (minRequestCompressionString.empty())
+ {
+ minRequestCompressionString = Aws::Config::GetCachedConfigValue(REQUEST_MIN_COMPRESSION_SIZE_BYTES_CONFIG_VAR);
+ }
+ if (!minRequestCompressionString.empty()) {
+ clientConfig.requestCompressionConfig.requestMinCompressionSizeBytes = static_cast<int>(Aws::Utils::StringUtils::ConvertToInt32(minRequestCompressionString.c_str()));
+ if (clientConfig.requestCompressionConfig.requestMinCompressionSizeBytes > 10485760) {
+ AWS_LOGSTREAM_ERROR(CLIENT_CONFIG_TAG, "ClientConfiguration for MinReqCompression is unsupported, received: " << clientConfig.requestCompressionConfig.requestMinCompressionSizeBytes);
+ }
+ }
+ AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG, "ClientConfiguration will use MinReqCompression: " << clientConfig.requestCompressionConfig.requestMinCompressionSizeBytes);
+
+ AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG, "ClientConfiguration will use SDK Auto Resolved profile: [" << clientConfig.profileName << "] if not specified by users.");
+
+ // Automatically determine the AWS region from environment variables, configuration file and EC2 metadata.
+ clientConfig.region = Aws::Environment::GetEnv("AWS_DEFAULT_REGION");
+ if (!clientConfig.region.empty())
+ {
+ return;
+ }
+
+ clientConfig.region = Aws::Environment::GetEnv("AWS_REGION");
+ if (!clientConfig.region.empty())
+ {
+ return;
+ }
+
+ clientConfig.region = Aws::Config::GetCachedConfigValue("region");
+ if (!clientConfig.region.empty())
+ {
+ return;
+ }
+
+ // Set the endpoint to interact with EC2 instance's metadata service
+ Aws::String ec2MetadataServiceEndpoint = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT");
+ if (! ec2MetadataServiceEndpoint.empty())
+ {
+ //By default we use the IPv4 default metadata service address
+ auto client = Aws::Internal::GetEC2MetadataClient();
+ if (client != nullptr)
+ {
+ client->SetEndpoint(ec2MetadataServiceEndpoint);
+ }
+ }
+}
+
+ClientConfiguration::ClientConfiguration()
+{
+ this->disableIMDS = false;
+ setLegacyClientConfigurationParameters(*this);
+ retryStrategy = InitRetryStrategy();
+
+ if (!this->disableIMDS &&
+ region.empty() &&
+ Aws::Utils::StringUtils::ToLower(Aws::Environment::GetEnv("AWS_EC2_METADATA_DISABLED").c_str()) != "true")
+ {
+ auto client = Aws::Internal::GetEC2MetadataClient();
+ if (client)
+ {
+ region = client->GetCurrentRegion();
+ }
+ }
+ if (!region.empty())
+ {
+ return;
+ }
+ region = Aws::String(Aws::Region::US_EAST_1);
+}
+
+ClientConfiguration::ClientConfiguration(const char* profile, bool shouldDisableIMDS)
+{
+ this->disableIMDS = shouldDisableIMDS;
+ setLegacyClientConfigurationParameters(*this);
+ // Call EC2 Instance Metadata service only once
+ Aws::String ec2MetadataRegion;
+ bool hasEc2MetadataRegion = false;
+ if (!this->disableIMDS &&
+ region.empty() &&
+ Aws::Utils::StringUtils::ToLower(Aws::Environment::GetEnv("AWS_EC2_METADATA_DISABLED").c_str()) != "true") {
+ auto client = Aws::Internal::GetEC2MetadataClient();
+ if (client)
+ {
+ ec2MetadataRegion = client->GetCurrentRegion();
+ hasEc2MetadataRegion = true;
+ region = ec2MetadataRegion;
+ }
+ }
+
+ if(region.empty())
+ {
+ region = Aws::String(Aws::Region::US_EAST_1);
+ }
+
+ if (profile && Aws::Config::HasCachedConfigProfile(profile)) {
+ this->profileName = Aws::String(profile);
+ AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG,
+ "Use user specified profile: [" << this->profileName << "] for ClientConfiguration.");
+ auto tmpRegion = Aws::Config::GetCachedConfigProfile(this->profileName).GetRegion();
+ if (!tmpRegion.empty()) {
+ region = tmpRegion;
+ }
+
+ Aws::String profileDefaultsMode = Aws::Config::GetCachedConfigProfile(this->profileName).GetDefaultsMode();
+ Aws::Config::Defaults::SetSmartDefaultsConfigurationParameters(*this, profileDefaultsMode,
+ hasEc2MetadataRegion, ec2MetadataRegion);
+ return;
+ }
+ if (!retryStrategy)
+ {
+ retryStrategy = InitRetryStrategy();
+ }
+
+ AWS_LOGSTREAM_WARN(CLIENT_CONFIG_TAG, "User specified profile: [" << profile << "] is not found, will use the SDK resolved one.");
+}
+
+ClientConfiguration::ClientConfiguration(bool /*useSmartDefaults*/, const char* defaultMode, bool shouldDisableIMDS)
+{
+ this->disableIMDS = shouldDisableIMDS;
+ setLegacyClientConfigurationParameters(*this);
+
+ // Call EC2 Instance Metadata service only once
+ Aws::String ec2MetadataRegion;
+ bool hasEc2MetadataRegion = false;
+ if (!this->disableIMDS &&
+ region.empty() &&
+ Aws::Utils::StringUtils::ToLower(Aws::Environment::GetEnv("AWS_EC2_METADATA_DISABLED").c_str()) != "true")
+ {
+ auto client = Aws::Internal::GetEC2MetadataClient();
+ if (client)
+ {
+ ec2MetadataRegion = client->GetCurrentRegion();
+ hasEc2MetadataRegion = true;
+ region = ec2MetadataRegion;
+ }
+ }
+ if (region.empty())
+ {
+ region = Aws::String(Aws::Region::US_EAST_1);
+ }
+
+ Aws::Config::Defaults::SetSmartDefaultsConfigurationParameters(*this, defaultMode, hasEc2MetadataRegion, ec2MetadataRegion);
+}
+
+std::shared_ptr<RetryStrategy> InitRetryStrategy(Aws::String retryMode)
+{
+ int maxAttempts = 0;
Aws::String maxAttemptsString = Aws::Environment::GetEnv("AWS_MAX_ATTEMPTS");
if (maxAttemptsString.empty())
{
@@ -83,15 +259,21 @@ ClientConfiguration::ClientConfiguration() :
}
}
- Aws::String retryMode = Aws::Environment::GetEnv("AWS_RETRY_MODE");
+ if (retryMode.empty())
+ {
+ retryMode = Aws::Environment::GetEnv("AWS_RETRY_MODE");
+ }
if (retryMode.empty())
{
retryMode = Aws::Config::GetCachedConfigValue("retry_mode");
}
+
+ std::shared_ptr<RetryStrategy> retryStrategy;
if (retryMode == "standard")
{
if (maxAttempts < 0)
{
+ // a negative value set above forces use of the default max attempts
retryStrategy = Aws::MakeShared<StandardRetryStrategy>(CLIENT_CONFIG_TAG);
}
else
@@ -99,61 +281,55 @@ ClientConfiguration::ClientConfiguration() :
retryStrategy = Aws::MakeShared<StandardRetryStrategy>(CLIENT_CONFIG_TAG, maxAttempts);
}
}
- else
- {
- retryStrategy = Aws::MakeShared<DefaultRetryStrategy>(CLIENT_CONFIG_TAG);
- }
-
- // Automatically determine the AWS region from environment variables, configuration file and EC2 metadata.
- region = Aws::Environment::GetEnv("AWS_DEFAULT_REGION");
- if (!region.empty())
- {
- return;
- }
-
- region = Aws::Environment::GetEnv("AWS_REGION");
- if (!region.empty())
+ else if (retryMode == "adaptive")
{
- return;
- }
-
- region = Aws::Config::GetCachedConfigValue("region");
- if (!region.empty())
- {
- return;
- }
-
- if (Aws::Utils::StringUtils::ToLower(Aws::Environment::GetEnv("AWS_EC2_METADATA_DISABLED").c_str()) != "true")
- {
- auto client = Aws::Internal::GetEC2MetadataClient();
- if (client)
+ if (maxAttempts < 0)
{
- region = client->GetCurrentRegion();
+ // a negative value set above forces use of the default max attempts
+ retryStrategy = Aws::MakeShared<AdaptiveRetryStrategy>(CLIENT_CONFIG_TAG);
+ }
+ else
+ {
+ retryStrategy = Aws::MakeShared<AdaptiveRetryStrategy>(CLIENT_CONFIG_TAG, maxAttempts);
}
}
-
- if (!region.empty())
+ else
{
- return;
+ retryStrategy = Aws::MakeShared<DefaultRetryStrategy>(CLIENT_CONFIG_TAG);
}
- region = Aws::String(Aws::Region::US_EAST_1);
+ return retryStrategy;
}
-ClientConfiguration::ClientConfiguration(const char* profile) : ClientConfiguration()
+Aws::String ClientConfiguration::LoadConfigFromEnvOrProfile(const Aws::String& envKey,
+ const Aws::String& profile,
+ const Aws::String& profileProperty,
+ const Aws::Vector<Aws::String>& allowedValues,
+ const Aws::String& defaultValue)
{
- if (profile && Aws::Config::HasCachedConfigProfile(profile))
- {
- this->profileName = Aws::String(profile);
- AWS_LOGSTREAM_DEBUG(CLIENT_CONFIG_TAG, "Use user specified profile: [" << this->profileName << "] for ClientConfiguration.");
- auto tmpRegion = Aws::Config::GetCachedConfigProfile(this->profileName).GetRegion();
- if (!tmpRegion.empty())
- {
- region = tmpRegion;
+ Aws::String option = Aws::Environment::GetEnv(envKey.c_str());
+ if (option.empty()) {
+ option = Aws::Config::GetCachedConfigValue(profile, profileProperty);
+ }
+ option = Aws::Utils::StringUtils::ToLower(option.c_str());
+ if (option.empty()) {
+ return defaultValue;
+ }
+
+ if (!allowedValues.empty() && std::find(allowedValues.cbegin(), allowedValues.cend(), option) == allowedValues.cend()) {
+ Aws::OStringStream expectedStr;
+ expectedStr << "[";
+ for(const auto& allowed : allowedValues) {
+ expectedStr << allowed << ";";
}
- return;
+ expectedStr << "]";
+
+ AWS_LOGSTREAM_WARN(CLIENT_CONFIG_TAG, "Unrecognised value for " << envKey << ": " << option <<
+ ". Using default instead: " << defaultValue <<
+ ". Expected empty or one of: " << expectedStr.str());
+ option = defaultValue;
}
- AWS_LOGSTREAM_WARN(CLIENT_CONFIG_TAG, "User specified profile: [" << profile << "] is not found, will use the SDK resolved one.");
+ return option;
}
} // namespace Client
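
For orientation, a minimal usage sketch of the resolution order implemented by ClientConfiguration::LoadConfigFromEnvOrProfile introduced above: environment variable first, then the cached profile property, lowercased and validated against the allowed values, otherwise the default. This is not part of the upstream change, and the key names "AWS_MY_FEATURE" and "my_feature" are hypothetical placeholders.

#include <aws/core/client/ClientConfiguration.h>

Aws::String ResolveMyFeatureFlag(const Aws::Client::ClientConfiguration& cfg)
{
    return Aws::Client::ClientConfiguration::LoadConfigFromEnvOrProfile(
        "AWS_MY_FEATURE",          // environment variable checked first (hypothetical name)
        cfg.profileName,           // profile whose cached config is consulted next
        "my_feature",              // property key in the shared config file (hypothetical name)
        {"enabled", "disabled"},   // allowed values, compared after lowercasing
        "enabled");                // fallback when unset or unrecognised
}
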
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
index 8c2c288dcd..50a7f9308d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/CoreErrors.cpp
@@ -18,7 +18,8 @@ using namespace Aws::Http;
#pragma warning(disable : 4592)
#endif
-static Aws::UniquePtr<Aws::Map<Aws::String, AWSError<CoreErrors> > > s_CoreErrorsMapper(nullptr);
+using ErrorsMapperContainer = Aws::Map<Aws::String, AWSError<CoreErrors> >;
+static ErrorsMapperContainer* s_CoreErrorsMapper(nullptr);
#ifdef _MSC_VER
#pragma warning(pop)
@@ -30,7 +31,7 @@ void CoreErrorsMapper::InitCoreErrorsMapper()
{
return;
}
- s_CoreErrorsMapper = Aws::MakeUnique<Aws::Map<Aws::String, AWSError<CoreErrors> > >("InitCoreErrorsMapper");
+ s_CoreErrorsMapper = Aws::New<ErrorsMapperContainer>("InitCoreErrorsMapper");
s_CoreErrorsMapper->emplace("IncompleteSignature", AWSError<CoreErrors>(CoreErrors::INCOMPLETE_SIGNATURE, false));
s_CoreErrorsMapper->emplace("IncompleteSignatureException", AWSError<CoreErrors>(CoreErrors::INCOMPLETE_SIGNATURE, false));
@@ -92,10 +93,8 @@ void CoreErrorsMapper::InitCoreErrorsMapper()
void CoreErrorsMapper::CleanupCoreErrorsMapper()
{
- if (s_CoreErrorsMapper)
- {
- s_CoreErrorsMapper = nullptr;
- }
+ Aws::Delete(s_CoreErrorsMapper);
+ s_CoreErrorsMapper = nullptr;
}
AWSError<CoreErrors> CoreErrorsMapper::GetErrorForName(const char* errorName)
@@ -149,3 +148,12 @@ AWS_CORE_API AWSError<CoreErrors> CoreErrorsMapper::GetErrorForHttpResponseCode(
error.SetResponseCode(code);
return error;
}
+
+/**
+ * Overload ostream operator<< for CoreErrors enum class for a prettier output such as "103" and not "<67-00 00-00>"
+ */
+Aws::OStream& Aws::Client::operator<< (Aws::OStream& oStream, CoreErrors code)
+{
+ oStream << Aws::Utils::StringUtils::to_string(static_cast<typename std::underlying_type<HttpResponseCode>::type>(code));
+ return oStream;
+}
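
The operator<< overload added above relies on casting a scoped enum to its underlying integer type before streaming. A standalone sketch of that pattern, using plain std types and a hypothetical enum rather than the Aws:: wrappers:

#include <iostream>
#include <type_traits>

enum class SampleError : int { THROTTLING = 103 };

std::ostream& operator<<(std::ostream& os, SampleError code)
{
    // Cast the scoped enum to its underlying integer type so the stream prints "103"
    // instead of refusing to compile or emitting raw bytes.
    return os << static_cast<std::underlying_type<SampleError>::type>(code);
}
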
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
index 7e57c79ffc..405d7566cf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/DefaultRetryStrategy.cpp
@@ -28,5 +28,5 @@ long DefaultRetryStrategy::CalculateDelayBeforeNextRetry(const AWSError<CoreErro
return 0;
}
- return (1 << attemptedRetries) * m_scaleFactor;
+ return (1UL << attemptedRetries) * m_scaleFactor;
}
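
The delay formula above doubles with each retry and scales by m_scaleFactor; promoting the shift to 1UL avoids signed-overflow issues for large retry counts. A small illustrative sketch of the same arithmetic (the 25 ms scale factor is only an example value):

#include <cstdio>

// Mirrors the shape of DefaultRetryStrategy::CalculateDelayBeforeNextRetry.
long BackoffMs(long attemptedRetries, unsigned long scaleFactorMs = 25)
{
    return static_cast<long>((1UL << attemptedRetries) * scaleFactorMs);
}

int main()
{
    for (long attempt = 0; attempt < 5; ++attempt)
        std::printf("retry %ld -> %ld ms\n", attempt, BackoffMs(attempt)); // 25, 50, 100, 200, 400
    return 0;
}
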
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
new file mode 100644
index 0000000000..f0a4e91d5b
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/GenericClientConfiguration.cpp
@@ -0,0 +1,103 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/client/GenericClientConfiguration.h>
+#include <aws/core/platform/Environment.h>
+#include <aws/core/utils/memory/AWSMemory.h>
+#include <aws/core/utils/threading/Executor.h>
+
+
+namespace Aws
+{
+namespace Client
+{
+template struct AWS_CORE_API GenericClientConfiguration<false>;
+
+bool IsEndpointDiscoveryEnabled(const Aws::String& endpointOverride, const Aws::String &profileName)
+{
+ bool enabled = true; // default value for AWS Services with enabled discovery trait
+ if (!endpointOverride.empty())
+ {
+ enabled = false;
+ }
+ else
+ {
+ static const char* AWS_ENABLE_ENDPOINT_DISCOVERY_ENV_KEY = "AWS_ENABLE_ENDPOINT_DISCOVERY";
+ static const char* AWS_ENABLE_ENDPOINT_DISCOVERY_PROFILE_KEY = "AWS_ENABLE_ENDPOINT_DISCOVERY";
+ static const char* AWS_ENABLE_ENDPOINT_ENABLED = "true";
+ static const char* AWS_ENABLE_ENDPOINT_DISABLED = "false";
+
+ Aws::String enableEndpointDiscovery = ClientConfiguration::LoadConfigFromEnvOrProfile(AWS_ENABLE_ENDPOINT_DISCOVERY_ENV_KEY,
+ profileName,
+ AWS_ENABLE_ENDPOINT_DISCOVERY_PROFILE_KEY,
+ {AWS_ENABLE_ENDPOINT_ENABLED, AWS_ENABLE_ENDPOINT_DISABLED},
+ AWS_ENABLE_ENDPOINT_ENABLED);
+
+ if (AWS_ENABLE_ENDPOINT_DISABLED == enableEndpointDiscovery)
+ {
+ // enabled by default unless explicitly disabled in ENV, profile config file, or programmatically later
+ enabled = false;
+ }
+ }
+ return enabled;
+}
+
+GenericClientConfiguration<true>::GenericClientConfiguration()
+ : ClientConfiguration(),
+ enableHostPrefixInjection(ClientConfiguration::enableHostPrefixInjection),
+ enableEndpointDiscovery(ClientConfiguration::enableEndpointDiscovery)
+{
+ enableEndpointDiscovery = IsEndpointDiscoveryEnabled(this->endpointOverride, this->profileName);
+ enableHostPrefixInjection = false; // disabled by default in the SDK
+}
+
+GenericClientConfiguration<true>::GenericClientConfiguration(const char* inputProfileName, bool shouldDisableIMDS)
+ : ClientConfiguration(inputProfileName, shouldDisableIMDS),
+ enableHostPrefixInjection(ClientConfiguration::enableHostPrefixInjection),
+ enableEndpointDiscovery(ClientConfiguration::enableEndpointDiscovery)
+{
+ enableEndpointDiscovery = IsEndpointDiscoveryEnabled(this->endpointOverride, this->profileName);
+ enableHostPrefixInjection = false; // disabled by default in the SDK
+}
+
+GenericClientConfiguration<true>::GenericClientConfiguration(bool useSmartDefaults, const char* inputDefaultMode, bool shouldDisableIMDS)
+ : ClientConfiguration(useSmartDefaults, inputDefaultMode, shouldDisableIMDS),
+ enableHostPrefixInjection(ClientConfiguration::enableHostPrefixInjection),
+ enableEndpointDiscovery(ClientConfiguration::enableEndpointDiscovery)
+{
+ enableEndpointDiscovery = IsEndpointDiscoveryEnabled(this->endpointOverride, this->profileName);
+ enableHostPrefixInjection = false; // disabled by default in the SDK
+}
+
+GenericClientConfiguration<true>::GenericClientConfiguration(const ClientConfiguration& config)
+ : ClientConfiguration(config),
+ enableHostPrefixInjection(ClientConfiguration::enableHostPrefixInjection),
+ enableEndpointDiscovery(ClientConfiguration::enableEndpointDiscovery)
+{
+ enableEndpointDiscovery = IsEndpointDiscoveryEnabled(this->endpointOverride, this->profileName);
+ enableHostPrefixInjection = false; // disabled by default in the SDK
+}
+
+GenericClientConfiguration<true>::GenericClientConfiguration(const GenericClientConfiguration<true>& other)
+ : ClientConfiguration(static_cast<ClientConfiguration>(other)),
+ enableHostPrefixInjection(ClientConfiguration::enableHostPrefixInjection),
+ enableEndpointDiscovery(ClientConfiguration::enableEndpointDiscovery)
+{
+ if (other.enableEndpointDiscovery) {
+ enableEndpointDiscovery = other.enableEndpointDiscovery.value();
+ }
+ enableHostPrefixInjection = other.enableHostPrefixInjection;
+}
+
+GenericClientConfiguration<true>& GenericClientConfiguration<true>::operator=(const GenericClientConfiguration<true>& other)
+{
+ if (this != &other) {
+ *static_cast<ClientConfiguration*>(this) = static_cast<ClientConfiguration>(other);
+ }
+ return *this;
+}
+
+} // namespace Client
+} // namespace Aws
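
In the class defined above, endpoint discovery ends up enabled unless an endpoint override is set or it is explicitly disabled via AWS_ENABLE_ENDPOINT_DISCOVERY in the environment or profile. A hedged usage sketch; the profile name is a placeholder, the member is treated as optional<bool>-like (as the copy constructor above suggests), and the SDK is assumed to be initialized already:

#include <aws/core/client/GenericClientConfiguration.h>

void Example()
{
    // Assumes Aws::InitAPI() has already run; "analytics" is a hypothetical profile name.
    Aws::Client::GenericClientConfiguration<true> cfg("analytics", /*shouldDisableIMDS=*/true);

    // True unless endpointOverride is set or discovery is disabled via env/profile.
    const bool discovery = cfg.enableEndpointDiscovery && cfg.enableEndpointDiscovery.value();
    (void)discovery;
}
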
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
new file mode 100644
index 0000000000..e51a49049b
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RequestCompression.cpp
@@ -0,0 +1,336 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/client/RequestCompression.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/memory/AWSMemory.h>
+#include <algorithm>
+#include <aws/core/utils/memory/stl/AWSAllocator.h>
+
+#ifdef ENABLED_ZLIB_REQUEST_COMPRESSION
+#include "zlib.h"
+
+#if defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__)
+#include <fcntl.h>
+#include <io.h>
+#define SET_BINARY_MODE(file) setmode(fileno(file), O_BINARY)
+#else
+#define SET_BINARY_MODE(file)
+#endif // defined(MSDOS) || defined(OS2) || defined(WIN32) || defined(__CYGWIN__)
+// Define the zlib chunk size (roughly 256 KiB)
+static const size_t ZLIB_CHUNK = 263144;
+static const char AWS_REQUEST_COMPRESSION_ALLOCATION_TAG[] =
+ "RequestCompressionAlloc";
+#endif // ENABLED_ZLIB_REQUEST_COMPRESSION
+
+static const char AWS_REQUEST_COMPRESSION_LOG_TAG[] = "RequestCompression";
+
+Aws::String Aws::Client::GetCompressionAlgorithmId(const CompressionAlgorithm &algorithm)
+{
+ switch (algorithm)
+ {
+ case CompressionAlgorithm::GZIP:
+ return "gzip";
+ default:
+ return "";
+ }
+}
+
+#ifdef ENABLED_ZLIB_REQUEST_COMPRESSION
+#ifdef USE_AWS_MEMORY_MANAGEMENT
+static const char* ZlibMemTag = "zlib";
+static const size_t offset = sizeof(size_t); // to make space for size of the array
+// Custom memory allocation routines for zlib.
+// If allocation fails, return Z_NULL.
+void* aws_zalloc(void * /* opaque */, unsigned items, unsigned size)
+{
+ unsigned sizeToAllocate = items*size;
+ size_t sizeToAllocateWithOffset = sizeToAllocate + offset;
+ if ((size != 0 && sizeToAllocate / size != items)
+ || (sizeToAllocateWithOffset <= sizeToAllocate ))
+ {
+ return Z_NULL;
+ }
+ char* newMem = reinterpret_cast<char*>(Aws::Malloc(ZlibMemTag, sizeToAllocateWithOffset));
+ if (newMem != nullptr) {
+ std::size_t* pointerToSize = reinterpret_cast<std::size_t*>(newMem);
+ *pointerToSize = size;
+ return reinterpret_cast<void*>(newMem + offset);
+ }
+ else
+ {
+ return Z_NULL;
+ }
+}
+
+void aws_zfree(void * /* opaque */, void * ptr)
+{
+ if(ptr)
+ {
+ char* shiftedMemory = reinterpret_cast<char*>(ptr);
+ Aws::Free(shiftedMemory - offset);
+ }
+}
+
+#endif // AWS_CUSTOM_MEMORY_MANAGEMENT
+#endif // ENABLED_ZLIB_REQUEST_COMPRESSION
+
+
+iostream_outcome Aws::Client::RequestCompression::compress(std::shared_ptr<Aws::IOStream> input,
+ const CompressionAlgorithm &algorithm) const
+{
+#ifdef ENABLED_ZLIB_REQUEST_COMPRESSION
+ if (algorithm == CompressionAlgorithm::GZIP)
+ {
+ // calculating stream size
+ input->seekg(0, input->end);
+ size_t streamSize = input->tellg();
+ input->seekg(0, input->beg);
+
+ AWS_LOGSTREAM_TRACE(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressing request of " << streamSize << " bytes.");
+
+ // Preparing output
+ std::shared_ptr<Aws::IOStream> output = Aws::MakeShared<Aws::StringStream>(AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
+ if(!output)
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate output while compressing")
+ return false;
+ }
+ // Prepare ZLIB to compress
+ int ret = Z_NULL;
+ int flush = Z_NO_FLUSH;
+ z_stream strm = {};
+ auto in = Aws::MakeUniqueArray<unsigned char>(ZLIB_CHUNK, AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
+ if(!in)
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate in buffer while compressing")
+ return false;
+ }
+
+ auto out = Aws::MakeUniqueArray<unsigned char>(ZLIB_CHUNK, AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
+ if(!out)
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate out buffer while compressing")
+ return false;
+ }
+
+ //Preparing allocators
+#ifdef USE_AWS_MEMORY_MANAGEMENT
+ strm.zalloc = (void *(*)(void *, unsigned, unsigned)) aws_zalloc;
+ strm.zfree = (void (*)(void *, void *)) aws_zfree;
+#else
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+#endif
+ strm.opaque = Z_NULL;
+
+ const int MAX_WINDOW_GZIP = 31;
+ const int DEFAULT_MEM_LEVEL_USAGE = 8;
+ ret = deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED, MAX_WINDOW_GZIP, DEFAULT_MEM_LEVEL_USAGE, Z_DEFAULT_STRATEGY);
+ if(ret != Z_OK)
+ {
+ return false;
+ }
+
+ //Adding one to the stream size counter to account for the EOF marker.
+ streamSize++;
+ size_t toRead;
+ // Compress
+ do {
+ toRead = std::min(streamSize, ZLIB_CHUNK);
+ // Fill the buffer
+ if (! input->read(reinterpret_cast<char *>(in.get()), toRead))
+ {
+ if (input->eof())
+ {
+ // Last read; flush when exiting the loop
+ flush = Z_FINISH;
+ }
+ else {
+ AWS_LOGSTREAM_ERROR(
+ AWS_REQUEST_COMPRESSION_LOG_TAG,
+ "Uncompress request failed to read from stream");
+ return false;
+ }
+ }
+ streamSize -= toRead; // bytes left to read
+ strm.avail_in = (flush == Z_FINISH) ? toRead - 1 : toRead; // skip the EOF marker if included
+ strm.next_in = in.get();
+ do
+ {
+ // Run deflate on buffers
+ strm.avail_out = ZLIB_CHUNK;
+ strm.next_out = out.get();
+
+ ret = deflate(&strm, flush);
+
+ // writing the output
+ assert(ZLIB_CHUNK >= strm.avail_out);
+ unsigned output_size = ZLIB_CHUNK - strm.avail_out;
+ if(! output->write(reinterpret_cast<char *>(out.get()), output_size))
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request failed to write to output stream");
+ return false;
+ }
+ } while (strm.avail_out == 0);
+ assert(strm.avail_in == 0); // All data was read
+ } while (flush != Z_FINISH);
+ assert(ret == Z_STREAM_END); // Completed stream
+ AWS_LOGSTREAM_TRACE(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request to: " << strm.total_out << " bytes");
+ deflateEnd(&strm);
+ return output;
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compress request requested in runtime without support: " << GetCompressionAlgorithmId(algorithm));
+ return false;
+ }
+#else
+ // If there is no support to compress, always fail calls to this method.
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compress request requested in runtime without support: " << GetCompressionAlgorithmId(algorithm));
+ AWS_UNREFERENCED_PARAM(input); // silencing warning;
+ return false;
+#endif
+}
+
+Aws::Utils::Outcome<std::shared_ptr<Aws::IOStream>, bool>
+Aws::Client::RequestCompression::uncompress(std::shared_ptr<Aws::IOStream> input, const CompressionAlgorithm &algorithm) const
+{
+#ifdef ENABLED_ZLIB_REQUEST_COMPRESSION
+ if (algorithm == CompressionAlgorithm::GZIP)
+ {
+ // calculating stream size
+ input->seekg(0, input->end);
+ size_t streamSize = input->tellg();
+ input->seekg(0, input->beg);
+
+ AWS_LOGSTREAM_TRACE(AWS_REQUEST_COMPRESSION_LOG_TAG, "Uncompressing request of " << streamSize << " bytes.");
+
+ // Preparing output
+ std::shared_ptr<Aws::IOStream> output = Aws::MakeShared<Aws::StringStream>( AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
+ if(!output)
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate output while uncompressing")
+ return false;
+ }
+
+ // Prepare ZLIB to uncompress
+ int ret = Z_NULL;
+ z_stream strm = {};
+ auto in = Aws::MakeUniqueArray<unsigned char>(ZLIB_CHUNK, AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
+ if(!in)
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate in buffer while uncompressing")
+ return false;
+ }
+
+ auto out = Aws::MakeUniqueArray<unsigned char>(ZLIB_CHUNK, AWS_REQUEST_COMPRESSION_ALLOCATION_TAG);
+ if(!out)
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to allocate out buffer while uncompressing")
+ return false;
+ }
+
+ //preparing allocation
+#ifdef USE_AWS_MEMORY_MANAGEMENT
+ strm.zalloc = (void *(*)(void *, unsigned, unsigned)) aws_zalloc;
+ strm.zfree = (void (*)(void *, void *)) aws_zfree;
+#else
+ strm.zalloc = Z_NULL;
+ strm.zfree = Z_NULL;
+#endif
+ strm.opaque = Z_NULL;
+ strm.avail_in = 0;
+ strm.next_in = Z_NULL;
+
+ const int MAX_WINDOW_GZIP = 31;
+ ret = inflateInit2(&strm, MAX_WINDOW_GZIP);
+ if (ret != Z_OK)
+ {
+ return false;
+ }
+
+ //Adding one to the stream size counter to account for the EOF marker.
+ streamSize++;
+ size_t toRead;
+ // Decompress
+ do {
+ toRead = (streamSize < ZLIB_CHUNK)?streamSize:ZLIB_CHUNK;
+ if (toRead < 1) break; // Nothing left to read
+ // Fill the buffer
+ if(! input->read(reinterpret_cast<char *>(in.get()), toRead))
+ {
+ if (input->eof())
+ {
+ //skip passing the EOF to the buffer
+ toRead--;
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(
+ AWS_REQUEST_COMPRESSION_LOG_TAG,
+ "Compress request failed to read from stream");
+ return false;
+ }
+ }
+
+ // Filling input buffer to decompress
+ strm.avail_in = toRead;
+ strm.next_in = in.get();
+ do
+ {
+ // Run inflate on buffers
+ strm.avail_out = ZLIB_CHUNK;
+ strm.next_out = out.get();
+
+ ret = inflate(&strm, Z_NO_FLUSH);
+ // Catch errors
+ switch (ret)
+ {
+ case Z_NEED_DICT:
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request failed to inflate with code: Z_NEED_DICT");
+ return false;
+ case Z_DATA_ERROR:
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request failed to inflate with code: Z_DATA_ERROR");
+ return false;
+ case Z_MEM_ERROR:
+ (void)inflateEnd(&strm);
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Compressed request failed to inflate with code: Z_MEM_ERROR");
+ return false;
+ }
+
+ // writing the output
+ unsigned output_size = ZLIB_CHUNK - strm.avail_out;
+ if(! output->write(reinterpret_cast<char *>(out.get()), output_size)) {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Uncompressed request failed to write to output stream");
+ return false;
+ }
+ } while (strm.avail_out == 0);
+ } while (ret != Z_STREAM_END);
+ // clean up
+ (void)inflateEnd(&strm);
+ if (ret == Z_STREAM_END)
+ {
+ AWS_LOGSTREAM_TRACE(AWS_REQUEST_COMPRESSION_LOG_TAG, "Decompressed request to: " << strm.total_out << " bytes");
+ return output;
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Failed to decompress after read input completely");
+ return false;
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Uncompress request requested in runtime without support: " << GetCompressionAlgorithmId(algorithm));
+ return false;
+ }
+#else
+ // If there is no support to compress, always fail calls to this method.
+ AWS_LOGSTREAM_ERROR(AWS_REQUEST_COMPRESSION_LOG_TAG, "Uncompress request requested in runtime without support: " << GetCompressionAlgorithmId(algorithm));
+ AWS_UNREFERENCED_PARAM(input); // silencing warning;
+ return false;
+#endif
+}
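
The compress() path above follows the standard zlib pattern: deflateInit2 with windowBits 31 selects the gzip wrapper, deflate(Z_FINISH) drains the stream, and deflateEnd releases state. A self-contained sketch of that pattern, independent of the SDK's stream and allocator types and with deliberately minimal error handling:

#include <string>
#include <vector>
#include <zlib.h>

std::vector<unsigned char> GzipCompress(const std::string& in)
{
    z_stream strm{};                      // zero-initialized: zlib then uses its default allocators
    const int MAX_WINDOW_GZIP = 31;       // 15 (max window) + 16 selects the gzip header/trailer
    const int DEFAULT_MEM_LEVEL = 8;
    if (deflateInit2(&strm, Z_DEFAULT_COMPRESSION, Z_DEFLATED,
                     MAX_WINDOW_GZIP, DEFAULT_MEM_LEVEL, Z_DEFAULT_STRATEGY) != Z_OK)
        return {};

    std::vector<unsigned char> out(deflateBound(&strm, in.size()));
    strm.next_in   = reinterpret_cast<Bytef*>(const_cast<char*>(in.data()));
    strm.avail_in  = static_cast<uInt>(in.size());
    strm.next_out  = out.data();
    strm.avail_out = static_cast<uInt>(out.size());

    int ret = deflate(&strm, Z_FINISH);   // one call suffices because out was sized via deflateBound
    out.resize(strm.total_out);
    deflateEnd(&strm);
    return ret == Z_STREAM_END ? out : std::vector<unsigned char>{};
}
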
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
index b439b7ca99..77b6f5abbb 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/client/RetryStrategy.cpp
@@ -17,18 +17,21 @@ namespace Aws
{
static const int INITIAL_RETRY_TOKENS = 500;
static const int RETRY_COST = 5;
- static const int NO_RETRY_INCREMENT = 1;
static const int TIMEOUT_RETRY_COST = 10;
StandardRetryStrategy::StandardRetryStrategy(long maxAttempts) :
m_retryQuotaContainer(Aws::MakeShared<DefaultRetryQuotaContainer>("StandardRetryStrategy")),
m_maxAttempts(maxAttempts)
- {}
+ {
+ srand((unsigned int)time(NULL));
+ }
StandardRetryStrategy::StandardRetryStrategy(std::shared_ptr<RetryQuotaContainer> retryQuotaContainer, long maxAttempts) :
m_retryQuotaContainer(retryQuotaContainer),
m_maxAttempts(maxAttempts)
- {}
+ {
+ srand((unsigned int)time(NULL));
+ }
void StandardRetryStrategy::RequestBookkeeping(const HttpResponseOutcome& httpResponseOutcome)
{
@@ -60,7 +63,8 @@ namespace Aws
long StandardRetryStrategy::CalculateDelayBeforeNextRetry(const AWSError<CoreErrors>& error, long attemptedRetries) const
{
AWS_UNREFERENCED_PARAM(error);
- return (std::min)(rand() % 1000 * (1 << attemptedRetries), 20000);
+ // Maximum left shift factor is capped by ceil(log2(max_delay)), to avoid wrap-around and overflow into negative values:
+ return (std::min)(rand() % 1000 * (1 << (std::min)(attemptedRetries, 15L)), 20000);
}
DefaultRetryQuotaContainer::DefaultRetryQuotaContainer() : m_retryQuota(INITIAL_RETRY_TOKENS)
@@ -99,4 +103,4 @@ namespace Aws
ReleaseRetryQuota(capacityAmount);
}
}
-}
\ No newline at end of file
+}
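
The standard strategy's delay above multiplies a random 0-999 ms jitter by 2^retries, capping the shift at 15 and the total at 20 seconds. A compact sketch of that arithmetic, outside the SDK types:

#include <algorithm>
#include <cstdlib>

// Mirrors the shape of StandardRetryStrategy::CalculateDelayBeforeNextRetry: jittered exponential
// backoff with the shift capped at 15 and the result capped at 20 000 ms.
// For example, attemptedRetries = 3 yields a value in [0, 7992] ms.
long JitteredBackoffMs(long attemptedRetries)
{
    const long cappedShift = (std::min)(attemptedRetries, 15L);
    return (std::min)(std::rand() % 1000 * (1L << cappedShift), 20000L);
}
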
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
new file mode 100644
index 0000000000..ba0079bb5e
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSConfigFileProfileConfigLoader.cpp
@@ -0,0 +1,629 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/config/AWSProfileConfigLoader.h>
+#include <aws/core/utils/memory/stl/AWSSet.h>
+#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <fstream>
+
+namespace Aws
+{
+ namespace Config
+ {
+ using namespace Aws::Utils;
+ using namespace Aws::Auth;
+
+ static const char REGION_KEY[] = "region";
+ static const char ACCESS_KEY_ID_KEY[] = "aws_access_key_id";
+ static const char SECRET_KEY_KEY[] = "aws_secret_access_key";
+ static const char SESSION_TOKEN_KEY[] = "aws_session_token";
+ static const char SSO_START_URL_KEY[] = "sso_start_url";
+ static const char SSO_REGION_KEY[] = "sso_region";
+ static const char SSO_ACCOUNT_ID_KEY[] = "sso_account_id";
+ static const char SSO_ROLE_NAME_KEY[] = "sso_role_name";
+ static const char SSO_SESSION_KEY[] = "sso_session";
+ static const char ROLE_ARN_KEY[] = "role_arn";
+ static const char EXTERNAL_ID_KEY[] = "external_id";
+ static const char CREDENTIAL_PROCESS_COMMAND[] = "credential_process";
+ static const char SOURCE_PROFILE_KEY[] = "source_profile";
+ static const char PROFILE_SECTION[] = "profile";
+ static const char DEFAULT[] = "default";
+ static const char SSO_SESSION_SECTION[] = "sso-session";
+ static const char DEFAULTS_MODE_KEY[] = "defaults_mode";
+ static const char EQ = '=';
+ static const char LEFT_BRACKET = '[';
+ static const char RIGHT_BRACKET = ']';
+ static const char PARSER_TAG[] = "Aws::Config::ConfigFileProfileFSM";
+
+ // generated by python from identifier regex pattern from the spec: R"([A-Za-z0-9_\-/.%@:\+]+)":
+ // #py: ''.join(chr(i) for i in range(128) if re.match("[A-Za-z0-9_\-\/.%@:\+]", chr(i)))
+ const char IDENTIFIER_ALLOWED_CHARACTERS[] = R"(%+-./0123456789:@ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz)";
+ static const size_t IDENTIFIER_ALLOWED_CHARACTERS_SZ = sizeof(IDENTIFIER_ALLOWED_CHARACTERS) - 1;
+ const char WHITESPACE_CHARACTERS[] = "\t ";
+ static const size_t WHITESPACE_CHARACTERS_SZ = sizeof(WHITESPACE_CHARACTERS) - 1;
+ const char COMMENT_START[] = "#;";
+ static const size_t COMMENT_START_SZ = sizeof(COMMENT_START) - 1;
+
+ struct ProfilePropertyAccessFunctions
+ {
+ const char* PropertyKey;
+ std::function<void(Profile&, const Aws::String&)> Setter;
+ std::function<const Aws::String&(const Profile&)> Getter;
+ };
+
+ static const ProfilePropertyAccessFunctions PROFILE_PROPERTY_FUNCS[] =
+ {{REGION_KEY, &Profile::SetRegion, &Profile::GetRegion},
+ //ACCESS_KEY_ID_KEY, - AwsCredentials require special handling
+ //SECRET_KEY_KEY,
+ //SESSION_TOKEN_KEY,
+ {SSO_START_URL_KEY, &Profile::SetSsoStartUrl, &Profile::GetSsoStartUrl},
+ {SSO_REGION_KEY, &Profile::SetSsoRegion, &Profile::GetSsoRegion},
+ {SSO_ACCOUNT_ID_KEY, &Profile::SetSsoAccountId, &Profile::GetSsoAccountId},
+ {SSO_ROLE_NAME_KEY, &Profile::SetSsoRoleName, &Profile::GetSsoRoleName},
+ //SSO_SESSION_KEY - SsoSession requires special handling
+ {ROLE_ARN_KEY, &Profile::SetRoleArn, &Profile::GetRoleArn},
+ {EXTERNAL_ID_KEY, &Profile::SetExternalId, &Profile::GetExternalId},
+ {CREDENTIAL_PROCESS_COMMAND, &Profile::SetCredentialProcess, &Profile::GetCredentialProcess},
+ {SOURCE_PROFILE_KEY, &Profile::SetSourceProfile, &Profile::GetSourceProfile},
+ {DEFAULTS_MODE_KEY, &Profile::SetDefaultsMode, &Profile::GetDefaultsMode}};
+
+ template<typename EntryT, size_t N>
+ const EntryT* FindInStaticArray(const EntryT (&array)[N], const Aws::String& searchKey)
+ {
+ const EntryT* found = std::find_if(array, array + N,
+ [&searchKey](const EntryT& entry)
+ {
+ return searchKey == entry.PropertyKey;
+ });
+
+ if(!!found && found != array + N)
+ return found;
+
+ return nullptr;
+ }
+
+ static const char* PROFILE_KEY_SPECIAL_HANDLING[] =
+ {ACCESS_KEY_ID_KEY, SECRET_KEY_KEY, SESSION_TOKEN_KEY, SSO_SESSION_KEY};
+ static const size_t PROFILE_KEY_SPECIAL_HANDLING_SZ = sizeof(PROFILE_KEY_SPECIAL_HANDLING) / sizeof(PROFILE_KEY_SPECIAL_HANDLING[0]);
+
+ struct SsoSessionPropertyAccessFunctions
+ {
+ const char* PropertyKey;
+ std::function<void(Profile::SsoSession&, const Aws::String&)> Setter;
+ std::function<const Aws::String&(const Profile::SsoSession&)> Getter;
+ };
+ static const SsoSessionPropertyAccessFunctions SSO_SESSION_PROPERTY_FUNCS[] =
+ {{SSO_REGION_KEY, &Profile::SsoSession::SetSsoRegion, &Profile::SsoSession::GetSsoRegion},
+ {SSO_START_URL_KEY, &Profile::SsoSession::SetSsoStartUrl, &Profile::SsoSession::GetSsoStartUrl}};
+
+ class ConfigFileProfileFSM
+ {
+ public:
+ ConfigFileProfileFSM(bool useProfilePrefix)
+ : m_useProfilePrefix(useProfilePrefix)
+ {}
+
+ const Aws::Map<String, Profile>& GetProfiles() const { return m_foundProfiles; }
+
+ void ParseStream(Aws::IStream& stream)
+ {
+ static const size_t ASSUME_EMPTY_LEN = 3;
+ State currentState = START;
+ Aws::String currentSectionName;
+ Aws::Map<Aws::String, Aws::String> currentKeyValues;
+
+ Aws::String rawLine;
+ while(std::getline(stream, rawLine) && currentState != FAILURE)
+ {
+ Aws::String line = rawLine.substr(0, rawLine.find_first_of(COMMENT_START)); // ignore comments
+ if (line.empty() || line.length() < ASSUME_EMPTY_LEN || line.find_first_not_of(WHITESPACE_CHARACTERS) == Aws::String::npos)
+ {
+ continue;
+ }
+
+ auto openPos = line.find(LEFT_BRACKET);
+ auto closePos = line.find(RIGHT_BRACKET);
+
+ if(openPos != std::string::npos && closePos != std::string::npos)
+ {
+ FlushSection(currentState, currentSectionName, currentKeyValues);
+ currentKeyValues.clear();
+ ParseSectionDeclaration(line, currentSectionName, currentState);
+ continue;
+ }
+
+ if(PROFILE_FOUND == currentState || SSO_SESSION_FOUND == currentState)
+ {
+ auto equalsPos = line.find(EQ);
+ if (equalsPos != std::string::npos)
+ {
+ auto key = StringUtils::Trim(line.substr(0, equalsPos).c_str());
+ auto value = StringUtils::Trim(line.substr(equalsPos + 1).c_str());
+ currentKeyValues[key] = value;
+ continue;
+ }
+ }
+
+ if(UNKNOWN_SECTION_FOUND == currentState)
+ {
+ // skip any unknown sections
+ continue;
+ }
+
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Unexpected line in the aws shared profile: " << rawLine);
+ currentState = FAILURE;
+ break;
+ }
+
+ FlushSection(currentState, currentSectionName, currentKeyValues);
+
+ // Put sso-sessions into profiles
+ for(auto& profile : m_foundProfiles)
+ {
+ const Aws::String& profileSsoSessionName = profile.second.GetValue(SSO_SESSION_KEY);
+ if(!profileSsoSessionName.empty())
+ {
+ auto ssoSessionIt = m_foundSsoSessions.find(profileSsoSessionName);
+ if(ssoSessionIt == m_foundSsoSessions.end())
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "AWS profile has reference to a missing sso_session: " << profileSsoSessionName);
+ currentState = FAILURE;
+ continue;
+ }
+ auto ssoSession = ssoSessionIt->second;
+ auto prof = profile.second;
+ // If sso session and profile have conflicting start url or region, fail to parse
+ // the session/sso specific profile properties
+ auto hasConflictingStartUrls = !ssoSession.GetSsoStartUrl().empty()
+ && !prof.GetSsoStartUrl().empty()
+ && ssoSession.GetSsoStartUrl() != prof.GetSsoStartUrl();
+ auto hasConflictingRegions = !ssoSession.GetSsoRegion().empty()
+ && !prof.GetSsoRegion().empty()
+ && ssoSession.GetSsoRegion() != prof.GetSsoRegion();
+ if (hasConflictingStartUrls || hasConflictingRegions) {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG,
+ "SSO profile has a start url or region conflict with sso session");
+ prof.SetSsoStartUrl("");
+ prof.SetSsoRegion("");
+ prof.SetSsoAccountId("");
+ prof.SetSsoRoleName("");
+ continue;
+ }
+ profile.second.SetSsoSession(ssoSessionIt->second);
+ }
+ }
+
+ if(FAILURE == currentState)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "AWS shared profile config parsing failed");
+ }
+ }
+
+ private:
+ // true means Shared Config parsing, false means Shared Credentials parsing
+ bool m_useProfilePrefix = false;
+
+ enum State
+ {
+ START = 0,
+ PROFILE_FOUND,
+ SSO_SESSION_FOUND,
+ UNKNOWN_SECTION_FOUND,
+ FAILURE
+ };
+
+ /**
+ * Helper function to parse a single word (aka section identifier) containing allowed characters from a line and a pos
+ * i.e. line="[ profile default ]";identifierBegin=10 will return "default"
+ * @param line, a section definition line being parsed
+ * @param identifierBegin, an Aws::String position to start parsing
+ * @param oErrorMsg, a reference to Aws::String to store error message in case of a parsing error.
+ * @return Aws::String, e.g. "default"
+ */
+ Aws::String ParseIdentifier(const Aws::String& line, Aws::String::size_type identifierBegin, Aws::String& oErrorMsg)
+ {
+ // pos at the beginning of section Identifier (or sso_session section keyword)
+ Aws::String::size_type identifierLength = 0;
+ Aws::String::size_type pos = identifierBegin;
+ while(pos < line.length())
+ {
+ if(std::find(IDENTIFIER_ALLOWED_CHARACTERS,
+ IDENTIFIER_ALLOWED_CHARACTERS + IDENTIFIER_ALLOWED_CHARACTERS_SZ,
+ line[pos]) != IDENTIFIER_ALLOWED_CHARACTERS + IDENTIFIER_ALLOWED_CHARACTERS_SZ)
+ {
+ identifierLength++;
+ pos++;
+ }
+ else
+ {
+ break;
+ }
+ }
+ const Aws::String SECTION_END_CHARS_TO_SKIP = Aws::String(WHITESPACE_CHARACTERS) + RIGHT_BRACKET;
+
+ if(identifierLength == 0)
+ {
+ oErrorMsg = "identifier is missing";
+ return "";
+ }
+ if(pos >= line.size() || SECTION_END_CHARS_TO_SKIP.find(line[pos]) == Aws::String::npos) {
+ oErrorMsg = "a blank space character or closing bracket is expected after Identifier";
+ return "";
+ }
+ Aws::String sectionIdentifier = line.substr(identifierBegin, identifierLength);
+
+ return sectionIdentifier;
+ }
+
+ /**
+ * A helper function to parse config section declaration line
+ * @param line, an input line, e.g. "[profile default]"
+ * @param ioSectionName, a return argument representing parsed section Identifier, e.g. "default"
+ * @param ioState, a return argument representing parser state, e.g. PROFILE_FOUND
+ */
+ void ParseSectionDeclaration(const Aws::String& line,
+ Aws::String& ioSectionName,
+ State& ioState)
+ {
+ do { // goto in a form of "do { break; } while(0);"
+ Aws::String::size_type pos = 0;
+ pos = line.find_first_not_of(WHITESPACE_CHARACTERS, pos);
+ if(pos != Aws::String::npos && LEFT_BRACKET != line[pos])
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "First non-blank space character of a section definition must be [, line:" << line);
+ break;
+ }
+ pos++;
+ pos = line.find_first_not_of(WHITESPACE_CHARACTERS, pos);
+ if(pos == Aws::String::npos || pos >= line.size())
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Unknown section found in the aws config file: " << line);
+ break;
+ }
+ bool defaultProfileOrSsoSectionRequired = false;
+ if (m_useProfilePrefix)
+ {
+ // In configuration files the profile name must start with the "profile" keyword (e.g. [profile profile-name]),
+ // except for the default profile, where the "profile" keyword is optional.
+ static const size_t PROFILE_KEYWORD_LENGTH = 7;
+ if(line.rfind(PROFILE_SECTION, pos + PROFILE_KEYWORD_LENGTH) != Aws::String::npos)
+ {
+ // skipping required (optional for default) profile keyword
+ pos += PROFILE_KEYWORD_LENGTH;
+ if(pos >= line.size() ||
+ std::find(WHITESPACE_CHARACTERS,
+ WHITESPACE_CHARACTERS + WHITESPACE_CHARACTERS_SZ,
+ line[pos]) == WHITESPACE_CHARACTERS + WHITESPACE_CHARACTERS_SZ)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Expected a blank space after \"profile\" keyword: " << line);
+ break;
+ }
+ pos = line.find_first_not_of(WHITESPACE_CHARACTERS, pos);
+ }
+ else
+ {
+ defaultProfileOrSsoSectionRequired = true;
+ }
+ }
+
+ Aws::String errorMsg;
+ Aws::String sectionIdentifier = ParseIdentifier(line, pos, errorMsg);
+ if (!errorMsg.empty())
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Failed to parse section identifier: " << errorMsg << " " << line);
+ break;
+ }
+ pos += sectionIdentifier.length();
+
+ if(defaultProfileOrSsoSectionRequired)
+ {
+ if (sectionIdentifier != DEFAULT && sectionIdentifier != SSO_SESSION_SECTION)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "In configuration files, the profile name must start with "
+ "profile keyword (except default profile): " << line);
+ break;
+ }
+ if (sectionIdentifier != SSO_SESSION_SECTION)
+ {
+ // profile found, still pending check for closing bracket
+ ioState = PROFILE_FOUND;
+ ioSectionName = sectionIdentifier;
+ }
+ }
+
+ if(!m_useProfilePrefix || sectionIdentifier != SSO_SESSION_SECTION)
+ {
+ // profile found, still pending check for closing bracket
+ ioState = PROFILE_FOUND;
+ ioSectionName = sectionIdentifier;
+ }
+
+ if(m_useProfilePrefix && sectionIdentifier == SSO_SESSION_SECTION)
+ {
+ // "[sso_session..." found, continue parsing for sso_session identifier
+ pos = line.find_first_not_of(WHITESPACE_CHARACTERS, pos);
+ if(pos == Aws::String::npos)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Expected a blank space after \"sso_session\" keyword: " << line);
+ break;
+ }
+
+ sectionIdentifier = ParseIdentifier(line, pos, errorMsg);
+ if (!errorMsg.empty())
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Failed to parse section identifier: " << errorMsg << " " << line);
+ break;
+ }
+ pos += sectionIdentifier.length();
+ // sso_session found, still pending check for closing bracket
+ ioState = SSO_SESSION_FOUND;
+ ioSectionName = sectionIdentifier;
+ }
+
+ pos = line.find_first_not_of(WHITESPACE_CHARACTERS, pos);
+ if(pos == Aws::String::npos)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Expected a non-blank space after section identifier (i.e. missing \"]\"): " << line);
+ break;
+ }
+ if(line[pos] != RIGHT_BRACKET)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Missing closing bracket after Section Identifier "
+ "(i.e. missing \"]\" or extra non-blank characters before \"]\"): " << line);
+ break;
+ }
+ pos++;
+ pos = line.find_first_not_of(WHITESPACE_CHARACTERS, pos);
+ if(pos != Aws::String::npos &&
+ std::find(COMMENT_START, COMMENT_START + COMMENT_START_SZ, line[pos]) == COMMENT_START + COMMENT_START_SZ)
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "Found unexpected characters after closing bracket of Section Identifier " << line);
+ break;
+ }
+ // the rest is a comment, and we don't care about it.
+ if ((ioState != SSO_SESSION_FOUND && ioState != PROFILE_FOUND) || ioSectionName.empty())
+ {
+ AWS_LOGSTREAM_FATAL(PARSER_TAG, "Unexpected parser state after attempting to parse section " << line);
+ break;
+ }
+ return;
+ } while(0); // end of goto in a form of "do { break; } while(0);"
+
+ ioSectionName.erase();
+ ioState = UNKNOWN_SECTION_FOUND;
+ return;
+ }
+
+ /**
+ * A helper function to store currently being parsed section along with its properties
+ * (i.e. [profile default] and its key1=val1 under).
+ * @param currentState, a current parser State, e.g. PROFILE_FOUND
+ * @param currentSectionName, a current section identifier, e.g. "default"
+ * @param currentKeyValues, a map of parsed key-value properties of a section definition being recorded
+ */
+ void FlushSection(const State currentState, const Aws::String& currentSectionName, Aws::Map<Aws::String, Aws::String>& currentKeyValues)
+ {
+ if(START == currentState || currentSectionName.empty())
+ {
+ return; //nothing to flush
+ }
+
+ if(PROFILE_FOUND == currentState)
+ {
+ Profile& profile = m_foundProfiles[currentSectionName];
+
+ for(const auto& keyVal : currentKeyValues)
+ {
+ auto setterFuncPtr = FindInStaticArray(PROFILE_PROPERTY_FUNCS, keyVal.first);
+ if(setterFuncPtr)
+ {
+ AWS_LOGSTREAM_DEBUG(PARSER_TAG, "Found " << setterFuncPtr->PropertyKey << " " << keyVal.second);
+ setterFuncPtr->Setter(profile, keyVal.second);
+ }
+ else
+ {
+ auto specialPropertyKey = std::find_if(PROFILE_KEY_SPECIAL_HANDLING, PROFILE_KEY_SPECIAL_HANDLING + PROFILE_KEY_SPECIAL_HANDLING_SZ,
+ [&keyVal](const char* entry)
+ {
+ return !!entry && keyVal.first == entry;
+ });
+
+ if (specialPropertyKey && specialPropertyKey != PROFILE_KEY_SPECIAL_HANDLING + PROFILE_KEY_SPECIAL_HANDLING_SZ)
+ {
+ AWS_LOGSTREAM_INFO(PARSER_TAG, "Unknown property: " << keyVal.first << " in the profile: " << currentSectionName);
+ }
+ }
+ }
+
+ auto accessKeyIdIter = currentKeyValues.find(ACCESS_KEY_ID_KEY);
+ Aws::String accessKey, secretKey, sessionToken;
+ if (accessKeyIdIter != currentKeyValues.end())
+ {
+ accessKey = accessKeyIdIter->second;
+ AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found access key " << accessKey);
+
+ auto secretAccessKeyIter = currentKeyValues.find(SECRET_KEY_KEY);
+ auto sessionTokenIter = currentKeyValues.find(SESSION_TOKEN_KEY);
+ if (secretAccessKeyIter != currentKeyValues.end())
+ {
+ secretKey = secretAccessKeyIter->second;
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "No secret access key found even though an access key was specified. This will cause all signed AWS calls to fail.");
+ }
+
+ if (sessionTokenIter != currentKeyValues.end())
+ {
+ sessionToken = sessionTokenIter->second;
+ }
+
+ profile.SetCredentials(Aws::Auth::AWSCredentials(accessKey, secretKey, sessionToken));
+ }
+
+ if (!profile.GetSsoStartUrl().empty() || !profile.GetSsoRegion().empty()
+ || !profile.GetSsoAccountId().empty() || !profile.GetSsoRoleName().empty())
+ {
+ // If there is no sso session, all fields are required. If an SSO session is present,
+ // then only account id and sso role name are required.
+ auto hasSession = currentKeyValues.find(SSO_SESSION_KEY) != currentKeyValues.end();
+ auto hasInvalidProfileWithoutSession = !hasSession &&
+ (profile.GetSsoStartUrl().empty()
+ || profile.GetSsoRegion().empty()
+ || profile.GetSsoAccountId().empty()
+ || profile.GetSsoRoleName().empty());
+ auto hasInvalidProfileWithSession = hasSession &&
+ (profile.GetSsoAccountId().empty()
+ || profile.GetSsoRoleName().empty());
+ if (hasInvalidProfileWithoutSession || hasInvalidProfileWithSession) {
+ profile.SetSsoStartUrl("");
+ profile.SetSsoRegion("");
+ profile.SetSsoAccountId("");
+ profile.SetSsoRoleName("");
+ AWS_LOGSTREAM_ERROR(PARSER_TAG, "invalid SSO configuration for aws profile " << currentSectionName);
+ }
+ }
+
+ profile.SetName(currentSectionName);
+ profile.SetAllKeyValPairs(std::move(currentKeyValues));
+ }
+ else if (SSO_SESSION_FOUND == currentState) {
+ Profile::SsoSession& ssoSession = m_foundSsoSessions[currentSectionName];
+ for(const auto& keyVal : currentKeyValues)
+ {
+ auto setterFuncPtr = FindInStaticArray(SSO_SESSION_PROPERTY_FUNCS, keyVal.first);
+ if(setterFuncPtr)
+ {
+ AWS_LOGSTREAM_DEBUG(PARSER_TAG, "Found sso-session property " << setterFuncPtr->PropertyKey << " " << keyVal.second);
+ setterFuncPtr->Setter(ssoSession, keyVal.second);
+ }
+ else
+ {
+ AWS_LOGSTREAM_INFO(PARSER_TAG, "Unknown property: " << keyVal.first << " in the sso-session: " << currentSectionName);
+ }
+ }
+ ssoSession.SetName(currentSectionName);
+ ssoSession.SetAllKeyValPairs(std::move(currentKeyValues));
+ }
+ else
+ {
+ AWS_LOGSTREAM_FATAL(PARSER_TAG, "Unknown parser error: unexpected state " << currentState);
+ }
+ }
+
+ Aws::Map<String, Profile> m_foundProfiles;
+ Aws::Map<String, Profile::SsoSession> m_foundSsoSessions;
+ };
+
+ static const char* const CONFIG_FILE_LOADER = "Aws::Config::AWSConfigFileProfileConfigLoader";
+
+ AWSConfigFileProfileConfigLoader::AWSConfigFileProfileConfigLoader(const Aws::String& fileName, bool useProfilePrefix) :
+ m_fileName(fileName), m_useProfilePrefix(useProfilePrefix)
+ {
+ AWS_LOGSTREAM_INFO(CONFIG_FILE_LOADER, "Initializing config loader against fileName "
+ << fileName << " and using profilePrefix = " << useProfilePrefix);
+ }
+
+ bool AWSConfigFileProfileConfigLoader::LoadInternal()
+ {
+ m_profiles.clear();
+
+ Aws::IFStream inputFile(m_fileName.c_str());
+ if(inputFile)
+ {
+ ConfigFileProfileFSM parser(m_useProfilePrefix);
+ parser.ParseStream(inputFile);
+ m_profiles = parser.GetProfiles();
+ return m_profiles.size() > 0;
+ }
+
+ AWS_LOGSTREAM_INFO(CONFIG_FILE_LOADER, "Unable to open config file " << m_fileName << " for reading.");
+
+ return false;
+ }
+
+ bool AWSConfigFileProfileConfigLoader::PersistInternal(const Aws::Map<Aws::String, Profile>& profiles)
+ {
+ Aws::OFStream outputFile(m_fileName.c_str(), std::ios_base::out | std::ios_base::trunc);
+ if(outputFile)
+ {
+ Aws::UnorderedMap<Aws::String, std::reference_wrapper<const Profile::SsoSession>> ssoSessionsToDump;
+
+ for(const auto& profile : profiles)
+ {
+ Aws::String prefix = m_useProfilePrefix ? PROFILE_SECTION : "";
+
+ AWS_LOGSTREAM_DEBUG(CONFIG_FILE_LOADER, "Writing profile " << profile.first << " to disk.");
+
+ outputFile << LEFT_BRACKET << prefix << " " << profile.second.GetName() << RIGHT_BRACKET << std::endl;
+ const Aws::Auth::AWSCredentials& credentials = profile.second.GetCredentials();
+ if (!credentials.GetAWSAccessKeyId().empty()) {
+ outputFile << ACCESS_KEY_ID_KEY << EQ << credentials.GetAWSAccessKeyId() << std::endl;
+ }
+ if (!credentials.GetAWSSecretKey().empty()) {
+ outputFile << SECRET_KEY_KEY << EQ << credentials.GetAWSSecretKey() << std::endl;
+ }
+ if(!credentials.GetSessionToken().empty()) {
+ outputFile << SESSION_TOKEN_KEY << EQ << credentials.GetSessionToken() << std::endl;
+ }
+ // credentials.GetExpiration().Millis() <- is not present in a config.
+
+ for(const auto& profilePropertyPair : PROFILE_PROPERTY_FUNCS)
+ {
+ const auto& profilePropertyValue = profilePropertyPair.Getter(profile.second);
+ if(!profilePropertyValue.empty())
+ {
+ outputFile << profilePropertyPair.PropertyKey << EQ << profilePropertyValue << std::endl;
+ }
+ }
+
+ if(profile.second.IsSsoSessionSet())
+ {
+ const auto& ssoSession = profile.second.GetSsoSession();
+ const auto alreadyScheduledForDumpIt = ssoSessionsToDump.find(ssoSession.GetName());
+ if (alreadyScheduledForDumpIt != ssoSessionsToDump.end() &&
+ alreadyScheduledForDumpIt->second.get() != ssoSession)
+ {
+ AWS_LOGSTREAM_WARN(CONFIG_FILE_LOADER, "2 or more profiles reference 'sso-session' section "
+ "with the same name but different properties: " << ssoSession.GetName());
+ }
+ else
+ {
+ ssoSessionsToDump.insert({ssoSession.GetName(), std::cref(ssoSession)});
+ }
+ outputFile << SSO_SESSION_KEY << EQ << ssoSession.GetName() << std::endl;
+ }
+ outputFile << std::endl;
+ }
+
+ for(const auto& ssoSessionPair : ssoSessionsToDump)
+ {
+ AWS_LOGSTREAM_DEBUG(CONFIG_FILE_LOADER, "Writing sso-session " << ssoSessionPair.first << " to disk.");
+ const Profile::SsoSession& ssoSession = ssoSessionPair.second.get();
+ outputFile << LEFT_BRACKET << SSO_SESSION_SECTION << " " << ssoSession.GetName() << RIGHT_BRACKET << std::endl;
+ for(const auto& ssoSessionPropertyPair : SSO_SESSION_PROPERTY_FUNCS)
+ {
+ const auto& profilePropertyValue = ssoSessionPropertyPair.Getter(ssoSession);
+ if(!profilePropertyValue.empty())
+ {
+ outputFile << ssoSessionPropertyPair.PropertyKey << EQ << profilePropertyValue << std::endl;
+ }
+ }
+ outputFile << std::endl;
+ }
+
+ AWS_LOGSTREAM_INFO(CONFIG_FILE_LOADER, "Profiles written to config file " << m_fileName);
+
+ return true;
+ }
+
+ AWS_LOGSTREAM_WARN(CONFIG_FILE_LOADER, "Unable to open config file " << m_fileName << " for writing.");
+
+ return false;
+ }
+ } // Config namespace
+} // Aws namespace
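
For reference, a hypothetical shared config snippet in the shape the new ConfigFileProfileFSM accepts when useProfilePrefix is true: non-default profiles carry the profile keyword, and a profile can reference a shared [sso-session] section via the sso_session key. Shown as a C++ string constant; the profile name, session name, account id, and URL are placeholders.

// Illustrative only; not part of the upstream change.
static const char kSampleSharedConfig[] = R"INI(
[profile my-dev]
region = us-west-2
sso_session = my-sso
sso_account_id = 111122223333
sso_role_name = ReadOnly

[sso-session my-sso]
sso_region = us-east-1
sso_start_url = https://example.awsapps.com/start
)INI";
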
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
deleted file mode 100644
index bb6acd0b3a..0000000000
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoader.cpp
+++ /dev/null
@@ -1,571 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/core/config/AWSProfileConfigLoader.h>
-#include <aws/core/internal/AWSHttpResourceClient.h>
-#include <aws/core/auth/AWSCredentialsProvider.h>
-#include <aws/core/utils/memory/stl/AWSList.h>
-#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
-#include <aws/core/utils/StringUtils.h>
-#include <aws/core/utils/logging/LogMacros.h>
-#include <aws/core/utils/json/JsonSerializer.h>
-#include <fstream>
-
-namespace Aws
-{
- namespace Config
- {
- using namespace Aws::Utils;
- using namespace Aws::Auth;
-
- static const char* const CONFIG_LOADER_TAG = "Aws::Config::AWSProfileConfigLoader";
- #ifdef _MSC_VER
- // VS2015 compiler's bug, warning s_CoreErrorsMapper: symbol will be dynamically initialized (implementation limitation)
- AWS_SUPPRESS_WARNING(4592,
- static Aws::UniquePtr<ConfigAndCredentialsCacheManager> s_configManager(nullptr);
- )
- #else
- static Aws::UniquePtr<ConfigAndCredentialsCacheManager> s_configManager(nullptr);
- #endif
-
- static const char CONFIG_CREDENTIALS_CACHE_MANAGER_TAG[] = "ConfigAndCredentialsCacheManager";
-
- bool AWSProfileConfigLoader::Load()
- {
- if(LoadInternal())
- {
- AWS_LOGSTREAM_INFO(CONFIG_LOADER_TAG, "Successfully reloaded configuration.");
- m_lastLoadTime = DateTime::Now();
- AWS_LOGSTREAM_TRACE(CONFIG_LOADER_TAG, "reloaded config at "
- << m_lastLoadTime.ToGmtString(DateFormat::ISO_8601));
- return true;
- }
-
- AWS_LOGSTREAM_INFO(CONFIG_LOADER_TAG, "Failed to reload configuration.");
- return false;
- }
-
- bool AWSProfileConfigLoader::PersistProfiles(const Aws::Map<Aws::String, Profile>& profiles)
- {
- if(PersistInternal(profiles))
- {
- AWS_LOGSTREAM_INFO(CONFIG_LOADER_TAG, "Successfully persisted configuration.");
- m_profiles = profiles;
- m_lastLoadTime = DateTime::Now();
- AWS_LOGSTREAM_TRACE(CONFIG_LOADER_TAG, "persisted config at "
- << m_lastLoadTime.ToGmtString(DateFormat::ISO_8601));
- return true;
- }
-
- AWS_LOGSTREAM_WARN(CONFIG_LOADER_TAG, "Failed to persist configuration.");
- return false;
- }
-
- static const char REGION_KEY[] = "region";
- static const char ACCESS_KEY_ID_KEY[] = "aws_access_key_id";
- static const char SECRET_KEY_KEY[] = "aws_secret_access_key";
- static const char SESSION_TOKEN_KEY[] = "aws_session_token";
- static const char SSO_START_URL_KEY[] = "sso_start_url";
- static const char SSO_REGION_KEY[] = "sso_region";
- static const char SSO_ACCOUNT_ID_KEY[] = "sso_account_id";
- static const char SSO_ROLE_NAME_KEY[] = "sso_role_name";
- static const char ROLE_ARN_KEY[] = "role_arn";
- static const char EXTERNAL_ID_KEY[] = "external_id";
- static const char CREDENTIAL_PROCESS_COMMAND[] = "credential_process";
- static const char SOURCE_PROFILE_KEY[] = "source_profile";
- static const char PROFILE_PREFIX[] = "profile ";
- static const char EQ = '=';
- static const char LEFT_BRACKET = '[';
- static const char RIGHT_BRACKET = ']';
- static const char PARSER_TAG[] = "Aws::Config::ConfigFileProfileFSM";
-
- class ConfigFileProfileFSM
- {
- public:
- ConfigFileProfileFSM() : m_parserState(START) {}
-
- const Aws::Map<String, Profile>& GetProfiles() const { return m_foundProfiles; }
-
- void ParseStream(Aws::IStream& stream)
- {
- static const size_t ASSUME_EMPTY_LEN = 3;
-
- Aws::String line;
- while(std::getline(stream, line) && m_parserState != FAILURE)
- {
- if (line.empty() || line.length() < ASSUME_EMPTY_LEN)
- {
- continue;
- }
-
- auto openPos = line.find(LEFT_BRACKET);
- auto closePos = line.find(RIGHT_BRACKET);
-
- switch(m_parserState)
- {
-
- case START:
- if(openPos != std::string::npos && closePos != std::string::npos)
- {
- FlushProfileAndReset(line, openPos, closePos);
- m_parserState = PROFILE_FOUND;
- }
- break;
-
- //fallthrough here is intentional to reduce duplicate logic
- case PROFILE_KEY_VALUE_FOUND:
- if(openPos != std::string::npos && closePos != std::string::npos)
- {
- m_parserState = PROFILE_FOUND;
- FlushProfileAndReset(line, openPos, closePos);
- break;
- }
- // fall through
- case PROFILE_FOUND:
- {
- auto equalsPos = line.find(EQ);
- if (equalsPos != std::string::npos)
- {
- auto key = line.substr(0, equalsPos);
- auto value = line.substr(equalsPos + 1);
- m_profileKeyValuePairs[StringUtils::Trim(key.c_str())] =
- StringUtils::Trim(value.c_str());
- m_parserState = PROFILE_KEY_VALUE_FOUND;
- }
-
- break;
- }
- default:
- m_parserState = FAILURE;
- break;
- }
- }
-
- FlushProfileAndReset(line, std::string::npos, std::string::npos);
- }
-
- private:
-
- void FlushProfileAndReset(Aws::String& line, size_t openPos, size_t closePos)
- {
- if(!m_currentWorkingProfile.empty() && !m_profileKeyValuePairs.empty())
- {
- Profile profile;
- profile.SetName(m_currentWorkingProfile);
-
- auto regionIter = m_profileKeyValuePairs.find(REGION_KEY);
- if (regionIter != m_profileKeyValuePairs.end())
- {
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found region " << regionIter->second);
- profile.SetRegion(regionIter->second);
- }
-
- auto accessKeyIdIter = m_profileKeyValuePairs.find(ACCESS_KEY_ID_KEY);
- Aws::String accessKey, secretKey, sessionToken;
- if (accessKeyIdIter != m_profileKeyValuePairs.end())
- {
- accessKey = accessKeyIdIter->second;
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found access key " << accessKey);
-
- auto secretAccessKeyIter = m_profileKeyValuePairs.find(SECRET_KEY_KEY);
- auto sessionTokenIter = m_profileKeyValuePairs.find(SESSION_TOKEN_KEY);
- if (secretAccessKeyIter != m_profileKeyValuePairs.end())
- {
- secretKey = secretAccessKeyIter->second;
- }
- else
- {
- AWS_LOGSTREAM_ERROR(PARSER_TAG, "No secret access key found even though an access key was specified. This will cause all signed AWS calls to fail.");
- }
-
- if (sessionTokenIter != m_profileKeyValuePairs.end())
- {
- sessionToken = sessionTokenIter->second;
- }
-
- profile.SetCredentials(Aws::Auth::AWSCredentials(accessKey, secretKey, sessionToken));
- }
-
- auto ssoStartUrlIter = m_profileKeyValuePairs.find(SSO_START_URL_KEY);
- auto ssoRegionIter = m_profileKeyValuePairs.find(SSO_REGION_KEY);
- auto ssoRoleNameIter = m_profileKeyValuePairs.find(SSO_ROLE_NAME_KEY);
- auto ssoAccountIdIter = m_profileKeyValuePairs.find(SSO_ACCOUNT_ID_KEY);
- if (ssoStartUrlIter != m_profileKeyValuePairs.end()
- || ssoRegionIter != m_profileKeyValuePairs.end()
- || ssoRoleNameIter != m_profileKeyValuePairs.end()
- || ssoAccountIdIter != m_profileKeyValuePairs.end())
- {
- if (ssoStartUrlIter != m_profileKeyValuePairs.end()
- && ssoRegionIter != m_profileKeyValuePairs.end()
- && ssoRoleNameIter != m_profileKeyValuePairs.end()
- && ssoAccountIdIter != m_profileKeyValuePairs.end())
- {
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found sso_start_url " << ssoStartUrlIter->second);
- profile.SetSsoStartUrl(ssoStartUrlIter->second);
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found sso_region " << ssoRegionIter->second);
- profile.SetSsoRegion(ssoRegionIter->second);
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found sso_account_id " << ssoAccountIdIter->second);
- profile.SetSsoAccountId(ssoAccountIdIter->second);
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found sso_role_name " << ssoRoleNameIter->second);
- profile.SetSsoRoleName(ssoRoleNameIter->second);
- } else {
- AWS_LOGSTREAM_ERROR(PARSER_TAG, "invalid configuration for sso profile " << profile.GetName());
- }
- }
-
- auto assumeRoleArnIter = m_profileKeyValuePairs.find(ROLE_ARN_KEY);
- if (assumeRoleArnIter != m_profileKeyValuePairs.end())
- {
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found role arn " << assumeRoleArnIter->second);
- profile.SetRoleArn(assumeRoleArnIter->second);
- }
-
- auto externalIdIter = m_profileKeyValuePairs.find(EXTERNAL_ID_KEY);
- if (externalIdIter != m_profileKeyValuePairs.end())
- {
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found external id " << externalIdIter->second);
- profile.SetExternalId(externalIdIter->second);
- }
-
- auto sourceProfileIter = m_profileKeyValuePairs.find(SOURCE_PROFILE_KEY);
- if (sourceProfileIter != m_profileKeyValuePairs.end())
- {
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found source profile " << sourceProfileIter->second);
- profile.SetSourceProfile(sourceProfileIter->second);
- }
-
- auto credentialProcessIter = m_profileKeyValuePairs.find(CREDENTIAL_PROCESS_COMMAND);
- if (credentialProcessIter != m_profileKeyValuePairs.end())
- {
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found credential process " << credentialProcessIter->second);
- profile.SetCredentialProcess(credentialProcessIter->second);
- }
- profile.SetAllKeyValPairs(m_profileKeyValuePairs);
-
- m_foundProfiles[profile.GetName()] = std::move(profile);
- m_currentWorkingProfile.clear();
- m_profileKeyValuePairs.clear();
- }
-
- if(!line.empty() && openPos != std::string::npos && closePos != std::string::npos)
- {
- m_currentWorkingProfile = StringUtils::Trim(line.substr(openPos + 1, closePos - openPos - 1).c_str());
- StringUtils::Replace(m_currentWorkingProfile, PROFILE_PREFIX, "");
- AWS_LOGSTREAM_DEBUG(PARSER_TAG, "found profile " << m_currentWorkingProfile);
- }
- }
-
- enum State
- {
- START = 0,
- PROFILE_FOUND,
- PROFILE_KEY_VALUE_FOUND,
- FAILURE
- };
-
- Aws::String m_currentWorkingProfile;
- Aws::Map<String, String> m_profileKeyValuePairs;
- State m_parserState;
- Aws::Map<String, Profile> m_foundProfiles;
- };
-
- static const char* const CONFIG_FILE_LOADER = "Aws::Config::AWSConfigFileProfileConfigLoader";
-
- AWSConfigFileProfileConfigLoader::AWSConfigFileProfileConfigLoader(const Aws::String& fileName, bool useProfilePrefix) :
- m_fileName(fileName), m_useProfilePrefix(useProfilePrefix)
- {
- AWS_LOGSTREAM_INFO(CONFIG_FILE_LOADER, "Initializing config loader against fileName "
- << fileName << " and using profilePrefix = " << useProfilePrefix);
- }
-
- bool AWSConfigFileProfileConfigLoader::LoadInternal()
- {
- m_profiles.clear();
-
- Aws::IFStream inputFile(m_fileName.c_str());
- if(inputFile)
- {
- ConfigFileProfileFSM parser;
- parser.ParseStream(inputFile);
- m_profiles = parser.GetProfiles();
- return m_profiles.size() > 0;
- }
-
- AWS_LOGSTREAM_INFO(CONFIG_FILE_LOADER, "Unable to open config file " << m_fileName << " for reading.");
-
- return false;
- }
-
- bool AWSConfigFileProfileConfigLoader::PersistInternal(const Aws::Map<Aws::String, Profile>& profiles)
- {
- Aws::OFStream outputFile(m_fileName.c_str(), std::ios_base::out | std::ios_base::trunc);
- if(outputFile)
- {
- for(auto& profile : profiles)
- {
- Aws::String prefix = m_useProfilePrefix ? PROFILE_PREFIX : "";
-
- AWS_LOGSTREAM_DEBUG(CONFIG_FILE_LOADER, "Writing profile " << profile.first << " to disk.");
-
- outputFile << LEFT_BRACKET << prefix << profile.second.GetName() << RIGHT_BRACKET << std::endl;
- const Aws::Auth::AWSCredentials& credentials = profile.second.GetCredentials();
- outputFile << ACCESS_KEY_ID_KEY << EQ << credentials.GetAWSAccessKeyId() << std::endl;
- outputFile << SECRET_KEY_KEY << EQ << credentials.GetAWSSecretKey() << std::endl;
-
- if(!credentials.GetSessionToken().empty())
- {
- outputFile << SESSION_TOKEN_KEY << EQ << credentials.GetSessionToken() << std::endl;
- }
-
- if(!profile.second.GetRegion().empty())
- {
- outputFile << REGION_KEY << EQ << profile.second.GetRegion() << std::endl;
- }
-
- if(!profile.second.GetRoleArn().empty())
- {
- outputFile << ROLE_ARN_KEY << EQ << profile.second.GetRoleArn() << std::endl;
- }
-
- if(!profile.second.GetSourceProfile().empty())
- {
- outputFile << SOURCE_PROFILE_KEY << EQ << profile.second.GetSourceProfile() << std::endl;
- }
-
- outputFile << std::endl;
- }
-
- AWS_LOGSTREAM_INFO(CONFIG_FILE_LOADER, "Profiles written to config file " << m_fileName);
-
- return true;
- }
-
- AWS_LOGSTREAM_WARN(CONFIG_FILE_LOADER, "Unable to open config file " << m_fileName << " for writing.");
-
- return false;
- }
-
- static const char* const EC2_INSTANCE_PROFILE_LOG_TAG = "Aws::Config::EC2InstanceProfileConfigLoader";
-
- EC2InstanceProfileConfigLoader::EC2InstanceProfileConfigLoader(const std::shared_ptr<Aws::Internal::EC2MetadataClient>& client)
- : m_ec2metadataClient(client == nullptr ? Aws::MakeShared<Aws::Internal::EC2MetadataClient>(EC2_INSTANCE_PROFILE_LOG_TAG) : client)
- {
- }
-
- bool EC2InstanceProfileConfigLoader::LoadInternal()
- {
- auto credentialsStr = m_ec2metadataClient->GetDefaultCredentialsSecurely();
- if(credentialsStr.empty()) return false;
-
- Json::JsonValue credentialsDoc(credentialsStr);
- if (!credentialsDoc.WasParseSuccessful())
- {
- AWS_LOGSTREAM_ERROR(EC2_INSTANCE_PROFILE_LOG_TAG,
- "Failed to parse output from EC2MetadataService.");
- return false;
- }
- const char* accessKeyId = "AccessKeyId";
- const char* secretAccessKey = "SecretAccessKey";
- Aws::String accessKey, secretKey, token;
-
- auto credentialsView = credentialsDoc.View();
- accessKey = credentialsView.GetString(accessKeyId);
- AWS_LOGSTREAM_INFO(EC2_INSTANCE_PROFILE_LOG_TAG,
- "Successfully pulled credentials from metadata service with access key " << accessKey);
-
- secretKey = credentialsView.GetString(secretAccessKey);
- token = credentialsView.GetString("Token");
-
- auto region = m_ec2metadataClient->GetCurrentRegion();
-
- Profile profile;
- profile.SetCredentials(AWSCredentials(accessKey, secretKey, token));
- profile.SetRegion(region);
- profile.SetName(INSTANCE_PROFILE_KEY);
-
- m_profiles[INSTANCE_PROFILE_KEY] = profile;
-
- return true;
- }
-
- ConfigAndCredentialsCacheManager::ConfigAndCredentialsCacheManager() :
- m_credentialsFileLoader(Aws::Auth::ProfileConfigFileAWSCredentialsProvider::GetCredentialsProfileFilename()),
- m_configFileLoader(Aws::Auth::GetConfigProfileFilename(), true/*use profile prefix*/)
- {
- ReloadCredentialsFile();
- ReloadConfigFile();
- }
-
- void ConfigAndCredentialsCacheManager::ReloadConfigFile()
- {
- Aws::Utils::Threading::WriterLockGuard guard(m_configLock);
- m_configFileLoader.SetFileName(Aws::Auth::GetConfigProfileFilename());
- m_configFileLoader.Load();
- }
-
- void ConfigAndCredentialsCacheManager::ReloadCredentialsFile()
- {
- Aws::Utils::Threading::WriterLockGuard guard(m_credentialsLock);
- m_credentialsFileLoader.SetFileName(Aws::Auth::ProfileConfigFileAWSCredentialsProvider::GetCredentialsProfileFilename());
- m_credentialsFileLoader.Load();
- }
-
- bool ConfigAndCredentialsCacheManager::HasConfigProfile(const Aws::String& profileName) const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
- return (m_configFileLoader.GetProfiles().count(profileName) == 1);
- }
-
- Aws::Config::Profile ConfigAndCredentialsCacheManager::GetConfigProfile(const Aws::String& profileName) const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
- const auto& profiles = m_configFileLoader.GetProfiles();
- const auto &iter = profiles.find(profileName);
- if (iter == profiles.end())
- {
- return {};
- }
- return iter->second;
- }
-
- Aws::Map<Aws::String, Aws::Config::Profile> ConfigAndCredentialsCacheManager::GetConfigProfiles() const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
- return m_configFileLoader.GetProfiles();
- }
-
- Aws::String ConfigAndCredentialsCacheManager::GetConfig(const Aws::String& profileName, const Aws::String& key) const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
- const auto& profiles = m_configFileLoader.GetProfiles();
- const auto &iter = profiles.find(profileName);
- if (iter == profiles.end())
- {
- return {};
- }
- return iter->second.GetValue(key);
- }
-
- bool ConfigAndCredentialsCacheManager::HasCredentialsProfile(const Aws::String& profileName) const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
- return (m_credentialsFileLoader.GetProfiles().count(profileName) == 1);
- }
-
- Aws::Config::Profile ConfigAndCredentialsCacheManager::GetCredentialsProfile(const Aws::String& profileName) const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
- const auto &profiles = m_credentialsFileLoader.GetProfiles();
- const auto &iter = profiles.find(profileName);
- if (iter == profiles.end())
- {
- return {};
- }
- return iter->second;
- }
-
- Aws::Map<Aws::String, Aws::Config::Profile> ConfigAndCredentialsCacheManager::GetCredentialsProfiles() const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
- return m_credentialsFileLoader.GetProfiles();
- }
-
- Aws::Auth::AWSCredentials ConfigAndCredentialsCacheManager::GetCredentials(const Aws::String& profileName) const
- {
- Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
- const auto& profiles = m_credentialsFileLoader.GetProfiles();
- const auto &iter = profiles.find(profileName);
- if (iter == profiles.end())
- {
- return {};
- }
- return iter->second.GetCredentials();
- }
-
- void InitConfigAndCredentialsCacheManager()
- {
- if (s_configManager)
- {
- return;
- }
- s_configManager = Aws::MakeUnique<ConfigAndCredentialsCacheManager>(CONFIG_CREDENTIALS_CACHE_MANAGER_TAG);
- }
-
- void CleanupConfigAndCredentialsCacheManager()
- {
- if (!s_configManager)
- {
- return;
- }
- s_configManager = nullptr;
- }
-
- void ReloadCachedConfigFile()
- {
- assert(s_configManager);
- s_configManager->ReloadConfigFile();
- }
-
- void ReloadCachedCredentialsFile()
- {
- assert(s_configManager);
- s_configManager->ReloadCredentialsFile();
- }
-
- bool HasCachedConfigProfile(const Aws::String& profileName)
- {
- assert(s_configManager);
- return s_configManager->HasConfigProfile(profileName);
- }
-
- Aws::Config::Profile GetCachedConfigProfile(const Aws::String& profileName)
- {
- assert(s_configManager);
- return s_configManager->GetConfigProfile(profileName);
- }
-
- Aws::Map<Aws::String, Aws::Config::Profile> GetCachedConfigProfiles()
- {
- assert(s_configManager);
- return s_configManager->GetConfigProfiles();
- }
-
- Aws::String GetCachedConfigValue(const Aws::String &profileName, const Aws::String &key)
- {
- assert(s_configManager);
- return s_configManager->GetConfig(profileName, key);
- }
-
- Aws::String GetCachedConfigValue(const Aws::String &key)
- {
- assert(s_configManager);
- return s_configManager->GetConfig(Aws::Auth::GetConfigProfileName(), key);
- }
-
- bool HasCachedCredentialsProfile(const Aws::String& profileName)
- {
- assert(s_configManager);
- return s_configManager->HasCredentialsProfile(profileName);
- }
-
- Aws::Config::Profile GetCachedCredentialsProfile(const Aws::String &profileName)
- {
- assert(s_configManager);
- return s_configManager->GetCredentialsProfile(profileName);
- }
-
- Aws::Map<Aws::String, Aws::Config::Profile> GetCachedCredentialsProfiles()
- {
- assert(s_configManager);
- return s_configManager->GetCredentialsProfiles();
- }
-
- Aws::Auth::AWSCredentials GetCachedCredentials(const Aws::String &profileName)
- {
- assert(s_configManager);
- return s_configManager->GetCredentials(profileName);
- }
- } // Config namespace
-} // Aws namespace
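The loader removed above walks the shared config/credentials file with a small state machine: a bracketed header starts a profile, key=value lines accumulate into it, and FlushProfileAndReset publishes the accumulated pairs as a Profile. A minimal standalone sketch of the same INI-style pass, using plain std::string and a hypothetical ParseProfiles helper rather than the SDK types:

    #include <istream>
    #include <map>
    #include <string>

    // Collect "[profile <name>]" sections and their key = value pairs.
    std::map<std::string, std::map<std::string, std::string>> ParseProfiles(std::istream& in)
    {
        auto trim = [](const std::string& s) {
            const char* ws = " \t\r\n";
            const auto first = s.find_first_not_of(ws);
            if (first == std::string::npos) return std::string();
            const auto last = s.find_last_not_of(ws);
            return s.substr(first, last - first + 1);
        };

        std::map<std::string, std::map<std::string, std::string>> profiles;
        std::string line, current;
        while (std::getline(in, line))
        {
            line = trim(line);
            if (line.empty()) continue;

            if (line.front() == '[' && line.back() == ']')
            {
                current = trim(line.substr(1, line.size() - 2));
                const std::string prefix = "profile ";   // config-file sections use "[profile name]"
                if (current.compare(0, prefix.size(), prefix) == 0)
                    current = current.substr(prefix.size());
                continue;
            }

            const auto eq = line.find('=');
            if (eq != std::string::npos && !current.empty())
                profiles[current][trim(line.substr(0, eq))] = trim(line.substr(eq + 1));
        }
        return profiles;
    }

Feeding it "[profile dev]\nregion = us-west-2\n" yields one profile named "dev" with a single region key, mirroring what the removed FSM produced.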
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
new file mode 100644
index 0000000000..cb7b19d0ce
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/AWSProfileConfigLoaderBase.cpp
@@ -0,0 +1,50 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/config/AWSProfileConfigLoaderBase.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <fstream>
+
+namespace Aws
+{
+ namespace Config
+ {
+ using namespace Aws::Utils;
+ using namespace Aws::Auth;
+
+ static const char* const CONFIG_LOADER_BASE_TAG = "Aws::Config::AWSProfileConfigLoaderBase";
+
+ bool AWSProfileConfigLoader::Load()
+ {
+ if(LoadInternal())
+ {
+ AWS_LOGSTREAM_INFO(CONFIG_LOADER_BASE_TAG, "Successfully reloaded configuration.");
+ m_lastLoadTime = DateTime::Now();
+ AWS_LOGSTREAM_TRACE(CONFIG_LOADER_BASE_TAG, "reloaded config at "
+ << m_lastLoadTime.ToGmtString(DateFormat::ISO_8601));
+ return true;
+ }
+
+ AWS_LOGSTREAM_INFO(CONFIG_LOADER_BASE_TAG, "Failed to reload configuration.");
+ return false;
+ }
+
+ bool AWSProfileConfigLoader::PersistProfiles(const Aws::Map<Aws::String, Profile>& profiles)
+ {
+ if(PersistInternal(profiles))
+ {
+ AWS_LOGSTREAM_INFO(CONFIG_LOADER_BASE_TAG, "Successfully persisted configuration.");
+ m_profiles = profiles;
+ m_lastLoadTime = DateTime::Now();
+ AWS_LOGSTREAM_TRACE(CONFIG_LOADER_BASE_TAG, "persisted config at "
+ << m_lastLoadTime.ToGmtString(DateFormat::ISO_8601));
+ return true;
+ }
+
+ AWS_LOGSTREAM_WARN(CONFIG_LOADER_BASE_TAG, "Failed to persist configuration.");
+ return false;
+ }
+ } // Config namespace
+} // Aws namespace
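Load() and PersistProfiles() above are template methods: the base class handles logging, timestamping, and the m_profiles cache, while subclasses supply the storage-specific LoadInternal()/PersistInternal(). A hypothetical in-memory loader, sketched against the interface this file implies:

    #include <aws/core/config/AWSProfileConfigLoaderBase.h>
    #include <utility>

    // Illustrative subclass whose backing store is an in-memory map.
    class InMemoryProfileLoader : public Aws::Config::AWSProfileConfigLoader
    {
    public:
        explicit InMemoryProfileLoader(Aws::Map<Aws::String, Aws::Config::Profile> store)
            : m_store(std::move(store)) {}

    protected:
        bool LoadInternal() override
        {
            m_profiles = m_store;   // base-class cache surfaced via GetProfiles()
            return !m_profiles.empty();
        }

        bool PersistInternal(const Aws::Map<Aws::String, Aws::Config::Profile>& profiles) override
        {
            m_store = profiles;     // on success the base class also refreshes m_profiles
            return true;
        }

    private:
        Aws::Map<Aws::String, Aws::Config::Profile> m_store;
    };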
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
new file mode 100644
index 0000000000..b47fe72a0a
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/ConfigAndCredentialsCacheManager.cpp
@@ -0,0 +1,206 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/config/ConfigAndCredentialsCacheManager.h>
+#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/utils/memory/stl/AWSList.h>
+#include <aws/core/utils/json/JsonSerializer.h>
+#include <fstream>
+
+namespace Aws
+{
+ namespace Config
+ {
+ using namespace Aws::Utils;
+ using namespace Aws::Auth;
+
+ #ifdef _MSC_VER
+            // VS2015 compiler bug: warning C4592, symbol 's_configManager' will be dynamically initialized (implementation limitation)
+ AWS_SUPPRESS_WARNING(4592,
+ static ConfigAndCredentialsCacheManager* s_configManager(nullptr);
+ )
+ #else
+ static ConfigAndCredentialsCacheManager* s_configManager(nullptr);
+ #endif
+
+ static const char CONFIG_CREDENTIALS_CACHE_MANAGER_TAG[] = "ConfigAndCredentialsCacheManager";
+
+
+ ConfigAndCredentialsCacheManager::ConfigAndCredentialsCacheManager() :
+ m_credentialsFileLoader(Aws::Auth::ProfileConfigFileAWSCredentialsProvider::GetCredentialsProfileFilename()),
+ m_configFileLoader(Aws::Auth::GetConfigProfileFilename(), true/*use profile prefix*/)
+ {
+ ReloadCredentialsFile();
+ ReloadConfigFile();
+ }
+
+ void ConfigAndCredentialsCacheManager::ReloadConfigFile()
+ {
+ Aws::Utils::Threading::WriterLockGuard guard(m_configLock);
+ m_configFileLoader.SetFileName(Aws::Auth::GetConfigProfileFilename());
+ m_configFileLoader.Load();
+ }
+
+ void ConfigAndCredentialsCacheManager::ReloadCredentialsFile()
+ {
+ Aws::Utils::Threading::WriterLockGuard guard(m_credentialsLock);
+ m_credentialsFileLoader.SetFileName(Aws::Auth::ProfileConfigFileAWSCredentialsProvider::GetCredentialsProfileFilename());
+ m_credentialsFileLoader.Load();
+ }
+
+ bool ConfigAndCredentialsCacheManager::HasConfigProfile(const Aws::String& profileName) const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
+ return (m_configFileLoader.GetProfiles().count(profileName) == 1);
+ }
+
+ Aws::Config::Profile ConfigAndCredentialsCacheManager::GetConfigProfile(const Aws::String& profileName) const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
+ const auto& profiles = m_configFileLoader.GetProfiles();
+ const auto &iter = profiles.find(profileName);
+ if (iter == profiles.end())
+ {
+ return {};
+ }
+ return iter->second;
+ }
+
+ Aws::Map<Aws::String, Aws::Config::Profile> ConfigAndCredentialsCacheManager::GetConfigProfiles() const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
+ return m_configFileLoader.GetProfiles();
+ }
+
+ Aws::String ConfigAndCredentialsCacheManager::GetConfig(const Aws::String& profileName, const Aws::String& key) const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_configLock);
+ const auto& profiles = m_configFileLoader.GetProfiles();
+ const auto &iter = profiles.find(profileName);
+ if (iter == profiles.end())
+ {
+ return {};
+ }
+ return iter->second.GetValue(key);
+ }
+
+ bool ConfigAndCredentialsCacheManager::HasCredentialsProfile(const Aws::String& profileName) const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
+ return (m_credentialsFileLoader.GetProfiles().count(profileName) == 1);
+ }
+
+ Aws::Config::Profile ConfigAndCredentialsCacheManager::GetCredentialsProfile(const Aws::String& profileName) const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
+ const auto &profiles = m_credentialsFileLoader.GetProfiles();
+ const auto &iter = profiles.find(profileName);
+ if (iter == profiles.end())
+ {
+ return {};
+ }
+ return iter->second;
+ }
+
+ Aws::Map<Aws::String, Aws::Config::Profile> ConfigAndCredentialsCacheManager::GetCredentialsProfiles() const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
+ return m_credentialsFileLoader.GetProfiles();
+ }
+
+ Aws::Auth::AWSCredentials ConfigAndCredentialsCacheManager::GetCredentials(const Aws::String& profileName) const
+ {
+ Aws::Utils::Threading::ReaderLockGuard guard(m_credentialsLock);
+ const auto& profiles = m_credentialsFileLoader.GetProfiles();
+ const auto &iter = profiles.find(profileName);
+ if (iter == profiles.end())
+ {
+ return {};
+ }
+ return iter->second.GetCredentials();
+ }
+
+ void InitConfigAndCredentialsCacheManager()
+ {
+ if (s_configManager)
+ {
+ return;
+ }
+ s_configManager = Aws::New<ConfigAndCredentialsCacheManager>(CONFIG_CREDENTIALS_CACHE_MANAGER_TAG);
+ }
+
+ void CleanupConfigAndCredentialsCacheManager()
+ {
+ Aws::Delete(s_configManager);
+ s_configManager = nullptr;
+ }
+
+ void ReloadCachedConfigFile()
+ {
+ assert(s_configManager);
+ s_configManager->ReloadConfigFile();
+ }
+
+ void ReloadCachedCredentialsFile()
+ {
+ assert(s_configManager);
+ s_configManager->ReloadCredentialsFile();
+ }
+
+ bool HasCachedConfigProfile(const Aws::String& profileName)
+ {
+ assert(s_configManager);
+ return s_configManager->HasConfigProfile(profileName);
+ }
+
+ Aws::Config::Profile GetCachedConfigProfile(const Aws::String& profileName)
+ {
+ assert(s_configManager);
+ return s_configManager->GetConfigProfile(profileName);
+ }
+
+ Aws::Map<Aws::String, Aws::Config::Profile> GetCachedConfigProfiles()
+ {
+ assert(s_configManager);
+ return s_configManager->GetConfigProfiles();
+ }
+
+ Aws::String GetCachedConfigValue(const Aws::String &profileName, const Aws::String &key)
+ {
+ assert(s_configManager);
+ return s_configManager->GetConfig(profileName, key);
+ }
+
+ Aws::String GetCachedConfigValue(const Aws::String &key)
+ {
+ assert(s_configManager);
+ return s_configManager->GetConfig(Aws::Auth::GetConfigProfileName(), key);
+ }
+
+ bool HasCachedCredentialsProfile(const Aws::String& profileName)
+ {
+ assert(s_configManager);
+ return s_configManager->HasCredentialsProfile(profileName);
+ }
+
+ Aws::Config::Profile GetCachedCredentialsProfile(const Aws::String &profileName)
+ {
+ assert(s_configManager);
+ return s_configManager->GetCredentialsProfile(profileName);
+ }
+
+ Aws::Map<Aws::String, Aws::Config::Profile> GetCachedCredentialsProfiles()
+ {
+ assert(s_configManager);
+ return s_configManager->GetCredentialsProfiles();
+ }
+
+ Aws::Auth::AWSCredentials GetCachedCredentials(const Aws::String &profileName)
+ {
+ assert(s_configManager);
+ return s_configManager->GetCredentials(profileName);
+ }
+ } // Config namespace
+} // Aws namespace
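The free functions in the second half of this file are the consumer-facing surface of the cache; a short usage sketch (in normal use the init and cleanup calls happen during SDK startup and shutdown rather than being called directly):

    #include <aws/core/auth/AWSCredentials.h>
    #include <aws/core/config/ConfigAndCredentialsCacheManager.h>
    #include <iostream>

    void DumpDefaultProfile()
    {
        Aws::Config::InitConfigAndCredentialsCacheManager();

        // Lookups go through reader locks over the cached ~/.aws/config snapshot.
        if (Aws::Config::HasCachedConfigProfile("default"))
        {
            std::cout << "region: "
                      << Aws::Config::GetCachedConfigValue("default", "region") << "\n";
        }

        // Credentials come from the cached ~/.aws/credentials snapshot.
        const Aws::Auth::AWSCredentials creds = Aws::Config::GetCachedCredentials("default");
        std::cout << "access key id: " << creds.GetAWSAccessKeyId() << "\n";

        Aws::Config::CleanupConfigAndCredentialsCacheManager();
    }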
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
new file mode 100644
index 0000000000..0b505b2c00
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/EC2InstanceProfileConfigLoader.cpp
@@ -0,0 +1,112 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/config/AWSProfileConfigLoader.h>
+#include <aws/core/internal/AWSHttpResourceClient.h>
+#include <aws/core/auth/AWSCredentialsProvider.h>
+#include <aws/core/utils/memory/stl/AWSList.h>
+#include <aws/core/utils/logging/LogMacros.h>
+#include <aws/core/utils/json/JsonSerializer.h>
+#include <fstream>
+#include <random>
+
+namespace Aws
+{
+ namespace Config
+ {
+ using namespace Aws::Utils;
+ using namespace Aws::Auth;
+
+ static const char* const INTERNAL_EXCEPTION_PHRASE = "InternalServiceException";
+ static const int64_t FIVE_MINUTE_MILLIS = 60000 * 5;
+ static const int64_t TEN_MINUTE_MILLIS = 60000 * 10;
+
+ static const char* const EC2_INSTANCE_PROFILE_LOG_TAG = "Aws::Config::EC2InstanceProfileConfigLoader";
+
+ EC2InstanceProfileConfigLoader::EC2InstanceProfileConfigLoader(const std::shared_ptr<Aws::Internal::EC2MetadataClient>& client)
+ {
+ if(client == nullptr)
+ {
+ Aws::Internal::InitEC2MetadataClient();
+ m_ec2metadataClient = Aws::Internal::GetEC2MetadataClient();
+ }
+ else
+ {
+ m_ec2metadataClient = client;
+ }
+ }
+
+ bool EC2InstanceProfileConfigLoader::LoadInternal()
+ {
+ // re-use old credentials until we need to call IMDS again.
+ if (DateTime::Now().Millis() < this->credentialsValidUntilMillis) {
+ AWS_LOGSTREAM_ERROR(EC2_INSTANCE_PROFILE_LOG_TAG,
+ "Skipping IMDS call until " << this->credentialsValidUntilMillis);
+ return true;
+ }
+ this->credentialsValidUntilMillis = DateTime::Now().Millis();
+
+ if (!m_ec2metadataClient) {
+ AWS_LOGSTREAM_FATAL(EC2_INSTANCE_PROFILE_LOG_TAG, "EC2MetadataClient is a nullptr!")
+ return false;
+ }
+ auto credentialsStr = m_ec2metadataClient->GetDefaultCredentialsSecurely();
+ if(credentialsStr.empty()) return false;
+
+ Json::JsonValue credentialsDoc(credentialsStr);
+ if (!credentialsDoc.WasParseSuccessful())
+ {
+ AWS_LOGSTREAM_ERROR(EC2_INSTANCE_PROFILE_LOG_TAG,
+ "Failed to parse output from EC2MetadataService.");
+ return false;
+ }
+
+ const char* accessKeyId = "AccessKeyId";
+ const char* secretAccessKey = "SecretAccessKey";
+ const char* expiration = "Expiration";
+ const char* code = "Code";
+ Aws::String accessKey, secretKey, token;
+
+ auto credentialsView = credentialsDoc.View();
+ DateTime expirationTime(credentialsView.GetString(expiration), Aws::Utils::DateFormat::ISO_8601);
+            // re-use old credentials and do not block if the IMDS call failed or if the latest credential has already expired
+ if (expirationTime.WasParseSuccessful() && DateTime::Now() > expirationTime) {
+ AWS_LOGSTREAM_ERROR(EC2_INSTANCE_PROFILE_LOG_TAG,
+ "Expiration Time of Credentials in the past, refusing to update credentials");
+ this->credentialsValidUntilMillis = DateTime::Now().Millis() + calculateRetryTime();
+ return true;
+ } else if (credentialsView.GetString(code) == INTERNAL_EXCEPTION_PHRASE) {
+ AWS_LOGSTREAM_ERROR(EC2_INSTANCE_PROFILE_LOG_TAG,
+ "IMDS call failed, refusing to update credentials");
+ this->credentialsValidUntilMillis = DateTime::Now().Millis() + calculateRetryTime();
+ return true;
+ }
+ accessKey = credentialsView.GetString(accessKeyId);
+ AWS_LOGSTREAM_INFO(EC2_INSTANCE_PROFILE_LOG_TAG,
+ "Successfully pulled credentials from metadata service with access key " << accessKey);
+
+ secretKey = credentialsView.GetString(secretAccessKey);
+ token = credentialsView.GetString("Token");
+
+ auto region = m_ec2metadataClient->GetCurrentRegion();
+
+ Profile profile;
+ profile.SetCredentials(AWSCredentials(accessKey, secretKey, token));
+ profile.SetRegion(region);
+ profile.SetName(INSTANCE_PROFILE_KEY);
+
+ m_profiles[INSTANCE_PROFILE_KEY] = profile;
+
+ return true;
+ }
+
+ int64_t EC2InstanceProfileConfigLoader::calculateRetryTime() const {
+ std::random_device rd;
+ std::mt19937_64 gen(rd());
+ std::uniform_int_distribution<int64_t> dist(FIVE_MINUTE_MILLIS, TEN_MINUTE_MILLIS);
+ return dist(gen);
+ }
+ } // Config namespace
+} // Aws namespace
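calculateRetryTime() above draws a uniformly random back-off between five and ten minutes before the next IMDS attempt; the same jitter in isolation:

    #include <cstdint>
    #include <random>

    // Pick a retry delay uniformly in [5, 10] minutes, expressed in milliseconds.
    int64_t JitteredImdsRetryMillis()
    {
        static constexpr int64_t kFiveMinutesMs = 5 * 60 * 1000;
        static constexpr int64_t kTenMinutesMs  = 10 * 60 * 1000;
        std::random_device rd;
        std::mt19937_64 gen(rd());
        std::uniform_int_distribution<int64_t> dist(kFiveMinutesMs, kTenMinutesMs);
        return dist(gen);
    }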
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
new file mode 100644
index 0000000000..7b54066fb3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/config/defaults/ClientConfigurationDefaults.cpp
@@ -0,0 +1,197 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * Please note that this file is autogenerated.
+ * The backwards compatibility of the default values provided by new client configuration defaults is not guaranteed;
+ * the values might change over time.
+ */
+
+#include <aws/common/platform.h> // for AWS_OS_IOS macro
+#include <aws/core/config/defaults/ClientConfigurationDefaults.h>
+#include <aws/core/config/AWSProfileConfigLoader.h>
+#include <aws/core/client/ClientConfiguration.h>
+#include <aws/core/internal/AWSHttpResourceClient.h>
+#include <aws/core/platform/Environment.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/logging/LogMacros.h>
+
+namespace Aws
+{
+ namespace Config
+ {
+ namespace Defaults
+ {
+ static const char* CLIENT_CONFIG_DEFAULTS_TAG = "ClientConfigurationDefaults";
+
+ void SetSmartDefaultsConfigurationParameters(Aws::Client::ClientConfiguration& clientConfig,
+ const Aws::String& defaultMode,
+ bool hasEc2MetadataRegion,
+ const Aws::String& ec2MetadataRegion)
+ {
+ const Aws::String caseInsensitiveMode = ResolveDefaultModeName(clientConfig,
+ defaultMode,
+ Aws::Config::GetCachedConfigValue("defaults_mode"),
+ hasEc2MetadataRegion,
+ ec2MetadataRegion);
+
+ if(caseInsensitiveMode == "legacy")
+ {
+ return SetLegacyClientConfiguration(clientConfig);
+ }
+
+ if(caseInsensitiveMode == "standard")
+ {
+ return SetStandardClientConfiguration(clientConfig);
+ }
+
+ if(caseInsensitiveMode == "in-region")
+ {
+ return SetInRegionClientConfiguration(clientConfig);
+ }
+
+ if(caseInsensitiveMode == "cross-region")
+ {
+ return SetCrossRegionClientConfiguration(clientConfig);
+ }
+
+ if(caseInsensitiveMode == "mobile")
+ {
+ return SetMobileClientConfiguration(clientConfig);
+ }
+ return SetLegacyClientConfiguration(clientConfig);
+ }
+
+ bool isMobile()
+ {
+#if defined(AWS_OS_IOS) || defined (__ANDROID__)
+ return true;
+#else
+ return false;
+#endif
+ }
+
+ const char* ResolveAutoClientConfiguration(const Aws::Client::ClientConfiguration& clientConfig,
+ const Aws::String& ec2MetadataRegion)
+ {
+            // Check if we're on mobile; the CPP SDK is statically built, so we can check how we were built
+ if(isMobile())
+ {
+ return "mobile";
+ }
+ // We're not on mobile (best we can tell). See if we can determine whether we're an in-region or
+ // cross-region client.
+ Aws::String current_region;
+ Aws::String env_region = Aws::Environment::GetEnv("AWS_DEFAULT_REGION");
+ if(!Aws::Environment::GetEnv("AWS_EXECUTION_ENV").empty())
+ {
+ // We're running in an AWS service environment, so we can trust the region environment variables
+ // to be the current region, if they're set
+ current_region = Aws::Environment::GetEnv("AWS_REGION");
+ if(current_region.empty())
+ {
+ current_region = Aws::Environment::GetEnv("AWS_DEFAULT_REGION");
+ }
+ }
+ if(current_region.empty())
+ {
+ current_region = ec2MetadataRegion;
+ }
+ if(!current_region.empty() && !clientConfig.region.empty())
+ {
+ if(current_region == clientConfig.region)
+ {
+ return "in-region";
+ }
+ else
+ {
+ return "cross-region";
+ }
+ }
+ // We don't seem to be mobile, and we couldn't determine whether we're running within an AWS region.
+ // Fall back to standard.
+ return "standard";
+ }
+
+ void SetLegacyClientConfiguration(Aws::Client::ClientConfiguration& clientConfig)
+ {
+ clientConfig.retryStrategy = Aws::Client::InitRetryStrategy("default");
+ }
+
+ void SetStandardClientConfiguration(Aws::Client::ClientConfiguration& clientConfig)
+ {
+ clientConfig.connectTimeoutMs = 3100;
+ clientConfig.retryStrategy = Aws::Client::InitRetryStrategy("standard");
+ }
+
+ void SetInRegionClientConfiguration(Aws::Client::ClientConfiguration& clientConfig)
+ {
+ clientConfig.connectTimeoutMs = 1100;
+ clientConfig.retryStrategy = Aws::Client::InitRetryStrategy("standard");
+ }
+
+ void SetCrossRegionClientConfiguration(Aws::Client::ClientConfiguration& clientConfig)
+ {
+ clientConfig.connectTimeoutMs = 3100;
+ clientConfig.retryStrategy = Aws::Client::InitRetryStrategy("standard");
+ }
+
+ void SetMobileClientConfiguration(Aws::Client::ClientConfiguration& clientConfig)
+ {
+ clientConfig.connectTimeoutMs = 30000;
+ clientConfig.retryStrategy = Aws::Client::InitRetryStrategy("standard");
+ }
+
+ Aws::String ResolveDefaultModeName(const Aws::Client::ClientConfiguration& clientConfig,
+ Aws::String requestedDefaultMode,
+ const Aws::String& configFileDefaultMode,
+ bool hasEc2MetadataRegion,
+ Aws::String ec2MetadataRegion)
+ {
+ if (requestedDefaultMode.empty())
+ {
+ requestedDefaultMode = Aws::Environment::GetEnv("AWS_DEFAULTS_MODE");
+ }
+ if (requestedDefaultMode.empty())
+ {
+ requestedDefaultMode = configFileDefaultMode;
+ }
+ if (Aws::Utils::StringUtils::ToLower(requestedDefaultMode.c_str()) == "auto")
+ {
+ if (!hasEc2MetadataRegion &&
+ Aws::Utils::StringUtils::ToLower(Aws::Environment::GetEnv("AWS_EC2_METADATA_DISABLED").c_str()) != "true")
+ {
+ auto client = Aws::Internal::GetEC2MetadataClient();
+ if (client)
+ {
+ ec2MetadataRegion = client->GetCurrentRegion();
+ }
+ }
+ requestedDefaultMode = ResolveAutoClientConfiguration(clientConfig, ec2MetadataRegion);
+ return requestedDefaultMode;
+ }
+ if (requestedDefaultMode.empty())
+ {
+ requestedDefaultMode = "legacy";
+ return requestedDefaultMode;
+ }
+
+ requestedDefaultMode = Aws::Utils::StringUtils::ToLower(requestedDefaultMode.c_str());
+ if (requestedDefaultMode != "legacy" &&
+ requestedDefaultMode != "standard" &&
+ requestedDefaultMode != "in-region" &&
+ requestedDefaultMode != "cross-region" &&
+ requestedDefaultMode != "mobile")
+ {
+                    AWS_LOGSTREAM_WARN(CLIENT_CONFIG_DEFAULTS_TAG, "User specified client configuration: ["
+                                       << requestedDefaultMode
+                                       << "] is not a known defaults mode; the SDK default legacy mode will be used.");
+ requestedDefaultMode = "legacy";
+ }
+ return requestedDefaultMode;
+ }
+ } //namespace Defaults
+ } //namespace Config
+} //namespace Aws
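A usage sketch of the entry point defined above. The mode string follows the resolution order in ResolveDefaultModeName: the explicit argument, then the AWS_DEFAULTS_MODE environment variable, then the defaults_mode config key, and finally "legacy"; "auto" additionally consults IMDS unless AWS_EC2_METADATA_DISABLED is true.

    #include <aws/core/client/ClientConfiguration.h>
    #include <aws/core/config/defaults/ClientConfigurationDefaults.h>

    void ConfigureWithSmartDefaults()
    {
        Aws::Client::ClientConfiguration clientConfig;
        clientConfig.region = "us-east-1";

        // "standard" bumps connectTimeoutMs to 3100 and installs the standard retry strategy.
        Aws::Config::Defaults::SetSmartDefaultsConfigurationParameters(
            clientConfig,
            "standard",   // requested defaults mode
            false,        // hasEc2MetadataRegion
            "");          // ec2MetadataRegion (only consulted when the mode resolves via "auto")
    }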
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
new file mode 100644
index 0000000000..4990faff24
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSEndpoint.cpp
@@ -0,0 +1,86 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/AWSEndpoint.h>
+#include <aws/core/utils/DNS.h>
+
+namespace Aws
+{
+namespace Endpoint
+{
+
+Aws::String AWSEndpoint::GetURL() const
+{
+ return m_uri.GetURIString();
+}
+
+void AWSEndpoint::SetURL(Aws::String url)
+{
+ m_uri = std::move(url);
+}
+
+const Aws::Http::URI& AWSEndpoint::GetURI() const
+{
+ return m_uri;
+}
+
+void AWSEndpoint::SetURI(Aws::Http::URI uri)
+{
+ m_uri = std::move(uri);
+}
+
+AWSEndpoint::OptionalError AWSEndpoint::AddPrefixIfMissing(const Aws::String& prefix)
+{
+ if (m_uri.GetAuthority().rfind(prefix, 0) == 0)
+ {
+ // uri already starts with a prefix
+ return OptionalError();
+ }
+
+ if (Aws::Utils::IsValidHost(prefix + m_uri.GetAuthority()))
+ {
+ m_uri.SetAuthority(prefix + m_uri.GetAuthority());
+ return OptionalError();
+ }
+
+ return OptionalError(
+ Aws::Client::AWSError<Aws::Client::CoreErrors>(
+ Aws::Client::CoreErrors::ENDPOINT_RESOLUTION_FAILURE, "",
+ Aws::String("Failed to add host prefix, resulting uri is an invalid hostname: ") + prefix + m_uri.GetAuthority(),
+ false/*retryable*/));
+}
+
+void AWSEndpoint::SetQueryString(const Aws::String& queryString)
+{
+ m_uri.SetQueryString(queryString);
+}
+
+const Crt::Optional<AWSEndpoint::EndpointAttributes>& AWSEndpoint::GetAttributes() const
+{
+ return m_attributes;
+}
+
+Crt::Optional<AWSEndpoint::EndpointAttributes>& AWSEndpoint::AccessAttributes()
+{
+ return m_attributes;
+}
+
+void AWSEndpoint::SetAttributes(AWSEndpoint::EndpointAttributes&& attributes)
+{
+ m_attributes = std::move(attributes);
+}
+
+const Aws::UnorderedMap<Aws::String, Aws::String>& AWSEndpoint::GetHeaders() const
+{
+ return m_headers;
+}
+
+void AWSEndpoint::SetHeaders(Aws::UnorderedMap<Aws::String, Aws::String> headers)
+{
+ m_headers = std::move(headers);
+}
+
+} // namespace Endpoint
+} // namespace Aws
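AddPrefixIfMissing() above prepends a host prefix only when the resulting authority is still a valid hostname and returns an error otherwise. A small sketch, assuming OptionalError behaves like the usual optional-of-AWSError:

    #include <aws/core/endpoint/AWSEndpoint.h>
    #include <iostream>

    void PrefixEndpoint()
    {
        Aws::Endpoint::AWSEndpoint endpoint;
        endpoint.SetURL("https://service.us-east-1.amazonaws.com");

        const auto error = endpoint.AddPrefixIfMissing("data-");
        if (error)
        {
            std::cerr << error->GetMessage() << "\n";  // resulting host was not a valid hostname
            return;
        }
        std::cout << endpoint.GetURL() << "\n";        // https://data-service.us-east-1.amazonaws.com
    }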
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
new file mode 100644
index 0000000000..bfe3cf6147
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/AWSPartitions.cpp
@@ -0,0 +1,153 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/AWSPartitions.h>
+#include <aws/core/utils/memory/stl/AWSArray.h>
+
+namespace Aws
+{
+namespace Endpoint
+{
+const size_t AWSPartitions::PartitionsBlobStrLen = 3166;
+const size_t AWSPartitions::PartitionsBlobSize = 3167;
+
+using PartitionsBlobT = Aws::Array<const char, AWSPartitions::PartitionsBlobSize>;
+static constexpr PartitionsBlobT PartitionsBlob = {{
+'{','"','p','a','r','t','i','t','i','o','n','s','"',':','[','{','"','i','d','"',':','"','a','w','s',
+'"',',','"','o','u','t','p','u','t','s','"',':','{','"','d','n','s','S','u','f','f','i','x','"',':',
+'"','a','m','a','z','o','n','a','w','s','.','c','o','m','"',',','"','d','u','a','l','S','t','a','c',
+'k','D','n','s','S','u','f','f','i','x','"',':','"','a','p','i','.','a','w','s','"',',','"','n','a',
+'m','e','"',':','"','a','w','s','"',',','"','s','u','p','p','o','r','t','s','D','u','a','l','S','t',
+'a','c','k','"',':','t','r','u','e',',','"','s','u','p','p','o','r','t','s','F','I','P','S','"',':',
+'t','r','u','e','}',',','"','r','e','g','i','o','n','R','e','g','e','x','"',':','"','^','(','u','s',
+'|','e','u','|','a','p','|','s','a','|','c','a','|','m','e','|','a','f',')','\\','\\','-','\\','\\','w',
+'+','\\','\\','-','\\','\\','d','+','$','"',',','"','r','e','g','i','o','n','s','"',':','{','"','a','f',
+'-','s','o','u','t','h','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':',
+'"','A','f','r','i','c','a',' ','(','C','a','p','e',' ','T','o','w','n',')','"','}',',','"','a','p',
+'-','e','a','s','t','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"',
+'A','s','i','a',' ','P','a','c','i','f','i','c',' ','(','H','o','n','g',' ','K','o','n','g',')','"',
+'}',',','"','a','p','-','n','o','r','t','h','e','a','s','t','-','1','"',':','{','"','d','e','s','c',
+'r','i','p','t','i','o','n','"',':','"','A','s','i','a',' ','P','a','c','i','f','i','c',' ','(','T',
+'o','k','y','o',')','"','}',',','"','a','p','-','n','o','r','t','h','e','a','s','t','-','2','"',':',
+'{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A','s','i','a',' ','P','a','c','i',
+'f','i','c',' ','(','S','e','o','u','l',')','"','}',',','"','a','p','-','n','o','r','t','h','e','a',
+'s','t','-','3','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A','s','i',
+'a',' ','P','a','c','i','f','i','c',' ','(','O','s','a','k','a',')','"','}',',','"','a','p','-','s',
+'o','u','t','h','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A',
+'s','i','a',' ','P','a','c','i','f','i','c',' ','(','M','u','m','b','a','i',')','"','}',',','"','a',
+'p','-','s','o','u','t','h','-','2','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',
+':','"','A','s','i','a',' ','P','a','c','i','f','i','c',' ','(','H','y','d','e','r','a','b','a','d',
+')','"','}',',','"','a','p','-','s','o','u','t','h','e','a','s','t','-','1','"',':','{','"','d','e',
+'s','c','r','i','p','t','i','o','n','"',':','"','A','s','i','a',' ','P','a','c','i','f','i','c',' ',
+'(','S','i','n','g','a','p','o','r','e',')','"','}',',','"','a','p','-','s','o','u','t','h','e','a',
+'s','t','-','2','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A','s','i',
+'a',' ','P','a','c','i','f','i','c',' ','(','S','y','d','n','e','y',')','"','}',',','"','a','p','-',
+'s','o','u','t','h','e','a','s','t','-','3','"',':','{','"','d','e','s','c','r','i','p','t','i','o',
+'n','"',':','"','A','s','i','a',' ','P','a','c','i','f','i','c',' ','(','J','a','k','a','r','t','a',
+')','"','}',',','"','a','p','-','s','o','u','t','h','e','a','s','t','-','4','"',':','{','"','d','e',
+'s','c','r','i','p','t','i','o','n','"',':','"','A','s','i','a',' ','P','a','c','i','f','i','c',' ',
+'(','M','e','l','b','o','u','r','n','e',')','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A','W','S',' ','S','t','a',
+'n','d','a','r','d',' ','g','l','o','b','a','l',' ','r','e','g','i','o','n','"','}',',','"','c','a',
+'-','c','e','n','t','r','a','l','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n',
+'"',':','"','C','a','n','a','d','a',' ','(','C','e','n','t','r','a','l',')','"','}',',','"','e','u',
+'-','c','e','n','t','r','a','l','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n',
+'"',':','"','E','u','r','o','p','e',' ','(','F','r','a','n','k','f','u','r','t',')','"','}',',','"',
+'e','u','-','c','e','n','t','r','a','l','-','2','"',':','{','"','d','e','s','c','r','i','p','t','i',
+'o','n','"',':','"','E','u','r','o','p','e',' ','(','Z','u','r','i','c','h',')','"','}',',','"','e',
+'u','-','n','o','r','t','h','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',
+':','"','E','u','r','o','p','e',' ','(','S','t','o','c','k','h','o','l','m',')','"','}',',','"','e',
+'u','-','s','o','u','t','h','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',
+':','"','E','u','r','o','p','e',' ','(','M','i','l','a','n',')','"','}',',','"','e','u','-','s','o',
+'u','t','h','-','2','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','E','u',
+'r','o','p','e',' ','(','S','p','a','i','n',')','"','}',',','"','e','u','-','w','e','s','t','-','1',
+'"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','E','u','r','o','p','e',' ',
+'(','I','r','e','l','a','n','d',')','"','}',',','"','e','u','-','w','e','s','t','-','2','"',':','{',
+'"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','E','u','r','o','p','e',' ','(','L','o',
+'n','d','o','n',')','"','}',',','"','e','u','-','w','e','s','t','-','3','"',':','{','"','d','e','s',
+'c','r','i','p','t','i','o','n','"',':','"','E','u','r','o','p','e',' ','(','P','a','r','i','s',')',
+'"','}',',','"','m','e','-','c','e','n','t','r','a','l','-','1','"',':','{','"','d','e','s','c','r',
+'i','p','t','i','o','n','"',':','"','M','i','d','d','l','e',' ','E','a','s','t',' ','(','U','A','E',
+')','"','}',',','"','m','e','-','s','o','u','t','h','-','1','"',':','{','"','d','e','s','c','r','i',
+'p','t','i','o','n','"',':','"','M','i','d','d','l','e',' ','E','a','s','t',' ','(','B','a','h','r',
+'a','i','n',')','"','}',',','"','s','a','-','e','a','s','t','-','1','"',':','{','"','d','e','s','c',
+'r','i','p','t','i','o','n','"',':','"','S','o','u','t','h',' ','A','m','e','r','i','c','a',' ','(',
+'S','a','o',' ','P','a','u','l','o',')','"','}',',','"','u','s','-','e','a','s','t','-','1','"',':',
+'{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','U','S',' ','E','a','s','t',' ','(',
+'N','.',' ','V','i','r','g','i','n','i','a',')','"','}',',','"','u','s','-','e','a','s','t','-','2',
+'"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','U','S',' ','E','a','s','t',
+' ','(','O','h','i','o',')','"','}',',','"','u','s','-','w','e','s','t','-','1','"',':','{','"','d',
+'e','s','c','r','i','p','t','i','o','n','"',':','"','U','S',' ','W','e','s','t',' ','(','N','.',' ',
+'C','a','l','i','f','o','r','n','i','a',')','"','}',',','"','u','s','-','w','e','s','t','-','2','"',
+':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','U','S',' ','W','e','s','t',' ',
+'(','O','r','e','g','o','n',')','"','}','}','}',',','{','"','i','d','"',':','"','a','w','s','-','c',
+'n','"',',','"','o','u','t','p','u','t','s','"',':','{','"','d','n','s','S','u','f','f','i','x','"',
+':','"','a','m','a','z','o','n','a','w','s','.','c','o','m','.','c','n','"',',','"','d','u','a','l',
+'S','t','a','c','k','D','n','s','S','u','f','f','i','x','"',':','"','a','p','i','.','a','m','a','z',
+'o','n','w','e','b','s','e','r','v','i','c','e','s','.','c','o','m','.','c','n','"',',','"','n','a',
+'m','e','"',':','"','a','w','s','-','c','n','"',',','"','s','u','p','p','o','r','t','s','D','u','a',
+'l','S','t','a','c','k','"',':','t','r','u','e',',','"','s','u','p','p','o','r','t','s','F','I','P',
+'S','"',':','t','r','u','e','}',',','"','r','e','g','i','o','n','R','e','g','e','x','"',':','"','^',
+'c','n','\\','\\','-','\\','\\','w','+','\\','\\','-','\\','\\','d','+','$','"',',','"','r','e','g','i','o',
+'n','s','"',':','{','"','a','w','s','-','c','n','-','g','l','o','b','a','l','"',':','{','"','d','e',
+'s','c','r','i','p','t','i','o','n','"',':','"','A','W','S',' ','C','h','i','n','a',' ','g','l','o',
+'b','a','l',' ','r','e','g','i','o','n','"','}',',','"','c','n','-','n','o','r','t','h','-','1','"',
+':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','C','h','i','n','a',' ','(','B',
+'e','i','j','i','n','g',')','"','}',',','"','c','n','-','n','o','r','t','h','w','e','s','t','-','1',
+'"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','C','h','i','n','a',' ','(',
+'N','i','n','g','x','i','a',')','"','}','}','}',',','{','"','i','d','"',':','"','a','w','s','-','u',
+'s','-','g','o','v','"',',','"','o','u','t','p','u','t','s','"',':','{','"','d','n','s','S','u','f',
+'f','i','x','"',':','"','a','m','a','z','o','n','a','w','s','.','c','o','m','"',',','"','d','u','a',
+'l','S','t','a','c','k','D','n','s','S','u','f','f','i','x','"',':','"','a','p','i','.','a','w','s',
+'"',',','"','n','a','m','e','"',':','"','a','w','s','-','u','s','-','g','o','v','"',',','"','s','u',
+'p','p','o','r','t','s','D','u','a','l','S','t','a','c','k','"',':','t','r','u','e',',','"','s','u',
+'p','p','o','r','t','s','F','I','P','S','"',':','t','r','u','e','}',',','"','r','e','g','i','o','n',
+'R','e','g','e','x','"',':','"','^','u','s','\\','\\','-','g','o','v','\\','\\','-','\\','\\','w','+','\\',
+'\\','-','\\','\\','d','+','$','"',',','"','r','e','g','i','o','n','s','"',':','{','"','a','w','s','-',
+'u','s','-','g','o','v','-','g','l','o','b','a','l','"',':','{','"','d','e','s','c','r','i','p','t',
+'i','o','n','"',':','"','A','W','S',' ','G','o','v','C','l','o','u','d',' ','(','U','S',')',' ','g',
+'l','o','b','a','l',' ','r','e','g','i','o','n','"','}',',','"','u','s','-','g','o','v','-','e','a',
+'s','t','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A','W','S',
+' ','G','o','v','C','l','o','u','d',' ','(','U','S','-','E','a','s','t',')','"','}',',','"','u','s',
+'-','g','o','v','-','w','e','s','t','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o',
+'n','"',':','"','A','W','S',' ','G','o','v','C','l','o','u','d',' ','(','U','S','-','W','e','s','t',
+')','"','}','}','}',',','{','"','i','d','"',':','"','a','w','s','-','i','s','o','"',',','"','o','u',
+'t','p','u','t','s','"',':','{','"','d','n','s','S','u','f','f','i','x','"',':','"','c','2','s','.',
+'i','c','.','g','o','v','"',',','"','d','u','a','l','S','t','a','c','k','D','n','s','S','u','f','f',
+'i','x','"',':','"','c','2','s','.','i','c','.','g','o','v','"',',','"','n','a','m','e','"',':','"',
+'a','w','s','-','i','s','o','"',',','"','s','u','p','p','o','r','t','s','D','u','a','l','S','t','a',
+'c','k','"',':','f','a','l','s','e',',','"','s','u','p','p','o','r','t','s','F','I','P','S','"',':',
+'t','r','u','e','}',',','"','r','e','g','i','o','n','R','e','g','e','x','"',':','"','^','u','s','\\',
+'\\','-','i','s','o','\\','\\','-','\\','\\','w','+','\\','\\','-','\\','\\','d','+','$','"',',','"','r','e',
+'g','i','o','n','s','"',':','{','"','a','w','s','-','i','s','o','-','g','l','o','b','a','l','"',':',
+'{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','A','W','S',' ','I','S','O',' ','(',
+'U','S',')',' ','g','l','o','b','a','l',' ','r','e','g','i','o','n','"','}',',','"','u','s','-','i',
+'s','o','-','e','a','s','t','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',
+':','"','U','S',' ','I','S','O',' ','E','a','s','t','"','}',',','"','u','s','-','i','s','o','-','w',
+'e','s','t','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"','U','S',
+' ','I','S','O',' ','W','E','S','T','"','}','}','}',',','{','"','i','d','"',':','"','a','w','s','-',
+'i','s','o','-','b','"',',','"','o','u','t','p','u','t','s','"',':','{','"','d','n','s','S','u','f',
+'f','i','x','"',':','"','s','c','2','s','.','s','g','o','v','.','g','o','v','"',',','"','d','u','a',
+'l','S','t','a','c','k','D','n','s','S','u','f','f','i','x','"',':','"','s','c','2','s','.','s','g',
+'o','v','.','g','o','v','"',',','"','n','a','m','e','"',':','"','a','w','s','-','i','s','o','-','b',
+'"',',','"','s','u','p','p','o','r','t','s','D','u','a','l','S','t','a','c','k','"',':','f','a','l',
+'s','e',',','"','s','u','p','p','o','r','t','s','F','I','P','S','"',':','t','r','u','e','}',',','"',
+'r','e','g','i','o','n','R','e','g','e','x','"',':','"','^','u','s','\\','\\','-','i','s','o','b','\\',
+'\\','-','\\','\\','w','+','\\','\\','-','\\','\\','d','+','$','"',',','"','r','e','g','i','o','n','s','"',
+':','{','"','a','w','s','-','i','s','o','-','b','-','g','l','o','b','a','l','"',':','{','"','d','e',
+'s','c','r','i','p','t','i','o','n','"',':','"','A','W','S',' ','I','S','O','B',' ','(','U','S',')',
+' ','g','l','o','b','a','l',' ','r','e','g','i','o','n','"','}',',','"','u','s','-','i','s','o','b',
+'-','e','a','s','t','-','1','"',':','{','"','d','e','s','c','r','i','p','t','i','o','n','"',':','"',
+'U','S',' ','I','S','O','B',' ','E','a','s','t',' ','(','O','h','i','o',')','"','}','}','}',']',',',
+'"','v','e','r','s','i','o','n','"',':','"','1','.','1','"','}','\0'
+}};
+
+const char* AWSPartitions::GetPartitionsBlob()
+{
+ return PartitionsBlob.data();
+}
+
+} // namespace Endpoint
+} // namespace Aws
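The character array above is simply the partitions.json document baked in as a compile-time blob; GetPartitionsBlob() returns it as a C string that can be parsed like any other JSON payload. A sketch that lists the partition ids, assuming the usual JsonValue/JsonView accessors:

    #include <aws/core/endpoint/AWSPartitions.h>
    #include <aws/core/utils/json/JsonSerializer.h>
    #include <iostream>

    void DumpPartitionIds()
    {
        Aws::Utils::Json::JsonValue doc{Aws::String(Aws::Endpoint::AWSPartitions::GetPartitionsBlob())};
        if (!doc.WasParseSuccessful())
        {
            return;
        }

        auto partitions = doc.View().GetArray("partitions");
        for (size_t i = 0; i < partitions.GetLength(); ++i)
        {
            std::cout << partitions[i].GetString("id") << "\n";  // aws, aws-cn, aws-us-gov, aws-iso, aws-iso-b
        }
    }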
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
new file mode 100644
index 0000000000..43c3e2f0f9
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/BuiltInParameters.cpp
@@ -0,0 +1,135 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/BuiltInParameters.h>
+#include <aws/core/utils/logging/LogMacros.h>
+
+static const char ENDPOINT_BUILTIN_LOG_TAG[] = "EndpointBuiltInParameters";
+
+namespace Aws
+{
+namespace Endpoint
+{
+ void BuiltInParameters::OverrideEndpoint(const Aws::String& endpoint, const Aws::Http::Scheme& scheme)
+ {
+ static const char* SDK_ENDPOINT = "Endpoint";
+
+ if (endpoint.compare(0, 7, "http://") == 0 || endpoint.compare(0, 8, "https://") == 0)
+ {
+ SetStringParameter(SDK_ENDPOINT, endpoint);
+ }
+ else
+ {
+ SetStringParameter(SDK_ENDPOINT, Aws::String(Aws::Http::SchemeMapper::ToString(scheme)) + "://" + endpoint);
+ }
+ }
+
+ bool StringEndsWith(const Aws::String& str, const Aws::String& suffix)
+ {
+ if (suffix.size() > str.size())
+ return false;
+ return std::equal(suffix.rbegin(), suffix.rend(), str.rbegin());
+ }
+
+ void BuiltInParameters::SetFromClientConfiguration(const Client::ClientConfiguration& config)
+ {
+ bool forceFIPS = false;
+ static const char* AWS_REGION = "Region";
+ if (!config.region.empty()) {
+ static const char* FIPS_PREFIX = "fips-";
+ static const char* FIPS_SUFFIX = "-fips";
+ if (config.region.rfind(FIPS_PREFIX, 0) == 0) {
+                // Backward-compatibility layer for code that worked around previous SDK versions
+ Aws::String regionOverride = config.region.substr(sizeof(FIPS_PREFIX) - 1);
+ forceFIPS = true;
+ SetStringParameter(AWS_REGION, regionOverride);
+ } else if (StringEndsWith(config.region, FIPS_SUFFIX)) {
+ Aws::String regionOverride = config.region.substr(0, config.region.size() - sizeof(FIPS_SUFFIX) - 1);
+ forceFIPS = true;
+ SetStringParameter(AWS_REGION, regionOverride);
+ } else {
+ SetStringParameter(AWS_REGION, config.region);
+ }
+ }
+
+ static const char* AWS_USE_FIPS = "UseFIPS";
+ SetBooleanParameter(AWS_USE_FIPS, config.useFIPS || forceFIPS);
+
+ static const char* AWS_USE_DUAL_STACK = "UseDualStack";
+ SetBooleanParameter(AWS_USE_DUAL_STACK, config.useDualStack);
+
+ if (!config.endpointOverride.empty()) {
+ OverrideEndpoint(config.endpointOverride, config.scheme);
+
+ if (config.region.empty()) {
+ AWS_LOGSTREAM_WARN(ENDPOINT_BUILTIN_LOG_TAG,
+ "Endpoint is overridden but region is not set. "
+                                   "Region is required by many endpoint rule sets to resolve the endpoint. "
+                                   "It is also required to compute an AWS signature.");
+ SetStringParameter(AWS_REGION, "region-not-set"); // dummy endpoint resolution parameter
+ }
+ }
+ }
+
+ void BuiltInParameters::SetFromClientConfiguration(const Client::GenericClientConfiguration<false>& config)
+ {
+ return SetFromClientConfiguration(static_cast<const Client::ClientConfiguration&>(config));
+ }
+
+ void BuiltInParameters::SetFromClientConfiguration(const Client::GenericClientConfiguration<true>& config)
+ {
+ SetFromClientConfiguration(static_cast<const Client::ClientConfiguration&>(config));
+ }
+
+ const BuiltInParameters::EndpointParameter& BuiltInParameters::GetParameter(const Aws::String& name) const
+ {
+ const auto foundIt = std::find_if(m_params.begin(), m_params.end(),
+ [name](const BuiltInParameters::EndpointParameter& item)
+ {
+ return item.GetName() == name;
+ });
+
+ if (foundIt != m_params.end())
+ {
+ return *foundIt;
+ }
+ else
+ {
+ static const BuiltInParameters::EndpointParameter BUILTIN_NOT_FOUND_PARAMETER("PARAMETER_NOT_SET", false, EndpointParameter::ParameterOrigin::CLIENT_CONTEXT);
+ return BUILTIN_NOT_FOUND_PARAMETER;
+ }
+ }
+
+ void BuiltInParameters::SetParameter(EndpointParameter param)
+ {
+ const auto foundIt = std::find_if(m_params.begin(), m_params.end(),
+ [param](const BuiltInParameters::EndpointParameter& item)
+ {
+ return item.GetName() == param.GetName();
+ });
+
+ if (foundIt != m_params.end())
+ {
+ m_params.erase(foundIt);
+ }
+ m_params.emplace_back(std::move(param));
+ }
+
+ void BuiltInParameters::SetStringParameter(Aws::String name, Aws::String value)
+ {
+ return SetParameter(EndpointParameter(std::move(name), std::move(value), EndpointParameter::ParameterOrigin::BUILT_IN));
+ }
+
+ void BuiltInParameters::SetBooleanParameter(Aws::String name, bool value)
+ {
+ return SetParameter(EndpointParameter(std::move(name), value, EndpointParameter::ParameterOrigin::BUILT_IN));
+ }
+
+ const Aws::Vector<BuiltInParameters::EndpointParameter>& BuiltInParameters::GetAllParameters() const
+ {
+ return m_params;
+ }
+} // namespace Endpoint
+} // namespace Aws
\ No newline at end of file
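SetFromClientConfiguration() above accepts the legacy "fips-<region>" and "<region>-fips" spellings and splits them into a plain Region parameter plus UseFIPS=true. The intended mapping, written as a standalone helper (illustrative only, not the SDK's exact string arithmetic):

    #include <string>
    #include <utility>

    // Map a possibly FIPS-decorated region name to {plainRegion, useFIPS}.
    std::pair<std::string, bool> NormalizeFipsRegion(const std::string& region)
    {
        const std::string prefix = "fips-";
        const std::string suffix = "-fips";

        if (region.compare(0, prefix.size(), prefix) == 0)
        {
            return { region.substr(prefix.size()), true };   // "fips-us-gov-west-1" -> "us-gov-west-1"
        }
        if (region.size() >= suffix.size() &&
            region.compare(region.size() - suffix.size(), suffix.size(), suffix) == 0)
        {
            return { region.substr(0, region.size() - suffix.size()), true };  // "us-east-1-fips" -> "us-east-1"
        }
        return { region, false };
    }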
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
new file mode 100644
index 0000000000..cdff71f669
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/ClientContextParameters.cpp
@@ -0,0 +1,61 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/ClientContextParameters.h>
+
+namespace Aws
+{
+namespace Endpoint
+{
+ const ClientContextParameters::EndpointParameter& ClientContextParameters::GetParameter(const Aws::String& name) const
+ {
+ const auto foundIt = std::find_if(m_params.begin(), m_params.end(),
+ [name](const ClientContextParameters::EndpointParameter& item)
+ {
+ return item.GetName() == name;
+ });
+
+ if (foundIt != m_params.end())
+ {
+ return *foundIt;
+ }
+ else
+ {
+ static const ClientContextParameters::EndpointParameter CTX_NOT_FOUND_PARAMETER("PARAMETER_NOT_SET", false, EndpointParameter::ParameterOrigin::CLIENT_CONTEXT);
+ return CTX_NOT_FOUND_PARAMETER;
+ }
+ }
+
+ void ClientContextParameters::SetParameter(EndpointParameter param)
+ {
+ const auto foundIt = std::find_if(m_params.begin(), m_params.end(),
+ [param](const ClientContextParameters::EndpointParameter& item)
+ {
+ return item.GetName() == param.GetName();
+ });
+
+ if (foundIt != m_params.end())
+ {
+ m_params.erase(foundIt);
+ }
+ m_params.emplace_back(std::move(param));
+ }
+
+ void ClientContextParameters::SetStringParameter(Aws::String name, Aws::String value)
+ {
+ return SetParameter(EndpointParameter(std::move(name), std::move(value), EndpointParameter::ParameterOrigin::CLIENT_CONTEXT));
+ }
+
+ void ClientContextParameters::SetBooleanParameter(Aws::String name, bool value)
+ {
+ return SetParameter(EndpointParameter(std::move(name), value, EndpointParameter::ParameterOrigin::CLIENT_CONTEXT));
+ }
+
+ const Aws::Vector<ClientContextParameters::EndpointParameter>& ClientContextParameters::GetAllParameters() const
+ {
+ return m_params;
+ }
+} // namespace Endpoint
+} // namespace Aws
\ No newline at end of file
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
new file mode 100644
index 0000000000..58370cd425
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/DefaultEndpointProvider.cpp
@@ -0,0 +1,236 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/DefaultEndpointProvider.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/crt/Api.h>
+
+namespace Aws
+{
+namespace Endpoint
+{
+
+/**
+ * Export endpoint provider symbols from DLL
+ */
+template class AWS_CORE_API DefaultEndpointProvider<Aws::Client::GenericClientConfiguration<false>,
+ Aws::Endpoint::BuiltInParameters,
+ Aws::Endpoint::ClientContextParameters>;
+
+char CharToDec(const char c)
+{
+ if(c >= '0' && c <= '9')
+ return c - '0';
+ if(c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ if(c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ return 0;
+}
+
+Aws::String PercentDecode(Aws::String inputString)
+{
+ if (inputString.find_first_of("%") == Aws::String::npos)
+ {
+ return inputString;
+ }
+ Aws::String result;
+ result.reserve(inputString.size());
+
+ bool percentFound = false;
+ char firstOctet = 0;
+ char secondOctet = 0;
+ for(size_t i = 0; i < inputString.size(); ++i)
+ {
+ const char currentChar = inputString[i];
+ if ('%' == currentChar)
+ {
+ if (percentFound)
+ {
+ // not percent-encoded string
+ result += currentChar;
+ }
+ percentFound = true;
+ continue;
+ }
+
+ if (percentFound)
+ {
+ if ((currentChar >= '0' && currentChar <= '9') ||
+ (currentChar >= 'A' && currentChar <= 'F') ||
+ (currentChar >= 'a' && currentChar <= 'f'))
+ {
+ if(!firstOctet)
+ {
+ firstOctet = currentChar;
+ continue;
+ }
+ if(!secondOctet)
+ {
+ secondOctet = currentChar;
+ char encodedChar = CharToDec(firstOctet) * 16 + CharToDec(secondOctet);
+ result += encodedChar;
+
+ percentFound = false;
+ firstOctet = 0;
+ secondOctet = 0;
+ continue;
+ }
+ } else {
+ // Non-percent encoded sequence
+ result += '%';
+ if(!firstOctet)
+ result += firstOctet;
+ result += currentChar;
+ percentFound = false;
+ firstOctet = 0;
+ secondOctet = 0;
+ continue;
+ }
+ }
+
+ if ('+' == currentChar)
+ {
+ result += ' ';
+ continue;
+ }
+ result += currentChar;
+ }
+ return result;
+}
+
+AWS_CORE_API ResolveEndpointOutcome
+ResolveEndpointDefaultImpl(const Aws::Crt::Endpoints::RuleEngine& ruleEngine,
+ const EndpointParameters& builtInParameters,
+ const EndpointParameters& clientContextParameters,
+ const EndpointParameters& endpointParameters)
+{
+ if(!ruleEngine) {
+ AWS_LOGSTREAM_FATAL(DEFAULT_ENDPOINT_PROVIDER_TAG, "Invalid CRT Rule Engine state");
+ return ResolveEndpointOutcome(
+ Aws::Client::AWSError<Aws::Client::CoreErrors>(
+ Aws::Client::CoreErrors::INTERNAL_FAILURE,
+ "",
+ "CRT Endpoint rule engine is not initialized",
+ false/*retryable*/));
+ }
+
+ Aws::Crt::Endpoints::RequestContext crtRequestCtx;
+
+ const Aws::Vector<std::reference_wrapper<const EndpointParameters>> allParameters
+ = {std::cref(builtInParameters), std::cref(clientContextParameters), std::cref(endpointParameters)};
+
+ for (const auto& parameterClass : allParameters)
+ {
+ for(const auto& parameter : parameterClass.get())
+ {
+ if(EndpointParameter::ParameterType::BOOLEAN == parameter.GetStoredType())
+ {
+ AWS_LOGSTREAM_TRACE(DEFAULT_ENDPOINT_PROVIDER_TAG, "Endpoint bool eval parameter: " << parameter.GetName() << " = " << parameter.GetBoolValueNoCheck());
+ crtRequestCtx.AddBoolean(Aws::Crt::ByteCursorFromCString(parameter.GetName().c_str()), parameter.GetBoolValueNoCheck());
+ }
+ else if(EndpointParameter::ParameterType::STRING == parameter.GetStoredType())
+ {
+ AWS_LOGSTREAM_TRACE(DEFAULT_ENDPOINT_PROVIDER_TAG, "Endpoint str eval parameter: " << parameter.GetName() << " = " << parameter.GetStrValueNoCheck());
+ crtRequestCtx.AddString(Aws::Crt::ByteCursorFromCString(parameter.GetName().c_str()), Aws::Crt::ByteCursorFromCString(parameter.GetStrValueNoCheck().c_str()));
+ }
+ else
+ {
+ return ResolveEndpointOutcome(
+ Aws::Client::AWSError<Aws::Client::CoreErrors>(
+ Aws::Client::CoreErrors::INVALID_QUERY_PARAMETER,
+ "",
+ "Invalid endpoint parameter type for parameter " + parameter.GetName(),
+ false/*retryable*/));
+ }
+ }
+ }
+
+ auto resolved = ruleEngine.Resolve(crtRequestCtx);
+
+ if(resolved.has_value())
+ {
+ if(resolved->IsError())
+ {
+ auto crtError = resolved->GetError();
+ Aws::String sdkCrtError = crtError ? Aws::String(crtError->begin(), crtError->end()) :
+ "CRT Rule engine resolution resulted in an unknown error";
+ return ResolveEndpointOutcome(
+ Aws::Client::AWSError<Aws::Client::CoreErrors>(
+ Aws::Client::CoreErrors::INVALID_PARAMETER_COMBINATION,
+ "",
+ sdkCrtError,
+ false/*retryable*/));
+ }
+ else if(resolved->IsEndpoint() && resolved->GetUrl())
+ {
+ Aws::Endpoint::AWSEndpoint endpoint;
+ const auto crtUrl = resolved->GetUrl();
+ Aws::String sdkCrtUrl = Aws::String(crtUrl->begin(), crtUrl->end());
+ AWS_LOGSTREAM_DEBUG(DEFAULT_ENDPOINT_PROVIDER_TAG, "Endpoint rules engine evaluated the endpoint: " << sdkCrtUrl);
+ endpoint.SetURL(PercentDecode(std::move(sdkCrtUrl)));
+
+ // Transform attributes
+            // Each attribute consists of properties, hence converting CRT properties to SDK attributes
+ const auto crtProps = resolved->GetProperties();
+ if (crtProps && crtProps->size() > 2) {
+ Aws::String sdkCrtProps = crtProps ? Aws::String(crtProps->begin(), crtProps->end()) : "";
+ AWS_LOGSTREAM_TRACE(DEFAULT_ENDPOINT_PROVIDER_TAG, "Endpoint rules evaluated props: " << sdkCrtProps);
+
+ Internal::Endpoint::EndpointAttributes epAttributes = Internal::Endpoint::EndpointAttributes::BuildEndpointAttributesFromJson(
+ sdkCrtProps);
+
+ endpoint.SetAttributes(std::move(epAttributes));
+ }
+
+ // transform headers
+ const auto crtHeaders = resolved->GetHeaders();
+ if (crtHeaders)
+ {
+ Aws::UnorderedMap<Aws::String, Aws::String> sdkHeaders;
+ for (const auto& header: *crtHeaders)
+ {
+ Aws::String key(header.first.begin(), header.first.end());
+ Aws::String value;
+ for (const auto& crtHeaderValue : header.second)
+ {
+ if(!value.empty()) {
+ value.insert(value.end(), ';');
+ }
+ value.insert(value.end(), crtHeaderValue.begin(), crtHeaderValue.end());
+ }
+ sdkHeaders.emplace(std::move(key), std::move(value));
+ }
+
+ endpoint.SetHeaders(std::move(sdkHeaders));
+ }
+
+ return ResolveEndpointOutcome(std::move(endpoint));
+ }
+ else
+ {
+ return ResolveEndpointOutcome(
+ Aws::Client::AWSError<Aws::Client::CoreErrors>(
+ Aws::Client::CoreErrors::INVALID_QUERY_PARAMETER,
+ "",
+ "Invalid AWS CRT RuleEngine state",
+ false/*retryable*/));
+ }
+ }
+
+ auto errCode = Aws::Crt::LastError();
+ AWS_LOGSTREAM_DEBUG(DEFAULT_ENDPOINT_PROVIDER_TAG, "ERROR: Rule engine has failed to evaluate the endpoint: " << errCode << " " << Aws::Crt::ErrorDebugString(errCode));
+
+ return ResolveEndpointOutcome(
+ Aws::Client::AWSError<Aws::Client::CoreErrors>(
+ Aws::Client::CoreErrors::INVALID_QUERY_PARAMETER,
+ "",
+ "Failed to evaluate the endpoint: null output from AWS CRT RuleEngine",
+ false/*retryable*/));
+
+}
+
+} // namespace Endpoint
+} // namespace Aws
\ No newline at end of file
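
Note: the PercentDecode helper added above folds %XX escapes back into raw bytes and maps '+' to a space before the resolved URL is handed to the SDK. A simplified standalone sketch of the same idea (std::string based, with simpler handling of malformed escapes than the SDK version):

    #include <cstddef>
    #include <string>

    // Decode %XX escapes and map '+' to ' '; malformed escapes are passed through unchanged.
    std::string PercentDecodeSketch(const std::string& in)
    {
        auto hexVal = [](char c) -> int {
            if (c >= '0' && c <= '9') return c - '0';
            if (c >= 'A' && c <= 'F') return c - 'A' + 10;
            if (c >= 'a' && c <= 'f') return c - 'a' + 10;
            return -1;
        };
        std::string out;
        out.reserve(in.size());
        for (std::size_t i = 0; i < in.size(); ++i)
        {
            if (in[i] == '%' && i + 2 < in.size() &&
                hexVal(in[i + 1]) >= 0 && hexVal(in[i + 2]) >= 0)
            {
                out += static_cast<char>(hexVal(in[i + 1]) * 16 + hexVal(in[i + 2]));
                i += 2;
            }
            else if (in[i] == '+')
            {
                out += ' ';
            }
            else
            {
                out += in[i];
            }
        }
        return out;
    }
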
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
new file mode 100644
index 0000000000..0928186839
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/EndpointProviderBase.cpp
@@ -0,0 +1,20 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/EndpointProviderBase.h>
+
+namespace Aws
+{
+namespace Endpoint
+{
+/**
+ * Export endpoint provider symbols from DLL
+ */
+template class AWS_CORE_API EndpointProviderBase<Aws::Client::GenericClientConfiguration<false>,
+ Aws::Endpoint::BuiltInParameters,
+ Aws::Endpoint::ClientContextParameters>;
+
+} // namespace Endpoint
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
new file mode 100644
index 0000000000..5c295bb132
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/endpoint/internal/AWSEndpointAttribute.cpp
@@ -0,0 +1,82 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/endpoint/internal/AWSEndpointAttribute.h>
+#include <aws/core/utils/logging/LogMacros.h>
+
+static const char ENDPOINT_AUTH_SCHEME_TAG[] = "EndpointAuthScheme::BuildEndpointAuthSchemeFromJson";
+
+Aws::String CrtToSdkSignerName(const Aws::String& crtSignerName)
+{
+ Aws::String sdkSigner = "NullSigner";
+ if (crtSignerName == "sigv4") {
+ sdkSigner = "SignatureV4";
+ } else if (crtSignerName == "sigv4a") {
+ sdkSigner = "AsymmetricSignatureV4";
+ } else if (crtSignerName == "none") {
+ sdkSigner = "NullSigner";
+ } else if (crtSignerName == "bearer") {
+ sdkSigner = "Bearer";
+ } else {
+ AWS_LOG_WARN(ENDPOINT_AUTH_SCHEME_TAG, (Aws::String("Unknown Endpoint authSchemes signer: ") + crtSignerName).c_str());
+ }
+
+ return sdkSigner;
+}
+
+Aws::Internal::Endpoint::EndpointAttributes
+Aws::Internal::Endpoint::EndpointAttributes::BuildEndpointAttributesFromJson(const Aws::String& iJsonStr)
+{
+ Aws::Internal::Endpoint::EndpointAttributes attributes;
+ Aws::Internal::Endpoint::EndpointAuthScheme& authScheme = attributes.authScheme;
+
+ Utils::Json::JsonValue jsonObject(iJsonStr);
+ if (jsonObject.WasParseSuccessful())
+ {
+ Aws::Map<Aws::String, Utils::Json::JsonView> jsonMap = jsonObject.View().GetAllObjects();
+ for (const auto& mapItemAttribute : jsonMap)
+ {
+ if (mapItemAttribute.first == "authSchemes" && mapItemAttribute.second.IsListType()) {
+ Aws::Utils::Array<Utils::Json::JsonView> jsonAuthSchemeArray = mapItemAttribute.second.AsArray();
+
+ for (size_t arrayIdx = 0; arrayIdx < jsonAuthSchemeArray.GetLength(); ++arrayIdx)
+ {
+ const Utils::Json::JsonView& property = jsonAuthSchemeArray.GetItem(arrayIdx);
+ for (const auto& mapItemProperty : property.GetAllObjects())
+ {
+ if (mapItemProperty.first == "name") {
+ authScheme.SetName(CrtToSdkSignerName(mapItemProperty.second.AsString()));
+ } else if (mapItemProperty.first == "signingName") {
+ authScheme.SetSigningName(mapItemProperty.second.AsString());
+ } else if (mapItemProperty.first == "signingRegion") {
+ authScheme.SetSigningRegion(mapItemProperty.second.AsString());
+ } else if (mapItemProperty.first == "signingRegionSet") {
+ Aws::Utils::Array<Utils::Json::JsonView> signingRegionArray = mapItemProperty.second.AsArray();
+ if (signingRegionArray.GetLength() != 1) {
+ AWS_LOG_WARN(ENDPOINT_AUTH_SCHEME_TAG,
+ "Signing region set size is not equal to 1");
+ }
+ if (signingRegionArray.GetLength() > 0) {
+ authScheme.SetSigningRegionSet(signingRegionArray.GetItem(0).AsString());
+ }
+ } else if (mapItemProperty.first == "disableDoubleEncoding") {
+ authScheme.SetDisableDoubleEncoding(mapItemProperty.second.AsBool());
+ } else {
+ AWS_LOG_WARN(ENDPOINT_AUTH_SCHEME_TAG, Aws::String("Unknown Endpoint authSchemes attribute property: " + mapItemProperty.first).c_str());
+ }
+ }
+ }
+ } else {
+ AWS_LOG_WARN(ENDPOINT_AUTH_SCHEME_TAG, Aws::String("Unknown Endpoint Attribute: " + mapItemAttribute.first).c_str());
+ }
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(ENDPOINT_AUTH_SCHEME_TAG, "Json Parse failed with message: " << jsonObject.GetErrorMessage());
+ }
+
+ return attributes;
+}
\ No newline at end of file
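
Note: the parser above consumes the endpoint-rules properties JSON, whose "authSchemes" list carries the signer name and signing scope, and translates CRT signer names into SDK signer names. A simplified standalone version of that translation (plain std::string, not the SDK types):

    #include <string>

    // Example input shape (illustrative): {"authSchemes":[{"name":"sigv4","signingRegion":"us-east-1"}]}
    std::string CrtToSdkSignerNameSketch(const std::string& crtName)
    {
        if (crtName == "sigv4")  return "SignatureV4";
        if (crtName == "sigv4a") return "AsymmetricSignatureV4";
        if (crtName == "bearer") return "Bearer";
        // "none" and anything unrecognized fall back to the null signer
        // (the SDK additionally logs a warning for unrecognized names).
        return "NullSigner";
    }
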
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
index d21a2e7d86..cdcbf103e7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/cjson/cJSON.cpp
@@ -85,12 +85,19 @@ typedef struct {
const unsigned char *json;
size_t position;
} error;
+/*
+ * NOTE: the use of this static global variable is not thread-safe,
+ * hence writing to / reading from it is disabled in this code.
+ *
+ * See https://cjson.docsforge.com/#thread-safety (concurrent reads)
+ * See https://github.com/aws/aws-sdk-cpp/pull/2231 (concurrent writes)
static error global_error = { NULL, 0 };
CJSON_AS4CPP_PUBLIC(const char *) cJSON_AS4CPP_GetErrorPtr(void)
{
return (const char*) (global_error.json + global_error.position);
}
+ */
CJSON_AS4CPP_PUBLIC(char *) cJSON_AS4CPP_GetStringValue(const cJSON * const item)
{
@@ -120,7 +127,7 @@ CJSON_AS4CPP_PUBLIC(double) cJSON_AS4CPP_GetNumberValue(const cJSON * const item
CJSON_AS4CPP_PUBLIC(const char*) cJSON_AS4CPP_Version(void)
{
static char version[15];
- sprintf(version, "%i.%i.%i", CJSON_AS4CPP_VERSION_MAJOR, CJSON_AS4CPP_VERSION_MINOR, CJSON_AS4CPP_VERSION_PATCH);
+ snprintf(version, sizeof(version), "%i.%i.%i", CJSON_AS4CPP_VERSION_MAJOR, CJSON_AS4CPP_VERSION_MINOR, CJSON_AS4CPP_VERSION_PATCH);
return version;
}
@@ -569,27 +576,27 @@ static cJSON_AS4CPP_bool print_number(const cJSON * const item, printbuffer * co
/* For integer which is out of the range of [INT_MIN, INT_MAX], valuestring is an integer literal. */
if (item->valuestring)
{
- length = sprintf((char*)number_buffer, "%s", item->valuestring);
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%s", item->valuestring);
}
/* This checks for NaN and Infinity */
else if (isnan(d) || isinf(d))
{
- length = sprintf((char*)number_buffer, "null");
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "null");
}
else
{
/* Try 15 decimal places of precision to avoid nonsignificant nonzero digits */
- length = sprintf((char*)number_buffer, "%1.15g", d);
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.15g", d);
/* Check whether the original double can be recovered */
if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d))
{
/* If not, print with 17 decimal places of precision */
- length = sprintf((char*)number_buffer, "%1.17g", d);
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.17g", d);
}
}
- /* sprintf failed or buffer overrun occurred */
+ /* snprintf failed or buffer overrun occurred */
if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1)))
{
return false;
@@ -1018,7 +1025,7 @@ static cJSON_AS4CPP_bool print_string_ptr(const unsigned char * const input, pri
break;
default:
/* escape and print as unicode codepoint */
- sprintf((char*)output_pointer, "u%04x", *input_pointer);
+ snprintf((char*)output_pointer, output_buffer->length - (output_pointer - output_buffer->buffer), "u%04x", *input_pointer);
output_pointer += 4;
break;
}
@@ -1107,9 +1114,13 @@ CJSON_AS4CPP_PUBLIC(cJSON *) cJSON_AS4CPP_ParseWithLengthOpts(const char *value,
parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } };
cJSON *item = NULL;
- /* reset error position */
+ /* reset error position
+ *
+ * NOTE: disabled due to thread safety (see note at the top of this file).
+ *
global_error.json = NULL;
global_error.position = 0;
+ */
if (value == NULL || 0 == buffer_length)
{
@@ -1175,7 +1186,9 @@ fail:
*return_parse_end = (const char*)local_error.json + local_error.position;
}
+ /* NOTE: disabled due to thread safety (see note at the top of this file).
global_error = local_error;
+ */
}
return NULL;
@@ -2470,7 +2483,7 @@ CJSON_AS4CPP_PUBLIC(cJSON *) cJSON_AS4CPP_CreateInt64(long long num)
if (num > INT_MAX || num < INT_MIN)
{
char buf[21];
- sprintf(buf, "%lld", num);
+ snprintf(buf, sizeof(buf), "%lld", num);
item->valuestring = (char*)cJSON_AS4CPP_strdup((const unsigned char*)buf, &global_hooks);
}
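
Note: the motivation for the sprintf-to-snprintf changes above is that snprintf never writes past the supplied buffer and returns the length the full output would have needed, so truncation can be detected with the same `length > sizeof(buffer) - 1` check the code already performs. A small standalone illustration (assumes nothing about cJSON itself):

    #include <cstdio>

    int main()
    {
        char buf[8];
        // "3.141593" needs 8 characters plus the terminator, so it does not fit in buf.
        int needed = std::snprintf(buf, sizeof(buf), "%1.6f", 3.1415926);
        if (needed < 0 || needed > static_cast<int>(sizeof(buf) - 1))
        {
            std::printf("truncated: needed %d chars, buffer holds %zu\n",
                        needed, sizeof(buf) - 1);
        }
        return 0;
    }
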
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
index ebe0fd9eec..151a368676 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/external/tinyxml2/tinyxml2.cpp
@@ -135,13 +135,15 @@ struct Entity {
char value;
};
-static const int NUM_ENTITIES = 5;
+static const int NUM_ENTITIES = 7;
static const Entity entities[NUM_ENTITIES] = {
- { "quot", 4, DOUBLE_QUOTE },
- { "amp", 3, '&' },
- { "apos", 4, SINGLE_QUOTE },
- { "lt", 2, '<' },
- { "gt", 2, '>' }
+ { "quot", 4, DOUBLE_QUOTE },
+ { "amp", 3, '&' },
+ { "apos", 4, SINGLE_QUOTE },
+ { "lt", 2, '<' },
+ { "gt", 2, '>' },
+ { "#xA", 3, LF },
+ { "#xD", 3, CR }
};
@@ -2396,6 +2398,8 @@ XMLPrinter::XMLPrinter( FILE* file, bool compact, int depth ) :
_restrictedEntityFlag[(unsigned char)'&'] = true;
_restrictedEntityFlag[(unsigned char)'<'] = true;
_restrictedEntityFlag[(unsigned char)'>'] = true; // not required, but consistency is nice
+ _restrictedEntityFlag[(unsigned char)LF] = true;
+ _restrictedEntityFlag[(unsigned char)CR] = true;
_buffer.Push( 0 );
}
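
Note: the two new entities above exist because literal CR/LF characters inside XML attribute values are normalized by parsers, so a writer that wants to round-trip them has to emit the character references &#xD; and &#xA;. A minimal standalone escaping sketch (not tinyxml2's implementation):

    #include <string>

    // Escape the characters that cannot appear literally in an XML attribute value.
    std::string EscapeXmlAttribute(const std::string& in)
    {
        std::string out;
        for (char c : in)
        {
            switch (c)
            {
                case '&':  out += "&amp;";  break;
                case '<':  out += "&lt;";   break;
                case '"':  out += "&quot;"; break;
                case '\n': out += "&#xA;";  break;  // preserved across parse/serialize
                case '\r': out += "&#xD;";  break;
                default:   out += c;        break;
            }
        }
        return out;
    }
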
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
index a556e39a5d..a08b21f9b5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpClientFactory.cpp
@@ -121,9 +121,11 @@ namespace Aws
void InitStaticState() override
{
+ AWS_LOGSTREAM_DEBUG(HTTP_CLIENT_FACTORY_ALLOCATION_TAG, "Initializing Http Static State");
#if ENABLE_CURL_CLIENT
if(s_InitCleanupCurlFlag)
{
+ AWS_LOGSTREAM_DEBUG(HTTP_CLIENT_FACTORY_ALLOCATION_TAG, "Initializing Curl Http Client");
CurlHttpClient::InitGlobalState();
}
#if !defined (_WIN32)
@@ -139,9 +141,11 @@ namespace Aws
virtual void CleanupStaticState() override
{
+ AWS_LOGSTREAM_DEBUG(HTTP_CLIENT_FACTORY_ALLOCATION_TAG, "Cleanup Http Static State");
#if ENABLE_CURL_CLIENT
if(s_InitCleanupCurlFlag)
{
+ AWS_LOGSTREAM_DEBUG(HTTP_CLIENT_FACTORY_ALLOCATION_TAG, "Cleanup Curl Http Client");
CurlHttpClient::CleanupGlobalState();
}
#endif
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
index 95cb626c22..1f109c86a9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpRequest.cpp
@@ -4,37 +4,88 @@
*/
#include <aws/core/http/HttpRequest.h>
-
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/http/request_response.h>
+#include <aws/crt/Types.h>
+#include <aws/crt/http/HttpRequestResponse.h>
namespace Aws
{
-namespace Http
-{
+ namespace Http
+ {
-const char DATE_HEADER[] = "date";
-const char AWS_DATE_HEADER[] = "X-Amz-Date";
-const char AWS_SECURITY_TOKEN[] = "X-Amz-Security-Token";
-const char ACCEPT_HEADER[] = "accept";
-const char ACCEPT_CHAR_SET_HEADER[] = "accept-charset";
-const char ACCEPT_ENCODING_HEADER[] = "accept-encoding";
-const char AUTHORIZATION_HEADER[] = "authorization";
-const char AWS_AUTHORIZATION_HEADER[] = "authorization";
-const char COOKIE_HEADER[] = "cookie";
-const char CONTENT_LENGTH_HEADER[] = "content-length";
-const char CONTENT_TYPE_HEADER[] = "content-type";
-const char TRANSFER_ENCODING_HEADER[] = "transfer-encoding";
-const char USER_AGENT_HEADER[] = "user-agent";
-const char VIA_HEADER[] = "via";
-const char HOST_HEADER[] = "host";
-const char AMZ_TARGET_HEADER[] = "x-amz-target";
-const char X_AMZ_EXPIRES_HEADER[] = "X-Amz-Expires";
-const char CONTENT_MD5_HEADER[] = "content-md5";
-const char API_VERSION_HEADER[] = "x-amz-api-version";
-const char SDK_INVOCATION_ID_HEADER[] = "amz-sdk-invocation-id";
-const char SDK_REQUEST_HEADER[] = "amz-sdk-request";
-const char CHUNKED_VALUE[] = "chunked";
+ const char DATE_HEADER[] = "date";
+ const char AWS_DATE_HEADER[] = "X-Amz-Date";
+ const char AWS_SECURITY_TOKEN[] = "X-Amz-Security-Token";
+ const char ACCEPT_HEADER[] = "accept";
+ const char ACCEPT_CHAR_SET_HEADER[] = "accept-charset";
+ const char ACCEPT_ENCODING_HEADER[] = "accept-encoding";
+ const char AUTHORIZATION_HEADER[] = "authorization";
+ const char AWS_AUTHORIZATION_HEADER[] = "authorization";
+ const char COOKIE_HEADER[] = "cookie";
+ const char DECODED_CONTENT_LENGTH_HEADER[] = "x-amz-decoded-content-length";
+ const char CONTENT_LENGTH_HEADER[] = "content-length";
+ const char CONTENT_TYPE_HEADER[] = "content-type";
+ const char CONTENT_ENCODING_HEADER[] = "content-encoding";
+ const char TRANSFER_ENCODING_HEADER[] = "transfer-encoding";
+ const char USER_AGENT_HEADER[] = "user-agent";
+ const char VIA_HEADER[] = "via";
+ const char HOST_HEADER[] = "host";
+ const char AMZ_TARGET_HEADER[] = "x-amz-target";
+ const char X_AMZ_EXPIRES_HEADER[] = "X-Amz-Expires";
+ const char CONTENT_MD5_HEADER[] = "content-md5";
+ const char API_VERSION_HEADER[] = "x-amz-api-version";
+ const char AWS_TRAILER_HEADER[] = "x-amz-trailer";
+ const char SDK_INVOCATION_ID_HEADER[] = "amz-sdk-invocation-id";
+ const char SDK_REQUEST_HEADER[] = "amz-sdk-request";
+ const char CHUNKED_VALUE[] = "chunked";
+ const char AWS_CHUNKED_VALUE[] = "aws-chunked";
+ const char X_AMZN_TRACE_ID_HEADER[] = "X-Amzn-Trace-Id";
+ const char ALLOCATION_TAG[] = "HttpRequestConversion";
+ const char X_AMZN_ERROR_TYPE[] = "x-amzn-errortype";
-} // Http
-} // Aws
+ std::shared_ptr<Aws::Crt::Http::HttpRequest> HttpRequest::ToCrtHttpRequest()
+ {
+ auto request = Aws::MakeShared<Aws::Crt::Http::HttpRequest>(ALLOCATION_TAG);
+ request->SetBody([&]() -> std::shared_ptr<IOStream> {
+ const std::shared_ptr<Aws::IOStream>& body = GetContentBody();
+ if (body) {
+ return body;
+ }
+ // Return an empty string stream for no body
+ return Aws::MakeShared<Aws::StringStream>(ALLOCATION_TAG, "");
+ }());
+ auto headers = GetHeaders();
+ for (const auto& it: headers)
+ {
+ Aws::Crt::Http::HttpHeader header;
+ header.name = Aws::Crt::ByteCursorFromCString(it.first.c_str());
+ header.value = Aws::Crt::ByteCursorFromCString(it.second.c_str());
+ request->AddHeader(header);
+ }
+ // Need a different URL encoding here.
+            // CRT sigv4 doesn't do any encoding if double encoding is off, so we need to encode the path before passing it to CRT.
+ const URI& uri = m_uri;
+ Aws::StringStream ss;
+ Aws::StringStream port;
+ if (uri.GetScheme() == Scheme::HTTP && uri.GetPort() != HTTP_DEFAULT_PORT)
+ {
+ port << ":" << uri.GetPort();
+ }
+ else if (uri.GetScheme() == Scheme::HTTPS && uri.GetPort() != HTTPS_DEFAULT_PORT)
+ {
+ port << ":" << uri.GetPort();
+ }
+ ss << SchemeMapper::ToString(uri.GetScheme()) << SEPARATOR << uri.GetAuthority() << port.str()
+ << ((uri.GetPath() == "/") ? "" : URI::URLEncodePath(uri.GetPath()))
+ << uri.GetQueryString();
+ request->SetPath(Aws::Crt::ByteCursorFromCString(ss.str().c_str()));
+ const char *method = HttpMethodMapper::GetNameForHttpMethod(m_method);
+ request->SetMethod(Aws::Crt::ByteCursorFromCString(method));
+ return request;
+ }
+
+ } // Http
+} // Aws
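
Note: ToCrtHttpRequest above rebuilds the absolute request target for CRT, dropping the port when it matches the scheme default and percent-encoding the path. A standalone sketch of that assembly logic (plain std::string, hypothetical parameter names):

    #include <string>

    // Assemble "scheme://authority[:port]path?query", omitting default ports.
    std::string BuildRequestTarget(const std::string& scheme,      // "http" or "https"
                                   const std::string& authority,   // e.g. "example.com"
                                   unsigned port,
                                   const std::string& encodedPath, // already percent-encoded
                                   const std::string& query)       // starts with '?' or empty
    {
        const bool isDefaultPort = (scheme == "http" && port == 80) ||
                                   (scheme == "https" && port == 443);
        std::string target = scheme + "://" + authority;
        if (!isDefaultPort)
        {
            target += ":" + std::to_string(port);
        }
        target += (encodedPath == "/" ? "" : encodedPath);
        target += query;
        return target;
    }
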
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
new file mode 100644
index 0000000000..d4e0833653
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/HttpResponse.cpp
@@ -0,0 +1,24 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/http/HttpResponse.h>
+
+#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
+#include <aws/core/utils/StringUtils.h>
+
+namespace Aws
+{
+ namespace Http
+ {
+ /**
+ * Overload ostream operator<< for HttpResponseCode enum class for a prettier output such as "200" and not "<C8-00 00-00>"
+ */
+ Aws::OStream& operator<< (Aws::OStream& oStream, HttpResponseCode code)
+ {
+ oStream << Aws::Utils::StringUtils::to_string(static_cast<typename std::underlying_type<HttpResponseCode>::type>(code));
+ return oStream;
+ }
+ } // Http
+} // Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
index a2239df54b..0bc3c09245 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/URI.cpp
@@ -5,9 +5,8 @@
#include <aws/core/http/URI.h>
-#include <aws/core/utils/StringUtils.h>
-#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/core/utils/memory/stl/AWSSet.h>
+#include <aws/core/utils/logging/LogMacros.h>
#include <cstdlib>
#include <cctype>
@@ -25,10 +24,52 @@ namespace Http
const char* SEPARATOR = "://";
+bool s_compliantRfc3986Encoding = false;
+void SetCompliantRfc3986Encoding(bool compliant) { s_compliantRfc3986Encoding = compliant; }
+
+Aws::String urlEncodeSegment(const Aws::String& segment)
+{
+ // consolidates legacy escaping logic into one local method
+ if (s_compliantRfc3986Encoding)
+ {
+ return StringUtils::URLEncode(segment.c_str());
+ }
+ else
+ {
+ Aws::StringStream ss;
+ ss << std::hex << std::uppercase;
+ for(unsigned char c : segment) // alnum results in UB if the value of c is not unsigned char & is not EOF
+ {
+ // RFC 3986 §2.3 unreserved characters
+ if (StringUtils::IsAlnum(c))
+ {
+ ss << c;
+ continue;
+ }
+ switch(c)
+ {
+ // §2.3 unreserved characters
+ // The path section of the URL allows unreserved characters to appear unescaped
+ case '-': case '_': case '.': case '~':
+ // RFC 3986 §2.2 Reserved characters
+ // NOTE: this implementation does not accurately implement the RFC on purpose to accommodate for
+ // discrepancies in the implementations of URL encoding between AWS services for legacy reasons.
+ case '$': case '&': case ',':
+ case ':': case '=': case '@':
+ ss << c;
+ break;
+ default:
+ ss << '%' << std::setfill('0') << std::setw(2) << (int)c << std::setw(0);
+ }
+ }
+ return ss.str();
+ }
+}
+
} // namespace Http
} // namespace Aws
-URI::URI() : m_scheme(Scheme::HTTP), m_port(HTTP_DEFAULT_PORT)
+URI::URI() : m_scheme(Scheme::HTTP), m_port(HTTP_DEFAULT_PORT), m_pathHasTrailingSlash(false)
{
}
@@ -102,7 +143,7 @@ void URI::SetScheme(Scheme value)
Aws::String URI::URLEncodePathRFC3986(const Aws::String& path)
{
- if(path.empty())
+ if (path.empty())
{
return path;
}
@@ -114,34 +155,10 @@ Aws::String URI::URLEncodePathRFC3986(const Aws::String& path)
// escape characters appearing in a URL path according to RFC 3986
for (const auto& segment : pathParts)
{
- ss << '/';
- for(unsigned char c : segment) // alnum results in UB if the value of c is not unsigned char & is not EOF
- {
- // §2.3 unreserved characters
- if (StringUtils::IsAlnum(c))
- {
- ss << c;
- continue;
- }
- switch(c)
- {
- // §2.3 unreserved characters
- case '-': case '_': case '.': case '~':
- // The path section of the URL allow reserved characters to appear unescaped
- // RFC 3986 §2.2 Reserved characters
- // NOTE: this implementation does not accurately implement the RFC on purpose to accommodate for
- // discrepancies in the implementations of URL encoding between AWS services for legacy reasons.
- case '$': case '&': case ',':
- case ':': case '=': case '@':
- ss << c;
- break;
- default:
- ss << '%' << std::setfill('0') << std::setw(2) << (int)((unsigned char)c) << std::setw(0);
- }
- }
+ ss << '/' << urlEncodeSegment(segment);
}
- //if the last character was also a slash, then add that back here.
+ // if the last character was also a slash, then add that back here.
if (path.back() == '/')
{
ss << '/';
@@ -176,23 +193,65 @@ Aws::String URI::URLEncodePath(const Aws::String& path)
}
}
-void URI::SetPath(const Aws::String& value)
+Aws::String URI::GetPath() const
{
- const Aws::Vector<Aws::String> pathParts = StringUtils::Split(value, '/');
- Aws::String path;
- path.reserve(value.length() + 1/* in case we have to append slash before the path. */);
+ Aws::String path = "";
- for (const auto& segment : pathParts)
+ for (auto const& segment : m_pathSegments)
{
path.push_back('/');
path.append(segment);
}
- if (value.back() == '/')
+ if (m_pathSegments.empty() || m_pathHasTrailingSlash)
{
path.push_back('/');
}
- m_path = std::move(path);
+
+ return path;
+}
+
+Aws::String URI::GetURLEncodedPath() const
+{
+ Aws::StringStream ss;
+
+ for (auto const& segment : m_pathSegments)
+ {
+ ss << '/' << StringUtils::URLEncode(segment.c_str());
+ }
+
+ if (m_pathSegments.empty() || m_pathHasTrailingSlash)
+ {
+ ss << '/';
+ }
+
+ return ss.str();
+}
+
+Aws::String URI::GetURLEncodedPathRFC3986() const
+{
+ Aws::StringStream ss;
+ ss << std::hex << std::uppercase;
+
+ // escape characters appearing in a URL path according to RFC 3986
+ // (mostly; there is some non-standards legacy support that can be disabled)
+ for (const auto& segment : m_pathSegments)
+ {
+ ss << '/' << urlEncodeSegment(segment);
+ }
+
+ if (m_pathSegments.empty() || m_pathHasTrailingSlash)
+ {
+ ss << '/';
+ }
+
+ return ss.str();
+}
+
+void URI::SetPath(const Aws::String& value)
+{
+ m_pathSegments.clear();
+ AddPathSegments(value);
}
//ugh, this isn't even part of the canonicalization spec. It is part of how our services have implemented their signers though....
@@ -347,9 +406,9 @@ Aws::String URI::GetURIString(bool includeQueryString) const
ss << ":" << m_port;
}
- if(m_path != "/")
+ if (!m_pathSegments.empty())
{
- ss << URLEncodePathRFC3986(m_path);
+ ss << GetURLEncodedPathRFC3986();
}
if(includeQueryString)
@@ -397,10 +456,26 @@ void URI::ExtractAndSetAuthority(const Aws::String& uri)
authorityStart += 3;
}
- size_t posOfEndOfAuthorityPort = uri.find(':', authorityStart);
- size_t posOfEndOfAuthoritySlash = uri.find('/', authorityStart);
- size_t posOfEndOfAuthorityQuery = uri.find('?', authorityStart);
- size_t posEndOfAuthority = (std::min)({posOfEndOfAuthorityPort, posOfEndOfAuthoritySlash, posOfEndOfAuthorityQuery});
+ size_t posEndOfAuthority=0;
+ // are we extracting an ipv6 address?
+ if (uri.length() > authorityStart && uri.at(authorityStart) == '[')
+ {
+ posEndOfAuthority = uri.find(']', authorityStart);
+ if (posEndOfAuthority == Aws::String::npos) {
+ AWS_LOGSTREAM_ERROR("Uri", "Malformed uri: " << uri.c_str());
+ }
+ else
+ {
+ ++posEndOfAuthority;
+ }
+ }
+ else
+ {
+ size_t posOfEndOfAuthorityPort = uri.find(':', authorityStart);
+ size_t posOfEndOfAuthoritySlash = uri.find('/', authorityStart);
+ size_t posOfEndOfAuthorityQuery = uri.find('?', authorityStart);
+ posEndOfAuthority = (std::min)({posOfEndOfAuthorityPort, posOfEndOfAuthoritySlash, posOfEndOfAuthorityQuery});
+ }
if (posEndOfAuthority == Aws::String::npos)
{
posEndOfAuthority = uri.length();
@@ -422,11 +497,25 @@ void URI::ExtractAndSetPort(const Aws::String& uri)
authorityStart += 3;
}
- size_t positionOfPortDelimiter = uri.find(':', authorityStart);
+ size_t portSearchStart = authorityStart;
+ // are we extracting an ipv6 address?
+ if (uri.length() > portSearchStart && uri.at(portSearchStart) == '[')
+ {
+ size_t posEndOfAuthority = uri.find(']', portSearchStart);
+ if (posEndOfAuthority == Aws::String::npos) {
+ AWS_LOGSTREAM_ERROR("Uri", "Malformed uri: " << uri.c_str());
+ }
+ else
+ {
+ portSearchStart = posEndOfAuthority;
+ }
+ }
+
+ size_t positionOfPortDelimiter = uri.find(':', portSearchStart);
bool hasPort = positionOfPortDelimiter != Aws::String::npos;
- if ((uri.find('/', authorityStart) < positionOfPortDelimiter) || (uri.find('?', authorityStart) < positionOfPortDelimiter))
+ if ((uri.find('/', portSearchStart) < positionOfPortDelimiter) || (uri.find('?', portSearchStart) < positionOfPortDelimiter))
{
hasPort = false;
}
@@ -506,5 +595,5 @@ Aws::String URI::GetFormParameters() const
bool URI::CompareURIParts(const URI& other) const
{
- return m_scheme == other.m_scheme && m_authority == other.m_authority && m_path == other.m_path && m_queryString == other.m_queryString;
+ return m_scheme == other.m_scheme && m_authority == other.m_authority && GetPath() == other.GetPath() && m_queryString == other.m_queryString;
}
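
Note: the urlEncodeSegment helper introduced above keeps RFC 3986 unreserved characters plus a handful of reserved ones ('$', '&', ',', ':', '=', '@') unescaped for legacy service compatibility, and percent-encodes everything else as uppercase hex. A standalone approximation of that behavior (not the SDK implementation):

    #include <cctype>
    #include <cstdio>
    #include <string>

    std::string EncodeSegmentSketch(const std::string& segment)
    {
        static const std::string keep = "-_.~$&,:=@";  // unreserved + legacy exceptions
        std::string out;
        for (unsigned char c : segment)
        {
            if (std::isalnum(c) || keep.find(static_cast<char>(c)) != std::string::npos)
            {
                out += static_cast<char>(c);
            }
            else
            {
                char buf[4];
                std::snprintf(buf, sizeof(buf), "%%%02X", c);  // e.g. '/' -> "%2F"
                out += buf;
            }
        }
        return out;
    }
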
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHandleContainer.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHandleContainer.cpp
index 1a965cd795..a6684c640a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHandleContainer.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHandleContainer.cpp
@@ -43,7 +43,7 @@ CURL* CurlHandleContainer::AcquireCurlHandle()
}
CURL* handle = m_handleContainer.Acquire();
- AWS_LOGSTREAM_INFO(CURL_HANDLE_CONTAINER_TAG, "Connection has been released. Continuing.");
+ AWS_LOGSTREAM_DEBUG(CURL_HANDLE_CONTAINER_TAG, "Connection has been released. Continuing.");
AWS_LOGSTREAM_DEBUG(CURL_HANDLE_CONTAINER_TAG, "Returning connection handle " << handle);
return handle;
}
@@ -52,6 +52,9 @@ void CurlHandleContainer::ReleaseCurlHandle(CURL* handle)
{
if (handle)
{
+#if LIBCURL_VERSION_NUM >= 0x074D00 // 7.77.0
+        curl_easy_setopt(handle, CURLOPT_COOKIEFILE, NULL); // work around a memory leak in curl
+#endif
curl_easy_reset(handle);
SetDefaultOptionsOnHandle(handle);
AWS_LOGSTREAM_DEBUG(CURL_HANDLE_CONTAINER_TAG, "Releasing curl handle " << handle);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHttpClient.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHttpClient.cpp
index 95132f5df0..0f64b15062 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHttpClient.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/curl/CurlHttpClient.cpp
@@ -7,9 +7,12 @@
#include <aws/core/http/HttpRequest.h>
#include <aws/core/http/standard/StandardHttpResponse.h>
#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/HashingUtils.h>
#include <aws/core/utils/logging/LogMacros.h>
#include <aws/core/utils/ratelimiter/RateLimiterInterface.h>
#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/crypto/Hash.h>
+#include <aws/core/utils/Outcome.h>
#include <aws/core/monitoring/HttpClientMetrics.h>
#include <cassert>
#include <algorithm>
@@ -146,17 +149,34 @@ struct CurlReadCallbackContext
m_client(client),
m_curlHandle(curlHandle),
m_rateLimiter(limiter),
- m_request(request)
+ m_request(request),
+ m_chunkEnd(false)
{}
const CurlHttpClient* m_client;
CURL* m_curlHandle;
Aws::Utils::RateLimits::RateLimiterInterface* m_rateLimiter;
HttpRequest* m_request;
+ bool m_chunkEnd;
};
static const char* CURL_HTTP_CLIENT_TAG = "CurlHttpClient";
+static int64_t GetContentLengthFromHeader(CURL* connectionHandle,
+ bool& hasContentLength) {
+#if LIBCURL_VERSION_NUM >= 0x073700 // 7.55.0
+ curl_off_t contentLength = {};
+ CURLcode res = curl_easy_getinfo(
+ connectionHandle, CURLINFO_CONTENT_LENGTH_DOWNLOAD_T, &contentLength);
+#else
+ double contentLength = {};
+ CURLcode res = curl_easy_getinfo(
+ connectionHandle, CURLINFO_CONTENT_LENGTH_DOWNLOAD, &contentLength);
+#endif
+ hasContentLength = (res == CURLE_OK) && (contentLength != -1);
+ return hasContentLength ? static_cast<int64_t>(contentLength) : -1;
+}
+
static size_t WriteData(char* ptr, size_t size, size_t nmemb, void* userdata)
{
if (ptr)
@@ -176,8 +196,13 @@ static size_t WriteData(char* ptr, size_t size, size_t nmemb, void* userdata)
context->m_rateLimiter->ApplyAndPayForCost(static_cast<int64_t>(sizeToWrite));
}
+ for (const auto& hashIterator : context->m_request->GetResponseValidationHashes())
+ {
+ hashIterator.second->Update(reinterpret_cast<unsigned char*>(ptr), sizeToWrite);
+ }
+
response->GetResponseBody().write(ptr, static_cast<std::streamsize>(sizeToWrite));
- if (context->m_request->IsEventStreamRequest())
+ if (context->m_request->IsEventStreamRequest() && !response->HasHeader(Aws::Http::X_AMZN_ERROR_TYPE))
{
response->GetResponseBody().flush();
}
@@ -214,8 +239,7 @@ static size_t WriteHeader(char* ptr, size_t size, size_t nmemb, void* userdata)
return 0;
}
-
-static size_t ReadBody(char* ptr, size_t size, size_t nmemb, void* userdata)
+static size_t ReadBody(char* ptr, size_t size, size_t nmemb, void* userdata, bool isStreaming)
{
CurlReadCallbackContext* context = reinterpret_cast<CurlReadCallbackContext*>(userdata);
if(context == nullptr)
@@ -232,10 +256,20 @@ static size_t ReadBody(char* ptr, size_t size, size_t nmemb, void* userdata)
HttpRequest* request = context->m_request;
const std::shared_ptr<Aws::IOStream>& ioStream = request->GetContentBody();
- const size_t amountToRead = size * nmemb;
+ size_t amountToRead = size * nmemb;
+ bool isAwsChunked = request->HasHeader(Aws::Http::CONTENT_ENCODING_HEADER) &&
+ request->GetHeaderValue(Aws::Http::CONTENT_ENCODING_HEADER) == Aws::Http::AWS_CHUNKED_VALUE;
+ // aws-chunk = hex(chunk-size) + CRLF + chunk-data + CRLF
+    // Reserve room for the framing overhead: hex(chunk-size), the CRLF after it, and the trailing CRLF
+ if (isAwsChunked)
+ {
+ Aws::String amountToReadHexString = Aws::Utils::StringUtils::ToHexString(amountToRead);
+ amountToRead -= (amountToReadHexString.size() + 4);
+ }
+
if (ioStream != nullptr && amountToRead > 0)
{
- if (request->IsEventStreamRequest())
+ if (isStreaming)
{
if (ioStream->readsome(ptr, amountToRead) == 0 && !ioStream->eof())
{
@@ -247,6 +281,39 @@ static size_t ReadBody(char* ptr, size_t size, size_t nmemb, void* userdata)
ioStream->read(ptr, amountToRead);
}
size_t amountRead = static_cast<size_t>(ioStream->gcount());
+
+ if (isAwsChunked)
+ {
+ if (amountRead > 0)
+ {
+ if (request->GetRequestHash().second != nullptr)
+ {
+ request->GetRequestHash().second->Update(reinterpret_cast<unsigned char*>(ptr), amountRead);
+ }
+
+ Aws::String hex = Aws::Utils::StringUtils::ToHexString(amountRead);
+ memmove(ptr + hex.size() + 2, ptr, amountRead);
+ memmove(ptr + hex.size() + 2 + amountRead, "\r\n", 2);
+ memmove(ptr, hex.c_str(), hex.size());
+ memmove(ptr + hex.size(), "\r\n", 2);
+ amountRead += hex.size() + 4;
+ }
+ else if (!context->m_chunkEnd)
+ {
+ Aws::StringStream chunkedTrailer;
+ chunkedTrailer << "0\r\n";
+ if (request->GetRequestHash().second != nullptr)
+ {
+ chunkedTrailer << "x-amz-checksum-" << request->GetRequestHash().first << ":"
+ << HashingUtils::Base64Encode(request->GetRequestHash().second->GetHash().GetResult()) << "\r\n";
+ }
+ chunkedTrailer << "\r\n";
+ amountRead = chunkedTrailer.str().size();
+ memcpy(ptr, chunkedTrailer.str().c_str(), amountRead);
+ context->m_chunkEnd = true;
+ }
+ }
+
auto& sentHandler = request->GetDataSentEventHandler();
if (sentHandler)
{
@@ -264,6 +331,14 @@ static size_t ReadBody(char* ptr, size_t size, size_t nmemb, void* userdata)
return 0;
}
+static size_t ReadBodyStreaming(char* ptr, size_t size, size_t nmemb, void* userdata) {
+ return ReadBody(ptr, size, nmemb, userdata, true);
+}
+
+static size_t ReadBodyFunc(char* ptr, size_t size, size_t nmemb, void* userdata) {
+ return ReadBody(ptr, size, nmemb, userdata, false);
+}
+
static size_t SeekBody(void* userdata, curl_off_t offset, int origin)
{
CurlReadCallbackContext* context = reinterpret_cast<CurlReadCallbackContext*>(userdata);
@@ -358,7 +433,11 @@ void SetOptCodeForHttpMethod(CURL* requestHandle, const std::shared_ptr<HttpRequ
}
else
{
+#if LIBCURL_VERSION_NUM >= 0x070c01 // 7.12.1
+ curl_easy_setopt(requestHandle, CURLOPT_UPLOAD, 1L);
+#else
curl_easy_setopt(requestHandle, CURLOPT_PUT, 1L);
+#endif
}
break;
case HttpMethod::HTTP_HEAD:
@@ -579,6 +658,9 @@ std::shared_ptr<HttpResponse> CurlHttpClient::MakeRequest(const std::shared_ptr<
curl_easy_setopt(connectionHandle, CURLOPT_CAINFO, m_caFile.c_str());
}
+ // enable the cookie engine without reading any initial cookies.
+ curl_easy_setopt(connectionHandle, CURLOPT_COOKIEFILE, "");
+
// only set by android test builds because the emulator is missing a cert needed for aws services
#ifdef TEST_CERT_PATH
curl_easy_setopt(connectionHandle, CURLOPT_CAPATH, TEST_CERT_PATH);
@@ -664,12 +746,13 @@ std::shared_ptr<HttpResponse> CurlHttpClient::MakeRequest(const std::shared_ptr<
if (request->GetContentBody())
{
- curl_easy_setopt(connectionHandle, CURLOPT_READFUNCTION, ReadBody);
+ curl_easy_setopt(connectionHandle, CURLOPT_READFUNCTION, ReadBodyFunc);
curl_easy_setopt(connectionHandle, CURLOPT_READDATA, &readContext);
curl_easy_setopt(connectionHandle, CURLOPT_SEEKFUNCTION, SeekBody);
curl_easy_setopt(connectionHandle, CURLOPT_SEEKDATA, &readContext);
- if (request->IsEventStreamRequest())
+ if (request->IsEventStreamRequest() && !response->HasHeader(Aws::Http::X_AMZN_ERROR_TYPE))
{
+ curl_easy_setopt(connectionHandle, CURLOPT_READFUNCTION, ReadBodyStreaming);
curl_easy_setopt(connectionHandle, CURLOPT_NOPROGRESS, 0L);
#if LIBCURL_VERSION_NUM >= 0x072000 // 7.32.0
curl_easy_setopt(connectionHandle, CURLOPT_XFERINFOFUNCTION, CurlProgressCallback);
@@ -714,15 +797,18 @@ std::shared_ptr<HttpResponse> CurlHttpClient::MakeRequest(const std::shared_ptr<
AWS_LOGSTREAM_DEBUG(CURL_HTTP_CLIENT_TAG, "Returned content type " << contentType);
}
+ bool hasContentLength = false;
+ int64_t contentLength =
+ GetContentLengthFromHeader(connectionHandle, hasContentLength);
+
if (request->GetMethod() != HttpMethod::HTTP_HEAD &&
writeContext.m_client->IsRequestProcessingEnabled() &&
- response->HasHeader(Aws::Http::CONTENT_LENGTH_HEADER))
+ hasContentLength)
{
- const Aws::String& contentLength = response->GetHeader(Aws::Http::CONTENT_LENGTH_HEADER);
int64_t numBytesResponseReceived = writeContext.m_numBytesResponseReceived;
AWS_LOGSTREAM_TRACE(CURL_HTTP_CLIENT_TAG, "Response content-length header: " << contentLength);
AWS_LOGSTREAM_TRACE(CURL_HTTP_CLIENT_TAG, "Response body length: " << numBytesResponseReceived);
- if (StringUtils::ConvertToInt64(contentLength.c_str()) != numBytesResponseReceived)
+ if (contentLength != numBytesResponseReceived)
{
response->SetClientErrorType(CoreErrors::NETWORK_CONNECTION);
response->SetClientErrorMessage("Response body length doesn't match the content-length header.");
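
Note: the ReadBody changes implement the aws-chunked transfer format: each chunk is the hex byte count, CRLF, the payload, CRLF, and the stream ends with a zero-length chunk optionally followed by an x-amz-checksum trailer. A standalone sketch of the framing (std::string based; the checksum trailer is only indicated in a comment):

    #include <cstdio>
    #include <string>

    // Frame one aws-chunked chunk: hex(size) CRLF data CRLF.
    std::string FrameAwsChunk(const std::string& data)
    {
        char sizeHex[32];
        std::snprintf(sizeHex, sizeof(sizeHex), "%zx", data.size());
        return std::string(sizeHex) + "\r\n" + data + "\r\n";
    }

    // Terminal chunk; a real client may insert "x-amz-checksum-<algo>:<base64>\r\n"
    // between the "0\r\n" and the final blank line.
    std::string FrameAwsChunkTrailer()
    {
        return "0\r\n\r\n";
    }
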
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpRequest.cpp
index 47a0ee4fac..87b857ca24 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpRequest.cpp
@@ -4,7 +4,7 @@
*/
#include <aws/core/http/standard/StandardHttpRequest.h>
-
+#include <aws/core/utils/logging/LogMacros.h>
#include <aws/core/utils/StringUtils.h>
#include <iostream>
@@ -15,6 +15,8 @@ using namespace Aws::Http;
using namespace Aws::Http::Standard;
using namespace Aws::Utils;
+static const char* STANDARD_HTTP_REQUEST_LOG_TAG = "StandardHttpRequest";
+
static bool IsDefaultPort(const URI& uri)
{
switch(uri.GetPort())
@@ -59,8 +61,13 @@ HeaderValueCollection StandardHttpRequest::GetHeaders() const
const Aws::String& StandardHttpRequest::GetHeaderValue(const char* headerName) const
{
- auto iter = headerMap.find(headerName);
+ auto iter = headerMap.find(StringUtils::ToLower(headerName));
assert (iter != headerMap.end());
+ if (iter == headerMap.end()) {
+ AWS_LOGSTREAM_ERROR(STANDARD_HTTP_REQUEST_LOG_TAG, "Requested a header value for a missing header key: " << headerName);
+ static const Aws::String EMPTY_STRING = "";
+ return EMPTY_STRING;
+ }
return iter->second;
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpResponse.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpResponse.cpp
index 92d7a062b6..8b62ae5e63 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpResponse.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/http/standard/StandardHttpResponse.cpp
@@ -6,6 +6,7 @@
#include <aws/core/http/standard/StandardHttpResponse.h>
#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/logging/LogMacros.h>
#include <aws/core/utils/memory/AWSMemory.h>
#include <istream>
@@ -14,6 +15,7 @@ using namespace Aws::Http;
using namespace Aws::Http::Standard;
using namespace Aws::Utils;
+static const char* STANDARD_HTTP_RESPONSE_LOG_TAG = "StandardHttpResponse";
HeaderValueCollection StandardHttpResponse::GetHeaders() const
{
@@ -35,6 +37,12 @@ bool StandardHttpResponse::HasHeader(const char* headerName) const
const Aws::String& StandardHttpResponse::GetHeader(const Aws::String& headerName) const
{
Aws::Map<Aws::String, Aws::String>::const_iterator foundValue = headerMap.find(StringUtils::ToLower(headerName.c_str()));
+ assert(foundValue != headerMap.end());
+ if (foundValue == headerMap.end()) {
+ AWS_LOGSTREAM_ERROR(STANDARD_HTTP_RESPONSE_LOG_TAG, "Requested a header value for a missing header key: " << headerName);
+ static const Aws::String EMPTY_STRING = "";
+ return EMPTY_STRING;
+ }
return foundValue->second;
}
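
Note: both header-lookup fixes above follow the same defensive pattern: normalize the key to lower case, keep the assert for debug builds, but return a static empty string instead of dereferencing end() in release builds. A simplified standalone sketch of that pattern (std::map instead of the SDK collections):

    #include <algorithm>
    #include <cassert>
    #include <cctype>
    #include <map>
    #include <string>

    const std::string& GetHeaderOrEmpty(const std::map<std::string, std::string>& headers,
                                        std::string name)
    {
        std::transform(name.begin(), name.end(), name.begin(),
                       [](unsigned char c) { return static_cast<char>(std::tolower(c)); });
        auto it = headers.find(name);
        assert(it != headers.end());
        if (it == headers.end())
        {
            static const std::string kEmpty;  // safe fallback instead of UB in release builds
            return kEmpty;
        }
        return it->second;
    }
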
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/internal/AWSHttpResourceClient.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/internal/AWSHttpResourceClient.cpp
index 2f372ec82a..ca664cc6c4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/internal/AWSHttpResourceClient.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/internal/AWSHttpResourceClient.cpp
@@ -140,12 +140,12 @@ namespace Aws
}
const Aws::Client::AWSError<Aws::Client::CoreErrors> error = [this, &response]() {
- if (response->HasClientError() || response->GetResponseBody().tellp() < 1)
+ if (response->HasClientError() || response->GetResponseCode() == HttpResponseCode::REQUEST_NOT_MADE)
{
AWS_LOGSTREAM_ERROR(m_logtag.c_str(), "Http request to retrieve credentials failed");
return AWSError<CoreErrors>(CoreErrors::NETWORK_CONNECTION, true); // Retryable
}
- else if (m_errorMarshaller)
+ else if (m_errorMarshaller && response->GetResponseBody().tellp() > 0)
{
return m_errorMarshaller->Marshall(*response);
}
@@ -170,14 +170,23 @@ namespace Aws
}
}
- EC2MetadataClient::EC2MetadataClient(const char* endpoint)
- : AWSHttpResourceClient(EC2_METADATA_CLIENT_LOG_TAG), m_endpoint(endpoint), m_tokenRequired(true)
+ EC2MetadataClient::EC2MetadataClient(const char *endpoint) :
+ AWSHttpResourceClient(EC2_METADATA_CLIENT_LOG_TAG),
+ m_endpoint(endpoint),
+ m_disableIMDS(false),
+ m_tokenRequired(true)
{
+
}
- EC2MetadataClient::EC2MetadataClient(const Aws::Client::ClientConfiguration &clientConfiguration, const char *endpoint)
- : AWSHttpResourceClient(clientConfiguration, EC2_METADATA_CLIENT_LOG_TAG), m_endpoint(endpoint), m_tokenRequired(true)
+ EC2MetadataClient::EC2MetadataClient(const Aws::Client::ClientConfiguration &clientConfiguration,
+ const char *endpoint) :
+ AWSHttpResourceClient(clientConfiguration, EC2_METADATA_CLIENT_LOG_TAG),
+ m_endpoint(endpoint),
+ m_disableIMDS(clientConfiguration.disableIMDS),
+ m_tokenRequired(true)
{
+
}
EC2MetadataClient::~EC2MetadataClient()
@@ -190,15 +199,20 @@ namespace Aws
return GetResource(m_endpoint.c_str(), resourcePath, nullptr/*authToken*/);
}
+#if !defined(DISABLE_IMDSV1)
Aws::String EC2MetadataClient::GetDefaultCredentials() const
{
+ if (m_disableIMDS) {
+ AWS_LOGSTREAM_TRACE(m_logtag.c_str(), "Skipping call to IMDS Service");
+ return {};
+ }
std::unique_lock<std::recursive_mutex> locker(m_tokenMutex);
if (m_tokenRequired)
{
return GetDefaultCredentialsSecurely();
}
- AWS_LOGSTREAM_TRACE(m_logtag.c_str(), "Getting default credentials for ec2 instance");
+ AWS_LOGSTREAM_TRACE(m_logtag.c_str(), "Getting default credentials for ec2 instance from " << m_endpoint);
auto result = GetResourceWithAWSWebServiceResult(m_endpoint.c_str(), EC2_SECURITY_CREDENTIALS_RESOURCE, nullptr);
Aws::String credentialsString = result.GetPayload();
auto httpResponseCode = result.GetResponseCode();
@@ -232,14 +246,20 @@ namespace Aws
AWS_LOGSTREAM_DEBUG(m_logtag.c_str(), "Calling EC2MetadataService resource " << ss.str());
return GetResource(ss.str().c_str());
}
+#endif
Aws::String EC2MetadataClient::GetDefaultCredentialsSecurely() const
{
+ if (m_disableIMDS) {
+ AWS_LOGSTREAM_TRACE(m_logtag.c_str(), "Skipping call to IMDS Service");
+ return {};
+ }
std::unique_lock<std::recursive_mutex> locker(m_tokenMutex);
- if (!m_tokenRequired)
- {
+#if !defined(DISABLE_IMDSV1)
+ if (!m_tokenRequired) {
return GetDefaultCredentials();
}
+#endif
Aws::StringStream ss;
ss << m_endpoint << EC2_IMDS_TOKEN_RESOURCE;
@@ -257,12 +277,14 @@ namespace Aws
{
return {};
}
+#if !defined(DISABLE_IMDSV1)
else if (result.GetResponseCode() != HttpResponseCode::OK || trimmedTokenString.empty())
{
m_tokenRequired = false;
AWS_LOGSTREAM_TRACE(m_logtag.c_str(), "Calling EC2MetadataService to get token failed, falling back to less secure way.");
return GetDefaultCredentials();
}
+#endif
m_token = trimmedTokenString;
locker.unlock();
ss.str("");
@@ -278,7 +300,7 @@ namespace Aws
AWS_LOGSTREAM_DEBUG(m_logtag.c_str(), "Calling EC2MetadataService resource, " << EC2_SECURITY_CREDENTIALS_RESOURCE
<< " with token returned profile string " << trimmedProfileString);
- if (securityCredentials.size() == 0)
+ if (securityCredentials.empty())
{
AWS_LOGSTREAM_WARN(m_logtag.c_str(), "Calling EC2Metadataservice to get profiles failed");
return {};
@@ -296,6 +318,10 @@ namespace Aws
Aws::String EC2MetadataClient::GetCurrentRegion() const
{
+ if (m_disableIMDS) {
+ AWS_LOGSTREAM_TRACE(m_logtag.c_str(), "Skipping call to IMDS Service");
+ return {};
+ }
if (!m_region.empty())
{
return m_region;
@@ -311,6 +337,7 @@ namespace Aws
std::lock_guard<std::recursive_mutex> locker(m_tokenMutex);
if (m_tokenRequired)
{
+ GetDefaultCredentialsSecurely();
regionRequest->SetHeaderValue(EC2_IMDS_TOKEN_HEADER, m_token);
}
}
@@ -351,6 +378,16 @@ namespace Aws
return region;
}
+ void EC2MetadataClient::SetEndpoint(const Aws::String& endpoint)
+ {
+ m_endpoint = endpoint;
+ }
+
+ Aws::String EC2MetadataClient::GetEndpoint() const
+ {
+ return Aws::String(m_endpoint);
+ }
+
#ifdef _MSC_VER
// VS2015 compiler's bug, warning s_ec2metadataClient: symbol will be dynamically initialized (implementation limitation)
AWS_SUPPRESS_WARNING(4592,
@@ -366,7 +403,39 @@ namespace Aws
{
return;
}
- s_ec2metadataClient = Aws::MakeShared<EC2MetadataClient>(EC2_METADATA_CLIENT_LOG_TAG);
+ Aws::String ec2MetadataServiceEndpoint = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT");
+ if (ec2MetadataServiceEndpoint.empty())
+ {
+ Aws::String ec2MetadataServiceEndpointMode = Aws::Environment::GetEnv("AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE").c_str();
+ if (ec2MetadataServiceEndpointMode.length() == 0 )
+ {
+ ec2MetadataServiceEndpoint = "http://169.254.169.254"; //default to IPv4 default endpoint
+ }
+ else
+ {
+ if (ec2MetadataServiceEndpointMode.length() == 4 )
+ {
+ if (Aws::Utils::StringUtils::CaselessCompare(ec2MetadataServiceEndpointMode.c_str(), "ipv4"))
+ {
+ ec2MetadataServiceEndpoint = "http://169.254.169.254"; //default to IPv4 default endpoint
+ }
+ else if (Aws::Utils::StringUtils::CaselessCompare(ec2MetadataServiceEndpointMode.c_str(), "ipv6"))
+ {
+ ec2MetadataServiceEndpoint = "http://[fd00:ec2::254]";
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(EC2_METADATA_CLIENT_LOG_TAG, "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE can only be set to ipv4 or ipv6, received: " << ec2MetadataServiceEndpointMode );
+ }
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(EC2_METADATA_CLIENT_LOG_TAG, "AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE can only be set to ipv4 or ipv6, received: " << ec2MetadataServiceEndpointMode );
+ }
+ }
+ }
+ AWS_LOGSTREAM_INFO(EC2_METADATA_CLIENT_LOG_TAG, "Using IMDS endpoint: " << ec2MetadataServiceEndpoint);
+ s_ec2metadataClient = Aws::MakeShared<EC2MetadataClient>(EC2_METADATA_CLIENT_LOG_TAG, ec2MetadataServiceEndpoint.c_str());
}
void CleanupEC2MetadataClient()
@@ -383,7 +452,6 @@ namespace Aws
return s_ec2metadataClient;
}
-
ECSCredentialsClient::ECSCredentialsClient(const char* resourcePath, const char* endpoint, const char* token)
: AWSHttpResourceClient(ECS_CREDENTIALS_CLIENT_LOG_TAG),
m_resourcePath(resourcePath), m_endpoint(endpoint), m_token(token)
@@ -510,6 +578,17 @@ namespace Aws
{
SetErrorMarshaller(Aws::MakeUnique<Aws::Client::JsonErrorMarshaller>(SSO_RESOURCE_CLIENT_LOG_TAG));
+ m_endpoint = buildEndpoint(clientConfiguration, "portal.sso.", "federation/credentials");
+ m_oidcEndpoint = buildEndpoint(clientConfiguration, "oidc.", "token");
+
+ AWS_LOGSTREAM_INFO(SSO_RESOURCE_CLIENT_LOG_TAG, "Creating SSO ResourceClient with endpoint: " << m_endpoint);
+ }
+
+ Aws::String SSOCredentialsClient::buildEndpoint(
+ const Aws::Client::ClientConfiguration& clientConfiguration,
+ const Aws::String& domain,
+ const Aws::String& endpoint)
+ {
Aws::StringStream ss;
if (clientConfiguration.scheme == Aws::Http::Scheme::HTTP)
{
@@ -525,15 +604,12 @@ namespace Aws
auto hash = Aws::Utils::HashingUtils::HashString(clientConfiguration.region.c_str());
AWS_LOGSTREAM_DEBUG(SSO_RESOURCE_CLIENT_LOG_TAG, "Preparing SSO client for region: " << clientConfiguration.region);
-
- ss << "portal.sso." << clientConfiguration.region << ".amazonaws.com/federation/credentials";
+ ss << domain << clientConfiguration.region << ".amazonaws.com/" << endpoint;
if (hash == CN_NORTH_1_HASH || hash == CN_NORTHWEST_1_HASH)
{
ss << ".cn";
}
- m_endpoint = ss.str();
-
- AWS_LOGSTREAM_INFO(SSO_RESOURCE_CLIENT_LOG_TAG, "Creating SSO ResourceClient with endpoint: " << m_endpoint);
+ return ss.str();
}
SSOCredentialsClient::SSOGetRoleCredentialsResult SSOCredentialsClient::GetSSOCredentials(const SSOGetRoleCredentialsRequest &request)
@@ -571,5 +647,70 @@ namespace Aws
result.creds = creds;
return result;
}
+
+        // An internal SSO CreateToken implementation to keep the core package lightweight and avoid a dependency on sso-oidc
+ SSOCredentialsClient::SSOCreateTokenResult SSOCredentialsClient::CreateToken(const SSOCreateTokenRequest& request)
+ {
+ std::shared_ptr<HttpRequest> httpRequest(CreateHttpRequest(m_oidcEndpoint, HttpMethod::HTTP_POST,
+ Aws::Utils::Stream::DefaultResponseStreamFactoryMethod));
+ SSOCreateTokenResult result;
+ if(!httpRequest) {
+ AWS_LOGSTREAM_FATAL(SSO_RESOURCE_CLIENT_LOG_TAG, "Failed to CreateHttpRequest: nullptr returned");
+ return result;
+ }
+ httpRequest->SetUserAgent(ComputeUserAgentString());
+
+ Json::JsonValue requestDoc;
+ if(!request.clientId.empty()) {
+ requestDoc.WithString("clientId", request.clientId);
+ }
+ if(!request.clientSecret.empty()) {
+ requestDoc.WithString("clientSecret", request.clientSecret);
+ }
+ if(!request.grantType.empty()) {
+ requestDoc.WithString("grantType", request.grantType);
+ }
+ if(!request.refreshToken.empty()) {
+ requestDoc.WithString("refreshToken", request.refreshToken);
+ }
+
+ std::shared_ptr<Aws::IOStream> body = Aws::MakeShared<Aws::StringStream>("SSO_BEARER_TOKEN_CREATE_TOKEN");
+ if(!body) {
+ AWS_LOGSTREAM_FATAL(SSO_RESOURCE_CLIENT_LOG_TAG, "Failed to allocate body"); // exceptions disabled
+ return result;
+ }
+            *body << requestDoc.View().WriteReadable();
+
+ httpRequest->AddContentBody(body);
+ body->seekg(0, body->end);
+ auto streamSize = body->tellg();
+ body->seekg(0, body->beg);
+ Aws::StringStream contentLength;
+ contentLength << streamSize;
+ httpRequest->SetContentLength(contentLength.str());
+ httpRequest->SetContentType("application/json");
+
+ Aws::String rawReply = GetResourceWithAWSWebServiceResult(httpRequest).GetPayload();
+ Json::JsonValue refreshTokenDoc(rawReply);
+ Utils::Json::JsonView jsonValue = refreshTokenDoc.View();
+
+ if(jsonValue.ValueExists("accessToken")) {
+ result.accessToken = jsonValue.GetString("accessToken");
+ }
+ if(jsonValue.ValueExists("tokenType")) {
+ result.tokenType = jsonValue.GetString("tokenType");
+ }
+ if(jsonValue.ValueExists("expiresIn")) {
+ result.expiresIn = jsonValue.GetInteger("expiresIn");
+ }
+ if(jsonValue.ValueExists("idToken")) {
+ result.idToken = jsonValue.GetString("idToken");
+ }
+ if(jsonValue.ValueExists("refreshToken")) {
+ result.refreshToken = jsonValue.GetString("refreshToken");
+ }
+
+ return result;
+ }
}
}
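
Note: the new client-initialization logic above resolves the IMDS endpoint from two environment variables: an explicit AWS_EC2_METADATA_SERVICE_ENDPOINT wins, otherwise AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE selects the IPv4 or IPv6 default. A standalone sketch of that decision (using std::getenv and simple case variants instead of the SDK's environment wrapper and caseless compare):

    #include <cstdlib>
    #include <string>

    std::string ResolveImdsEndpoint()
    {
        const char* endpoint = std::getenv("AWS_EC2_METADATA_SERVICE_ENDPOINT");
        if (endpoint && *endpoint)
        {
            return endpoint;
        }
        const char* modeEnv = std::getenv("AWS_EC2_METADATA_SERVICE_ENDPOINT_MODE");
        const std::string mode = modeEnv ? modeEnv : "";
        if (mode == "ipv6" || mode == "IPv6" || mode == "IPV6")
        {
            return "http://[fd00:ec2::254]";
        }
        // empty, "ipv4", or anything unrecognized (which the SDK logs as an error) falls back to IPv4
        return "http://169.254.169.254";
    }
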
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/monitoring/MonitoringManager.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/monitoring/MonitoringManager.cpp
index 7a8d3adb41..d6891933c7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/monitoring/MonitoringManager.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/monitoring/MonitoringManager.cpp
@@ -25,16 +25,18 @@ namespace Aws
/**
* Global factory to create global metrics instance.
*/
- static Aws::UniquePtr<Monitors> s_monitors;
+ static Monitors* s_monitors(nullptr);
Aws::Vector<void*> OnRequestStarted(const Aws::String& serviceName, const Aws::String& requestName, const std::shared_ptr<const Aws::Http::HttpRequest>& request)
{
- assert(s_monitors);
Aws::Vector<void*> contexts;
- contexts.reserve(s_monitors->size());
- for (const auto& interface: *s_monitors)
+ if (s_monitors)
{
- contexts.emplace_back(interface->OnRequestStarted(serviceName, requestName, request));
+ contexts.reserve(s_monitors->size());
+ for (const auto& interface: *s_monitors)
+ {
+ contexts.emplace_back(interface->OnRequestStarted(serviceName, requestName, request));
+ }
}
return contexts;
}
@@ -42,48 +44,56 @@ namespace Aws
void OnRequestSucceeded(const Aws::String& serviceName, const Aws::String& requestName, const std::shared_ptr<const Aws::Http::HttpRequest>& request,
const Aws::Client::HttpResponseOutcome& outcome, const CoreMetricsCollection& metricsFromCore, const Aws::Vector<void*>& contexts)
{
- assert(s_monitors);
- assert(contexts.size() == s_monitors->size());
- size_t index = 0;
- for (const auto& interface: *s_monitors)
+ if (s_monitors)
{
- interface->OnRequestSucceeded(serviceName, requestName, request, outcome, metricsFromCore, contexts[index++]);
+ assert(contexts.size() == s_monitors->size());
+ size_t index = 0;
+ for (const auto& interface: *s_monitors)
+ {
+ interface->OnRequestSucceeded(serviceName, requestName, request, outcome, metricsFromCore, contexts[index++]);
+ }
}
}
void OnRequestFailed(const Aws::String& serviceName, const Aws::String& requestName, const std::shared_ptr<const Aws::Http::HttpRequest>& request,
const Aws::Client::HttpResponseOutcome& outcome, const CoreMetricsCollection& metricsFromCore, const Aws::Vector<void*>& contexts)
{
- assert(s_monitors);
- assert(contexts.size() == s_monitors->size());
- size_t index = 0;
- for (const auto& interface: *s_monitors)
+ if (s_monitors)
{
- interface->OnRequestFailed(serviceName, requestName, request, outcome, metricsFromCore, contexts[index++]);
+ assert(contexts.size() == s_monitors->size());
+ size_t index = 0;
+ for (const auto& interface: *s_monitors)
+ {
+ interface->OnRequestFailed(serviceName, requestName, request, outcome, metricsFromCore, contexts[index++]);
+ }
}
}
void OnRequestRetry(const Aws::String& serviceName, const Aws::String& requestName,
const std::shared_ptr<const Aws::Http::HttpRequest>& request, const Aws::Vector<void*>& contexts)
{
- assert(s_monitors);
- assert(contexts.size() == s_monitors->size());
- size_t index = 0;
- for (const auto& interface: *s_monitors)
+ if (s_monitors)
{
- interface->OnRequestRetry(serviceName, requestName, request, contexts[index++]);
+ assert(contexts.size() == s_monitors->size());
+ size_t index = 0;
+ for (const auto& interface: *s_monitors)
+ {
+ interface->OnRequestRetry(serviceName, requestName, request, contexts[index++]);
+ }
}
}
void OnFinish(const Aws::String& serviceName, const Aws::String& requestName,
const std::shared_ptr<const Aws::Http::HttpRequest>& request, const Aws::Vector<void*>& contexts)
{
- assert(s_monitors);
- assert(contexts.size() == s_monitors->size());
- size_t index = 0;
- for (const auto& interface: *s_monitors)
+ if (s_monitors)
{
- interface->OnFinish(serviceName, requestName, request, contexts[index++]);
+ assert(contexts.size() == s_monitors->size());
+ size_t index = 0;
+ for (const auto& interface: *s_monitors)
+ {
+ interface->OnFinish(serviceName, requestName, request, contexts[index++]);
+ }
}
}
@@ -93,7 +103,8 @@ namespace Aws
{
return;
}
- s_monitors = Aws::MakeUnique<Monitors>(MonitoringTag);
+ assert(Aws::get_aws_allocator() != nullptr);
+ s_monitors = Aws::New<Monitors>(MonitoringTag);
for (const auto& function: monitoringFactoryCreateFunctions)
{
auto factory = function();
@@ -117,11 +128,7 @@ namespace Aws
void CleanupMonitoring()
{
- if (!s_monitors)
- {
- return;
- }
-
+ Aws::Delete(s_monitors);
s_monitors = nullptr;
}
} // namespace Monitoring
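
The hunk above drops the hard assert on the global monitor list: after CleanupMonitoring() the callbacks degrade to no-ops instead of asserting, and the list itself becomes a raw pointer created with Aws::New and released with Aws::Delete, so its lifetime no longer depends on static destruction order. A minimal standalone sketch of that null-guarded global-registry pattern (hypothetical names, plain new/delete instead of the SDK allocator):

    #include <vector>

    struct Listener
    {
        virtual ~Listener() = default;
        virtual void OnEvent() = 0;
    };

    // Raw pointer instead of a smart pointer: lifetime is controlled explicitly
    // by Init/Cleanup, mirroring InitializeMonitoring()/CleanupMonitoring() above.
    static std::vector<Listener*>* s_listeners = nullptr;

    void InitListeners()    { if (!s_listeners) s_listeners = new std::vector<Listener*>(); }
    void CleanupListeners() { delete s_listeners; s_listeners = nullptr; }

    void FireEvent()
    {
        if (s_listeners)                    // guard instead of assert(s_listeners)
        {
            for (auto* l : *s_listeners)
            {
                l->OnEvent();
            }
        }
    }
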
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
index b690c90c2d..5ef76dcfc6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/DateTimeCommon.cpp
@@ -176,7 +176,7 @@ static int GetWeekDayNumberFromStr(const char* timeString, size_t startIndex, si
}
}
-//Get the 0-11 monthy number from a string representing Month. Case insensitive and will stop on abbreviation
+//Get the 0-11 monthly number from a string representing Month. Case insensitive and will stop on abbreviation
static int GetMonthNumberFromStr(const char* timeString, size_t startIndex, size_t stopIndex)
{
if (stopIndex - startIndex < 3)
@@ -842,7 +842,9 @@ public:
break;
case 6:
- if ((c == 'Z' || c == '+' || c == '-' ) && (index - stateStartIndex == 3))
+ if ((c == 'Z' || c == '+' || c == '-' ) &&
+ (index - stateStartIndex >= 3) &&
+ (index - stateStartIndex <= 9))
{
m_tz[0] = c;
m_state = 7;
@@ -1268,6 +1270,12 @@ double DateTime::SecondsWithMSPrecision() const
return timestamp.count();
}
+int64_t DateTime::Seconds() const
+{
+ auto timestamp = std::chrono::duration_cast<std::chrono::seconds>(m_time.time_since_epoch());
+ return timestamp.count();
+}
+
int64_t DateTime::Millis() const
{
auto timestamp = std::chrono::duration_cast<std::chrono::milliseconds>(m_time.time_since_epoch());
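
The new DateTime::Seconds() accessor added above follows the same duration_cast pattern as Millis(), just truncating to whole seconds. A self-contained illustration with plain std::chrono (no SDK types):

    #include <chrono>
    #include <cstdint>
    #include <iostream>

    int main()
    {
        const auto sinceEpoch = std::chrono::system_clock::now().time_since_epoch();
        const int64_t seconds = std::chrono::duration_cast<std::chrono::seconds>(sinceEpoch).count();
        const int64_t millis  = std::chrono::duration_cast<std::chrono::milliseconds>(sinceEpoch).count();
        std::cout << seconds << " s, " << millis << " ms since epoch\n";
        return 0;
    }
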
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
new file mode 100644
index 0000000000..ef8210aeb1
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/Document.cpp
@@ -0,0 +1,673 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/utils/Document.h>
+
+#include <iterator>
+#include <algorithm>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/json/JsonSerializer.h>
+
+using namespace Aws::Utils;
+
+Document::Document() : m_wasParseSuccessful(true)
+{
+ m_json = nullptr;
+}
+
+Document::Document(cJSON* value) :
+ m_json(cJSON_AS4CPP_Duplicate(value, true /* recurse */)),
+ m_wasParseSuccessful(true)
+{
+}
+
+Document::Document(const Aws::String& value) : m_wasParseSuccessful(true)
+{
+ const char* return_parse_end;
+ m_json = cJSON_AS4CPP_ParseWithOpts(value.c_str(), &return_parse_end, 1/*require_null_terminated*/);
+
+ if (!m_json || cJSON_AS4CPP_IsInvalid(m_json))
+ {
+ m_wasParseSuccessful = false;
+ m_errorMessage = "Failed to parse JSON at: ";
+ m_errorMessage += return_parse_end;
+ }
+}
+
+Document::Document(Aws::IStream& istream) : m_wasParseSuccessful(true)
+{
+ Aws::StringStream memoryStream;
+ std::copy(std::istreambuf_iterator<char>(istream), std::istreambuf_iterator<char>(), std::ostreambuf_iterator<char>(memoryStream));
+ const char* return_parse_end;
+ const auto input = memoryStream.str();
+ m_json = cJSON_AS4CPP_ParseWithOpts(input.c_str(), &return_parse_end, 1/*require_null_terminated*/);
+
+ if (!m_json || cJSON_AS4CPP_IsInvalid(m_json))
+ {
+ m_wasParseSuccessful = false;
+ m_errorMessage = "Failed to parse JSON. Invalid input at: ";
+ m_errorMessage += return_parse_end;
+ }
+}
+
+Document::Document(const Document& value) :
+ m_json(cJSON_AS4CPP_Duplicate(value.m_json, true/*recurse*/)),
+ m_wasParseSuccessful(value.m_wasParseSuccessful),
+ m_errorMessage(value.m_errorMessage)
+{
+}
+
+Document::Document(Document&& value) :
+ m_json(value.m_json),
+ m_wasParseSuccessful(value.m_wasParseSuccessful),
+ m_errorMessage(std::move(value.m_errorMessage))
+{
+ value.m_json = nullptr;
+}
+
+Document::Document(const Json::JsonView& view) :
+ m_json(cJSON_AS4CPP_Duplicate(view.m_value, true/*recurse*/)),
+ m_wasParseSuccessful(true),
+ m_errorMessage({})
+{
+}
+
+void Document::Destroy()
+{
+ cJSON_AS4CPP_Delete(m_json);
+}
+
+Document::~Document()
+{
+ Destroy();
+}
+
+Document& Document::operator=(const Document& other)
+{
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ Destroy();
+ m_json = cJSON_AS4CPP_Duplicate(other.m_json, true /*recurse*/);
+ m_wasParseSuccessful = other.m_wasParseSuccessful;
+ m_errorMessage = other.m_errorMessage;
+ return *this;
+}
+
+Document& Document::operator=(Document&& other)
+{
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ using std::swap;
+ swap(m_json, other.m_json);
+ swap(m_errorMessage, other.m_errorMessage);
+ m_wasParseSuccessful = other.m_wasParseSuccessful;
+ return *this;
+}
+
+Document& Document::operator=(const Json::JsonView& other)
+{
+ Destroy();
+ m_json = cJSON_AS4CPP_Duplicate(other.m_value, true /*recurse*/);
+ m_wasParseSuccessful = true;
+ m_errorMessage = {};
+ return *this;
+}
+
+bool Document::operator==(const Document& other) const
+{
+ return cJSON_AS4CPP_Compare(m_json, other.m_json, true /*case-sensitive*/) != 0;
+}
+
+bool Document::operator!=(const Document& other) const
+{
+ return !(*this == other);
+}
+
+static void AddOrReplace(cJSON* root, const char* key, cJSON* value)
+{
+ const auto existing = cJSON_AS4CPP_GetObjectItemCaseSensitive(root, key);
+ if (existing)
+ {
+ cJSON_AS4CPP_ReplaceItemInObjectCaseSensitive(root, key, value);
+ }
+ else
+ {
+ cJSON_AS4CPP_AddItemToObject(root, key, value);
+ }
+}
+
+Document& Document::WithString(const char* key, const Aws::String& value)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ const auto val = cJSON_AS4CPP_CreateString(value.c_str());
+ AddOrReplace(m_json, key, val);
+ return *this;
+}
+
+Document& Document::WithString(const Aws::String& key, const Aws::String& value)
+{
+ return WithString(key.c_str(), value);
+}
+
+Document& Document::AsString(const Aws::String& value)
+{
+ Destroy();
+ m_json = cJSON_AS4CPP_CreateString(value.c_str());
+ return *this;
+}
+
+Document& Document::WithBool(const char* key, bool value)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ const auto val = cJSON_AS4CPP_CreateBool(value);
+ AddOrReplace(m_json, key, val);
+ return *this;
+}
+
+Document& Document::WithBool(const Aws::String& key, bool value)
+{
+ return WithBool(key.c_str(), value);
+}
+
+Document& Document::AsBool(bool value)
+{
+ Destroy();
+ m_json = cJSON_AS4CPP_CreateBool(value);
+ return *this;
+}
+
+Document& Document::WithInteger(const char* key, int value)
+{
+ return WithDouble(key, static_cast<double>(value));
+}
+
+Document& Document::WithInteger(const Aws::String& key, int value)
+{
+ return WithDouble(key.c_str(), static_cast<double>(value));
+}
+
+Document& Document::AsInteger(int value)
+{
+ Destroy();
+ m_json = cJSON_AS4CPP_CreateNumber(static_cast<double>(value));
+ return *this;
+}
+
+Document& Document::WithInt64(const char* key, long long value)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ const auto val = cJSON_AS4CPP_CreateInt64(value);
+ AddOrReplace(m_json, key, val);
+ return *this;
+}
+
+Document& Document::WithInt64(const Aws::String& key, long long value)
+{
+ return WithInt64(key.c_str(), value);
+}
+
+Document& Document::AsInt64(long long value)
+{
+ Destroy();
+ m_json = cJSON_AS4CPP_CreateInt64(value);
+ return *this;
+}
+
+Document& Document::WithDouble(const char* key, double value)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ const auto val = cJSON_AS4CPP_CreateNumber(value);
+ AddOrReplace(m_json, key, val);
+ return *this;
+}
+
+Document& Document::WithDouble(const Aws::String& key, double value)
+{
+ return WithDouble(key.c_str(), value);
+}
+
+Document& Document::AsDouble(double value)
+{
+ Destroy();
+ m_json = cJSON_AS4CPP_CreateNumber(value);
+ return *this;
+}
+
+Document& Document::WithArray(const char* key, const Array<Aws::String>& array)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ auto arrayValue = cJSON_AS4CPP_CreateArray();
+ for (unsigned i = 0; i < array.GetLength(); ++i)
+ {
+ cJSON_AS4CPP_AddItemToArray(arrayValue, cJSON_AS4CPP_CreateString(array[i].c_str()));
+ }
+
+ AddOrReplace(m_json, key, arrayValue);
+ return *this;
+}
+
+Document& Document::WithArray(const Aws::String& key, const Array<Aws::String>& array)
+{
+ return WithArray(key.c_str(), array);
+}
+
+Document& Document::WithArray(const Aws::String& key, const Array<Document>& array)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ auto arrayValue = cJSON_AS4CPP_CreateArray();
+ for (unsigned i = 0; i < array.GetLength(); ++i)
+ {
+ cJSON_AS4CPP_AddItemToArray(arrayValue, cJSON_AS4CPP_Duplicate(array[i].m_json, true /*recurse*/));
+ }
+
+ AddOrReplace(m_json, key.c_str(), arrayValue);
+ return *this;
+}
+
+Document& Document::WithArray(const Aws::String& key, Array<Document>&& array)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ auto arrayValue = cJSON_AS4CPP_CreateArray();
+ for (unsigned i = 0; i < array.GetLength(); ++i)
+ {
+ cJSON_AS4CPP_AddItemToArray(arrayValue, array[i].m_json);
+ array[i].m_json = nullptr;
+ }
+
+ AddOrReplace(m_json, key.c_str(), arrayValue);
+ return *this;
+}
+
+Document& Document::AsArray(const Array<Document>& array)
+{
+ auto arrayValue = cJSON_AS4CPP_CreateArray();
+ for (unsigned i = 0; i < array.GetLength(); ++i)
+ {
+ cJSON_AS4CPP_AddItemToArray(arrayValue, cJSON_AS4CPP_Duplicate(array[i].m_json, true /*recurse*/));
+ }
+
+ Destroy();
+ m_json = arrayValue;
+ return *this;
+}
+
+Document& Document::AsArray(Array<Document>&& array)
+{
+ auto arrayValue = cJSON_AS4CPP_CreateArray();
+ for (unsigned i = 0; i < array.GetLength(); ++i)
+ {
+ cJSON_AS4CPP_AddItemToArray(arrayValue, array[i].m_json);
+ array[i].m_json = nullptr;
+ }
+
+ Destroy();
+ m_json = arrayValue;
+ return *this;
+}
+
+Document& Document::WithObject(const char* key, const Document& value)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ const auto copy = value.m_json == nullptr ? cJSON_AS4CPP_CreateObject() : cJSON_AS4CPP_Duplicate(value.m_json, true /*recurse*/);
+ AddOrReplace(m_json, key, copy);
+ return *this;
+}
+
+Document& Document::WithObject(const Aws::String& key, const Document& value)
+{
+ return WithObject(key.c_str(), value);
+}
+
+Document& Document::WithObject(const char* key, Document&& value)
+{
+ if (!m_json)
+ {
+ m_json = cJSON_AS4CPP_CreateObject();
+ }
+
+ AddOrReplace(m_json, key, value.m_json == nullptr ? cJSON_AS4CPP_CreateObject() : value.m_json);
+ value.m_json = nullptr;
+ return *this;
+}
+
+Document& Document::WithObject(const Aws::String& key, Document&& value)
+{
+ return WithObject(key.c_str(), std::move(value));
+}
+
+Document& Document::AsObject(const Document& value)
+{
+ *this = value;
+ return *this;
+}
+
+Document& Document::AsObject(Document && value)
+{
+ *this = std::move(value);
+ return *this;
+}
+
+DocumentView Document::View() const
+{
+ return *this;
+}
+
+DocumentView::DocumentView() : m_json(nullptr)
+{
+}
+
+DocumentView::DocumentView(const Document& value) : m_json(value.m_json)
+{
+}
+
+DocumentView::DocumentView(cJSON* v) : m_json(v)
+{
+}
+
+DocumentView& DocumentView::operator=(const Document& value)
+{
+ m_json = value.m_json;
+ return *this;
+}
+
+DocumentView& DocumentView::operator=(cJSON* value)
+{
+ m_json = value;
+ return *this;
+}
+
+Aws::String DocumentView::GetString(const Aws::String& key) const
+{
+ assert(m_json);
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ auto str = cJSON_AS4CPP_GetStringValue(item);
+ return str ? str : "";
+}
+
+Aws::String DocumentView::AsString() const
+{
+ const char* str = cJSON_AS4CPP_GetStringValue(m_json);
+ if (str == nullptr)
+ {
+ return {};
+ }
+ return str;
+}
+
+bool DocumentView::IsString() const
+{
+ return cJSON_AS4CPP_IsString(m_json) != 0;
+}
+
+bool DocumentView::GetBool(const Aws::String& key) const
+{
+ assert(m_json);
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ assert(item);
+ return item->valueint != 0;
+}
+
+bool DocumentView::AsBool() const
+{
+ assert(cJSON_AS4CPP_IsBool(m_json));
+ return cJSON_AS4CPP_IsTrue(m_json) != 0;
+}
+
+bool DocumentView::IsBool() const
+{
+ return cJSON_AS4CPP_IsBool(m_json) != 0;
+}
+
+int DocumentView::GetInteger(const Aws::String& key) const
+{
+ assert(m_json);
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ assert(item);
+ return item->valueint;
+}
+
+int DocumentView::AsInteger() const
+{
+ assert(cJSON_AS4CPP_IsNumber(m_json)); // can be double or value larger than int_max, but at least not UB
+ return m_json->valueint;
+}
+
+bool DocumentView::IsIntegerType() const
+{
+ if (!cJSON_AS4CPP_IsNumber(m_json))
+ {
+ return false;
+ }
+
+ if (m_json->valuestring)
+ {
+ Aws::String valueString = m_json->valuestring;
+ return std::all_of(valueString.begin(), valueString.end(), [](unsigned char c){ return ::isdigit(c) || c == '+' || c == '-'; });
+ }
+ return m_json->valuedouble == static_cast<long long>(m_json->valuedouble);
+}
+
+int64_t DocumentView::GetInt64(const Aws::String& key) const
+{
+ assert(m_json);
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ assert(item);
+ if (item->valuestring)
+ {
+ return Aws::Utils::StringUtils::ConvertToInt64(item->valuestring);
+ }
+ else
+ {
+ return static_cast<int64_t>(item->valuedouble);
+ }
+}
+
+int64_t DocumentView::AsInt64() const
+{
+ assert(cJSON_AS4CPP_IsNumber(m_json));
+ if (m_json->valuestring)
+ {
+ return Aws::Utils::StringUtils::ConvertToInt64(m_json->valuestring);
+ }
+ else
+ {
+ return static_cast<int64_t>(m_json->valuedouble);
+ }
+}
+
+double DocumentView::GetDouble(const Aws::String& key) const
+{
+ assert(m_json);
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ assert(item);
+ return item->valuedouble;
+}
+
+double DocumentView::AsDouble() const
+{
+ assert(cJSON_AS4CPP_IsNumber(m_json));
+ return m_json->valuedouble;
+}
+
+bool DocumentView::IsFloatingPointType() const
+{
+ if (!cJSON_AS4CPP_IsNumber(m_json))
+ {
+ return false;
+ }
+
+ if (m_json->valuestring)
+ {
+ Aws::String valueString = m_json->valuestring;
+ return std::any_of(valueString.begin(), valueString.end(), [](unsigned char c){ return !::isdigit(c) && c != '+' && c != '-'; });
+ }
+ return m_json->valuedouble != static_cast<long long>(m_json->valuedouble);
+}
+
+Array<DocumentView> DocumentView::GetArray(const Aws::String& key) const
+{
+ assert(m_json);
+ auto array = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ assert(cJSON_AS4CPP_IsArray(array));
+ Array<DocumentView> returnArray(cJSON_AS4CPP_GetArraySize(array));
+
+ auto element = array->child;
+ for (unsigned i = 0; element && i < returnArray.GetLength(); ++i, element = element->next)
+ {
+ returnArray[i] = element;
+ }
+
+ return returnArray;
+}
+
+Array<DocumentView> DocumentView::AsArray() const
+{
+ assert(cJSON_AS4CPP_IsArray(m_json));
+ Array<DocumentView> returnArray(cJSON_AS4CPP_GetArraySize(m_json));
+
+ auto element = m_json->child;
+
+ for (unsigned i = 0; element && i < returnArray.GetLength(); ++i, element = element->next)
+ {
+ returnArray[i] = element;
+ }
+
+ return returnArray;
+}
+
+bool DocumentView::IsListType() const
+{
+ return cJSON_AS4CPP_IsArray(m_json) != 0;
+}
+
+DocumentView DocumentView::GetObject(const Aws::String& key) const
+{
+ assert(m_json);
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ return item;
+}
+
+DocumentView DocumentView::AsObject() const
+{
+ assert(cJSON_AS4CPP_IsObject(m_json) || cJSON_AS4CPP_IsNull(m_json));
+ return m_json;
+}
+
+bool DocumentView::IsObject() const
+{
+ return cJSON_AS4CPP_IsObject(m_json) != 0;
+}
+
+bool DocumentView::IsNull() const
+{
+ return cJSON_AS4CPP_IsNull(m_json) != 0;
+}
+
+Aws::Map<Aws::String, DocumentView> DocumentView::GetAllObjects() const
+{
+ Aws::Map<Aws::String, DocumentView> valueMap;
+ if (!m_json)
+ {
+ return valueMap;
+ }
+
+ for (auto iter = m_json->child; iter; iter = iter->next)
+ {
+ valueMap.emplace(std::make_pair(Aws::String(iter->string), DocumentView(iter)));
+ }
+
+ return valueMap;
+}
+
+bool DocumentView::ValueExists(const Aws::String& key) const
+{
+ if (!cJSON_AS4CPP_IsObject(m_json))
+ {
+ return false;
+ }
+
+ auto item = cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str());
+ return !(item == nullptr || cJSON_AS4CPP_IsNull(item));
+}
+
+bool DocumentView::KeyExists(const Aws::String& key) const
+{
+ if (!cJSON_AS4CPP_IsObject(m_json))
+ {
+ return false;
+ }
+
+ return cJSON_AS4CPP_GetObjectItemCaseSensitive(m_json, key.c_str()) != nullptr;
+}
+
+Aws::String DocumentView::WriteCompact() const
+{
+ if (!m_json)
+ {
+ return "null";
+ }
+
+ auto temp = cJSON_AS4CPP_PrintUnformatted(m_json);
+ Aws::String out(temp);
+ cJSON_AS4CPP_free(temp);
+ return out;
+}
+
+Aws::String DocumentView::WriteReadable() const
+{
+ if (!m_json)
+ {
+ return "null";
+ }
+
+ auto temp = cJSON_AS4CPP_Print(m_json);
+ Aws::String out(temp);
+ cJSON_AS4CPP_free(temp);
+ return out;
+}
+
+Document DocumentView::Materialize() const
+{
+ return m_json;
+}
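
A rough usage sketch of the Document builder and DocumentView reader introduced above, based only on the member functions visible in this file (SDK initialization and error handling are assumed):

    #include <aws/core/utils/Document.h>
    #include <iostream>

    void DocumentExample()
    {
        using namespace Aws::Utils;

        // Build a JSON-like document; each With*() call creates the root object on demand.
        Document doc;
        doc.WithString("name", "example")
           .WithInt64("size", 42)
           .WithBool("enabled", true);

        // Read it back through the non-owning DocumentView.
        DocumentView view = doc.View();
        if (view.KeyExists("name"))
        {
            std::cout << view.GetString("name") << " " << view.GetInt64("size") << "\n";
        }
        std::cout << view.WriteCompact() << "\n";   // e.g. {"name":"example","size":42,"enabled":true}
    }
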
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/HashingUtils.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/HashingUtils.cpp
index 0e49a61634..0431835a61 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/HashingUtils.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/HashingUtils.cpp
@@ -11,6 +11,7 @@
#include <aws/core/utils/crypto/Sha256HMAC.h>
#include <aws/core/utils/crypto/Sha1.h>
#include <aws/core/utils/crypto/MD5.h>
+#include <aws/core/utils/crypto/CRC32.h>
#include <aws/core/utils/Outcome.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/core/utils/memory/stl/AWSList.h>
@@ -234,6 +235,30 @@ ByteBuffer HashingUtils::CalculateMD5(Aws::IOStream& stream)
return hash.Calculate(stream).GetResult();
}
+ByteBuffer HashingUtils::CalculateCRC32(const Aws::String& str)
+{
+ CRC32 hash;
+ return hash.Calculate(str).GetResult();
+}
+
+ByteBuffer HashingUtils::CalculateCRC32(Aws::IOStream& stream)
+{
+ CRC32 hash;
+ return hash.Calculate(stream).GetResult();
+}
+
+ByteBuffer HashingUtils::CalculateCRC32C(const Aws::String& str)
+{
+ CRC32C hash;
+ return hash.Calculate(str).GetResult();
+}
+
+ByteBuffer HashingUtils::CalculateCRC32C(Aws::IOStream& stream)
+{
+ CRC32C hash;
+ return hash.Calculate(stream).GetResult();
+}
+
int HashingUtils::HashString(const char* strToHash)
{
if (!strToHash)
@@ -242,7 +267,7 @@ int HashingUtils::HashString(const char* strToHash)
unsigned hash = 0;
while (char charValue = *strToHash++)
{
- hash = charValue + 31 * hash;
+ hash = charValue + 31 * hash;
}
return hash;
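
The new CalculateCRC32/CalculateCRC32C helpers above return the checksum as a 4-byte big-endian ByteBuffer (see ByteBufferFromInt32 in the CRC32.cpp file added next). A short usage sketch, assuming Aws::InitAPI has already run so the checksum factories exist:

    #include <aws/core/utils/HashingUtils.h>
    #include <cstdio>

    void Crc32Example()
    {
        using namespace Aws::Utils;

        const Aws::String data = "hello crc";
        ByteBuffer crc32  = HashingUtils::CalculateCRC32(data);   // 4 bytes, most significant first
        ByteBuffer crc32c = HashingUtils::CalculateCRC32C(data);

        for (size_t i = 0; i < crc32.GetLength(); ++i)
        {
            std::printf("%02x", static_cast<unsigned>(crc32[i]));
        }
        std::printf("\n");
        (void)crc32c;
    }
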
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
new file mode 100644
index 0000000000..c09806fbe0
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/CRC32.cpp
@@ -0,0 +1,218 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#include <aws/core/utils/crypto/CRC32.h>
+#include <aws/core/utils/Outcome.h>
+#include <aws/core/utils/crypto/Factories.h>
+#include <aws/crt/Types.h>
+#include <aws/checksums/crc.h>
+#include <aws/common/byte_buf.h>
+
+using namespace Aws::Utils::Crypto;
+
+static Aws::Utils::ByteBuffer ByteBufferFromInt32(uint32_t value)
+{
+ Aws::Utils::ByteBuffer buffer(4);
+ buffer[0] = (value >> 24) & 0xFF;
+ buffer[1] = (value >> 16) & 0xFF;
+ buffer[2] = (value >> 8) & 0xFF;
+ buffer[3] = value & 0xFF;
+ return buffer;
+}
+
+CRC32::CRC32() :
+ m_hashImpl(CreateCRC32Implementation())
+{
+}
+
+CRC32::~CRC32()
+{
+}
+
+HashResult CRC32::Calculate(const Aws::String& str)
+{
+ return m_hashImpl->Calculate(str);
+}
+
+HashResult CRC32::Calculate(Aws::IStream& stream)
+{
+ return m_hashImpl->Calculate(stream);
+}
+
+void CRC32::Update(unsigned char* buffer, size_t bufferSize)
+{
+ m_hashImpl->Update(buffer, bufferSize);
+}
+
+HashResult CRC32::GetHash()
+{
+ return m_hashImpl->GetHash();
+}
+
+CRC32C::CRC32C() :
+ m_hashImpl(CreateCRC32CImplementation())
+{
+}
+
+CRC32C::~CRC32C()
+{
+}
+
+HashResult CRC32C::Calculate(const Aws::String& str)
+{
+ return m_hashImpl->Calculate(str);
+}
+
+HashResult CRC32C::Calculate(Aws::IStream& stream)
+{
+ return m_hashImpl->Calculate(stream);
+}
+
+
+void CRC32C::Update(unsigned char* buffer, size_t bufferSize)
+{
+ m_hashImpl->Update(buffer, bufferSize);
+}
+
+HashResult CRC32C::GetHash()
+{
+ return m_hashImpl->GetHash();
+}
+
+
+CRC32Impl::CRC32Impl() : m_runningCrc32(0) {}
+
+HashResult CRC32Impl::Calculate(const Aws::String& str)
+{
+ Aws::Crt::ByteCursor byteCursor = Aws::Crt::ByteCursorFromArray(reinterpret_cast<const uint8_t*>(str.data()), str.size());
+
+ uint32_t runningCrc32 = 0;
+ while (byteCursor.len > INT_MAX)
+ {
+ runningCrc32 = aws_checksums_crc32(byteCursor.ptr, INT_MAX, runningCrc32);
+ aws_byte_cursor_advance(&byteCursor, INT_MAX);
+ }
+ runningCrc32 = aws_checksums_crc32(byteCursor.ptr, static_cast<int>(byteCursor.len), runningCrc32);
+ const Aws::Utils::ByteBuffer& hash = ByteBufferFromInt32(runningCrc32);
+ return HashResult(std::move(hash));
+}
+
+HashResult CRC32Impl::Calculate(Aws::IStream& stream)
+{
+ uint32_t runningCrc32 = 0;
+
+ auto currentPos = stream.tellg();
+ if (currentPos == std::ios::pos_type(-1))
+ {
+ currentPos = 0;
+ stream.clear();
+ }
+
+ stream.seekg(0, stream.beg);
+
+ uint8_t streamBuffer[Aws::Utils::Crypto::Hash::INTERNAL_HASH_STREAM_BUFFER_SIZE];
+ while (stream.good())
+ {
+ stream.read(reinterpret_cast<char*>(streamBuffer), Aws::Utils::Crypto::Hash::INTERNAL_HASH_STREAM_BUFFER_SIZE);
+ auto bytesRead = stream.gcount();
+
+ if (bytesRead > 0)
+ {
+ runningCrc32 = aws_checksums_crc32(streamBuffer, static_cast<int>(bytesRead), runningCrc32);
+ }
+ }
+
+ stream.clear();
+ stream.seekg(currentPos, stream.beg);
+
+ const Aws::Utils::ByteBuffer& hash = ByteBufferFromInt32(runningCrc32);
+ return HashResult(std::move(hash));
+}
+
+void CRC32Impl::Update(unsigned char* buffer, size_t bufferSize)
+{
+ Aws::Crt::ByteCursor byteCursor = Aws::Crt::ByteCursorFromArray(buffer, bufferSize);
+
+ while (byteCursor.len > INT_MAX)
+ {
+ m_runningCrc32 = aws_checksums_crc32(byteCursor.ptr, INT_MAX, m_runningCrc32);
+ aws_byte_cursor_advance(&byteCursor, INT_MAX);
+ }
+ m_runningCrc32 = aws_checksums_crc32(byteCursor.ptr, static_cast<int>(byteCursor.len), m_runningCrc32);
+}
+
+HashResult CRC32Impl::GetHash()
+{
+ const Aws::Utils::ByteBuffer& hash = ByteBufferFromInt32(m_runningCrc32);
+ return HashResult(std::move(hash));
+}
+
+CRC32CImpl::CRC32CImpl() : m_runningCrc32c(0) {}
+
+HashResult CRC32CImpl::Calculate(const Aws::String& str)
+{
+ Aws::Crt::ByteCursor byteCursor = Aws::Crt::ByteCursorFromArray(reinterpret_cast<const uint8_t*>(str.data()), str.size());
+
+ uint32_t runningCrc32c = 0;
+ while (byteCursor.len > INT_MAX)
+ {
+ runningCrc32c = aws_checksums_crc32c(byteCursor.ptr, INT_MAX, runningCrc32c);
+ aws_byte_cursor_advance(&byteCursor, INT_MAX);
+ }
+ runningCrc32c = aws_checksums_crc32c(byteCursor.ptr, static_cast<int>(byteCursor.len), runningCrc32c);
+ const Aws::Utils::ByteBuffer& hash = ByteBufferFromInt32(runningCrc32c);
+ return HashResult(std::move(hash));
+}
+
+HashResult CRC32CImpl::Calculate(Aws::IStream& stream)
+{
+ uint32_t runningCrc32c = 0;
+
+ auto currentPos = stream.tellg();
+ if (currentPos == std::ios::pos_type(-1))
+ {
+ currentPos = 0;
+ stream.clear();
+ }
+
+ stream.seekg(0, stream.beg);
+
+ uint8_t streamBuffer[Aws::Utils::Crypto::Hash::INTERNAL_HASH_STREAM_BUFFER_SIZE];
+ while (stream.good())
+ {
+ stream.read(reinterpret_cast<char*>(streamBuffer), Aws::Utils::Crypto::Hash::INTERNAL_HASH_STREAM_BUFFER_SIZE);
+ auto bytesRead = stream.gcount();
+
+ if (bytesRead > 0)
+ {
+ runningCrc32c = aws_checksums_crc32c(streamBuffer, static_cast<int>(bytesRead), runningCrc32c);
+ }
+ }
+
+ stream.clear();
+ stream.seekg(currentPos, stream.beg);
+
+ const Aws::Utils::ByteBuffer& hash = ByteBufferFromInt32(runningCrc32c);
+ return HashResult(std::move(hash));
+}
+
+void CRC32CImpl::Update(unsigned char* buffer, size_t bufferSize)
+{
+ Aws::Crt::ByteCursor byteCursor = Aws::Crt::ByteCursorFromArray(buffer, bufferSize);
+
+ while (byteCursor.len > INT_MAX)
+ {
+ m_runningCrc32c = aws_checksums_crc32c(byteCursor.ptr, INT_MAX, m_runningCrc32c);
+ aws_byte_cursor_advance(&byteCursor, INT_MAX);
+ }
+ m_runningCrc32c = aws_checksums_crc32c(byteCursor.ptr, static_cast<int>(byteCursor.len), m_runningCrc32c);
+}
+
+HashResult CRC32CImpl::GetHash()
+{
+ const Aws::Utils::ByteBuffer& hash = ByteBufferFromInt32(m_runningCrc32c);
+ return HashResult(std::move(hash));
+}
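
Besides the one-shot Calculate() overloads, the implementation above exposes the incremental Update()/GetHash() interface for chunked or streaming payloads. A sketch of incremental use, assuming Aws::InitAPI has initialized the crypto factories:

    #include <aws/core/utils/crypto/CRC32.h>
    #include <cstddef>

    void IncrementalCrc32(unsigned char* data, size_t len)
    {
        Aws::Utils::Crypto::CRC32 crc;          // delegates to CreateCRC32Implementation()

        const size_t chunk = 64 * 1024;         // feed the data in bounded chunks
        for (size_t off = 0; off < len; off += chunk)
        {
            const size_t n = (len - off < chunk) ? (len - off) : chunk;
            crc.Update(data + off, n);
        }

        auto result = crc.GetHash();            // HashResult wrapping the 4-byte digest
        (void)result;
    }
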
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/MD5.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/MD5.cpp
index bf14ace1ad..f442878a90 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/MD5.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/MD5.cpp
@@ -11,7 +11,7 @@
using namespace Aws::Utils::Crypto;
-MD5::MD5() :
+MD5::MD5() :
m_hashImpl(CreateMD5Implementation())
{
}
@@ -28,4 +28,14 @@ HashResult MD5::Calculate(const Aws::String& str)
HashResult MD5::Calculate(Aws::IStream& stream)
{
return m_hashImpl->Calculate(stream);
-} \ No newline at end of file
+}
+
+void MD5::Update(unsigned char* buffer, size_t bufferSize)
+{
+ return m_hashImpl->Update(buffer, bufferSize);
+}
+
+HashResult MD5::GetHash()
+{
+ return m_hashImpl->GetHash();
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha1.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha1.cpp
index 5da3e63d28..a6783e18f0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha1.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha1.cpp
@@ -28,3 +28,13 @@ HashResult Sha1::Calculate(Aws::IStream& stream)
{
return m_hashImpl->Calculate(stream);
}
+
+void Sha1::Update(unsigned char* buffer, size_t bufferSize)
+{
+ return m_hashImpl->Update(buffer, bufferSize);
+}
+
+HashResult Sha1::GetHash()
+{
+ return m_hashImpl->GetHash();
+} \ No newline at end of file
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha256.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha256.cpp
index a8aa5ae879..48612e8cf0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha256.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/Sha256.cpp
@@ -27,4 +27,14 @@ HashResult Sha256::Calculate(const Aws::String& str)
HashResult Sha256::Calculate(Aws::IStream& stream)
{
return m_hashImpl->Calculate(stream);
-} \ No newline at end of file
+}
+
+void Sha256::Update(unsigned char* buffer, size_t bufferSize)
+{
+ return m_hashImpl->Update(buffer, bufferSize);
+}
+
+HashResult Sha256::GetHash()
+{
+ return m_hashImpl->GetHash();
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/factory/Factories.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/factory/Factories.cpp
index 88ca147d11..cba90af4f4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/factory/Factories.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/factory/Factories.cpp
@@ -7,6 +7,7 @@
#include <aws/core/utils/crypto/Factories.h>
#include <aws/core/utils/crypto/Hash.h>
#include <aws/core/utils/crypto/HMAC.h>
+#include <aws/core/utils/crypto/CRC32.h>
#if ENABLE_BCRYPT_ENCRYPTION
#error #include <aws/core/utils/crypto/bcrypt/CryptoImpl.h>
@@ -35,6 +36,18 @@ static std::shared_ptr<HashFactory>& GetMD5Factory()
return s_MD5Factory;
}
+static std::shared_ptr<HashFactory>& GetCRC32Factory()
+{
+ static std::shared_ptr<HashFactory> s_CRC32Factory(nullptr);
+ return s_CRC32Factory;
+}
+
+static std::shared_ptr<HashFactory>& GetCRC32CFactory()
+{
+ static std::shared_ptr<HashFactory> s_CRC32CFactory(nullptr);
+ return s_CRC32CFactory;
+}
+
static std::shared_ptr<HashFactory>& GetSha1Factory()
{
static std::shared_ptr<HashFactory> s_Sha1Factory(nullptr);
@@ -136,6 +149,24 @@ public:
}
};
+class DefaultCRC32Factory : public HashFactory
+{
+public:
+ std::shared_ptr<Hash> CreateImplementation() const override
+ {
+ return Aws::MakeShared<CRC32Impl>(s_allocationTag);
+ }
+};
+
+class DefaultCRC32CFactory : public HashFactory
+{
+public:
+ std::shared_ptr<Hash> CreateImplementation() const override
+ {
+ return Aws::MakeShared<CRC32CImpl>(s_allocationTag);
+ }
+};
+
class DefaultSHA1Factory : public HashFactory
{
public:
@@ -667,6 +698,16 @@ void Aws::Utils::Crypto::InitCrypto()
GetMD5Factory()->InitStaticState();
}
+ if(!GetCRC32Factory())
+ {
+ GetCRC32Factory() = Aws::MakeShared<DefaultCRC32Factory>(s_allocationTag);
+ }
+
+ if(!GetCRC32CFactory())
+ {
+ GetCRC32CFactory() = Aws::MakeShared<DefaultCRC32CFactory>(s_allocationTag);
+ }
+
if(GetSha1Factory())
{
GetSha1Factory()->InitStaticState();
@@ -754,6 +795,16 @@ void Aws::Utils::Crypto::CleanupCrypto()
GetMD5Factory() = nullptr;
}
+ if(GetCRC32Factory())
+ {
+ GetCRC32Factory() = nullptr;
+ }
+
+ if(GetCRC32CFactory())
+ {
+ GetCRC32CFactory() = nullptr;
+ }
+
if(GetSha1Factory())
{
GetSha1Factory()->CleanupStaticState();
@@ -809,6 +860,16 @@ void Aws::Utils::Crypto::SetMD5Factory(const std::shared_ptr<HashFactory>& facto
GetMD5Factory() = factory;
}
+void Aws::Utils::Crypto::SetCRC32Factory(const std::shared_ptr<HashFactory>& factory)
+{
+ GetCRC32Factory() = factory;
+}
+
+void Aws::Utils::Crypto::SetCRC32CFactory(const std::shared_ptr<HashFactory>& factory)
+{
+ GetCRC32CFactory() = factory;
+}
+
void Aws::Utils::Crypto::SetSha1Factory(const std::shared_ptr<HashFactory>& factory)
{
GetSha1Factory() = factory;
@@ -854,6 +915,16 @@ std::shared_ptr<Hash> Aws::Utils::Crypto::CreateMD5Implementation()
return GetMD5Factory()->CreateImplementation();
}
+std::shared_ptr<Hash> Aws::Utils::Crypto::CreateCRC32Implementation()
+{
+ return GetCRC32Factory()->CreateImplementation();
+}
+
+std::shared_ptr<Hash> Aws::Utils::Crypto::CreateCRC32CImplementation()
+{
+ return GetCRC32CFactory()->CreateImplementation();
+}
+
std::shared_ptr<Hash> Aws::Utils::Crypto::CreateSha1Implementation()
{
return GetSha1Factory()->CreateImplementation();
@@ -967,5 +1038,5 @@ std::shared_ptr<SymmetricCipher> Aws::Utils::Crypto::CreateAES_KeyWrapImplementa
std::shared_ptr<SecureRandomBytes> Aws::Utils::Crypto::CreateSecureRandomBytesImplementation()
{
- return GetSecureRandom();
+ return GetSecureRandomFactory()->CreateImplementation();
}
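
The new Get/Set pairs above follow the existing factory-override pattern: a custom HashFactory installed via SetCRC32Factory() replaces the default implementation returned by CreateCRC32Implementation(). A hedged sketch (class and tag names are made up; the stock CRC32Impl is reused only to keep the example short):

    #include <aws/core/utils/crypto/Factories.h>
    #include <aws/core/utils/crypto/Hash.h>
    #include <aws/core/utils/crypto/CRC32.h>
    #include <aws/core/utils/memory/stl/AWSAllocator.h>

    class MyCrc32Factory : public Aws::Utils::Crypto::HashFactory
    {
    public:
        std::shared_ptr<Aws::Utils::Crypto::Hash> CreateImplementation() const override
        {
            // A real override would return its own Hash subclass; the stock impl is reused here.
            return Aws::MakeShared<Aws::Utils::Crypto::CRC32Impl>("MyCrc32Factory");
        }
    };

    void InstallCustomCrc32Factory()
    {
        Aws::Utils::Crypto::SetCRC32Factory(Aws::MakeShared<MyCrc32Factory>("MyCrc32Factory"));
    }
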
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/openssl/CryptoImpl.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/openssl/CryptoImpl.cpp
index 3a89265e6e..faebde3a8d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/openssl/CryptoImpl.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/crypto/openssl/CryptoImpl.cpp
@@ -8,6 +8,7 @@
#include <aws/core/utils/memory/AWSMemory.h>
#include <aws/core/utils/crypto/openssl/CryptoImpl.h>
#include <aws/core/utils/Outcome.h>
+#include <openssl/crypto.h>
#include <openssl/md5.h>
#ifdef OPENSSL_IS_BORINGSSL
@@ -47,9 +48,19 @@ namespace Aws
*/
#if defined(LIBRESSL_VERSION_NUMBER) && (OPENSSL_VERSION_NUMBER == 0x20000000L)
#undef OPENSSL_VERSION_NUMBER
+#if LIBRESSL_VERSION_NUMBER < 0x3050000fL
#define OPENSSL_VERSION_NUMBER 0x1000107fL
+#else
+#define OPENSSL_VERSION_NUMBER 0x1010000fL
+#endif
#endif
+
#define OPENSSL_VERSION_LESS_1_1 (OPENSSL_VERSION_NUMBER < 0x10100003L)
+#define OPENSSL_VERSION_LESS_3_0 (OPENSSL_VERSION_NUMBER < 0x30000000L)
+
+#if !OPENSSL_VERSION_LESS_3_0
+#error #include <openssl/core_names.h>
+#endif
#if OPENSSL_VERSION_LESS_1_1
static const char* OPENSSL_INTERNALS_TAG = "OpenSSLCallbackState";
@@ -65,7 +76,7 @@ namespace Aws
#else
OPENSSL_init_crypto(OPENSSL_INIT_LOAD_CRYPTO_STRINGS /*options*/ ,NULL /* OpenSSL init settings*/ );
#endif
-#if !defined(OPENSSL_IS_BORINGSSL)
+#if !(defined(OPENSSL_IS_BORINGSSL) || defined(OPENSSL_IS_AWSLC))
OPENSSL_add_all_algorithms_noconf();
#endif
#if OPENSSL_VERSION_LESS_1_1
@@ -168,6 +179,22 @@ namespace Aws
EVP_MD_CTX *m_ctx;
};
+ MD5OpenSSLImpl::MD5OpenSSLImpl()
+ {
+ m_ctx = EVP_MD_CTX_create();
+ assert(m_ctx != nullptr);
+#if !defined(OPENSSL_IS_BORINGSSL)
+ EVP_MD_CTX_set_flags(m_ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+#endif
+ EVP_DigestInit_ex(m_ctx, EVP_md5(), nullptr);
+ }
+
+ MD5OpenSSLImpl::~MD5OpenSSLImpl()
+ {
+ EVP_MD_CTX_destroy(m_ctx);
+ m_ctx = nullptr;
+ }
+
HashResult MD5OpenSSLImpl::Calculate(const Aws::String& str)
{
OpensslCtxRAIIGuard guard;
@@ -222,6 +249,34 @@ namespace Aws
return HashResult(std::move(hash));
}
+ void MD5OpenSSLImpl::Update(unsigned char* buffer, size_t bufferSize)
+ {
+ EVP_DigestUpdate(m_ctx, buffer, bufferSize);
+ }
+
+ HashResult MD5OpenSSLImpl::GetHash()
+ {
+ ByteBuffer hash(EVP_MD_size(EVP_md5()));
+ EVP_DigestFinal(m_ctx, hash.GetUnderlyingData(), nullptr);
+ return HashResult(std::move(hash));
+ }
+
+ Sha1OpenSSLImpl::Sha1OpenSSLImpl()
+ {
+ m_ctx = EVP_MD_CTX_create();
+ assert(m_ctx != nullptr);
+#if !defined(OPENSSL_IS_BORINGSSL)
+ EVP_MD_CTX_set_flags(m_ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+#endif
+ EVP_DigestInit_ex(m_ctx, EVP_sha1(), nullptr);
+ }
+
+ Sha1OpenSSLImpl::~Sha1OpenSSLImpl()
+ {
+ EVP_MD_CTX_destroy(m_ctx);
+ m_ctx = nullptr;
+ }
+
HashResult Sha1OpenSSLImpl::Calculate(const Aws::String& str)
{
OpensslCtxRAIIGuard guard;
@@ -272,6 +327,34 @@ namespace Aws
return HashResult(std::move(hash));
}
+ void Sha1OpenSSLImpl::Update(unsigned char* buffer, size_t bufferSize)
+ {
+ EVP_DigestUpdate(m_ctx, buffer, bufferSize);
+ }
+
+ HashResult Sha1OpenSSLImpl::GetHash()
+ {
+ ByteBuffer hash(EVP_MD_size(EVP_sha1()));
+ EVP_DigestFinal(m_ctx, hash.GetUnderlyingData(), nullptr);
+ return HashResult(std::move(hash));
+ }
+
+ Sha256OpenSSLImpl::Sha256OpenSSLImpl()
+ {
+ m_ctx = EVP_MD_CTX_create();
+ assert(m_ctx != nullptr);
+#if !defined(OPENSSL_IS_BORINGSSL)
+ EVP_MD_CTX_set_flags(m_ctx, EVP_MD_CTX_FLAG_NON_FIPS_ALLOW);
+#endif
+ EVP_DigestInit_ex(m_ctx, EVP_sha256(), nullptr);
+ }
+
+ Sha256OpenSSLImpl::~Sha256OpenSSLImpl()
+ {
+ EVP_MD_CTX_destroy(m_ctx);
+ m_ctx = nullptr;
+ }
+
HashResult Sha256OpenSSLImpl::Calculate(const Aws::String& str)
{
OpensslCtxRAIIGuard guard;
@@ -322,13 +405,28 @@ namespace Aws
return HashResult(std::move(hash));
}
+ void Sha256OpenSSLImpl::Update(unsigned char* buffer, size_t bufferSize)
+ {
+ EVP_DigestUpdate(m_ctx, buffer, bufferSize);
+ }
+
+ HashResult Sha256OpenSSLImpl::GetHash()
+ {
+ ByteBuffer hash(EVP_MD_size(EVP_sha256()));
+ EVP_DigestFinal(m_ctx, hash.GetUnderlyingData(), nullptr);
+ return HashResult(std::move(hash));
+ }
+
class HMACRAIIGuard {
public:
HMACRAIIGuard() {
#if OPENSSL_VERSION_LESS_1_1
m_ctx = Aws::New<HMAC_CTX>("AllocSha256HAMCOpenSSLContext");
-#else
+#elif OPENSSL_VERSION_LESS_3_0
m_ctx = HMAC_CTX_new();
+#else
+ m_mac = EVP_MAC_fetch(NULL, "HMAC", NULL);
+ m_ctx = EVP_MAC_CTX_new(m_mac);
#endif
assert(m_ctx != nullptr);
}
@@ -336,17 +434,29 @@ namespace Aws
~HMACRAIIGuard() {
#if OPENSSL_VERSION_LESS_1_1
Aws::Delete<HMAC_CTX>(m_ctx);
-#else
+#elif OPENSSL_VERSION_LESS_3_0
HMAC_CTX_free(m_ctx);
+#else
+ EVP_MAC_free(m_mac);
+ EVP_MAC_CTX_free(m_ctx);
#endif
m_ctx = nullptr;
}
+#if OPENSSL_VERSION_LESS_3_0
HMAC_CTX* getResource() {
+#else
+ EVP_MAC_CTX* getResource() {
+#endif
return m_ctx;
}
private:
+#if OPENSSL_VERSION_LESS_3_0
HMAC_CTX *m_ctx;
+#else
+ EVP_MAC *m_mac;
+ EVP_MAC_CTX *m_ctx;
+#endif
};
HashResult Sha256HMACOpenSSLImpl::Calculate(const ByteBuffer& toSign, const ByteBuffer& secret)
@@ -356,20 +466,36 @@ namespace Aws
memset(digest.GetUnderlyingData(), 0, length);
HMACRAIIGuard guard;
+#if OPENSSL_VERSION_LESS_3_0
HMAC_CTX* m_ctx = guard.getResource();
+#else
+ EVP_MAC_CTX* m_ctx = guard.getResource();
+#endif
#if OPENSSL_VERSION_LESS_1_1
HMAC_CTX_init(m_ctx);
#endif
+#if OPENSSL_VERSION_LESS_3_0
HMAC_Init_ex(m_ctx, secret.GetUnderlyingData(), static_cast<int>(secret.GetLength()), EVP_sha256(),
NULL);
HMAC_Update(m_ctx, toSign.GetUnderlyingData(), toSign.GetLength());
HMAC_Final(m_ctx, digest.GetUnderlyingData(), &length);
+#else
+ char sha256[] {"SHA256"};
+ OSSL_PARAM ossl_params[2];
+ ossl_params[0] =
+ OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST, sha256, 0);
+ ossl_params[1] = OSSL_PARAM_construct_end();
+ EVP_MAC_init(m_ctx, secret.GetUnderlyingData(),
+ static_cast<int>(secret.GetLength()), ossl_params);
+ EVP_MAC_update(m_ctx, toSign.GetUnderlyingData(), toSign.GetLength());
+ EVP_MAC_final(m_ctx, digest.GetUnderlyingData(), NULL, length);
+#endif
#if OPENSSL_VERSION_LESS_1_1
HMAC_CTX_cleanup(m_ctx);
-#else
+#elif OPENSSL_VERSION_LESS_3_0
HMAC_CTX_reset(m_ctx);
#endif
return HashResult(std::move(digest));
@@ -547,7 +673,7 @@ namespace Aws
CryptoBuffer finalBlock(GetBlockSizeBytes());
int writtenSize = static_cast<int>(finalBlock.GetLength());
int ret = EVP_DecryptFinal_ex(m_decryptor_ctx, finalBlock.GetUnderlyingData(), &writtenSize);
-#if OPENSSL_VERSION_NUMBER > 0x1010104fL //1.1.1d
+#if !defined(OPENSSL_IS_AWSLC) && OPENSSL_VERSION_NUMBER > 0x1010104fL //1.1.1d
if (ret <= 0)
#else
if (ret <= 0 && !m_emptyPlaintext) // see details why making exception for empty string at: https://github.com/aws/aws-sdk-cpp/issues/1413
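
The HMAC changes above add an OpenSSL 3.0 code path based on the EVP_MAC API instead of the legacy HMAC_* functions. A standalone sketch of that path for HMAC-SHA256 (plain OpenSSL 3.x, no SDK types; error handling omitted):

    #include <openssl/evp.h>
    #include <openssl/params.h>
    #include <openssl/core_names.h>

    size_t HmacSha256(const unsigned char* key, size_t keyLen,
                      const unsigned char* data, size_t dataLen,
                      unsigned char* out, size_t outSize)
    {
        EVP_MAC* mac = EVP_MAC_fetch(NULL, "HMAC", NULL);
        EVP_MAC_CTX* ctx = EVP_MAC_CTX_new(mac);

        char digestName[] = "SHA256";
        OSSL_PARAM params[2];
        params[0] = OSSL_PARAM_construct_utf8_string(OSSL_MAC_PARAM_DIGEST, digestName, 0);
        params[1] = OSSL_PARAM_construct_end();

        size_t written = 0;
        EVP_MAC_init(ctx, key, keyLen, params);
        EVP_MAC_update(ctx, data, dataLen);
        EVP_MAC_final(ctx, out, &written, outSize);   // 32 bytes for SHA-256

        EVP_MAC_CTX_free(ctx);
        EVP_MAC_free(mac);
        return written;
    }
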
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamDecoder.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamDecoder.cpp
index f70a6c88f6..053ff938d4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamDecoder.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamDecoder.cpp
@@ -72,9 +72,7 @@ namespace Aws
assert(handler);
if (!handler)
{
- AWS_LOGSTREAM_ERROR(EVENT_STREAM_DECODER_CLASS_TAG, "Payload received, but decoder encountered internal errors before."
- "ErrorCode: " << EventStreamErrorsMapper::GetNameForError(handler->GetInternalError()) << ", "
- "ErrorMessage: " << handler->GetEventPayloadAsString());
+ AWS_LOGSTREAM_ERROR(EVENT_STREAM_DECODER_CLASS_TAG, "Payload received, but handler is null.");
return;
}
handler->WriteMessageEventPayload(static_cast<unsigned char*>(payload->buffer), payload->len);
@@ -129,9 +127,7 @@ namespace Aws
assert(handler);
if (!handler)
{
- AWS_LOGSTREAM_ERROR(EVENT_STREAM_DECODER_CLASS_TAG, "Payload received, but decoder encountered internal errors before."
- "ErrorCode: " << EventStreamErrorsMapper::GetNameForError(handler->GetInternalError()) << ", "
- "ErrorMessage: " << handler->GetEventPayloadAsString());
+ AWS_LOGSTREAM_ERROR(EVENT_STREAM_DECODER_CLASS_TAG, "Header received, but handler is null.");
return;
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamEncoder.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamEncoder.cpp
index ef7104e839..750bf9e1e6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamEncoder.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/event/EventStreamEncoder.cpp
@@ -80,80 +80,83 @@ namespace Aws
Aws::Vector<unsigned char> EventStreamEncoder::EncodeAndSign(const Aws::Utils::Event::Message& msg)
{
- aws_event_stream_message encoded = Encode(msg);
- aws_event_stream_message signedMessage = Sign(&encoded);
+ Aws::Vector<unsigned char> outputBits;
- const auto signedMessageLength = signedMessage.message_buffer ? aws_event_stream_message_total_length(&signedMessage) : 0;
+ aws_event_stream_message encoded;
+ if (InitEncodedStruct(msg, &encoded))
+ {
+ aws_event_stream_message signedMessage;
+ if (InitSignedStruct(&encoded, &signedMessage))
+ {
+ // success!
+ const auto signedMessageBuffer = aws_event_stream_message_buffer(&signedMessage);
+ const auto signedMessageLength = aws_event_stream_message_total_length(&signedMessage);
+ outputBits.reserve(signedMessageLength);
+ outputBits.insert(outputBits.end(), signedMessageBuffer, signedMessageBuffer + signedMessageLength);
+
+ aws_event_stream_message_clean_up(&signedMessage);
+ }
+ aws_event_stream_message_clean_up(&encoded);
+ }
- Aws::Vector<unsigned char> outputBits(signedMessage.message_buffer, signedMessage.message_buffer + signedMessageLength);
- aws_event_stream_message_clean_up(&encoded);
- aws_event_stream_message_clean_up(&signedMessage);
return outputBits;
}
- aws_event_stream_message EventStreamEncoder::Encode(const Aws::Utils::Event::Message& msg)
+ bool EventStreamEncoder::InitEncodedStruct(const Aws::Utils::Event::Message& msg, aws_event_stream_message* encoded)
{
+ bool success = false;
+
aws_array_list headers;
EncodeHeaders(msg, &headers);
- aws_byte_buf payload;
- payload.len = msg.GetEventPayload().size();
- // this const_cast is OK because aws_byte_buf will only be "read from" by the following functions.
- payload.buffer = const_cast<uint8_t*>(msg.GetEventPayload().data());
- payload.capacity = 0;
- payload.allocator = nullptr;
+ aws_byte_buf payload = aws_byte_buf_from_array(msg.GetEventPayload().data(), msg.GetEventPayload().size());
- aws_event_stream_message encoded;
- if(aws_event_stream_message_init(&encoded, get_aws_allocator(), &headers, &payload) == AWS_OP_ERR)
+ if(aws_event_stream_message_init(encoded, get_aws_allocator(), &headers, &payload) == AWS_OP_SUCCESS)
+ {
+ success = true;
+ }
+ else
{
AWS_LOGSTREAM_ERROR(TAG, "Error creating event-stream message from payload.");
- aws_event_stream_headers_list_cleanup(&headers);
- // GCC 4.9.4 issues a warning with -Wextra if we simply do
- // return {};
- aws_event_stream_message empty{nullptr, nullptr, 0};
- return empty;
}
+
aws_event_stream_headers_list_cleanup(&headers);
- return encoded;
+ return success;
}
- aws_event_stream_message EventStreamEncoder::Sign(aws_event_stream_message* msg)
+ bool EventStreamEncoder::InitSignedStruct(const aws_event_stream_message* msg, aws_event_stream_message* signedmsg)
{
- const auto msglen = msg->message_buffer ? aws_event_stream_message_total_length(msg) : 0;
+ bool success = false;
+
+ const auto msgbuf = aws_event_stream_message_buffer(msg);
+ const auto msglen = aws_event_stream_message_total_length(msg);
Event::Message signedMessage;
- signedMessage.WriteEventPayload(msg->message_buffer, msglen);
+ signedMessage.WriteEventPayload(msgbuf, msglen);
assert(m_signer);
- if (!m_signer->SignEventMessage(signedMessage, m_signatureSeed))
+ if (m_signer->SignEventMessage(signedMessage, m_signatureSeed))
{
- AWS_LOGSTREAM_ERROR(TAG, "Failed to sign event message frame.");
- // GCC 4.9.4 issues a warning with -Wextra if we simply do
- // return {};
- aws_event_stream_message empty{nullptr, nullptr, 0};
- return empty;
- }
-
- aws_array_list headers;
- EncodeHeaders(signedMessage, &headers);
+ aws_array_list headers;
+ EncodeHeaders(signedMessage, &headers);
- aws_byte_buf payload;
- payload.len = signedMessage.GetEventPayload().size();
- payload.buffer = signedMessage.GetEventPayload().data();
- payload.capacity = 0;
- payload.allocator = nullptr;
+ aws_byte_buf payload = aws_byte_buf_from_array(signedMessage.GetEventPayload().data(), signedMessage.GetEventPayload().size());
- aws_event_stream_message signedmsg;
- if(aws_event_stream_message_init(&signedmsg, get_aws_allocator(), &headers, &payload))
- {
- AWS_LOGSTREAM_ERROR(TAG, "Error creating event-stream message from payload.");
+ if(aws_event_stream_message_init(signedmsg, get_aws_allocator(), &headers, &payload) == AWS_OP_SUCCESS)
+ {
+ success = true;
+ }
+ else
+ {
+ AWS_LOGSTREAM_ERROR(TAG, "Error creating event-stream message from payload.");
+ }
aws_event_stream_headers_list_cleanup(&headers);
- // GCC 4.9.4 issues a warning with -Wextra if we simply do
- // return {};
- aws_event_stream_message empty{nullptr, nullptr, 0};
- return empty;
}
- aws_event_stream_headers_list_cleanup(&headers);
- return signedmsg;
+ else
+ {
+ AWS_LOGSTREAM_ERROR(TAG, "Failed to sign event message frame.");
+ }
+
+ return success;
}
} // namespace Event
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
index 9358d00c0a..ebfd5d4456 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/json/JsonSerializer.cpp
@@ -9,6 +9,7 @@
#include <algorithm>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/Document.h>
using namespace Aws::Utils;
using namespace Aws::Utils::Json;
@@ -68,6 +69,13 @@ JsonValue::JsonValue(JsonValue&& value) :
value.m_value = nullptr;
}
+JsonValue::JsonValue(const Aws::Utils::DocumentView& value) :
+ m_value(cJSON_AS4CPP_Duplicate(value.m_json, true/*recurse*/)),
+ m_wasParseSuccessful(true),
+ m_errorMessage({})
+{
+}
+
void JsonValue::Destroy()
{
cJSON_AS4CPP_Delete(m_value);
@@ -106,6 +114,15 @@ JsonValue& JsonValue::operator=(JsonValue&& other)
return *this;
}
+JsonValue& JsonValue::operator=(const Aws::Utils::DocumentView& other)
+{
+ Destroy();
+ m_value = cJSON_AS4CPP_Duplicate(other.m_json, true /*recurse*/);
+ m_wasParseSuccessful = true;
+ m_errorMessage = {};
+ return *this;
+}
+
static void AddOrReplace(cJSON* root, const char* key, cJSON* value)
{
const auto existing = cJSON_AS4CPP_GetObjectItemCaseSensitive(root, key);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
new file mode 100644
index 0000000000..81f94d0d3a
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogSystem.cpp
@@ -0,0 +1,107 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/utils/logging/CRTLogSystem.h>
+#include <aws/core/utils/logging/AWSLogging.h>
+#include <aws/core/utils/logging/LogSystemInterface.h>
+#include <aws/core/utils/Array.h>
+#include <aws/common/common.h>
+#include <cstdarg>
+
+using namespace Aws::Utils;
+using namespace Aws::Utils::Logging;
+
+namespace Aws
+{
+ namespace Utils
+ {
+ namespace Logging
+ {
+ static int s_aws_logger_redirect_log(
+ struct aws_logger *logger,
+ enum aws_log_level log_level,
+ aws_log_subject_t subject,
+ const char *format, ...)
+ {
+ DefaultCRTLogSystem* crtLogSystem = reinterpret_cast<DefaultCRTLogSystem*>(logger->p_impl);
+ Logging::LogLevel logLevel = static_cast<LogLevel>(log_level);
+ const char* subjectName = aws_log_subject_name(subject);
+ va_list args;
+ va_start(args, format);
+ crtLogSystem->Log(logLevel, subjectName, format, args);
+ va_end(args);
+ return AWS_OP_SUCCESS;
+ }
+
+ static enum aws_log_level s_aws_logger_redirect_get_log_level(struct aws_logger *logger, aws_log_subject_t subject) {
+ (void)subject;
+ DefaultCRTLogSystem* crtLogSystem = reinterpret_cast<DefaultCRTLogSystem*>(logger->p_impl);
+ return (aws_log_level)(crtLogSystem->GetLogLevel());
+ }
+
+ static void s_aws_logger_redirect_clean_up(struct aws_logger *logger) {
+ (void)logger;
+ }
+
+ static int s_aws_logger_redirect_set_log_level(struct aws_logger *logger, enum aws_log_level log_level)
+ {
+ DefaultCRTLogSystem* crtLogSystem = reinterpret_cast<DefaultCRTLogSystem*>(logger->p_impl);
+ crtLogSystem->SetLogLevel(static_cast<LogLevel>(log_level));
+ return AWS_OP_SUCCESS;
+ }
+
+ static struct aws_logger_vtable s_aws_logger_redirect_vtable = {
+ s_aws_logger_redirect_log, // .log
+ s_aws_logger_redirect_get_log_level, // .get_log_level
+ s_aws_logger_redirect_clean_up, // .clean_up
+ s_aws_logger_redirect_set_log_level // set_log_level
+ };
+
+ DefaultCRTLogSystem::DefaultCRTLogSystem(LogLevel logLevel) :
+ m_logLevel(logLevel),
+ m_logger()
+ {
+ m_logger.vtable = &s_aws_logger_redirect_vtable;
+ m_logger.allocator = Aws::get_aws_allocator();
+ m_logger.p_impl = this;
+
+ aws_logger_set(&m_logger);
+ }
+
+ DefaultCRTLogSystem::~DefaultCRTLogSystem()
+ {
+ if (aws_logger_get() == &m_logger)
+ {
+ aws_logger_set(NULL);
+ aws_logger_clean_up(&m_logger);
+ }
+ }
+
+ void DefaultCRTLogSystem::Log(LogLevel logLevel, const char* subjectName, const char* formatStr, va_list args)
+ {
+ va_list tmp_args;
+ va_copy(tmp_args, args);
+ #ifdef _WIN32
+ const int requiredLength = _vscprintf(formatStr, tmp_args) + 1;
+ #else
+ const int requiredLength = vsnprintf(nullptr, 0, formatStr, tmp_args) + 1;
+ #endif
+ va_end(tmp_args);
+
+ Array<char> outputBuff(requiredLength);
+ #ifdef _WIN32
+ vsnprintf_s(outputBuff.GetUnderlyingData(), requiredLength, _TRUNCATE, formatStr, args);
+ #else
+ vsnprintf(outputBuff.GetUnderlyingData(), requiredLength, formatStr, args);
+ #endif // _WIN32
+
+ Aws::OStringStream logStream;
+ logStream << outputBuff.GetUnderlyingData();
+ Logging::GetLogSystem()->LogStream(logLevel, subjectName, logStream);
+ }
+ }
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
new file mode 100644
index 0000000000..5875ead9c0
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/CRTLogging.cpp
@@ -0,0 +1,31 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/core/utils/logging/CRTLogging.h>
+#include <aws/common/logging.h>
+#include <memory>
+
+using namespace Aws::Utils;
+using namespace Aws::Utils::Logging;
+
+namespace Aws
+{
+namespace Utils
+{
+namespace Logging {
+
+static std::shared_ptr<CRTLogSystemInterface> CRTLogSystem(nullptr);
+
+void InitializeCRTLogging(const std::shared_ptr<CRTLogSystemInterface>& crtLogSystem) {
+ CRTLogSystem = crtLogSystem;
+}
+
+void ShutdownCRTLogging() {
+ CRTLogSystem = nullptr;
+}
+
+} // namespace Logging
+} // namespace Utils
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
index 41c4d7e09c..26348b68fe 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/logging/FormattedLogSystem.cpp
@@ -72,7 +72,7 @@ void FormattedLogSystem::Log(LogLevel logLevel, const char* tag, const char* for
va_list tmp_args; //unfortunately you cannot consume a va_list twice
va_copy(tmp_args, args); //so we have to copy it
- #ifdef WIN32
+ #ifdef _WIN32
const int requiredLength = _vscprintf(formatStr, tmp_args) + 1;
#else
const int requiredLength = vsnprintf(nullptr, 0, formatStr, tmp_args) + 1;
@@ -80,11 +80,11 @@ void FormattedLogSystem::Log(LogLevel logLevel, const char* tag, const char* for
va_end(tmp_args);
Array<char> outputBuff(requiredLength);
- #ifdef WIN32
+ #ifdef _WIN32
vsnprintf_s(outputBuff.GetUnderlyingData(), requiredLength, _TRUNCATE, formatStr, args);
#else
vsnprintf(outputBuff.GetUnderlyingData(), requiredLength, formatStr, args);
- #endif // WIN32
+ #endif // _WIN32
ss << outputBuff.GetUnderlyingData() << std::endl;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/ResponseStream.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/ResponseStream.cpp
index 6d1f90ed12..26c92eaafd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/ResponseStream.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/ResponseStream.cpp
@@ -5,6 +5,7 @@
#include <aws/core/utils/stream/ResponseStream.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/core/utils/logging/LogMacros.h>
#if defined(_GLIBCXX_FULLY_DYNAMIC_STRING) && _GLIBCXX_FULLY_DYNAMIC_STRING == 0 && defined(__ANDROID__)
#include <aws/core/utils/stream/SimpleStreamBuf.h>
@@ -15,6 +16,8 @@ using DefaultStreamBufType = Aws::StringBuf;
using namespace Aws::Utils::Stream;
+const int ResponseStream::ResponseStream::xindex = std::ios_base::xalloc();
+
ResponseStream::ResponseStream(void) :
m_underlyingStream(nullptr)
{
@@ -23,16 +26,20 @@ ResponseStream::ResponseStream(void) :
ResponseStream::ResponseStream(Aws::IOStream* underlyingStreamToManage) :
m_underlyingStream(underlyingStreamToManage)
{
+ RegisterStream();
}
ResponseStream::ResponseStream(const Aws::IOStreamFactory& factory) :
m_underlyingStream(factory())
{
+ RegisterStream();
}
ResponseStream::ResponseStream(ResponseStream&& toMove) : m_underlyingStream(toMove.m_underlyingStream)
{
+ toMove.DeregisterStream();
toMove.m_underlyingStream = nullptr;
+ RegisterStream();
}
ResponseStream& ResponseStream::operator=(ResponseStream&& toMove)
@@ -43,12 +50,26 @@ ResponseStream& ResponseStream::operator=(ResponseStream&& toMove)
}
ReleaseStream();
+ toMove.DeregisterStream();
m_underlyingStream = toMove.m_underlyingStream;
toMove.m_underlyingStream = nullptr;
+ RegisterStream();
return *this;
}
+Aws::IOStream& ResponseStream::GetUnderlyingStream() const
+{
+ if (!m_underlyingStream)
+ {
+ assert(m_underlyingStream);
+ AWS_LOGSTREAM_FATAL("ResponseStream", "Unexpected nullptr m_underlyingStream");
+ static DefaultUnderlyingStream fallbackStream; // we are already in UB, let's just not crash existing apps
+ return fallbackStream;
+ }
+ return *m_underlyingStream;
+}
+
ResponseStream::~ResponseStream()
{
ReleaseStream();
@@ -58,13 +79,53 @@ void ResponseStream::ReleaseStream()
{
if (m_underlyingStream)
{
- m_underlyingStream->flush();
+ DeregisterStream();
Aws::Delete(m_underlyingStream);
}
m_underlyingStream = nullptr;
}
+void ResponseStream::RegisterStream()
+{
+ if (m_underlyingStream)
+ {
+ ResponseStream* pThat = static_cast<ResponseStream*>(m_underlyingStream->pword(ResponseStream::xindex));
+ if (pThat != nullptr)
+ {
+ // callback is already registered
+ assert(pThat != this); // Underlying stream must not be owned by more than one ResponseStream
+ }
+ else
+ {
+ m_underlyingStream->register_callback(ResponseStream::StreamCallback, ResponseStream::xindex);
+ }
+ m_underlyingStream->pword(ResponseStream::xindex) = this;
+ }
+}
+
+void ResponseStream::DeregisterStream()
+{
+ if (m_underlyingStream)
+ {
+ assert(static_cast<ResponseStream*>(m_underlyingStream->pword(ResponseStream::xindex)) == this); // Attempt to deregister another ResponseStream's stream
+ m_underlyingStream->pword(ResponseStream::xindex) = nullptr; // ios does not support deregister, so just erasing the context
+ }
+}
+
+void ResponseStream::StreamCallback(Aws::IOStream::event evt, std::ios_base& stream, int idx)
+{
+ if (evt == std::ios_base::erase_event)
+ {
+ ResponseStream* pThis = static_cast<ResponseStream*>(stream.pword(idx));
+ if (pThis)
+ {
+ // m_underlyingStream is being destructed, let's avoid double destruction or having a dangling pointer
+ pThis->m_underlyingStream = nullptr;
+ }
+ }
+}
+
static const char *DEFAULT_STREAM_TAG = "DefaultUnderlyingStream";
DefaultUnderlyingStream::DefaultUnderlyingStream() :
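
The ResponseStream changes above use std::ios_base bookkeeping to survive the underlying stream being destroyed out from under the wrapper: xalloc() reserves a pword slot, register_callback() delivers erase_event when the stream dies, and the slot stores a back-pointer to the owner. A standalone sketch of that mechanism using standard library types only:

    #include <iostream>
    #include <sstream>

    struct Owner
    {
        static const int xindex;            // process-wide slot reserved once via xalloc()
        std::iostream* stream = nullptr;

        static void Callback(std::ios_base::event evt, std::ios_base& s, int idx)
        {
            if (evt == std::ios_base::erase_event)
            {
                if (auto* owner = static_cast<Owner*>(s.pword(idx)))
                {
                    owner->stream = nullptr;    // stream is being destroyed: drop the pointer
                }
            }
        }

        void Attach(std::iostream& s)
        {
            stream = &s;
            s.register_callback(&Owner::Callback, xindex);
            s.pword(xindex) = this;             // back-pointer read by the callback
        }
    };

    const int Owner::xindex = std::ios_base::xalloc();

    int main()
    {
        Owner owner;
        {
            std::stringstream ss;
            owner.Attach(ss);
        }                                       // ~stringstream fires erase_event
        return owner.stream == nullptr ? 0 : 1; // 0: the pointer was cleared
    }
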
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/SimpleStreamBuf.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/SimpleStreamBuf.cpp
index 6e42994744..dbf77ab646 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/SimpleStreamBuf.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/stream/SimpleStreamBuf.cpp
@@ -5,6 +5,7 @@
*/
#include <aws/core/utils/stream/SimpleStreamBuf.h>
+#include <aws/core/utils/logging/LogMacros.h>
#include <algorithm>
#include <cassert>
@@ -123,7 +124,14 @@ bool SimpleStreamBuf::GrowBuffer()
if(currentSize > 0)
{
- std::memcpy(newBuffer, m_buffer, currentSize);
+ if(m_buffer)
+ {
+ std::memcpy(newBuffer, m_buffer, currentSize);
+ }
+ else
+ {
+ AWS_LOGSTREAM_FATAL(SIMPLE_STREAMBUF_ALLOCATION_TAG, "Unexpected nullptr m_buffer");
+ }
}
if(m_buffer)
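
The GrowBuffer() change above is a defensive-programming guard: never hand memcpy a null source (undefined behavior); log a fatal diagnostic instead. Below is a trimmed stand-alone sketch of the same pattern, with fprintf standing in for AWS_LOGSTREAM_FATAL; the names here are illustrative, not SDK API.

#include <cstdio>
#include <cstring>

// Copies 'size' bytes from 'src' into 'dst', refusing to touch a null source.
static void SafeCopy(char* dst, const char* src, std::size_t size)
{
    if (size == 0)
    {
        return; // nothing to copy
    }
    if (src != nullptr)
    {
        std::memcpy(dst, src, size);
    }
    else
    {
        // Stand-in for AWS_LOGSTREAM_FATAL: report the broken invariant
        // loudly instead of crashing inside memcpy.
        std::fprintf(stderr, "SafeCopy: unexpected nullptr source\n");
    }
}

int main()
{
    char dst[8] = {};
    SafeCopy(dst, nullptr, 4); // logs instead of invoking undefined behavior
    SafeCopy(dst, "abcd", 4);
    return 0;
}
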
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/threading/Executor.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/threading/Executor.cpp
index 4a3c4209c4..f9538f0033 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/threading/Executor.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/threading/Executor.cpp
@@ -14,10 +14,15 @@ using namespace Aws::Utils::Threading;
bool DefaultExecutor::SubmitToThread(std::function<void()>&& fx)
{
- auto main = [fx, this] {
- fx();
- Detach(std::this_thread::get_id());
- };
+ // Generalized lambda capture is C++14, using std::bind as a workaround to force moving fx (instead of copying)
+ std::function<void()> main = std::bind(
+ [this](std::function<void()>& storedFx)
+ {
+ storedFx();
+ Detach(std::this_thread::get_id());
+ },
+ std::move(fx)
+ );
State expected;
do
@@ -25,7 +30,7 @@ bool DefaultExecutor::SubmitToThread(std::function<void()>&& fx)
expected = State::Free;
if(m_state.compare_exchange_strong(expected, State::Locked))
{
- std::thread t(main);
+ std::thread t(std::move(main));
const auto id = t.get_id(); // copy the id before we std::move the thread
m_threads.emplace(id, std::move(t));
m_state = State::Free;
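
The Executor change is purely about capture semantics. As the comment in the hunk notes, generalized (init) lambda capture is a C++14 feature, so to keep this translation unit C++11-compatible the task is routed through std::bind, which move-constructs its stored argument and passes it to the lambda body as an lvalue; the thread then takes the wrapper by move as well, so fx is never copied. A minimal stand-alone illustration (not SDK code), with the C++14 equivalent shown in a comment:

#include <functional>
#include <iostream>
#include <thread>
#include <utility>

int main()
{
    std::function<void()> fx = [] { std::cout << "work\n"; };

    // C++11: move fx into the bound argument; invoking the wrapper passes
    // the stored callable to the lambda as an lvalue reference.
    std::function<void()> task = std::bind(
        [](std::function<void()>& storedFx) { storedFx(); },
        std::move(fx));

    // C++14 init-capture would achieve the same thing directly:
    //   auto task = [storedFx = std::move(fx)]() mutable { storedFx(); };

    std::thread t(std::move(task)); // move again so the thread owns the wrapper
    t.join();
    return 0;
}
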
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/xml/XmlSerializer.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/xml/XmlSerializer.cpp
index c06befaf9b..2d91f70000 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/xml/XmlSerializer.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/source/utils/xml/XmlSerializer.cpp
@@ -23,6 +23,8 @@ Aws::String Aws::Utils::Xml::DecodeEscapedXmlText(const Aws::String& textToDecod
StringUtils::Replace(decodedString, "&lt;", "<");
StringUtils::Replace(decodedString, "&gt;", ">");
StringUtils::Replace(decodedString, "&amp;", "&");
+ StringUtils::Replace(decodedString, "&#xA;", "\n");
+ StringUtils::Replace(decodedString, "&#xD;", "\r");
return decodedString;
}
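
The two added replacements teach DecodeEscapedXmlText to turn the numeric character references &#xA; and &#xD; back into the newline and carriage-return characters they encode, so values that legitimately contain line breaks (object keys, for example) no longer round-trip with the literal entity text. A trimmed stand-alone sketch of the same replace chain; ReplaceAll is a hypothetical stand-in for StringUtils::Replace, and only the entities visible in this hunk are handled.

#include <iostream>
#include <string>

// Hypothetical stand-in for Aws::Utils::StringUtils::Replace.
static void ReplaceAll(std::string& s, const std::string& from, const std::string& to)
{
    for (std::string::size_type pos = s.find(from); pos != std::string::npos;
         pos = s.find(from, pos + to.size()))
    {
        s.replace(pos, from.size(), to);
    }
}

static std::string DecodeEscapedXmlText(std::string text)
{
    ReplaceAll(text, "&lt;", "<");
    ReplaceAll(text, "&gt;", ">");
    ReplaceAll(text, "&amp;", "&");
    ReplaceAll(text, "&#xA;", "\n"); // newline, added by this patch
    ReplaceAll(text, "&#xD;", "\r"); // carriage return, added by this patch
    return text;
}

int main()
{
    // Prints "a", then "b & c" on the next line, instead of the raw entity text.
    std::cout << DecodeEscapedXmlText("a&#xA;b &amp; c") << std::endl;
    return 0;
}
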
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/ya.make b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/ya.make
index bdbeda12d7..773a14e3ea 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/ya.make
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core/ya.make
@@ -13,8 +13,17 @@ LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
PEERDIR(
contrib/libs/curl
contrib/libs/openssl
+ contrib/libs/zlib
+ contrib/restricted/aws/aws-c-auth
+ contrib/restricted/aws/aws-c-cal
contrib/restricted/aws/aws-c-common
contrib/restricted/aws/aws-c-event-stream
+ contrib/restricted/aws/aws-c-http
+ contrib/restricted/aws/aws-c-io
+ contrib/restricted/aws/aws-c-mqtt
+ contrib/restricted/aws/aws-c-sdkutils
+ contrib/restricted/aws/aws-checksums
+ contrib/restricted/aws/aws-crt-cpp
)
ADDINCL(
@@ -26,32 +35,44 @@ NO_COMPILER_WARNINGS()
NO_UTIL()
CFLAGS(
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
-DCURL_HAS_H2
-DCURL_HAS_TLS_PROXY
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_CURL_LOGGING
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
)
@@ -63,26 +84,52 @@ SRCS(
source/Globals.cpp
source/Region.cpp
source/Version.cpp
- source/auth/AWSAuthSigner.cpp
- source/auth/AWSAuthSignerProvider.cpp
source/auth/AWSCredentialsProvider.cpp
source/auth/AWSCredentialsProviderChain.cpp
source/auth/SSOCredentialsProvider.cpp
source/auth/STSCredentialsProvider.cpp
+ source/auth/bearer-token-provider/DefaultBearerTokenProviderChain.cpp
+ source/auth/bearer-token-provider/SSOBearerTokenProvider.cpp
+ source/auth/signer-provider/BearerTokenAuthSignerProvider.cpp
+ source/auth/signer-provider/DefaultAuthSignerProvider.cpp
+ source/auth/signer/AWSAuthBearerSigner.cpp
+ source/auth/signer/AWSAuthEventStreamV4Signer.cpp
+ source/auth/signer/AWSAuthSignerCommon.cpp
+ source/auth/signer/AWSAuthSignerHelper.cpp
+ source/auth/signer/AWSAuthV4Signer.cpp
+ source/auth/signer/AWSNullSigner.cpp
source/client/AWSClient.cpp
source/client/AWSErrorMarshaller.cpp
+ source/client/AWSJsonClient.cpp
+ source/client/AWSUrlPresigner.cpp
+ source/client/AWSXmlClient.cpp
+ source/client/AdaptiveRetryStrategy.cpp
source/client/AsyncCallerContext.cpp
source/client/ClientConfiguration.cpp
source/client/CoreErrors.cpp
source/client/DefaultRetryStrategy.cpp
+ source/client/GenericClientConfiguration.cpp
+ source/client/RequestCompression.cpp
source/client/RetryStrategy.cpp
source/client/SpecifiedRetryableErrorsRetryStrategy.cpp
- source/config/AWSProfileConfigLoader.cpp
+ source/config/AWSConfigFileProfileConfigLoader.cpp
+ source/config/AWSProfileConfigLoaderBase.cpp
+ source/config/ConfigAndCredentialsCacheManager.cpp
+ source/config/EC2InstanceProfileConfigLoader.cpp
+ source/config/defaults/ClientConfigurationDefaults.cpp
+ source/endpoint/AWSEndpoint.cpp
+ source/endpoint/AWSPartitions.cpp
+ source/endpoint/BuiltInParameters.cpp
+ source/endpoint/ClientContextParameters.cpp
+ source/endpoint/DefaultEndpointProvider.cpp
+ source/endpoint/EndpointProviderBase.cpp
+ source/endpoint/internal/AWSEndpointAttribute.cpp
source/external/cjson/cJSON.cpp
source/external/tinyxml2/tinyxml2.cpp
source/http/HttpClient.cpp
source/http/HttpClientFactory.cpp
source/http/HttpRequest.cpp
+ source/http/HttpResponse.cpp
source/http/HttpTypes.cpp
source/http/Scheme.cpp
source/http/URI.cpp
@@ -99,6 +146,7 @@ SRCS(
source/utils/DNS.cpp
source/utils/DateTimeCommon.cpp
source/utils/Directory.cpp
+ source/utils/Document.cpp
source/utils/EnumParseOverflowContainer.cpp
source/utils/FileSystemUtils.cpp
source/utils/GetTheLights.cpp
@@ -107,6 +155,7 @@ SRCS(
source/utils/TempFile.cpp
source/utils/UUID.cpp
source/utils/base64/Base64.cpp
+ source/utils/crypto/CRC32.cpp
source/utils/crypto/Cipher.cpp
source/utils/crypto/ContentCryptoMaterial.cpp
source/utils/crypto/ContentCryptoScheme.cpp
@@ -130,6 +179,8 @@ SRCS(
source/utils/event/EventStreamErrors.cpp
source/utils/json/JsonSerializer.cpp
source/utils/logging/AWSLogging.cpp
+ source/utils/logging/CRTLogSystem.cpp
+ source/utils/logging/CRTLogging.cpp
source/utils/logging/ConsoleLogSystem.cpp
source/utils/logging/DefaultLogSystem.cpp
source/utils/logging/FormattedLogSystem.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-arm64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-arm64.txt
index 1b1705649a..42e8df4fa8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-arm64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-arm64.txt
@@ -9,29 +9,41 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-s3)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -44,15 +56,22 @@ target_include_directories(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PUBLIC
contrib-libs-cxxsupp
libs-aws-sdk-cpp-aws-cpp-sdk-core
+ restricted-aws-aws-c-auth
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ErrorMarshaller.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Errors.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortIncompleteMultipartUpload.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadResult.cpp
@@ -78,6 +97,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CORSRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVInput.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVOutput.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CloudFunctionConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CommonPrefix.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
@@ -128,6 +150,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Error.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ErrorDocument.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplication.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplicationStatus.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExpirationStatus.cpp
@@ -177,6 +200,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclResult.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
@@ -261,6 +287,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationDeprecated.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationFilter.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectCannedACL.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectIdentifier.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockConfiguration.cpp
@@ -272,6 +299,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetentionMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersionStorageClass.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-x86_64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-x86_64.txt
index 1b1705649a..42e8df4fa8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-x86_64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.darwin-x86_64.txt
@@ -9,29 +9,41 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-s3)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -44,15 +56,22 @@ target_include_directories(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PUBLIC
contrib-libs-cxxsupp
libs-aws-sdk-cpp-aws-cpp-sdk-core
+ restricted-aws-aws-c-auth
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ErrorMarshaller.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Errors.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortIncompleteMultipartUpload.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadResult.cpp
@@ -78,6 +97,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CORSRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVInput.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVOutput.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CloudFunctionConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CommonPrefix.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
@@ -128,6 +150,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Error.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ErrorDocument.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplication.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplicationStatus.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExpirationStatus.cpp
@@ -177,6 +200,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclResult.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
@@ -261,6 +287,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationDeprecated.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationFilter.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectCannedACL.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectIdentifier.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockConfiguration.cpp
@@ -272,6 +299,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetentionMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersionStorageClass.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-aarch64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-aarch64.txt
index 114aa69426..2fdc7856d5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-aarch64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-aarch64.txt
@@ -9,29 +9,41 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-s3)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -45,15 +57,22 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PUBLIC
contrib-libs-linux-headers
contrib-libs-cxxsupp
libs-aws-sdk-cpp-aws-cpp-sdk-core
+ restricted-aws-aws-c-auth
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ErrorMarshaller.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Errors.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortIncompleteMultipartUpload.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadResult.cpp
@@ -79,6 +98,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CORSRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVInput.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVOutput.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CloudFunctionConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CommonPrefix.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
@@ -129,6 +151,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Error.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ErrorDocument.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplication.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplicationStatus.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExpirationStatus.cpp
@@ -178,6 +201,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclResult.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
@@ -262,6 +288,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationDeprecated.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationFilter.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectCannedACL.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectIdentifier.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockConfiguration.cpp
@@ -273,6 +300,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetentionMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersionStorageClass.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-x86_64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-x86_64.txt
index 114aa69426..2fdc7856d5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-x86_64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.linux-x86_64.txt
@@ -9,29 +9,41 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-s3)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -45,15 +57,22 @@ target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PUBLIC
contrib-libs-linux-headers
contrib-libs-cxxsupp
libs-aws-sdk-cpp-aws-cpp-sdk-core
+ restricted-aws-aws-c-auth
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ErrorMarshaller.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Errors.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortIncompleteMultipartUpload.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadResult.cpp
@@ -79,6 +98,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CORSRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVInput.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVOutput.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CloudFunctionConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CommonPrefix.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
@@ -129,6 +151,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Error.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ErrorDocument.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplication.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplicationStatus.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExpirationStatus.cpp
@@ -178,6 +201,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclResult.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
@@ -262,6 +288,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationDeprecated.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationFilter.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectCannedACL.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectIdentifier.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockConfiguration.cpp
@@ -273,6 +300,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetentionMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersionStorageClass.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.windows-x86_64.txt b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.windows-x86_64.txt
index 1b1705649a..42e8df4fa8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.windows-x86_64.txt
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/CMakeLists.windows-x86_64.txt
@@ -9,29 +9,41 @@
add_library(libs-aws-sdk-cpp-aws-cpp-sdk-s3)
target_compile_options(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
@@ -44,15 +56,22 @@ target_include_directories(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
target_link_libraries(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PUBLIC
contrib-libs-cxxsupp
libs-aws-sdk-cpp-aws-cpp-sdk-core
+ restricted-aws-aws-c-auth
restricted-aws-aws-c-common
restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-crt-cpp
)
target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
- ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ErrorMarshaller.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Errors.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortIncompleteMultipartUpload.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadResult.cpp
@@ -78,6 +97,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CORSRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVInput.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CSVOutput.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CloudFunctionConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CommonPrefix.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
@@ -128,6 +150,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Error.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ErrorDocument.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplication.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExistingObjectReplicationStatus.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ExpirationStatus.cpp
@@ -177,6 +200,9 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclResult.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldResult.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
@@ -261,6 +287,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationDeprecated.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfigurationFilter.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectCannedACL.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectIdentifier.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockConfiguration.cpp
@@ -272,6 +299,7 @@ target_sources(libs-aws-sdk-cpp-aws-cpp-sdk-s3 PRIVATE
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetentionMode.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRule.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
${CMAKE_SOURCE_DIR}/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersionStorageClass.cpp
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ARN.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ARN.h
deleted file mode 100644
index 546f1582b7..0000000000
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ARN.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#pragma once
-
-#include <aws/s3/S3_EXPORTS.h>
-
-#include <aws/core/client/AWSError.h>
-#include <aws/core/utils/ARN.h>
-#include <aws/s3/S3Errors.h>
-
-namespace Aws
-{
- namespace Utils
- {
- template<typename R, typename E> class Outcome;
- }
-
- namespace S3
- {
- namespace ARNService
- {
- static const char S3[] = "s3";
- static const char S3_OUTPOSTS[] = "s3-outposts";
- static const char S3_OBJECT_LAMBDA[] = "s3-object-lambda";
- }
-
- namespace ARNResourceType
- {
- static const char ACCESSPOINT[] = "accesspoint";
- static const char OUTPOST[] = "outpost";
- }
-
- typedef Aws::Utils::Outcome<bool, Aws::Client::AWSError<S3Errors>> S3ARNOutcome;
-
- class AWS_S3_API S3ARN : public Aws::Utils::ARN
- {
- public:
- S3ARN(const Aws::String& arn);
-
- const Aws::String& GetResourceType() const { return m_resourceType; }
- const Aws::String& GetResourceId() const { return m_resourceId; }
- const Aws::String& GetSubResourceType() const { return m_subResourceType; }
- const Aws::String& GetSubResourceId() const { return m_subResourceId; }
- const Aws::String& GetResourceQualifier() const { return m_resourceQualifier; }
-
- // Check if S3ARN is valid.
- S3ARNOutcome Validate() const;
- // Check if S3ARN is valid, and especially, ARN region should match the region specified.
- S3ARNOutcome Validate(const char* region) const;
-
- private:
- void ParseARNResource();
-
- Aws::String m_resourceType;
- Aws::String m_resourceId;
- Aws::String m_subResourceType;
- Aws::String m_subResourceId;
- Aws::String m_resourceQualifier;
- };
- }
-}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Client.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Client.h
index 5b3d798fcf..6d66c3b6c9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Client.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Client.h
@@ -5,393 +5,25 @@
#pragma once
#include <aws/s3/S3_EXPORTS.h>
-#include <aws/s3/S3Errors.h>
-#include <aws/core/client/AWSError.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/core/client/AWSClient.h>
+#include <aws/core/client/AWSClientAsyncCRTP.h>
#include <aws/core/auth/AWSAuthSigner.h>
-#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/core/utils/DNS.h>
-#include <aws/s3/model/AbortMultipartUploadResult.h>
-#include <aws/s3/model/CompleteMultipartUploadResult.h>
-#include <aws/s3/model/CopyObjectResult.h>
-#include <aws/s3/model/CreateBucketResult.h>
-#include <aws/s3/model/CreateMultipartUploadResult.h>
-#include <aws/s3/model/DeleteObjectResult.h>
-#include <aws/s3/model/DeleteObjectTaggingResult.h>
-#include <aws/s3/model/DeleteObjectsResult.h>
-#include <aws/s3/model/GetBucketAccelerateConfigurationResult.h>
-#include <aws/s3/model/GetBucketAclResult.h>
-#include <aws/s3/model/GetBucketAnalyticsConfigurationResult.h>
-#include <aws/s3/model/GetBucketCorsResult.h>
-#include <aws/s3/model/GetBucketEncryptionResult.h>
-#include <aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h>
-#include <aws/s3/model/GetBucketInventoryConfigurationResult.h>
-#include <aws/s3/model/GetBucketLifecycleConfigurationResult.h>
-#include <aws/s3/model/GetBucketLocationResult.h>
-#include <aws/s3/model/GetBucketLoggingResult.h>
-#include <aws/s3/model/GetBucketMetricsConfigurationResult.h>
-#include <aws/s3/model/GetBucketNotificationConfigurationResult.h>
-#include <aws/s3/model/GetBucketOwnershipControlsResult.h>
-#include <aws/s3/model/GetBucketPolicyResult.h>
-#include <aws/s3/model/GetBucketPolicyStatusResult.h>
-#include <aws/s3/model/GetBucketReplicationResult.h>
-#include <aws/s3/model/GetBucketRequestPaymentResult.h>
-#include <aws/s3/model/GetBucketTaggingResult.h>
-#include <aws/s3/model/GetBucketVersioningResult.h>
-#include <aws/s3/model/GetBucketWebsiteResult.h>
-#include <aws/s3/model/GetObjectResult.h>
-#include <aws/s3/model/GetObjectAclResult.h>
-#include <aws/s3/model/GetObjectLegalHoldResult.h>
-#include <aws/s3/model/GetObjectLockConfigurationResult.h>
-#include <aws/s3/model/GetObjectRetentionResult.h>
-#include <aws/s3/model/GetObjectTaggingResult.h>
-#include <aws/s3/model/GetObjectTorrentResult.h>
-#include <aws/s3/model/GetPublicAccessBlockResult.h>
-#include <aws/s3/model/HeadObjectResult.h>
-#include <aws/s3/model/ListBucketAnalyticsConfigurationsResult.h>
-#include <aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h>
-#include <aws/s3/model/ListBucketInventoryConfigurationsResult.h>
-#include <aws/s3/model/ListBucketMetricsConfigurationsResult.h>
-#include <aws/s3/model/ListBucketsResult.h>
-#include <aws/s3/model/ListMultipartUploadsResult.h>
-#include <aws/s3/model/ListObjectVersionsResult.h>
-#include <aws/s3/model/ListObjectsResult.h>
-#include <aws/s3/model/ListObjectsV2Result.h>
-#include <aws/s3/model/ListPartsResult.h>
-#include <aws/s3/model/PutObjectResult.h>
-#include <aws/s3/model/PutObjectAclResult.h>
-#include <aws/s3/model/PutObjectLegalHoldResult.h>
-#include <aws/s3/model/PutObjectLockConfigurationResult.h>
-#include <aws/s3/model/PutObjectRetentionResult.h>
-#include <aws/s3/model/PutObjectTaggingResult.h>
-#include <aws/s3/model/RestoreObjectResult.h>
-#include <aws/s3/model/UploadPartResult.h>
-#include <aws/s3/model/UploadPartCopyResult.h>
-#include <aws/core/NoResult.h>
-#include <aws/core/client/AsyncCallerContext.h>
-#include <aws/core/http/HttpTypes.h>
-#include <future>
-#include <functional>
-namespace Aws
-{
-
- namespace Http
- {
- class HttpClient;
- class HttpClientFactory;
- } // namespace Http
-
- namespace Utils
- {
- template< typename R, typename E> class Outcome;
-
- namespace Threading
- {
- class Executor;
- } // namespace Threading
-
- namespace Xml
- {
- class XmlDocument;
- } // namespace Xml
- } // namespace Utils
+#include <aws/s3/S3ServiceClientModel.h>
- namespace Auth
- {
- class AWSCredentials;
- class AWSCredentialsProvider;
- } // namespace Auth
-
- namespace Client
- {
- class RetryStrategy;
- } // namespace Client
+// TODO: temporary fix for naming conflicts on Windows.
+#ifdef _WIN32
+#ifdef GetObject
+#undef GetObject
+#endif
+#endif
+namespace Aws
+{
namespace S3
{
- namespace Model
- {
- class AbortMultipartUploadRequest;
- class CompleteMultipartUploadRequest;
- class CopyObjectRequest;
- class CreateBucketRequest;
- class CreateMultipartUploadRequest;
- class DeleteBucketRequest;
- class DeleteBucketAnalyticsConfigurationRequest;
- class DeleteBucketCorsRequest;
- class DeleteBucketEncryptionRequest;
- class DeleteBucketIntelligentTieringConfigurationRequest;
- class DeleteBucketInventoryConfigurationRequest;
- class DeleteBucketLifecycleRequest;
- class DeleteBucketMetricsConfigurationRequest;
- class DeleteBucketOwnershipControlsRequest;
- class DeleteBucketPolicyRequest;
- class DeleteBucketReplicationRequest;
- class DeleteBucketTaggingRequest;
- class DeleteBucketWebsiteRequest;
- class DeleteObjectRequest;
- class DeleteObjectTaggingRequest;
- class DeleteObjectsRequest;
- class DeletePublicAccessBlockRequest;
- class GetBucketAccelerateConfigurationRequest;
- class GetBucketAclRequest;
- class GetBucketAnalyticsConfigurationRequest;
- class GetBucketCorsRequest;
- class GetBucketEncryptionRequest;
- class GetBucketIntelligentTieringConfigurationRequest;
- class GetBucketInventoryConfigurationRequest;
- class GetBucketLifecycleConfigurationRequest;
- class GetBucketLocationRequest;
- class GetBucketLoggingRequest;
- class GetBucketMetricsConfigurationRequest;
- class GetBucketNotificationConfigurationRequest;
- class GetBucketOwnershipControlsRequest;
- class GetBucketPolicyRequest;
- class GetBucketPolicyStatusRequest;
- class GetBucketReplicationRequest;
- class GetBucketRequestPaymentRequest;
- class GetBucketTaggingRequest;
- class GetBucketVersioningRequest;
- class GetBucketWebsiteRequest;
- class GetObjectRequest;
- class GetObjectAclRequest;
- class GetObjectLegalHoldRequest;
- class GetObjectLockConfigurationRequest;
- class GetObjectRetentionRequest;
- class GetObjectTaggingRequest;
- class GetObjectTorrentRequest;
- class GetPublicAccessBlockRequest;
- class HeadBucketRequest;
- class HeadObjectRequest;
- class ListBucketAnalyticsConfigurationsRequest;
- class ListBucketIntelligentTieringConfigurationsRequest;
- class ListBucketInventoryConfigurationsRequest;
- class ListBucketMetricsConfigurationsRequest;
- class ListMultipartUploadsRequest;
- class ListObjectVersionsRequest;
- class ListObjectsRequest;
- class ListObjectsV2Request;
- class ListPartsRequest;
- class PutBucketAccelerateConfigurationRequest;
- class PutBucketAclRequest;
- class PutBucketAnalyticsConfigurationRequest;
- class PutBucketCorsRequest;
- class PutBucketEncryptionRequest;
- class PutBucketIntelligentTieringConfigurationRequest;
- class PutBucketInventoryConfigurationRequest;
- class PutBucketLifecycleConfigurationRequest;
- class PutBucketLoggingRequest;
- class PutBucketMetricsConfigurationRequest;
- class PutBucketNotificationConfigurationRequest;
- class PutBucketOwnershipControlsRequest;
- class PutBucketPolicyRequest;
- class PutBucketReplicationRequest;
- class PutBucketRequestPaymentRequest;
- class PutBucketTaggingRequest;
- class PutBucketVersioningRequest;
- class PutBucketWebsiteRequest;
- class PutObjectRequest;
- class PutObjectAclRequest;
- class PutObjectLegalHoldRequest;
- class PutObjectLockConfigurationRequest;
- class PutObjectRetentionRequest;
- class PutObjectTaggingRequest;
- class PutPublicAccessBlockRequest;
- class RestoreObjectRequest;
- class SelectObjectContentRequest;
- class UploadPartRequest;
- class UploadPartCopyRequest;
- class WriteGetObjectResponseRequest;
-
- typedef Aws::Utils::Outcome<AbortMultipartUploadResult, S3Error> AbortMultipartUploadOutcome;
- typedef Aws::Utils::Outcome<CompleteMultipartUploadResult, S3Error> CompleteMultipartUploadOutcome;
- typedef Aws::Utils::Outcome<CopyObjectResult, S3Error> CopyObjectOutcome;
- typedef Aws::Utils::Outcome<CreateBucketResult, S3Error> CreateBucketOutcome;
- typedef Aws::Utils::Outcome<CreateMultipartUploadResult, S3Error> CreateMultipartUploadOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketAnalyticsConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketCorsOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketEncryptionOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketIntelligentTieringConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketInventoryConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketLifecycleOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketMetricsConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketOwnershipControlsOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketPolicyOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketReplicationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketTaggingOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketWebsiteOutcome;
- typedef Aws::Utils::Outcome<DeleteObjectResult, S3Error> DeleteObjectOutcome;
- typedef Aws::Utils::Outcome<DeleteObjectTaggingResult, S3Error> DeleteObjectTaggingOutcome;
- typedef Aws::Utils::Outcome<DeleteObjectsResult, S3Error> DeleteObjectsOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeletePublicAccessBlockOutcome;
- typedef Aws::Utils::Outcome<GetBucketAccelerateConfigurationResult, S3Error> GetBucketAccelerateConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketAclResult, S3Error> GetBucketAclOutcome;
- typedef Aws::Utils::Outcome<GetBucketAnalyticsConfigurationResult, S3Error> GetBucketAnalyticsConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketCorsResult, S3Error> GetBucketCorsOutcome;
- typedef Aws::Utils::Outcome<GetBucketEncryptionResult, S3Error> GetBucketEncryptionOutcome;
- typedef Aws::Utils::Outcome<GetBucketIntelligentTieringConfigurationResult, S3Error> GetBucketIntelligentTieringConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketInventoryConfigurationResult, S3Error> GetBucketInventoryConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketLifecycleConfigurationResult, S3Error> GetBucketLifecycleConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketLocationResult, S3Error> GetBucketLocationOutcome;
- typedef Aws::Utils::Outcome<GetBucketLoggingResult, S3Error> GetBucketLoggingOutcome;
- typedef Aws::Utils::Outcome<GetBucketMetricsConfigurationResult, S3Error> GetBucketMetricsConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketNotificationConfigurationResult, S3Error> GetBucketNotificationConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetBucketOwnershipControlsResult, S3Error> GetBucketOwnershipControlsOutcome;
- typedef Aws::Utils::Outcome<GetBucketPolicyResult, S3Error> GetBucketPolicyOutcome;
- typedef Aws::Utils::Outcome<GetBucketPolicyStatusResult, S3Error> GetBucketPolicyStatusOutcome;
- typedef Aws::Utils::Outcome<GetBucketReplicationResult, S3Error> GetBucketReplicationOutcome;
- typedef Aws::Utils::Outcome<GetBucketRequestPaymentResult, S3Error> GetBucketRequestPaymentOutcome;
- typedef Aws::Utils::Outcome<GetBucketTaggingResult, S3Error> GetBucketTaggingOutcome;
- typedef Aws::Utils::Outcome<GetBucketVersioningResult, S3Error> GetBucketVersioningOutcome;
- typedef Aws::Utils::Outcome<GetBucketWebsiteResult, S3Error> GetBucketWebsiteOutcome;
- typedef Aws::Utils::Outcome<GetObjectResult, S3Error> GetObjectOutcome;
- typedef Aws::Utils::Outcome<GetObjectAclResult, S3Error> GetObjectAclOutcome;
- typedef Aws::Utils::Outcome<GetObjectLegalHoldResult, S3Error> GetObjectLegalHoldOutcome;
- typedef Aws::Utils::Outcome<GetObjectLockConfigurationResult, S3Error> GetObjectLockConfigurationOutcome;
- typedef Aws::Utils::Outcome<GetObjectRetentionResult, S3Error> GetObjectRetentionOutcome;
- typedef Aws::Utils::Outcome<GetObjectTaggingResult, S3Error> GetObjectTaggingOutcome;
- typedef Aws::Utils::Outcome<GetObjectTorrentResult, S3Error> GetObjectTorrentOutcome;
- typedef Aws::Utils::Outcome<GetPublicAccessBlockResult, S3Error> GetPublicAccessBlockOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> HeadBucketOutcome;
- typedef Aws::Utils::Outcome<HeadObjectResult, S3Error> HeadObjectOutcome;
- typedef Aws::Utils::Outcome<ListBucketAnalyticsConfigurationsResult, S3Error> ListBucketAnalyticsConfigurationsOutcome;
- typedef Aws::Utils::Outcome<ListBucketIntelligentTieringConfigurationsResult, S3Error> ListBucketIntelligentTieringConfigurationsOutcome;
- typedef Aws::Utils::Outcome<ListBucketInventoryConfigurationsResult, S3Error> ListBucketInventoryConfigurationsOutcome;
- typedef Aws::Utils::Outcome<ListBucketMetricsConfigurationsResult, S3Error> ListBucketMetricsConfigurationsOutcome;
- typedef Aws::Utils::Outcome<ListBucketsResult, S3Error> ListBucketsOutcome;
- typedef Aws::Utils::Outcome<ListMultipartUploadsResult, S3Error> ListMultipartUploadsOutcome;
- typedef Aws::Utils::Outcome<ListObjectVersionsResult, S3Error> ListObjectVersionsOutcome;
- typedef Aws::Utils::Outcome<ListObjectsResult, S3Error> ListObjectsOutcome;
- typedef Aws::Utils::Outcome<ListObjectsV2Result, S3Error> ListObjectsV2Outcome;
- typedef Aws::Utils::Outcome<ListPartsResult, S3Error> ListPartsOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketAccelerateConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketAclOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketAnalyticsConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketCorsOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketEncryptionOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketIntelligentTieringConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketInventoryConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketLifecycleConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketLoggingOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketMetricsConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketNotificationConfigurationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketOwnershipControlsOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketPolicyOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketReplicationOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketRequestPaymentOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketTaggingOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketVersioningOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketWebsiteOutcome;
- typedef Aws::Utils::Outcome<PutObjectResult, S3Error> PutObjectOutcome;
- typedef Aws::Utils::Outcome<PutObjectAclResult, S3Error> PutObjectAclOutcome;
- typedef Aws::Utils::Outcome<PutObjectLegalHoldResult, S3Error> PutObjectLegalHoldOutcome;
- typedef Aws::Utils::Outcome<PutObjectLockConfigurationResult, S3Error> PutObjectLockConfigurationOutcome;
- typedef Aws::Utils::Outcome<PutObjectRetentionResult, S3Error> PutObjectRetentionOutcome;
- typedef Aws::Utils::Outcome<PutObjectTaggingResult, S3Error> PutObjectTaggingOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutPublicAccessBlockOutcome;
- typedef Aws::Utils::Outcome<RestoreObjectResult, S3Error> RestoreObjectOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> SelectObjectContentOutcome;
- typedef Aws::Utils::Outcome<UploadPartResult, S3Error> UploadPartOutcome;
- typedef Aws::Utils::Outcome<UploadPartCopyResult, S3Error> UploadPartCopyOutcome;
- typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> WriteGetObjectResponseOutcome;
-
- typedef std::future<AbortMultipartUploadOutcome> AbortMultipartUploadOutcomeCallable;
- typedef std::future<CompleteMultipartUploadOutcome> CompleteMultipartUploadOutcomeCallable;
- typedef std::future<CopyObjectOutcome> CopyObjectOutcomeCallable;
- typedef std::future<CreateBucketOutcome> CreateBucketOutcomeCallable;
- typedef std::future<CreateMultipartUploadOutcome> CreateMultipartUploadOutcomeCallable;
- typedef std::future<DeleteBucketOutcome> DeleteBucketOutcomeCallable;
- typedef std::future<DeleteBucketAnalyticsConfigurationOutcome> DeleteBucketAnalyticsConfigurationOutcomeCallable;
- typedef std::future<DeleteBucketCorsOutcome> DeleteBucketCorsOutcomeCallable;
- typedef std::future<DeleteBucketEncryptionOutcome> DeleteBucketEncryptionOutcomeCallable;
- typedef std::future<DeleteBucketIntelligentTieringConfigurationOutcome> DeleteBucketIntelligentTieringConfigurationOutcomeCallable;
- typedef std::future<DeleteBucketInventoryConfigurationOutcome> DeleteBucketInventoryConfigurationOutcomeCallable;
- typedef std::future<DeleteBucketLifecycleOutcome> DeleteBucketLifecycleOutcomeCallable;
- typedef std::future<DeleteBucketMetricsConfigurationOutcome> DeleteBucketMetricsConfigurationOutcomeCallable;
- typedef std::future<DeleteBucketOwnershipControlsOutcome> DeleteBucketOwnershipControlsOutcomeCallable;
- typedef std::future<DeleteBucketPolicyOutcome> DeleteBucketPolicyOutcomeCallable;
- typedef std::future<DeleteBucketReplicationOutcome> DeleteBucketReplicationOutcomeCallable;
- typedef std::future<DeleteBucketTaggingOutcome> DeleteBucketTaggingOutcomeCallable;
- typedef std::future<DeleteBucketWebsiteOutcome> DeleteBucketWebsiteOutcomeCallable;
- typedef std::future<DeleteObjectOutcome> DeleteObjectOutcomeCallable;
- typedef std::future<DeleteObjectTaggingOutcome> DeleteObjectTaggingOutcomeCallable;
- typedef std::future<DeleteObjectsOutcome> DeleteObjectsOutcomeCallable;
- typedef std::future<DeletePublicAccessBlockOutcome> DeletePublicAccessBlockOutcomeCallable;
- typedef std::future<GetBucketAccelerateConfigurationOutcome> GetBucketAccelerateConfigurationOutcomeCallable;
- typedef std::future<GetBucketAclOutcome> GetBucketAclOutcomeCallable;
- typedef std::future<GetBucketAnalyticsConfigurationOutcome> GetBucketAnalyticsConfigurationOutcomeCallable;
- typedef std::future<GetBucketCorsOutcome> GetBucketCorsOutcomeCallable;
- typedef std::future<GetBucketEncryptionOutcome> GetBucketEncryptionOutcomeCallable;
- typedef std::future<GetBucketIntelligentTieringConfigurationOutcome> GetBucketIntelligentTieringConfigurationOutcomeCallable;
- typedef std::future<GetBucketInventoryConfigurationOutcome> GetBucketInventoryConfigurationOutcomeCallable;
- typedef std::future<GetBucketLifecycleConfigurationOutcome> GetBucketLifecycleConfigurationOutcomeCallable;
- typedef std::future<GetBucketLocationOutcome> GetBucketLocationOutcomeCallable;
- typedef std::future<GetBucketLoggingOutcome> GetBucketLoggingOutcomeCallable;
- typedef std::future<GetBucketMetricsConfigurationOutcome> GetBucketMetricsConfigurationOutcomeCallable;
- typedef std::future<GetBucketNotificationConfigurationOutcome> GetBucketNotificationConfigurationOutcomeCallable;
- typedef std::future<GetBucketOwnershipControlsOutcome> GetBucketOwnershipControlsOutcomeCallable;
- typedef std::future<GetBucketPolicyOutcome> GetBucketPolicyOutcomeCallable;
- typedef std::future<GetBucketPolicyStatusOutcome> GetBucketPolicyStatusOutcomeCallable;
- typedef std::future<GetBucketReplicationOutcome> GetBucketReplicationOutcomeCallable;
- typedef std::future<GetBucketRequestPaymentOutcome> GetBucketRequestPaymentOutcomeCallable;
- typedef std::future<GetBucketTaggingOutcome> GetBucketTaggingOutcomeCallable;
- typedef std::future<GetBucketVersioningOutcome> GetBucketVersioningOutcomeCallable;
- typedef std::future<GetBucketWebsiteOutcome> GetBucketWebsiteOutcomeCallable;
- typedef std::future<GetObjectOutcome> GetObjectOutcomeCallable;
- typedef std::future<GetObjectAclOutcome> GetObjectAclOutcomeCallable;
- typedef std::future<GetObjectLegalHoldOutcome> GetObjectLegalHoldOutcomeCallable;
- typedef std::future<GetObjectLockConfigurationOutcome> GetObjectLockConfigurationOutcomeCallable;
- typedef std::future<GetObjectRetentionOutcome> GetObjectRetentionOutcomeCallable;
- typedef std::future<GetObjectTaggingOutcome> GetObjectTaggingOutcomeCallable;
- typedef std::future<GetObjectTorrentOutcome> GetObjectTorrentOutcomeCallable;
- typedef std::future<GetPublicAccessBlockOutcome> GetPublicAccessBlockOutcomeCallable;
- typedef std::future<HeadBucketOutcome> HeadBucketOutcomeCallable;
- typedef std::future<HeadObjectOutcome> HeadObjectOutcomeCallable;
- typedef std::future<ListBucketAnalyticsConfigurationsOutcome> ListBucketAnalyticsConfigurationsOutcomeCallable;
- typedef std::future<ListBucketIntelligentTieringConfigurationsOutcome> ListBucketIntelligentTieringConfigurationsOutcomeCallable;
- typedef std::future<ListBucketInventoryConfigurationsOutcome> ListBucketInventoryConfigurationsOutcomeCallable;
- typedef std::future<ListBucketMetricsConfigurationsOutcome> ListBucketMetricsConfigurationsOutcomeCallable;
- typedef std::future<ListBucketsOutcome> ListBucketsOutcomeCallable;
- typedef std::future<ListMultipartUploadsOutcome> ListMultipartUploadsOutcomeCallable;
- typedef std::future<ListObjectVersionsOutcome> ListObjectVersionsOutcomeCallable;
- typedef std::future<ListObjectsOutcome> ListObjectsOutcomeCallable;
- typedef std::future<ListObjectsV2Outcome> ListObjectsV2OutcomeCallable;
- typedef std::future<ListPartsOutcome> ListPartsOutcomeCallable;
- typedef std::future<PutBucketAccelerateConfigurationOutcome> PutBucketAccelerateConfigurationOutcomeCallable;
- typedef std::future<PutBucketAclOutcome> PutBucketAclOutcomeCallable;
- typedef std::future<PutBucketAnalyticsConfigurationOutcome> PutBucketAnalyticsConfigurationOutcomeCallable;
- typedef std::future<PutBucketCorsOutcome> PutBucketCorsOutcomeCallable;
- typedef std::future<PutBucketEncryptionOutcome> PutBucketEncryptionOutcomeCallable;
- typedef std::future<PutBucketIntelligentTieringConfigurationOutcome> PutBucketIntelligentTieringConfigurationOutcomeCallable;
- typedef std::future<PutBucketInventoryConfigurationOutcome> PutBucketInventoryConfigurationOutcomeCallable;
- typedef std::future<PutBucketLifecycleConfigurationOutcome> PutBucketLifecycleConfigurationOutcomeCallable;
- typedef std::future<PutBucketLoggingOutcome> PutBucketLoggingOutcomeCallable;
- typedef std::future<PutBucketMetricsConfigurationOutcome> PutBucketMetricsConfigurationOutcomeCallable;
- typedef std::future<PutBucketNotificationConfigurationOutcome> PutBucketNotificationConfigurationOutcomeCallable;
- typedef std::future<PutBucketOwnershipControlsOutcome> PutBucketOwnershipControlsOutcomeCallable;
- typedef std::future<PutBucketPolicyOutcome> PutBucketPolicyOutcomeCallable;
- typedef std::future<PutBucketReplicationOutcome> PutBucketReplicationOutcomeCallable;
- typedef std::future<PutBucketRequestPaymentOutcome> PutBucketRequestPaymentOutcomeCallable;
- typedef std::future<PutBucketTaggingOutcome> PutBucketTaggingOutcomeCallable;
- typedef std::future<PutBucketVersioningOutcome> PutBucketVersioningOutcomeCallable;
- typedef std::future<PutBucketWebsiteOutcome> PutBucketWebsiteOutcomeCallable;
- typedef std::future<PutObjectOutcome> PutObjectOutcomeCallable;
- typedef std::future<PutObjectAclOutcome> PutObjectAclOutcomeCallable;
- typedef std::future<PutObjectLegalHoldOutcome> PutObjectLegalHoldOutcomeCallable;
- typedef std::future<PutObjectLockConfigurationOutcome> PutObjectLockConfigurationOutcomeCallable;
- typedef std::future<PutObjectRetentionOutcome> PutObjectRetentionOutcomeCallable;
- typedef std::future<PutObjectTaggingOutcome> PutObjectTaggingOutcomeCallable;
- typedef std::future<PutPublicAccessBlockOutcome> PutPublicAccessBlockOutcomeCallable;
- typedef std::future<RestoreObjectOutcome> RestoreObjectOutcomeCallable;
- typedef std::future<SelectObjectContentOutcome> SelectObjectContentOutcomeCallable;
- typedef std::future<UploadPartOutcome> UploadPartOutcomeCallable;
- typedef std::future<UploadPartCopyOutcome> UploadPartCopyOutcomeCallable;
- typedef std::future<WriteGetObjectResponseOutcome> WriteGetObjectResponseOutcomeCallable;
- } // namespace Model
-
namespace SSEHeaders
{
static const char SERVER_SIDE_ENCRYPTION[] = "x-amz-server-side-encryption";
@@ -401,151 +33,75 @@ namespace Aws
static const char SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5[] = "x-amz-server-side-encryption-customer-key-MD5";
} // SSEHeaders
- class S3Client;
-
- typedef std::function<void(const S3Client*, const Model::AbortMultipartUploadRequest&, const Model::AbortMultipartUploadOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > AbortMultipartUploadResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::CompleteMultipartUploadRequest&, const Model::CompleteMultipartUploadOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CompleteMultipartUploadResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::CopyObjectRequest&, const Model::CopyObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CopyObjectResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::CreateBucketRequest&, const Model::CreateBucketOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateBucketResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::CreateMultipartUploadRequest&, const Model::CreateMultipartUploadOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateMultipartUploadResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketRequest&, const Model::DeleteBucketOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketAnalyticsConfigurationRequest&, const Model::DeleteBucketAnalyticsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketAnalyticsConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketCorsRequest&, const Model::DeleteBucketCorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketCorsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketEncryptionRequest&, const Model::DeleteBucketEncryptionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketEncryptionResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketIntelligentTieringConfigurationRequest&, const Model::DeleteBucketIntelligentTieringConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketInventoryConfigurationRequest&, const Model::DeleteBucketInventoryConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketInventoryConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketLifecycleRequest&, const Model::DeleteBucketLifecycleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketLifecycleResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketMetricsConfigurationRequest&, const Model::DeleteBucketMetricsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketMetricsConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketOwnershipControlsRequest&, const Model::DeleteBucketOwnershipControlsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketOwnershipControlsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketPolicyRequest&, const Model::DeleteBucketPolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketPolicyResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketReplicationRequest&, const Model::DeleteBucketReplicationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketReplicationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketTaggingRequest&, const Model::DeleteBucketTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketTaggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteBucketWebsiteRequest&, const Model::DeleteBucketWebsiteOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketWebsiteResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteObjectRequest&, const Model::DeleteObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteObjectResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteObjectTaggingRequest&, const Model::DeleteObjectTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteObjectTaggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeleteObjectsRequest&, const Model::DeleteObjectsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteObjectsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::DeletePublicAccessBlockRequest&, const Model::DeletePublicAccessBlockOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeletePublicAccessBlockResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketAccelerateConfigurationRequest&, const Model::GetBucketAccelerateConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketAccelerateConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketAclRequest&, const Model::GetBucketAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketAclResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketAnalyticsConfigurationRequest&, const Model::GetBucketAnalyticsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketAnalyticsConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketCorsRequest&, const Model::GetBucketCorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketCorsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketEncryptionRequest&, const Model::GetBucketEncryptionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketEncryptionResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketIntelligentTieringConfigurationRequest&, const Model::GetBucketIntelligentTieringConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketIntelligentTieringConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketInventoryConfigurationRequest&, const Model::GetBucketInventoryConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketInventoryConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketLifecycleConfigurationRequest&, const Model::GetBucketLifecycleConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketLifecycleConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketLocationRequest&, const Model::GetBucketLocationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketLocationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketLoggingRequest&, const Model::GetBucketLoggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketLoggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketMetricsConfigurationRequest&, const Model::GetBucketMetricsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketMetricsConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketNotificationConfigurationRequest&, const Model::GetBucketNotificationConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketNotificationConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketOwnershipControlsRequest&, const Model::GetBucketOwnershipControlsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketOwnershipControlsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketPolicyRequest&, Model::GetBucketPolicyOutcome, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketPolicyResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketPolicyStatusRequest&, const Model::GetBucketPolicyStatusOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketPolicyStatusResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketReplicationRequest&, const Model::GetBucketReplicationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketReplicationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketRequestPaymentRequest&, const Model::GetBucketRequestPaymentOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketRequestPaymentResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketTaggingRequest&, const Model::GetBucketTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketTaggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketVersioningRequest&, const Model::GetBucketVersioningOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketVersioningResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetBucketWebsiteRequest&, const Model::GetBucketWebsiteOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketWebsiteResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectRequest&, Model::GetObjectOutcome, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectAclRequest&, const Model::GetObjectAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectAclResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectLegalHoldRequest&, const Model::GetObjectLegalHoldOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectLegalHoldResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectLockConfigurationRequest&, const Model::GetObjectLockConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectLockConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectRetentionRequest&, const Model::GetObjectRetentionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectRetentionResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectTaggingRequest&, const Model::GetObjectTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectTaggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetObjectTorrentRequest&, Model::GetObjectTorrentOutcome, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectTorrentResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::GetPublicAccessBlockRequest&, const Model::GetPublicAccessBlockOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetPublicAccessBlockResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::HeadBucketRequest&, const Model::HeadBucketOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > HeadBucketResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::HeadObjectRequest&, const Model::HeadObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > HeadObjectResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListBucketAnalyticsConfigurationsRequest&, const Model::ListBucketAnalyticsConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketAnalyticsConfigurationsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListBucketIntelligentTieringConfigurationsRequest&, const Model::ListBucketIntelligentTieringConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketIntelligentTieringConfigurationsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListBucketInventoryConfigurationsRequest&, const Model::ListBucketInventoryConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketInventoryConfigurationsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListBucketMetricsConfigurationsRequest&, const Model::ListBucketMetricsConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketMetricsConfigurationsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListBucketsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListMultipartUploadsRequest&, const Model::ListMultipartUploadsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListMultipartUploadsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListObjectVersionsRequest&, const Model::ListObjectVersionsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListObjectVersionsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListObjectsRequest&, const Model::ListObjectsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListObjectsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListObjectsV2Request&, const Model::ListObjectsV2Outcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListObjectsV2ResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::ListPartsRequest&, const Model::ListPartsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListPartsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketAccelerateConfigurationRequest&, const Model::PutBucketAccelerateConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketAccelerateConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketAclRequest&, const Model::PutBucketAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketAclResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketAnalyticsConfigurationRequest&, const Model::PutBucketAnalyticsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketAnalyticsConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketCorsRequest&, const Model::PutBucketCorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketCorsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketEncryptionRequest&, const Model::PutBucketEncryptionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketEncryptionResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketIntelligentTieringConfigurationRequest&, const Model::PutBucketIntelligentTieringConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketIntelligentTieringConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketInventoryConfigurationRequest&, const Model::PutBucketInventoryConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketInventoryConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketLifecycleConfigurationRequest&, const Model::PutBucketLifecycleConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketLifecycleConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketLoggingRequest&, const Model::PutBucketLoggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketLoggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketMetricsConfigurationRequest&, const Model::PutBucketMetricsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketMetricsConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketNotificationConfigurationRequest&, const Model::PutBucketNotificationConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketNotificationConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketOwnershipControlsRequest&, const Model::PutBucketOwnershipControlsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketOwnershipControlsResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketPolicyRequest&, const Model::PutBucketPolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketPolicyResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketReplicationRequest&, const Model::PutBucketReplicationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketReplicationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketRequestPaymentRequest&, const Model::PutBucketRequestPaymentOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketRequestPaymentResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketTaggingRequest&, const Model::PutBucketTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketTaggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketVersioningRequest&, const Model::PutBucketVersioningOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketVersioningResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutBucketWebsiteRequest&, const Model::PutBucketWebsiteOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketWebsiteResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutObjectRequest&, const Model::PutObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutObjectAclRequest&, const Model::PutObjectAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectAclResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutObjectLegalHoldRequest&, const Model::PutObjectLegalHoldOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectLegalHoldResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutObjectLockConfigurationRequest&, const Model::PutObjectLockConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectLockConfigurationResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutObjectRetentionRequest&, const Model::PutObjectRetentionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectRetentionResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutObjectTaggingRequest&, const Model::PutObjectTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectTaggingResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::PutPublicAccessBlockRequest&, const Model::PutPublicAccessBlockOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutPublicAccessBlockResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::RestoreObjectRequest&, const Model::RestoreObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > RestoreObjectResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::SelectObjectContentRequest&, const Model::SelectObjectContentOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > SelectObjectContentResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::UploadPartRequest&, const Model::UploadPartOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UploadPartResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::UploadPartCopyRequest&, const Model::UploadPartCopyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UploadPartCopyResponseReceivedHandler;
- typedef std::function<void(const S3Client*, const Model::WriteGetObjectResponseRequest&, const Model::WriteGetObjectResponseOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > WriteGetObjectResponseResponseReceivedHandler;
-
- // Get endpoint, signer region and signer service name after computing the endpoint.
- struct ComputeEndpointResult
- {
- ComputeEndpointResult(const Aws::String& endpointName = {}, const Aws::String& region = {}, const Aws::String& serviceName = {}) :
- endpoint(endpointName), signerRegion(region), signerServiceName(serviceName) {}
-
- Aws::String endpoint;
- Aws::String signerRegion;
- Aws::String signerServiceName;
- };
- typedef Aws::Utils::Outcome<ComputeEndpointResult, Aws::Client::AWSError<S3Errors>> ComputeEndpointOutcome;
-
//max expiration for presigned urls in s3 is 7 days.
static const unsigned MAX_EXPIRATION_SECONDS = 7 * 24 * 60 * 60;
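As a usage sketch only (the bucket and key names below are placeholders, not part of this change), MAX_EXPIRATION_SECONDS is the largest expiration that S3Client's presigned-URL helper accepts:

#include <aws/core/Aws.h>
#include <aws/core/http/HttpTypes.h>
#include <aws/s3/S3Client.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        // "my-bucket" and "my-key" are placeholders; the last argument is the
        // expiration in seconds, capped by S3 at MAX_EXPIRATION_SECONDS (7 days).
        const Aws::String url = client.GeneratePresignedUrl(
            "my-bucket", "my-key",
            Aws::Http::HttpMethod::HTTP_GET,
            Aws::S3::MAX_EXPIRATION_SECONDS);

        std::cout << url << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}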
/**
* <p/>
*/
- enum class US_EAST_1_REGIONAL_ENDPOINT_OPTION
- {
- NOT_SET,
- LEGACY, //stands for using global endpoint for us-east-1,
- REGIONAL //stands for using regional endpoint for us-east-1
- };
- class AWS_S3_API S3Client : public Aws::Client::AWSXMLClient
+ class AWS_S3_API S3Client : public Aws::Client::AWSXMLClient, public Aws::Client::ClientWithAsyncTemplateMethods<S3Client>
{
public:
typedef Aws::Client::AWSXMLClient BASECLASS;
+ static const char* SERVICE_NAME;
+ static const char* ALLOCATION_TAG;
/**
* Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config
* is not specified, it will be initialized to default values.
*/
- S3Client(const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, bool useVirtualAddressing = true, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+ S3Client(const Aws::S3::S3ClientConfiguration& clientConfiguration = Aws::S3::S3ClientConfiguration(),
+ std::shared_ptr<S3EndpointProviderBase> endpointProvider = Aws::MakeShared<S3EndpointProvider>(ALLOCATION_TAG));
/**
* Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config
* is not specified, it will be initialized to default values.
*/
- S3Client(const Aws::Auth::AWSCredentials& credentials, const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, bool useVirtualAddressing = true, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+ S3Client(const Aws::Auth::AWSCredentials& credentials,
+ std::shared_ptr<S3EndpointProviderBase> endpointProvider = Aws::MakeShared<S3EndpointProvider>(ALLOCATION_TAG),
+ const Aws::S3::S3ClientConfiguration& clientConfiguration = Aws::S3::S3ClientConfiguration());
/**
* Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
* the default http client factory will be used
*/
S3Client(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
- const Aws::Client::ClientConfiguration& clientConfiguration = Aws::Client::ClientConfiguration(), Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads = Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, bool useVirtualAddressing = true, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+ std::shared_ptr<S3EndpointProviderBase> endpointProvider = Aws::MakeShared<S3EndpointProvider>(ALLOCATION_TAG),
+ const Aws::S3::S3ClientConfiguration& clientConfiguration = Aws::S3::S3ClientConfiguration());
- virtual ~S3Client();
+ /* Legacy constructors kept for backwards compatibility (deprecated) */
+ /**
+ * Initializes client to use DefaultCredentialProviderChain, with default http client factory, and optional client config. If client config
+ * is not specified, it will be initialized to default values.
+ */
+ S3Client(const Aws::Client::ClientConfiguration& clientConfiguration,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads,
+ bool useVirtualAddressing,
+ Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+
+ /**
+ * Initializes client to use SimpleAWSCredentialsProvider, with default http client factory, and optional client config. If client config
+ * is not specified, it will be initialized to default values.
+ */
+ S3Client(const Aws::Auth::AWSCredentials& credentials,
+ const Aws::Client::ClientConfiguration& clientConfiguration,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads,
+ bool useVirtualAddressing,
+ Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+
+ /**
+ * Initializes client to use specified credentials provider with specified client config. If http client factory is not supplied,
+ * the default http client factory will be used
+ */
+ S3Client(const std::shared_ptr<Aws::Auth::AWSCredentialsProvider>& credentialsProvider,
+ const Aws::Client::ClientConfiguration& clientConfiguration,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads,
+ bool useVirtualAddressing,
+ Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+
+ /* End of legacy constructors kept for backwards compatibility */
+ virtual ~S3Client();
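A minimal construction sketch under stated assumptions: the region value is a placeholder and useVirtualAddressing is assumed to be a field of the generated S3ClientConfiguration (it takes over from the old bool constructor parameter). The new constructors are driven entirely by S3ClientConfiguration plus an optional endpoint provider; payload signing, virtual addressing and the us-east-1 endpoint option remain as explicit parameters only in the legacy overloads above.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // Region is a placeholder; useVirtualAddressing is assumed to exist on
        // the generated S3ClientConfiguration struct.
        Aws::S3::S3ClientConfiguration config;
        config.region = "us-east-1";
        config.useVirtualAddressing = true;

        // New-style construction: endpoint resolution and S3-specific options
        // come from the configuration object, not extra constructor arguments.
        Aws::S3::S3Client client(config);
    }
    Aws::ShutdownAPI(options);
    return 0;
}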
/**
* <p>This action aborts a multipart upload. After a multipart upload is aborted,
@@ -578,68 +134,22 @@ namespace Aws
virtual Model::AbortMultipartUploadOutcome AbortMultipartUpload(const Model::AbortMultipartUploadRequest& request) const;
/**
- * <p>This action aborts a multipart upload. After a multipart upload is aborted,
- * no additional parts can be uploaded using that upload ID. The storage consumed
- * by any previously uploaded parts will be freed. However, if any part uploads are
- * currently in progress, those part uploads might or might not succeed. As a
- * result, it might be necessary to abort a given multipart upload multiple times
- * in order to completely free all storage consumed by all parts. </p> <p>To verify
- * that all parts have been removed, so you don't get charged for the part storage,
- * you should call the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * action and ensure that the parts list is empty.</p> <p>For information about
- * permissions required to use the multipart upload, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>The following operations are related to
- * <code>AbortMultipartUpload</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for AbortMultipartUpload that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::AbortMultipartUploadOutcomeCallable AbortMultipartUploadCallable(const Model::AbortMultipartUploadRequest& request) const;
+ template<typename AbortMultipartUploadRequestT = Model::AbortMultipartUploadRequest>
+ Model::AbortMultipartUploadOutcomeCallable AbortMultipartUploadCallable(const AbortMultipartUploadRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::AbortMultipartUpload, request);
+ }
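A sketch of how the new template Callable wrapper is used; the helper function name and the bucket, key and upload-id values are illustrative only. The wrapper submits the request to the client's executor and returns the std::future declared by AbortMultipartUploadOutcomeCallable:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>

void AbortUploadInParallel(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::AbortMultipartUploadRequest request;
    request.SetBucket("my-bucket");    // placeholder
    request.SetKey("my-key");          // placeholder
    request.SetUploadId("upload-id");  // placeholder

    // SubmitCallable runs the operation on the client's executor and hands
    // back a std::future<Model::AbortMultipartUploadOutcome>.
    auto future = client.AbortMultipartUploadCallable(request);

    // ... issue other requests here while the abort is in flight ...

    auto outcome = future.get();
    if (!outcome.IsSuccess())
    {
        // outcome.GetError() carries the S3Error details.
    }
}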
/**
- * <p>This action aborts a multipart upload. After a multipart upload is aborted,
- * no additional parts can be uploaded using that upload ID. The storage consumed
- * by any previously uploaded parts will be freed. However, if any part uploads are
- * currently in progress, those part uploads might or might not succeed. As a
- * result, it might be necessary to abort a given multipart upload multiple times
- * in order to completely free all storage consumed by all parts. </p> <p>To verify
- * that all parts have been removed, so you don't get charged for the part storage,
- * you should call the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * action and ensure that the parts list is empty.</p> <p>For information about
- * permissions required to use the multipart upload, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>The following operations are related to
- * <code>AbortMultipartUpload</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortMultipartUpload">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for AbortMultipartUpload that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void AbortMultipartUploadAsync(const Model::AbortMultipartUploadRequest& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename AbortMultipartUploadRequestT = Model::AbortMultipartUploadRequest>
+ void AbortMultipartUploadAsync(const AbortMultipartUploadRequestT& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::AbortMultipartUpload, request, handler, context);
+ }
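The Async wrapper follows the same pattern but takes a handler matching AbortMultipartUploadResponseReceivedHandler; the lambda below is a sketch with placeholder request values:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>
#include <iostream>

void AbortUploadAsync(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::AbortMultipartUploadRequest request;
    request.SetBucket("my-bucket");    // placeholder
    request.SetKey("my-key");          // placeholder
    request.SetUploadId("upload-id");  // placeholder

    // The lambda matches AbortMultipartUploadResponseReceivedHandler and is
    // invoked on the executor thread once the operation finishes.
    client.AbortMultipartUploadAsync(request,
        [](const Aws::S3::S3Client* /*client*/,
           const Aws::S3::Model::AbortMultipartUploadRequest& /*req*/,
           const Aws::S3::Model::AbortMultipartUploadOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/)
        {
            std::cout << (outcome.IsSuccess() ? "aborted" : "abort failed") << std::endl;
        });
}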
/**
* <p>Completes a multipart upload by assembling previously uploaded parts.</p>
@@ -663,7 +173,11 @@ namespace Aws
* if <code>CompleteMultipartUpload</code> fails, applications should be prepared
* to retry the failed requests. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html">Amazon
- * S3 Error Best Practices</a>.</p> <p>For more information about multipart
+ * S3 Error Best Practices</a>.</p> <p>You cannot use
+ * <code>Content-Type: application/x-www-form-urlencoded</code> with Complete
+ * Multipart Upload requests. Also, if you do not provide a
+ * <code>Content-Type</code> header, <code>CompleteMultipartUpload</code> returns a
+ * 200 OK response.</p> <p>For more information about multipart
* uploads, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
* Objects Using Multipart Upload</a>.</p> <p>For information about permissions
@@ -702,135 +216,29 @@ namespace Aws
virtual Model::CompleteMultipartUploadOutcome CompleteMultipartUpload(const Model::CompleteMultipartUploadRequest& request) const;
/**
- * <p>Completes a multipart upload by assembling previously uploaded parts.</p>
- * <p>You first initiate the multipart upload and then upload all parts using the
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * operation. After successfully uploading all relevant parts of an upload, you
- * call this action to complete the upload. Upon receiving this request, Amazon S3
- * concatenates all the parts in ascending order by part number to create a new
- * object. In the Complete Multipart Upload request, you must provide the parts
- * list. You must ensure that the parts list is complete. This action concatenates
- * the parts that you provide in the list. For each part in the list, you must
- * provide the part number and the <code>ETag</code> value, returned after that
- * part was uploaded.</p> <p>Processing of a Complete Multipart Upload request
- * could take several minutes to complete. After Amazon S3 begins processing the
- * request, it sends an HTTP response header that specifies a 200 OK response.
- * While processing is in progress, Amazon S3 periodically sends white space
- * characters to keep the connection from timing out. Because a request could fail
- * after the initial 200 OK response has been sent, it is important that you check
- * the response body to determine whether the request succeeded.</p> <p>Note that
- * if <code>CompleteMultipartUpload</code> fails, applications should be prepared
- * to retry the failed requests. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html">Amazon
- * S3 Error Best Practices</a>.</p> <p>For more information about multipart
- * uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a>.</p> <p>For information about permissions
- * required to use the multipart upload API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p> <code>CompleteMultipartUpload</code> has the
- * following special errors:</p> <ul> <li> <p>Error code:
- * <code>EntityTooSmall</code> </p> <ul> <li> <p>Description: Your proposed upload
- * is smaller than the minimum allowed object size. Each part must be at least 5 MB
- * in size, except the last part.</p> </li> <li> <p>400 Bad Request</p> </li> </ul>
- * </li> <li> <p>Error code: <code>InvalidPart</code> </p> <ul> <li>
- * <p>Description: One or more of the specified parts could not be found. The part
- * might not have been uploaded, or the specified entity tag might not have matched
- * the part's entity tag.</p> </li> <li> <p>400 Bad Request</p> </li> </ul> </li>
- * <li> <p>Error code: <code>InvalidPartOrder</code> </p> <ul> <li> <p>Description:
- * The list of parts was not in ascending order. The parts list must be specified
- * in order by part number.</p> </li> <li> <p>400 Bad Request</p> </li> </ul> </li>
- * <li> <p>Error code: <code>NoSuchUpload</code> </p> <ul> <li> <p>Description: The
- * specified multipart upload does not exist. The upload ID might be invalid, or
- * the multipart upload might have been aborted or completed.</p> </li> <li> <p>404
- * Not Found</p> </li> </ul> </li> </ul> <p>The following operations are related to
- * <code>CompleteMultipartUpload</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for CompleteMultipartUpload that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::CompleteMultipartUploadOutcomeCallable CompleteMultipartUploadCallable(const Model::CompleteMultipartUploadRequest& request) const;
+ template<typename CompleteMultipartUploadRequestT = Model::CompleteMultipartUploadRequest>
+ Model::CompleteMultipartUploadOutcomeCallable CompleteMultipartUploadCallable(const CompleteMultipartUploadRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::CompleteMultipartUpload, request);
+ }
/**
- * <p>Completes a multipart upload by assembling previously uploaded parts.</p>
- * <p>You first initiate the multipart upload and then upload all parts using the
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * operation. After successfully uploading all relevant parts of an upload, you
- * call this action to complete the upload. Upon receiving this request, Amazon S3
- * concatenates all the parts in ascending order by part number to create a new
- * object. In the Complete Multipart Upload request, you must provide the parts
- * list. You must ensure that the parts list is complete. This action concatenates
- * the parts that you provide in the list. For each part in the list, you must
- * provide the part number and the <code>ETag</code> value, returned after that
- * part was uploaded.</p> <p>Processing of a Complete Multipart Upload request
- * could take several minutes to complete. After Amazon S3 begins processing the
- * request, it sends an HTTP response header that specifies a 200 OK response.
- * While processing is in progress, Amazon S3 periodically sends white space
- * characters to keep the connection from timing out. Because a request could fail
- * after the initial 200 OK response has been sent, it is important that you check
- * the response body to determine whether the request succeeded.</p> <p>Note that
- * if <code>CompleteMultipartUpload</code> fails, applications should be prepared
- * to retry the failed requests. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ErrorBestPractices.html">Amazon
- * S3 Error Best Practices</a>.</p> <p>For more information about multipart
- * uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a>.</p> <p>For information about permissions
- * required to use the multipart upload API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p> <code>CompleteMultipartUpload</code> has the
- * following special errors:</p> <ul> <li> <p>Error code:
- * <code>EntityTooSmall</code> </p> <ul> <li> <p>Description: Your proposed upload
- * is smaller than the minimum allowed object size. Each part must be at least 5 MB
- * in size, except the last part.</p> </li> <li> <p>400 Bad Request</p> </li> </ul>
- * </li> <li> <p>Error code: <code>InvalidPart</code> </p> <ul> <li>
- * <p>Description: One or more of the specified parts could not be found. The part
- * might not have been uploaded, or the specified entity tag might not have matched
- * the part's entity tag.</p> </li> <li> <p>400 Bad Request</p> </li> </ul> </li>
- * <li> <p>Error code: <code>InvalidPartOrder</code> </p> <ul> <li> <p>Description:
- * The list of parts was not in ascending order. The parts list must be specified
- * in order by part number.</p> </li> <li> <p>400 Bad Request</p> </li> </ul> </li>
- * <li> <p>Error code: <code>NoSuchUpload</code> </p> <ul> <li> <p>Description: The
- * specified multipart upload does not exist. The upload ID might be invalid, or
- * the multipart upload might have been aborted or completed.</p> </li> <li> <p>404
- * Not Found</p> </li> </ul> </li> </ul> <p>The following operations are related to
- * <code>CompleteMultipartUpload</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompleteMultipartUpload">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for CompleteMultipartUpload that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void CompleteMultipartUploadAsync(const Model::CompleteMultipartUploadRequest& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename CompleteMultipartUploadRequestT = Model::CompleteMultipartUploadRequest>
+ void CompleteMultipartUploadAsync(const CompleteMultipartUploadRequestT& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::CompleteMultipartUpload, request, handler, context);
+ }
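
For the Async wrapper the handler keeps the CompleteMultipartUploadResponseReceivedHandler signature; only the request parameter became a template. A minimal sketch, assuming the request was populated as in the previous example:

    #include <iostream>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CompleteMultipartUploadRequest.h>

    // Queues the call on the client's executor; the handler runs when the service responds.
    void CompleteUploadAsync(const Aws::S3::S3Client& client,
                             const Aws::S3::Model::CompleteMultipartUploadRequest& request)
    {
        client.CompleteMultipartUploadAsync(
            request,
            [](const Aws::S3::S3Client* /*client*/,
               const Aws::S3::Model::CompleteMultipartUploadRequest& /*req*/,
               const Aws::S3::Model::CompleteMultipartUploadOutcome& outcome,
               const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/) {
                if (!outcome.IsSuccess()) {
                    // A 200 OK can still carry an error body, so always inspect the outcome.
                    std::cerr << "CompleteMultipartUpload failed: "
                              << outcome.GetError().GetMessage() << std::endl;
                }
            });
    }
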
/**
* <p>Creates a copy of an object that is already stored in Amazon S3.</p>
* <p>You can store individual objects of up to 5 TB in Amazon S3. You create a
* copy of your object up to 5 GB in size in a single atomic action using this API.
* However, to copy an object greater than 5 GB, you must use the multipart upload
- * Upload Part - Copy API. For more information, see <a
+ * Upload Part - Copy (UploadPartCopy) API. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html">Copy
* Object Using the REST Multipart Upload API</a>.</p> <p>All copy requests
* must be authenticated. Additionally, you must have <i>read</i> access to the
@@ -870,15 +278,14 @@ namespace Aws
* key to enforce certain metadata behavior when objects are uploaded. For more
* information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html">Specifying
- * Conditions in a Policy</a> in the <i>Amazon S3 Developer Guide</i>. For a
- * complete list of Amazon S3-specific condition keys, see <a
+ * Conditions in a Policy</a> in the <i>Amazon S3 User Guide</i>. For a complete
+ * list of Amazon S3-specific condition keys, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html">Actions,
- * Resources, and Condition Keys for Amazon S3</a>.</p> <p> <b>
- * <code>x-amz-copy-source-if</code> Headers</b> </p> <p>To only copy an object
- * under certain conditions, such as whether the <code>Etag</code> matches or
- * whether the object was modified before or after a specified date, use the
- * following request parameters:</p> <ul> <li> <p>
- * <code>x-amz-copy-source-if-match</code> </p> </li> <li> <p>
+ * Resources, and Condition Keys for Amazon S3</a>.</p> <p> <b>x-amz-copy-source-if
+ * Headers</b> </p> <p>To only copy an object under certain conditions, such as
+ * whether the <code>Etag</code> matches or whether the object was modified before
+ * or after a specified date, use the following request parameters:</p> <ul> <li>
+ * <p> <code>x-amz-copy-source-if-match</code> </p> </li> <li> <p>
* <code>x-amz-copy-source-if-none-match</code> </p> </li> <li> <p>
* <code>x-amz-copy-source-if-unmodified-since</code> </p> </li> <li> <p>
* <code>x-amz-copy-source-if-modified-since</code> </p> </li> </ul> <p> If both
@@ -899,11 +306,11 @@ namespace Aws
* <code>x-amz-</code> prefix, including <code>x-amz-copy-source</code>, must be
* signed.</p> <p> <b>Server-side encryption</b> </p> <p>When you perform a
* CopyObject operation, you can optionally use the appropriate encryption-related
- * headers to encrypt the object using server-side encryption with AWS managed
- * encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With
- * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
- * its data centers and decrypts the data when you access it. For more information
- * about server-side encryption, see <a
+ * headers to encrypt the object using server-side encryption with Amazon Web
+ * Services managed encryption keys (SSE-S3 or SSE-KMS) or a customer-provided
+ * encryption key. With server-side encryption, Amazon S3 encrypts your data as it
+ * writes it to disks in its data centers and decrypts the data when you access it.
+ * For more information about server-side encryption, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Using
* Server-Side Encryption</a>.</p> <p>If a target object uses SSE-KMS, you can
* enable an S3 Bucket Key for the object. For more information, see <a
@@ -912,25 +319,40 @@ namespace Aws
* List (ACL)-Specific Request Headers</b> </p> <p>When copying an object, you can
* optionally use headers to grant ACL-based permissions. By default, all objects
* are private. Only the owner has full access control. When adding a new object,
- * you can grant permissions to individual AWS accounts or to predefined groups
- * defined by Amazon S3. These permissions are then added to the ACL on the object.
- * For more information, see <a
+ * you can grant permissions to individual Amazon Web Services accounts or to
+ * predefined groups defined by Amazon S3. These permissions are then added to the
+ * ACL on the object. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
* Control List (ACL) Overview</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html">Managing
- * ACLs Using the REST API</a>. </p> <p> <b>Storage Class Options</b> </p> <p>You
- * can use the <code>CopyObject</code> action to change the storage class of an
- * object that is already stored in Amazon S3 using the <code>StorageClass</code>
- * parameter. For more information, see <a
+ * ACLs Using the REST API</a>. </p> <p>If the bucket that you're copying objects
+ * to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are
+ * disabled and no longer affect permissions. Buckets that use this setting only
+ * accept PUT requests that don't specify an ACL or PUT requests that specify
+ * bucket owner full control ACLs, such as the
+ * <code>bucket-owner-full-control</code> canned ACL or an equivalent form of this
+ * ACL expressed in the XML format.</p> <p>For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">
+ * Controlling ownership of objects and disabling ACLs</a> in the <i>Amazon S3 User
+ * Guide</i>.</p> <p>If your bucket uses the bucket owner enforced setting
+ * for Object Ownership, all objects written to the bucket by any account will be
+ * owned by the bucket owner.</p> <p> <b>Checksums</b> </p> <p>When copying
+ * an object, if it has a checksum, that checksum will be copied to the new object
+ * by default. When you copy the object over, you may optionally specify a
+ * different checksum algorithm to use with the
+ * <code>x-amz-checksum-algorithm</code> header.</p> <p> <b>Storage Class
+ * Options</b> </p> <p>You can use the <code>CopyObject</code> action to change the
+ * storage class of an object that is already stored in Amazon S3 using the
+ * <code>StorageClass</code> parameter. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p> <p>
- * <b>Versioning</b> </p> <p>By default, <code>x-amz-copy-source</code> identifies
- * the current version of an object to copy. If the current version is a delete
- * marker, Amazon S3 behaves as if the object was deleted. To copy a different
- * version, use the <code>versionId</code> subresource.</p> <p>If you enable
- * versioning on the target bucket, Amazon S3 generates a unique version ID for the
- * object being copied. This version ID is different from the version ID of the
- * source object. Amazon S3 returns the version ID of the copied object in the
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Versioning</b> </p>
+ * <p>By default, <code>x-amz-copy-source</code> identifies the current version of
+ * an object to copy. If the current version is a delete marker, Amazon S3 behaves
+ * as if the object was deleted. To copy a different version, use the
+ * <code>versionId</code> subresource.</p> <p>If you enable versioning on the
+ * target bucket, Amazon S3 generates a unique version ID for the object being
+ * copied. This version ID is different from the version ID of the source object.
+ * Amazon S3 returns the version ID of the copied object in the
* <code>x-amz-version-id</code> response header in the response.</p> <p>If you do
* not enable versioning or suspend it on the target bucket, the version ID that
* Amazon S3 generates is always null.</p> <p>If the source object's storage class
@@ -951,268 +373,32 @@ namespace Aws
virtual Model::CopyObjectOutcome CopyObject(const Model::CopyObjectRequest& request) const;
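
The x-amz-copy-source-if headers and the server-side-encryption headers discussed above map onto setters of the generated CopyObjectRequest model. The sketch below is an assumption-laden illustration, not part of the patch: bucket names, keys, the expected ETag and the KMS key alias are placeholders, and the setters are the usual generated ones rather than anything introduced by this commit.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CopyObjectRequest.h>
    #include <aws/s3/model/ServerSideEncryption.h>

    // Copies source-bucket/source-key to dest-bucket/dest-key only while the source's
    // ETag still matches, and asks S3 to encrypt the copy with SSE-KMS.
    Aws::S3::Model::CopyObjectOutcome
    ConditionalEncryptedCopy(const Aws::S3::S3Client& client, const Aws::String& expectedETag)
    {
        Aws::S3::Model::CopyObjectRequest request;
        request.SetCopySource("source-bucket/source-key");  // URL-encode the key if needed
        request.SetBucket("dest-bucket");
        request.SetKey("dest-key");

        // x-amz-copy-source-if-match: copy only if the source is unchanged.
        request.SetCopySourceIfMatch(expectedETag);

        // Server-side encryption with a KMS key (SSE-KMS).
        request.SetServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms);
        request.SetSSEKMSKeyId("alias/my-key");              // placeholder key id/alias

        return client.CopyObject(request);  // a 200 OK may still embed an error; parse the outcome
    }
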
/**
- * <p>Creates a copy of an object that is already stored in Amazon S3.</p>
- * <p>You can store individual objects of up to 5 TB in Amazon S3. You create a
- * copy of your object up to 5 GB in size in a single atomic action using this API.
- * However, to copy an object greater than 5 GB, you must use the multipart upload
- * Upload Part - Copy API. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html">Copy
- * Object Using the REST Multipart Upload API</a>.</p> <p>All copy requests
- * must be authenticated. Additionally, you must have <i>read</i> access to the
- * source object and <i>write</i> access to the destination bucket. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
- * Authentication</a>. Both the Region that you want to copy the object from and
- * the Region that you want to copy the object to must be enabled for your
- * account.</p> <p>A copy request might return an error when Amazon S3 receives the
- * copy request or while Amazon S3 is copying the files. If the error occurs before
- * the copy action starts, you receive a standard Amazon S3 error. If the error
- * occurs during the copy operation, the error response is embedded in the
- * <code>200 OK</code> response. This means that a <code>200 OK</code> response can
- * contain either a success or an error. Design your application to parse the
- * contents of the response and handle it appropriately.</p> <p>If the copy is
- * successful, you receive a response with information about the copied object.</p>
- * <p>If the request is an HTTP 1.1 request, the response is chunk encoded.
- * If it were not, it would not contain the content-length, and you would need to
- * read the entire body.</p> <p>The copy request charge is based on the
- * storage class and Region that you specify for the destination object. For
- * pricing information, see <a href="http://aws.amazon.com/s3/pricing/">Amazon S3
- * pricing</a>.</p> <p>Amazon S3 transfer acceleration does not support
- * cross-Region copies. If you request a cross-Region copy using a transfer
- * acceleration endpoint, you get a 400 <code>Bad Request</code> error. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html">Transfer
- * Acceleration</a>.</p> <p> <b>Metadata</b> </p> <p>When copying an
- * object, you can preserve all metadata (default) or specify new metadata.
- * However, the ACL is not preserved and is set to private for the user making the
- * request. To override the default ACL setting, specify a new ACL when generating
- * a copy request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Using
- * ACLs</a>. </p> <p>To specify whether you want the object metadata copied from
- * the source object or replaced with metadata provided in the request, you can
- * optionally add the <code>x-amz-metadata-directive</code> header. When you grant
- * permissions, you can use the <code>s3:x-amz-metadata-directive</code> condition
- * key to enforce certain metadata behavior when objects are uploaded. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html">Specifying
- * Conditions in a Policy</a> in the <i>Amazon S3 Developer Guide</i>. For a
- * complete list of Amazon S3-specific condition keys, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html">Actions,
- * Resources, and Condition Keys for Amazon S3</a>.</p> <p> <b>
- * <code>x-amz-copy-source-if</code> Headers</b> </p> <p>To only copy an object
- * under certain conditions, such as whether the <code>Etag</code> matches or
- * whether the object was modified before or after a specified date, use the
- * following request parameters:</p> <ul> <li> <p>
- * <code>x-amz-copy-source-if-match</code> </p> </li> <li> <p>
- * <code>x-amz-copy-source-if-none-match</code> </p> </li> <li> <p>
- * <code>x-amz-copy-source-if-unmodified-since</code> </p> </li> <li> <p>
- * <code>x-amz-copy-source-if-modified-since</code> </p> </li> </ul> <p> If both
- * the <code>x-amz-copy-source-if-match</code> and
- * <code>x-amz-copy-source-if-unmodified-since</code> headers are present in the
- * request and evaluate as follows, Amazon S3 returns <code>200 OK</code> and
- * copies the data:</p> <ul> <li> <p> <code>x-amz-copy-source-if-match</code>
- * condition evaluates to true</p> </li> <li> <p>
- * <code>x-amz-copy-source-if-unmodified-since</code> condition evaluates to
- * false</p> </li> </ul> <p>If both the
- * <code>x-amz-copy-source-if-none-match</code> and
- * <code>x-amz-copy-source-if-modified-since</code> headers are present in the
- * request and evaluate as follows, Amazon S3 returns the <code>412 Precondition
- * Failed</code> response code:</p> <ul> <li> <p>
- * <code>x-amz-copy-source-if-none-match</code> condition evaluates to false</p>
- * </li> <li> <p> <code>x-amz-copy-source-if-modified-since</code> condition
- * evaluates to true</p> </li> </ul> <p>All headers with the
- * <code>x-amz-</code> prefix, including <code>x-amz-copy-source</code>, must be
- * signed.</p> <p> <b>Server-side encryption</b> </p> <p>When you perform a
- * CopyObject operation, you can optionally use the appropriate encryption-related
- * headers to encrypt the object using server-side encryption with AWS managed
- * encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With
- * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
- * its data centers and decrypts the data when you access it. For more information
- * about server-side encryption, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Using
- * Server-Side Encryption</a>.</p> <p>If a target object uses SSE-KMS, you can
- * enable an S3 Bucket Key for the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
- * Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Access Control
- * List (ACL)-Specific Request Headers</b> </p> <p>When copying an object, you can
- * optionally use headers to grant ACL-based permissions. By default, all objects
- * are private. Only the owner has full access control. When adding a new object,
- * you can grant permissions to individual AWS accounts or to predefined groups
- * defined by Amazon S3. These permissions are then added to the ACL on the object.
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html">Managing
- * ACLs Using the REST API</a>. </p> <p> <b>Storage Class Options</b> </p> <p>You
- * can use the <code>CopyObject</code> action to change the storage class of an
- * object that is already stored in Amazon S3 using the <code>StorageClass</code>
- * parameter. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p> <p>
- * <b>Versioning</b> </p> <p>By default, <code>x-amz-copy-source</code> identifies
- * the current version of an object to copy. If the current version is a delete
- * marker, Amazon S3 behaves as if the object was deleted. To copy a different
- * version, use the <code>versionId</code> subresource.</p> <p>If you enable
- * versioning on the target bucket, Amazon S3 generates a unique version ID for the
- * object being copied. This version ID is different from the version ID of the
- * source object. Amazon S3 returns the version ID of the copied object in the
- * <code>x-amz-version-id</code> response header in the response.</p> <p>If you do
- * not enable versioning or suspend it on the target bucket, the version ID that
- * Amazon S3 generates is always null.</p> <p>If the source object's storage class
- * is GLACIER, you must restore a copy of this object before you can use it as a
- * source object for the copy operation. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html">RestoreObject</a>.</p>
- * <p>The following operations are related to <code>CopyObject</code>:</p> <ul>
- * <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul> <p>For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html">Copying
- * Objects</a>.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for CopyObject that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::CopyObjectOutcomeCallable CopyObjectCallable(const Model::CopyObjectRequest& request) const;
+ template<typename CopyObjectRequestT = Model::CopyObjectRequest>
+ Model::CopyObjectOutcomeCallable CopyObjectCallable(const CopyObjectRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::CopyObject, request);
+ }
/**
- * <p>Creates a copy of an object that is already stored in Amazon S3.</p>
- * <p>You can store individual objects of up to 5 TB in Amazon S3. You create a
- * copy of your object up to 5 GB in size in a single atomic action using this API.
- * However, to copy an object greater than 5 GB, you must use the multipart upload
- * Upload Part - Copy API. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjctsUsingRESTMPUapi.html">Copy
- * Object Using the REST Multipart Upload API</a>.</p> <p>All copy requests
- * must be authenticated. Additionally, you must have <i>read</i> access to the
- * source object and <i>write</i> access to the destination bucket. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
- * Authentication</a>. Both the Region that you want to copy the object from and
- * the Region that you want to copy the object to must be enabled for your
- * account.</p> <p>A copy request might return an error when Amazon S3 receives the
- * copy request or while Amazon S3 is copying the files. If the error occurs before
- * the copy action starts, you receive a standard Amazon S3 error. If the error
- * occurs during the copy operation, the error response is embedded in the
- * <code>200 OK</code> response. This means that a <code>200 OK</code> response can
- * contain either a success or an error. Design your application to parse the
- * contents of the response and handle it appropriately.</p> <p>If the copy is
- * successful, you receive a response with information about the copied object.</p>
- * <p>If the request is an HTTP 1.1 request, the response is chunk encoded.
- * If it were not, it would not contain the content-length, and you would need to
- * read the entire body.</p> <p>The copy request charge is based on the
- * storage class and Region that you specify for the destination object. For
- * pricing information, see <a href="http://aws.amazon.com/s3/pricing/">Amazon S3
- * pricing</a>.</p> <p>Amazon S3 transfer acceleration does not support
- * cross-Region copies. If you request a cross-Region copy using a transfer
- * acceleration endpoint, you get a 400 <code>Bad Request</code> error. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html">Transfer
- * Acceleration</a>.</p> <p> <b>Metadata</b> </p> <p>When copying an
- * object, you can preserve all metadata (default) or specify new metadata.
- * However, the ACL is not preserved and is set to private for the user making the
- * request. To override the default ACL setting, specify a new ACL when generating
- * a copy request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Using
- * ACLs</a>. </p> <p>To specify whether you want the object metadata copied from
- * the source object or replaced with metadata provided in the request, you can
- * optionally add the <code>x-amz-metadata-directive</code> header. When you grant
- * permissions, you can use the <code>s3:x-amz-metadata-directive</code> condition
- * key to enforce certain metadata behavior when objects are uploaded. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/amazon-s3-policy-keys.html">Specifying
- * Conditions in a Policy</a> in the <i>Amazon S3 Developer Guide</i>. For a
- * complete list of Amazon S3-specific condition keys, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/list_amazons3.html">Actions,
- * Resources, and Condition Keys for Amazon S3</a>.</p> <p> <b>
- * <code>x-amz-copy-source-if</code> Headers</b> </p> <p>To only copy an object
- * under certain conditions, such as whether the <code>Etag</code> matches or
- * whether the object was modified before or after a specified date, use the
- * following request parameters:</p> <ul> <li> <p>
- * <code>x-amz-copy-source-if-match</code> </p> </li> <li> <p>
- * <code>x-amz-copy-source-if-none-match</code> </p> </li> <li> <p>
- * <code>x-amz-copy-source-if-unmodified-since</code> </p> </li> <li> <p>
- * <code>x-amz-copy-source-if-modified-since</code> </p> </li> </ul> <p> If both
- * the <code>x-amz-copy-source-if-match</code> and
- * <code>x-amz-copy-source-if-unmodified-since</code> headers are present in the
- * request and evaluate as follows, Amazon S3 returns <code>200 OK</code> and
- * copies the data:</p> <ul> <li> <p> <code>x-amz-copy-source-if-match</code>
- * condition evaluates to true</p> </li> <li> <p>
- * <code>x-amz-copy-source-if-unmodified-since</code> condition evaluates to
- * false</p> </li> </ul> <p>If both the
- * <code>x-amz-copy-source-if-none-match</code> and
- * <code>x-amz-copy-source-if-modified-since</code> headers are present in the
- * request and evaluate as follows, Amazon S3 returns the <code>412 Precondition
- * Failed</code> response code:</p> <ul> <li> <p>
- * <code>x-amz-copy-source-if-none-match</code> condition evaluates to false</p>
- * </li> <li> <p> <code>x-amz-copy-source-if-modified-since</code> condition
- * evaluates to true</p> </li> </ul> <p>All headers with the
- * <code>x-amz-</code> prefix, including <code>x-amz-copy-source</code>, must be
- * signed.</p> <p> <b>Server-side encryption</b> </p> <p>When you perform a
- * CopyObject operation, you can optionally use the appropriate encryption-related
- * headers to encrypt the object using server-side encryption with AWS managed
- * encryption keys (SSE-S3 or SSE-KMS) or a customer-provided encryption key. With
- * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
- * its data centers and decrypts the data when you access it. For more information
- * about server-side encryption, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Using
- * Server-Side Encryption</a>.</p> <p>If a target object uses SSE-KMS, you can
- * enable an S3 Bucket Key for the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
- * Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Access Control
- * List (ACL)-Specific Request Headers</b> </p> <p>When copying an object, you can
- * optionally use headers to grant ACL-based permissions. By default, all objects
- * are private. Only the owner has full access control. When adding a new object,
- * you can grant permissions to individual AWS accounts or to predefined groups
- * defined by Amazon S3. These permissions are then added to the ACL on the object.
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html">Managing
- * ACLs Using the REST API</a>. </p> <p> <b>Storage Class Options</b> </p> <p>You
- * can use the <code>CopyObject</code> action to change the storage class of an
- * object that is already stored in Amazon S3 using the <code>StorageClass</code>
- * parameter. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p> <p>
- * <b>Versioning</b> </p> <p>By default, <code>x-amz-copy-source</code> identifies
- * the current version of an object to copy. If the current version is a delete
- * marker, Amazon S3 behaves as if the object was deleted. To copy a different
- * version, use the <code>versionId</code> subresource.</p> <p>If you enable
- * versioning on the target bucket, Amazon S3 generates a unique version ID for the
- * object being copied. This version ID is different from the version ID of the
- * source object. Amazon S3 returns the version ID of the copied object in the
- * <code>x-amz-version-id</code> response header in the response.</p> <p>If you do
- * not enable versioning or suspend it on the target bucket, the version ID that
- * Amazon S3 generates is always null.</p> <p>If the source object's storage class
- * is GLACIER, you must restore a copy of this object before you can use it as a
- * source object for the copy operation. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html">RestoreObject</a>.</p>
- * <p>The following operations are related to <code>CopyObject</code>:</p> <ul>
- * <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul> <p>For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CopyingObjectsExamples.html">Copying
- * Objects</a>.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObject">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for CopyObject that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void CopyObjectAsync(const Model::CopyObjectRequest& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename CopyObjectRequestT = Model::CopyObjectRequest>
+ void CopyObjectAsync(const CopyObjectRequestT& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::CopyObject, request, handler, context);
+ }
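
The StorageClass option described above is commonly exercised by copying an object onto itself. A hedged sketch using the new Callable wrapper, with placeholder names and the assumption that the generated SetStorageClass/SetMetadataDirective setters are available on CopyObjectRequest:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CopyObjectRequest.h>
    #include <aws/s3/model/MetadataDirective.h>
    #include <aws/s3/model/StorageClass.h>

    // "Copies" an object onto itself to move it to a different storage class.
    Aws::S3::Model::CopyObjectOutcomeCallable
    ChangeStorageClass(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::CopyObjectRequest request;
        request.SetCopySource("my-bucket/my-object");
        request.SetBucket("my-bucket");
        request.SetKey("my-object");
        request.SetStorageClass(Aws::S3::Model::StorageClass::STANDARD_IA);
        request.SetMetadataDirective(Aws::S3::Model::MetadataDirective::COPY);  // keep existing metadata

        // The templated wrapper queues the call and returns a future immediately.
        return client.CopyObjectCallable(request);
    }
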
/**
* <p>Creates a new S3 bucket. To create a bucket, you must register with Amazon S3
- * and have a valid AWS Access Key ID to authenticate requests. Anonymous requests
- * are never allowed to create buckets. By creating the bucket, you become the
- * bucket owner.</p> <p>Not every string is an acceptable bucket name. For
- * information about bucket naming restrictions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html">Working
- * with Amazon S3 buckets</a>. </p> <p>If you want to create an Amazon S3 on
- * Outposts bucket, see <a
+ * and have a valid Amazon Web Services Access Key ID to authenticate requests.
+ * Anonymous requests are never allowed to create buckets. By creating the bucket,
+ * you become the bucket owner.</p> <p>Not every string is an acceptable bucket
+ * name. For information about bucket naming restrictions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/bucketnamingrules.html">Bucket
+ * naming rules</a>.</p> <p>If you want to create an Amazon S3 on Outposts bucket,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html">Create
* Bucket</a>. </p> <p>By default, the bucket is created in the US East (N.
* Virginia) Region. You can optionally specify a Region in the request body. You
@@ -1229,40 +415,67 @@ namespace Aws
* bucket in a Region other than US East (N. Virginia), your application must be
* able to handle 307 redirect. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html">Virtual
- * hosting of buckets</a>.</p> <p>When creating a bucket using this
- * operation, you can optionally specify the accounts or groups that should be
- * granted specific permissions on the bucket. There are two ways to grant the
- * appropriate permissions using the request headers.</p> <ul> <li> <p>Specify a
- * canned ACL using the <code>x-amz-acl</code> request header. Amazon S3 supports a
- * set of predefined ACLs, known as <i>canned ACLs</i>. Each canned ACL has a
- * predefined set of grantees and permissions. For more information, see <a
+ * hosting of buckets</a>.</p> <p> <b>Access control lists (ACLs)</b> </p>
+ * <p>When creating a bucket using this operation, you can optionally configure the
+ * bucket ACL to specify the accounts or groups that should be granted specific
+ * permissions on the bucket.</p> <p>If your CreateBucket request sets
+ * bucket owner enforced for S3 Object Ownership and specifies a bucket ACL that
+ * provides access to an external Amazon Web Services account, your request fails
+ * with a <code>400</code> error and returns the
+ * <code>InvalidBucketAclWithObjectOwnership</code> error code. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">Controlling
+ * object ownership</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * <p>There are two ways to grant the appropriate permissions using the request
+ * headers.</p> <ul> <li> <p>Specify a canned ACL using the <code>x-amz-acl</code>
+ * request header. Amazon S3 supports a set of predefined ACLs, known as <i>canned
+ * ACLs</i>. Each canned ACL has a predefined set of grantees and permissions. For
+ * more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
* ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly using the
* <code>x-amz-grant-read</code>, <code>x-amz-grant-write</code>,
* <code>x-amz-grant-read-acp</code>, <code>x-amz-grant-write-acp</code>, and
* <code>x-amz-grant-full-control</code> headers. These headers map to the set of
* permissions Amazon S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html">Access
* control list (ACL) overview</a>.</p> <p>You specify each grantee as a type=value
* pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * – if the value specified is the canonical user ID of an Amazon Web Services
+ * account</p> </li> <li> <p> <code>uri</code> – if you are granting permissions to
+ * a predefined group</p> </li> <li> <p> <code>emailAddress</code> – if the value
+ * specified is the email address of an Amazon Web Services account</p>
+ * <p>Using email addresses to specify a grantee is only supported in the following
+ * Amazon Web Services Regions: </p> <ul> <li> <p>US East (N. Virginia)</p> </li>
+ * <li> <p>US West (N. California)</p> </li> <li> <p> US West (Oregon)</p> </li>
+ * <li> <p> Asia Pacific (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p>
+ * </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li>
+ * <li> <p>South America (São Paulo)</p> </li> </ul> <p>For a list of all the
+ * Amazon S3 supported Regions and endpoints, see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants the AWS
- * accounts identified by account IDs permissions to read object data and its
- * metadata:</p> <p> <code>x-amz-grant-read: id="11112222333", id="444455556666"
- * </code> </p> </li> </ul> <p>You can use either a canned ACL or specify
- * access permissions explicitly. You cannot do both.</p> <p>The following
- * operations are related to <code>CreateBucket</code>:</p> <ul> <li> <p> <a
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
+ * </li> </ul> <p>For example, the following <code>x-amz-grant-read</code> header
+ * grants the Amazon Web Services accounts identified by account IDs permissions to
+ * read object data and its metadata:</p> <p> <code>x-amz-grant-read:
+ * id="11112222333", id="444455556666" </code> </p> </li> </ul> <p>You can
+ * use either a canned ACL or specify access permissions explicitly. You cannot do
+ * both.</p> <p> <b>Permissions</b> </p> <p>In addition to
+ * <code>s3:CreateBucket</code>, the following permissions are required when your
+ * CreateBucket includes specific headers:</p> <ul> <li> <p> <b>ACLs</b> - If your
+ * <code>CreateBucket</code> request specifies ACL permissions and the ACL is
+ * public-read, public-read-write, authenticated-read, or if you specify access
+ * permissions explicitly through any other ACL, both <code>s3:CreateBucket</code>
+ * and <code>s3:PutBucketAcl</code> permissions are needed. If the ACL for the
+ * <code>CreateBucket</code> request is private or if the request doesn't specify
+ * any ACLs, only
+ * <code>s3:CreateBucket</code> permission is needed. </p> </li> <li> <p> <b>Object
+ * Lock</b> - If <code>ObjectLockEnabledForBucket</code> is set to true in your
+ * <code>CreateBucket</code> request,
+ * <code>s3:PutBucketObjectLockConfiguration</code> and
+ * <code>s3:PutBucketVersioning</code> permissions are required.</p> </li> <li> <p>
+ * <b>S3 Object Ownership</b> - If your CreateBucket request includes the
+ * <code>x-amz-object-ownership</code> header,
+ * <code>s3:PutBucketOwnershipControls</code> permission is required.</p> </li>
+ * </ul> <p>The following operations are related to <code>CreateBucket</code>:</p>
+ * <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
@@ -1273,144 +486,22 @@ namespace Aws
virtual Model::CreateBucketOutcome CreateBucket(const Model::CreateBucketRequest& request) const;
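
Creating a bucket outside US East (N. Virginia), as described above, requires a LocationConstraint in the request body; a canned ACL can be set on the same request. The snippet below is a sketch only: the bucket name is a placeholder, and the CreateBucketConfiguration model and its setters are the standard generated ones, not something added by this patch.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/BucketLocationConstraint.h>
    #include <aws/s3/model/CreateBucketConfiguration.h>
    #include <aws/s3/model/CreateBucketRequest.h>

    // Creates a bucket in Europe (Ireland) with the private canned ACL.
    Aws::S3::Model::CreateBucketOutcome
    CreateRegionalBucket(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::CreateBucketRequest request;
        request.SetBucket("my-unique-bucket-name");  // placeholder; must follow the bucket naming rules
        request.SetACL(Aws::S3::Model::BucketCannedACL::private_);

        Aws::S3::Model::CreateBucketConfiguration config;
        config.SetLocationConstraint(Aws::S3::Model::BucketLocationConstraint::eu_west_1);
        request.SetCreateBucketConfiguration(config);

        return client.CreateBucket(request);  // expect a 307 redirect to be handled for non us-east-1 endpoints
    }
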
/**
- * <p>Creates a new S3 bucket. To create a bucket, you must register with Amazon S3
- * and have a valid AWS Access Key ID to authenticate requests. Anonymous requests
- * are never allowed to create buckets. By creating the bucket, you become the
- * bucket owner.</p> <p>Not every string is an acceptable bucket name. For
- * information about bucket naming restrictions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html">Working
- * with Amazon S3 buckets</a>. </p> <p>If you want to create an Amazon S3 on
- * Outposts bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html">Create
- * Bucket</a>. </p> <p>By default, the bucket is created in the US East (N.
- * Virginia) Region. You can optionally specify a Region in the request body. You
- * might choose a Region to optimize latency, minimize costs, or address regulatory
- * requirements. For example, if you reside in Europe, you will probably find it
- * advantageous to create buckets in the Europe (Ireland) Region. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">Accessing
- * a bucket</a>.</p> <p>If you send your create bucket request to the
- * <code>s3.amazonaws.com</code> endpoint, the request goes to the us-east-1
- * Region. Accordingly, the signature calculations in Signature Version 4 must use
- * us-east-1 as the Region, even if the location constraint in the request
- * specifies another Region where the bucket is to be created. If you create a
- * bucket in a Region other than US East (N. Virginia), your application must be
- * able to handle 307 redirect. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html">Virtual
- * hosting of buckets</a>.</p> <p>When creating a bucket using this
- * operation, you can optionally specify the accounts or groups that should be
- * granted specific permissions on the bucket. There are two ways to grant the
- * appropriate permissions using the request headers.</p> <ul> <li> <p>Specify a
- * canned ACL using the <code>x-amz-acl</code> request header. Amazon S3 supports a
- * set of predefined ACLs, known as <i>canned ACLs</i>. Each canned ACL has a
- * predefined set of grantees and permissions. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly using the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-write</code>,
- * <code>x-amz-grant-read-acp</code>, <code>x-amz-grant-write-acp</code>, and
- * <code>x-amz-grant-full-control</code> headers. These headers map to the set of
- * permissions Amazon S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * control list (ACL) overview</a>.</p> <p>You specify each grantee as a type=value
- * pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants the AWS
- * accounts identified by account IDs permissions to read object data and its
- * metadata:</p> <p> <code>x-amz-grant-read: id="11112222333", id="444455556666"
- * </code> </p> </li> </ul> <p>You can use either a canned ACL or specify
- * access permissions explicitly. You cannot do both.</p> <p>The following
- * operations are related to <code>CreateBucket</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for CreateBucket that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::CreateBucketOutcomeCallable CreateBucketCallable(const Model::CreateBucketRequest& request) const;
+ template<typename CreateBucketRequestT = Model::CreateBucketRequest>
+ Model::CreateBucketOutcomeCallable CreateBucketCallable(const CreateBucketRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::CreateBucket, request);
+ }
/**
- * <p>Creates a new S3 bucket. To create a bucket, you must register with Amazon S3
- * and have a valid AWS Access Key ID to authenticate requests. Anonymous requests
- * are never allowed to create buckets. By creating the bucket, you become the
- * bucket owner.</p> <p>Not every string is an acceptable bucket name. For
- * information about bucket naming restrictions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html">Working
- * with Amazon S3 buckets</a>. </p> <p>If you want to create an Amazon S3 on
- * Outposts bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_control_CreateBucket.html">Create
- * Bucket</a>. </p> <p>By default, the bucket is created in the US East (N.
- * Virginia) Region. You can optionally specify a Region in the request body. You
- * might choose a Region to optimize latency, minimize costs, or address regulatory
- * requirements. For example, if you reside in Europe, you will probably find it
- * advantageous to create buckets in the Europe (Ireland) Region. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">Accessing
- * a bucket</a>.</p> <p>If you send your create bucket request to the
- * <code>s3.amazonaws.com</code> endpoint, the request goes to the us-east-1
- * Region. Accordingly, the signature calculations in Signature Version 4 must use
- * us-east-1 as the Region, even if the location constraint in the request
- * specifies another Region where the bucket is to be created. If you create a
- * bucket in a Region other than US East (N. Virginia), your application must be
- * able to handle 307 redirect. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html">Virtual
- * hosting of buckets</a>.</p> <p>When creating a bucket using this
- * operation, you can optionally specify the accounts or groups that should be
- * granted specific permissions on the bucket. There are two ways to grant the
- * appropriate permissions using the request headers.</p> <ul> <li> <p>Specify a
- * canned ACL using the <code>x-amz-acl</code> request header. Amazon S3 supports a
- * set of predefined ACLs, known as <i>canned ACLs</i>. Each canned ACL has a
- * predefined set of grantees and permissions. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly using the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-write</code>,
- * <code>x-amz-grant-read-acp</code>, <code>x-amz-grant-write-acp</code>, and
- * <code>x-amz-grant-full-control</code> headers. These headers map to the set of
- * permissions Amazon S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * control list (ACL) overview</a>.</p> <p>You specify each grantee as a type=value
- * pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants the AWS
- * accounts identified by account IDs permissions to read object data and its
- * metadata:</p> <p> <code>x-amz-grant-read: id="11112222333", id="444455556666"
- * </code> </p> </li> </ul> <p>You can use either a canned ACL or specify
- * access permissions explicitly. You cannot do both.</p> <p>The following
- * operations are related to <code>CreateBucket</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucket">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for CreateBucket that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void CreateBucketAsync(const Model::CreateBucketRequest& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename CreateBucketRequestT = Model::CreateBucketRequest>
+ void CreateBucketAsync(const CreateBucketRequestT& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::CreateBucket, request, handler, context);
+ }
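
Alternatively, the explicit grant headers (for example x-amz-grant-read) have generated setters on the request model; remember the doc comment above says a canned ACL and explicit grants cannot be combined. A speculative sketch using the new Async wrapper, with the account IDs taken verbatim from the header example above and the handler type assumed to be the existing CreateBucketResponseReceivedHandler typedef:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/CreateBucketRequest.h>

    // Equivalent of sending x-amz-grant-read: grant READ to two accounts by id.
    // Do not also set a canned ACL on the same request.
    void CreateBucketWithGrantsAsync(const Aws::S3::S3Client& client,
                                     const Aws::S3::CreateBucketResponseReceivedHandler& handler)
    {
        Aws::S3::Model::CreateBucketRequest request;
        request.SetBucket("my-unique-bucket-name");                       // placeholder
        request.SetGrantRead("id=\"11112222333\", id=\"444455556666\"");  // ids from the doc example

        client.CreateBucketAsync(request, handler);                       // context defaults to nullptr
    }
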
/**
* <p>This action initiates a multipart upload and returns an upload ID. This
@@ -1438,33 +529,34 @@ namespace Aws
* sign each request individually. There is nothing special about signing multipart
* upload requests. For more information about signing, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">Authenticating
- * Requests (AWS Signature Version 4)</a>.</p> <p> After you initiate a
- * multipart upload and upload one or more parts, to stop being charged for storing
- * the uploaded parts, you must either complete or abort the multipart upload.
- * Amazon S3 frees up the space used to store the parts and stop charging you for
- * storing them only after you either complete or abort a multipart upload. </p>
- * <p>You can optionally request server-side encryption. For server-side
- * encryption, Amazon S3 encrypts your data as it writes it to disks in its data
- * centers and decrypts it when you access it. You can provide your own encryption
- * key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
- * Amazon S3-managed encryption keys. If you choose to provide your own encryption
- * key, the request headers you provide in <a
+ * Requests (Amazon Web Services Signature Version 4)</a>.</p> <p> After you
+ * initiate a multipart upload and upload one or more parts, to stop being charged
+ * for storing the uploaded parts, you must either complete or abort the multipart
+ * upload. Amazon S3 frees up the space used to store the parts and stops charging
+ * you for storing them only after you either complete or abort a multipart upload.
+ * </p> <p>You can optionally request server-side encryption. For
+ * server-side encryption, Amazon S3 encrypts your data as it writes it to disks in
+ * its data centers and decrypts it when you access it. You can provide your own
+ * encryption key, or use Amazon Web Services KMS keys or Amazon S3-managed
+ * encryption keys. If you choose to provide your own encryption key, the request
+ * headers you provide in <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
* and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html">UploadPartCopy</a>
* requests must match the headers you used in the request to initiate the upload
* by using <code>CreateMultipartUpload</code>. </p> <p>To perform a multipart
- * upload with encryption using an AWS KMS CMK, the requester must have permission
- * to the <code>kms:Encrypt</code>, <code>kms:Decrypt</code>,
- * <code>kms:ReEncrypt*</code>, <code>kms:GenerateDataKey*</code>, and
- * <code>kms:DescribeKey</code> actions on the key. These permissions are required
- * because Amazon S3 must decrypt and read data from the encrypted file parts
- * before it completes the multipart upload.</p> <p>If your AWS Identity and Access
- * Management (IAM) user or role is in the same AWS account as the AWS KMS CMK,
- * then you must have these permissions on the key policy. If your IAM user or role
- * belongs to a different account than the key, then you must have the permissions
- * on both the key policy and your IAM user or role.</p> <p> For more information,
- * see <a
+ * upload with encryption using an Amazon Web Services KMS key, the requester must
+ * have permission to the <code>kms:Decrypt</code> and
+ * <code>kms:GenerateDataKey*</code> actions on the key. These permissions are
+ * required because Amazon S3 must decrypt and read data from the encrypted file
+ * parts before it completes the multipart upload. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/mpuoverview.html#mpuAndPermissions">Multipart
+ * upload API and permissions</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If
+ * your Identity and Access Management (IAM) user or role is in the same Amazon Web
+ * Services account as the KMS key, then you must have these permissions on the key
+ * policy. If your IAM user or role belongs to a different account than the key,
+ * then you must have the permissions on both the key policy and your IAM user or
+ * role.</p> <p> For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
* Data Using Server-Side Encryption</a>.</p> <dl> <dt>Access Permissions</dt> <dd>
* <p>When copying an object, you can optionally specify the accounts or groups
@@ -1485,35 +577,37 @@ namespace Aws
* optionally tell Amazon S3 to encrypt data at rest using server-side encryption.
* Server-side encryption is for data encryption at rest. Amazon S3 encrypts your
* data as it writes it to disks in its data centers and decrypts it when you
- * access it. The option you use depends on whether you want to use AWS managed
- * encryption keys or provide your own encryption key. </p> <ul> <li> <p>Use
- * encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in
- * AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used
- * to encrypt data, specify the following headers in the request.</p> <ul> <li>
- * <p>x-amz-server-side-encryption</p> </li> <li>
- * <p>x-amz-server-side-encryption-aws-kms-key-id</p> </li> <li>
- * <p>x-amz-server-side-encryption-context</p> </li> </ul> <p>If you specify
- * <code>x-amz-server-side-encryption:aws:kms</code>, but don't provide
- * <code>x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS KMS to protect the data.</p> <p>All GET
- * and PUT requests for an object protected by AWS KMS fail if you don't make them
- * with SSL or by using SigV4.</p> <p>For more information about
- * server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see <a
+ * access it. The option you use depends on whether you want to use Amazon Web
+ * Services managed encryption keys or provide your own encryption key. </p> <ul>
+ * <li> <p>Use encryption keys managed by Amazon S3 or customer managed key stored
+ * in Amazon Web Services Key Management Service (Amazon Web Services KMS) – If you
+ * want Amazon Web Services to manage the keys used to encrypt data, specify the
+ * following headers in the request.</p> <ul> <li> <p>
+ * <code>x-amz-server-side-encryption</code> </p> </li> <li> <p>
+ * <code>x-amz-server-side-encryption-aws-kms-key-id</code> </p> </li> <li> <p>
+ * <code>x-amz-server-side-encryption-context</code> </p> </li> </ul> <p>If
+ * you specify <code>x-amz-server-side-encryption:aws:kms</code>, but don't provide
+ * <code>x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the
+ * Amazon Web Services managed key in Amazon Web Services KMS to protect the
+ * data.</p> <p>All GET and PUT requests for an object
+ * protected by Amazon Web Services KMS fail if you don't make them with SSL or by
+ * using SigV4.</p> <p>For more information about server-side
+ * encryption with KMS key (SSE-KMS), see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html">Protecting
- * Data Using Server-Side Encryption with CMKs stored in AWS KMS</a>.</p> </li>
- * <li> <p>Use customer-provided encryption keys – If you want to manage your own
- * encryption keys, provide all the following headers in the request.</p> <ul> <li>
- * <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS),
+ * Data Using Server-Side Encryption with KMS keys</a>.</p> </li> <li> <p>Use
+ * customer-provided encryption keys – If you want to manage your own encryption
+ * keys, provide all the following headers in the request.</p> <ul> <li> <p>
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> </p> </li> <li> <p>
+ * <code>x-amz-server-side-encryption-customer-key</code> </p> </li> <li> <p>
+ * <code>x-amz-server-side-encryption-customer-key-MD5</code> </p> </li> </ul>
+ * <p>For more information about server-side encryption with KMS keys (SSE-KMS),
* see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html">Protecting
- * Data Using Server-Side Encryption with CMKs stored in AWS KMS</a>.</p> </li>
- * </ul> </dd> <dt>Access-Control-List (ACL)-Specific Request Headers</dt> <dd>
- * <p>You also can use the following access control–related headers with this
- * operation. By default, all objects are private. Only the owner has full access
- * control. When adding a new object, you can grant permissions to individual AWS
+ * Data Using Server-Side Encryption with KMS keys</a>.</p> </li> </ul> </dd>
+ * <dt>Access-Control-List (ACL)-Specific Request Headers</dt> <dd> <p>You also can
+ * use the following access control–related headers with this operation. By
+ * default, all objects are private. Only the owner has full access control. When
+ * adding a new object, you can grant permissions to individual Amazon Web Services
* accounts or to predefined groups defined by Amazon S3. These permissions are
* then added to the access control list (ACL) on the object. For more information,
* see <a
@@ -1525,34 +619,38 @@ namespace Aws
* permissions. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
* ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly — To explicitly
- * grant access permissions to specific AWS accounts or groups, use the following
- * headers. Each header maps to specific permissions that Amazon S3 supports in an
- * ACL. For more information, see <a
+ * grant access permissions to specific Amazon Web Services accounts or groups, use
+ * the following headers. Each header maps to specific permissions that Amazon S3
+ * supports in an ACL. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
* Control List (ACL) Overview</a>. In the header, you specify a list of grantees
* who get the specific permission. To grant permissions explicitly, use:</p> <ul>
- * <li> <p>x-amz-grant-read</p> </li> <li> <p>x-amz-grant-write</p> </li> <li>
- * <p>x-amz-grant-read-acp</p> </li> <li> <p>x-amz-grant-write-acp</p> </li> <li>
- * <p>x-amz-grant-full-control</p> </li> </ul> <p>You specify each grantee as a
- * type=value pair, where the type is one of the following:</p> <ul> <li> <p>
- * <code>id</code> – if the value specified is the canonical user ID of an AWS
- * account</p> </li> <li> <p> <code>uri</code> – if you are granting permissions to
- * a predefined group</p> </li> <li> <p> <code>emailAddress</code> – if the value
- * specified is the email address of an AWS account</p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
+ * <li> <p> <code>x-amz-grant-read</code> </p> </li> <li> <p>
+ * <code>x-amz-grant-write</code> </p> </li> <li> <p>
+ * <code>x-amz-grant-read-acp</code> </p> </li> <li> <p>
+ * <code>x-amz-grant-write-acp</code> </p> </li> <li> <p>
+ * <code>x-amz-grant-full-control</code> </p> </li> </ul> <p>You specify each
+ * grantee as a type=value pair, where the type is one of the following:</p> <ul>
+ * <li> <p> <code>id</code> – if the value specified is the canonical user ID of an
+ * Amazon Web Services account</p> </li> <li> <p> <code>uri</code> – if you are
+ * granting permissions to a predefined group</p> </li> <li> <p>
+ * <code>emailAddress</code> – if the value specified is the email address of an
+ * Amazon Web Services account</p> <p>Using email addresses to specify a
+ * grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants the AWS
- * accounts identified by account IDs permissions to read object data and its
- * metadata:</p> <p> <code>x-amz-grant-read: id="11112222333", id="444455556666"
- * </code> </p> </li> </ul> </dd> </dl> <p>The following operations are related to
- * <code>CreateMultipartUpload</code>:</p> <ul> <li> <p> <a
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
+ * </li> </ul> <p>For example, the following <code>x-amz-grant-read</code> header
+ * grants the Amazon Web Services accounts identified by account IDs permissions to
+ * read object data and its metadata:</p> <p> <code>x-amz-grant-read:
+ * id="11112222333", id="444455556666" </code> </p> </li> </ul> </dd> </dl> <p>The
+ * following operations are related to <code>CreateMultipartUpload</code>:</p> <ul>
+ * <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
@@ -1569,320 +667,22 @@ namespace Aws
virtual Model::CreateMultipartUploadOutcome CreateMultipartUpload(const Model::CreateMultipartUploadRequest& request) const;
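A minimal sketch of driving the synchronous overload above from application code. The bucket name, object key, and KMS key ARN are placeholders, and the SSE-related setters are the request-level counterparts of the x-amz-server-side-encryption headers discussed in the comment:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default client configuration

        Aws::S3::Model::CreateMultipartUploadRequest request;
        request.SetBucket("my-bucket");          // placeholder bucket
        request.SetKey("backups/archive.bin");   // placeholder key
        // These setters correspond to the x-amz-server-side-encryption and
        // x-amz-server-side-encryption-aws-kms-key-id headers described above.
        request.SetServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms);
        request.SetSSEKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/example"); // placeholder ARN

        auto outcome = client.CreateMultipartUpload(request);
        if (outcome.IsSuccess()) {
            // The upload ID must accompany every UploadPart call and the final
            // CompleteMultipartUpload or AbortMultipartUpload request.
            std::cout << "UploadId: " << outcome.GetResult().GetUploadId() << "\n";
        } else {
            std::cerr << "CreateMultipartUpload failed: "
                      << outcome.GetError().GetMessage() << "\n";
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}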
/**
- * <p>This action initiates a multipart upload and returns an upload ID. This
- * upload ID is used to associate all of the parts in the specific multipart
- * upload. You specify this upload ID in each of your subsequent upload part
- * requests (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>).
- * You also include this upload ID in the final request to either complete or abort
- * the multipart upload request.</p> <p>For more information about multipart
- * uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html">Multipart
- * Upload Overview</a>.</p> <p>If you have configured a lifecycle rule to abort
- * incomplete multipart uploads, the upload must complete within the number of days
- * specified in the bucket lifecycle configuration. Otherwise, the incomplete
- * multipart upload becomes eligible for an abort action and Amazon S3 aborts the
- * multipart upload. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config">Aborting
- * Incomplete Multipart Uploads Using a Bucket Lifecycle Policy</a>.</p> <p>For
- * information about the permissions required to use the multipart upload API, see
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>For request signing, multipart upload is just
- * a series of regular requests. You initiate a multipart upload, send one or more
- * requests to upload parts, and then complete the multipart upload process. You
- * sign each request individually. There is nothing special about signing multipart
- * upload requests. For more information about signing, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">Authenticating
- * Requests (AWS Signature Version 4)</a>.</p> <p> After you initiate a
- * multipart upload and upload one or more parts, to stop being charged for storing
- * the uploaded parts, you must either complete or abort the multipart upload.
- * Amazon S3 frees up the space used to store the parts and stop charging you for
- * storing them only after you either complete or abort a multipart upload. </p>
- * <p>You can optionally request server-side encryption. For server-side
- * encryption, Amazon S3 encrypts your data as it writes it to disks in its data
- * centers and decrypts it when you access it. You can provide your own encryption
- * key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
- * Amazon S3-managed encryption keys. If you choose to provide your own encryption
- * key, the request headers you provide in <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html">UploadPartCopy</a>
- * requests must match the headers you used in the request to initiate the upload
- * by using <code>CreateMultipartUpload</code>. </p> <p>To perform a multipart
- * upload with encryption using an AWS KMS CMK, the requester must have permission
- * to the <code>kms:Encrypt</code>, <code>kms:Decrypt</code>,
- * <code>kms:ReEncrypt*</code>, <code>kms:GenerateDataKey*</code>, and
- * <code>kms:DescribeKey</code> actions on the key. These permissions are required
- * because Amazon S3 must decrypt and read data from the encrypted file parts
- * before it completes the multipart upload.</p> <p>If your AWS Identity and Access
- * Management (IAM) user or role is in the same AWS account as the AWS KMS CMK,
- * then you must have these permissions on the key policy. If your IAM user or role
- * belongs to a different account than the key, then you must have the permissions
- * on both the key policy and your IAM user or role.</p> <p> For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
- * Data Using Server-Side Encryption</a>.</p> <dl> <dt>Access Permissions</dt> <dd>
- * <p>When copying an object, you can optionally specify the accounts or groups
- * that should be granted specific permissions on the new object. There are two
- * ways to grant the permissions using the request headers:</p> <ul> <li>
- * <p>Specify a canned ACL with the <code>x-amz-acl</code> request header. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly with the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
- * <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
- * headers. These parameters map to the set of permissions that Amazon S3 supports
- * in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>.</p> </li> </ul> <p>You can use either a canned
- * ACL or specify access permissions explicitly. You cannot do both.</p> </dd>
- * <dt>Server-Side- Encryption-Specific Request Headers</dt> <dd> <p>You can
- * optionally tell Amazon S3 to encrypt data at rest using server-side encryption.
- * Server-side encryption is for data encryption at rest. Amazon S3 encrypts your
- * data as it writes it to disks in its data centers and decrypts it when you
- * access it. The option you use depends on whether you want to use AWS managed
- * encryption keys or provide your own encryption key. </p> <ul> <li> <p>Use
- * encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in
- * AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used
- * to encrypt data, specify the following headers in the request.</p> <ul> <li>
- * <p>x-amz-server-side-encryption</p> </li> <li>
- * <p>x-amz-server-side-encryption-aws-kms-key-id</p> </li> <li>
- * <p>x-amz-server-side-encryption-context</p> </li> </ul> <p>If you specify
- * <code>x-amz-server-side-encryption:aws:kms</code>, but don't provide
- * <code>x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS KMS to protect the data.</p> <p>All GET
- * and PUT requests for an object protected by AWS KMS fail if you don't make them
- * with SSL or by using SigV4.</p> <p>For more information about
- * server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html">Protecting
- * Data Using Server-Side Encryption with CMKs stored in AWS KMS</a>.</p> </li>
- * <li> <p>Use customer-provided encryption keys – If you want to manage your own
- * encryption keys, provide all the following headers in the request.</p> <ul> <li>
- * <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS),
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html">Protecting
- * Data Using Server-Side Encryption with CMKs stored in AWS KMS</a>.</p> </li>
- * </ul> </dd> <dt>Access-Control-List (ACL)-Specific Request Headers</dt> <dd>
- * <p>You also can use the following access control–related headers with this
- * operation. By default, all objects are private. Only the owner has full access
- * control. When adding a new object, you can grant permissions to individual AWS
- * accounts or to predefined groups defined by Amazon S3. These permissions are
- * then added to the access control list (ACL) on the object. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Using
- * ACLs</a>. With this operation, you can grant access permissions using one of the
- * following two methods:</p> <ul> <li> <p>Specify a canned ACL
- * (<code>x-amz-acl</code>) — Amazon S3 supports a set of predefined ACLs, known as
- * <i>canned ACLs</i>. Each canned ACL has a predefined set of grantees and
- * permissions. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly — To explicitly
- * grant access permissions to specific AWS accounts or groups, use the following
- * headers. Each header maps to specific permissions that Amazon S3 supports in an
- * ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>. In the header, you specify a list of grantees
- * who get the specific permission. To grant permissions explicitly, use:</p> <ul>
- * <li> <p>x-amz-grant-read</p> </li> <li> <p>x-amz-grant-write</p> </li> <li>
- * <p>x-amz-grant-read-acp</p> </li> <li> <p>x-amz-grant-write-acp</p> </li> <li>
- * <p>x-amz-grant-full-control</p> </li> </ul> <p>You specify each grantee as a
- * type=value pair, where the type is one of the following:</p> <ul> <li> <p>
- * <code>id</code> – if the value specified is the canonical user ID of an AWS
- * account</p> </li> <li> <p> <code>uri</code> – if you are granting permissions to
- * a predefined group</p> </li> <li> <p> <code>emailAddress</code> – if the value
- * specified is the email address of an AWS account</p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants the AWS
- * accounts identified by account IDs permissions to read object data and its
- * metadata:</p> <p> <code>x-amz-grant-read: id="11112222333", id="444455556666"
- * </code> </p> </li> </ul> </dd> </dl> <p>The following operations are related to
- * <code>CreateMultipartUpload</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for CreateMultipartUpload that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::CreateMultipartUploadOutcomeCallable CreateMultipartUploadCallable(const Model::CreateMultipartUploadRequest& request) const;
+ template<typename CreateMultipartUploadRequestT = Model::CreateMultipartUploadRequest>
+ Model::CreateMultipartUploadOutcomeCallable CreateMultipartUploadCallable(const CreateMultipartUploadRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::CreateMultipartUpload, request);
+ }
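A brief sketch of the Callable wrapper above: the returned OutcomeCallable behaves like a std::future over the outcome, so two uploads can be initiated concurrently and joined later. Bucket and key names are placeholders, and an already-initialized client is assumed to be passed in:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>

// Starts two multipart uploads concurrently and waits for both results.
void StartTwoUploads(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::CreateMultipartUploadRequest first;
    first.SetBucket("my-bucket");    // placeholder
    first.SetKey("objects/a.bin");   // placeholder

    Aws::S3::Model::CreateMultipartUploadRequest second;
    second.SetBucket("my-bucket");   // placeholder
    second.SetKey("objects/b.bin");  // placeholder

    // Each call returns an OutcomeCallable immediately.
    auto pendingA = client.CreateMultipartUploadCallable(first);
    auto pendingB = client.CreateMultipartUploadCallable(second);

    // get() blocks until the corresponding request has finished.
    auto outcomeA = pendingA.get();
    auto outcomeB = pendingB.get();
    if (!outcomeA.IsSuccess() || !outcomeB.IsSuccess()) {
        // inspect GetError() on the failed outcome(s)
    }
}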
/**
- * <p>This action initiates a multipart upload and returns an upload ID. This
- * upload ID is used to associate all of the parts in the specific multipart
- * upload. You specify this upload ID in each of your subsequent upload part
- * requests (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>).
- * You also include this upload ID in the final request to either complete or abort
- * the multipart upload request.</p> <p>For more information about multipart
- * uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html">Multipart
- * Upload Overview</a>.</p> <p>If you have configured a lifecycle rule to abort
- * incomplete multipart uploads, the upload must complete within the number of days
- * specified in the bucket lifecycle configuration. Otherwise, the incomplete
- * multipart upload becomes eligible for an abort action and Amazon S3 aborts the
- * multipart upload. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html#mpu-abort-incomplete-mpu-lifecycle-config">Aborting
- * Incomplete Multipart Uploads Using a Bucket Lifecycle Policy</a>.</p> <p>For
- * information about the permissions required to use the multipart upload API, see
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>For request signing, multipart upload is just
- * a series of regular requests. You initiate a multipart upload, send one or more
- * requests to upload parts, and then complete the multipart upload process. You
- * sign each request individually. There is nothing special about signing multipart
- * upload requests. For more information about signing, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">Authenticating
- * Requests (AWS Signature Version 4)</a>.</p> <p> After you initiate a
- * multipart upload and upload one or more parts, to stop being charged for storing
- * the uploaded parts, you must either complete or abort the multipart upload.
- * Amazon S3 frees up the space used to store the parts and stop charging you for
- * storing them only after you either complete or abort a multipart upload. </p>
- * <p>You can optionally request server-side encryption. For server-side
- * encryption, Amazon S3 encrypts your data as it writes it to disks in its data
- * centers and decrypts it when you access it. You can provide your own encryption
- * key, or use AWS Key Management Service (AWS KMS) customer master keys (CMKs) or
- * Amazon S3-managed encryption keys. If you choose to provide your own encryption
- * key, the request headers you provide in <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html">UploadPartCopy</a>
- * requests must match the headers you used in the request to initiate the upload
- * by using <code>CreateMultipartUpload</code>. </p> <p>To perform a multipart
- * upload with encryption using an AWS KMS CMK, the requester must have permission
- * to the <code>kms:Encrypt</code>, <code>kms:Decrypt</code>,
- * <code>kms:ReEncrypt*</code>, <code>kms:GenerateDataKey*</code>, and
- * <code>kms:DescribeKey</code> actions on the key. These permissions are required
- * because Amazon S3 must decrypt and read data from the encrypted file parts
- * before it completes the multipart upload.</p> <p>If your AWS Identity and Access
- * Management (IAM) user or role is in the same AWS account as the AWS KMS CMK,
- * then you must have these permissions on the key policy. If your IAM user or role
- * belongs to a different account than the key, then you must have the permissions
- * on both the key policy and your IAM user or role.</p> <p> For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
- * Data Using Server-Side Encryption</a>.</p> <dl> <dt>Access Permissions</dt> <dd>
- * <p>When copying an object, you can optionally specify the accounts or groups
- * that should be granted specific permissions on the new object. There are two
- * ways to grant the permissions using the request headers:</p> <ul> <li>
- * <p>Specify a canned ACL with the <code>x-amz-acl</code> request header. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly with the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
- * <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
- * headers. These parameters map to the set of permissions that Amazon S3 supports
- * in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>.</p> </li> </ul> <p>You can use either a canned
- * ACL or specify access permissions explicitly. You cannot do both.</p> </dd>
- * <dt>Server-Side- Encryption-Specific Request Headers</dt> <dd> <p>You can
- * optionally tell Amazon S3 to encrypt data at rest using server-side encryption.
- * Server-side encryption is for data encryption at rest. Amazon S3 encrypts your
- * data as it writes it to disks in its data centers and decrypts it when you
- * access it. The option you use depends on whether you want to use AWS managed
- * encryption keys or provide your own encryption key. </p> <ul> <li> <p>Use
- * encryption keys managed by Amazon S3 or customer master keys (CMKs) stored in
- * AWS Key Management Service (AWS KMS) – If you want AWS to manage the keys used
- * to encrypt data, specify the following headers in the request.</p> <ul> <li>
- * <p>x-amz-server-side-encryption</p> </li> <li>
- * <p>x-amz-server-side-encryption-aws-kms-key-id</p> </li> <li>
- * <p>x-amz-server-side-encryption-context</p> </li> </ul> <p>If you specify
- * <code>x-amz-server-side-encryption:aws:kms</code>, but don't provide
- * <code>x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS KMS to protect the data.</p> <p>All GET
- * and PUT requests for an object protected by AWS KMS fail if you don't make them
- * with SSL or by using SigV4.</p> <p>For more information about
- * server-side encryption with CMKs stored in AWS KMS (SSE-KMS), see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html">Protecting
- * Data Using Server-Side Encryption with CMKs stored in AWS KMS</a>.</p> </li>
- * <li> <p>Use customer-provided encryption keys – If you want to manage your own
- * encryption keys, provide all the following headers in the request.</p> <ul> <li>
- * <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about server-side encryption with CMKs stored in AWS KMS (SSE-KMS),
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingKMSEncryption.html">Protecting
- * Data Using Server-Side Encryption with CMKs stored in AWS KMS</a>.</p> </li>
- * </ul> </dd> <dt>Access-Control-List (ACL)-Specific Request Headers</dt> <dd>
- * <p>You also can use the following access control–related headers with this
- * operation. By default, all objects are private. Only the owner has full access
- * control. When adding a new object, you can grant permissions to individual AWS
- * accounts or to predefined groups defined by Amazon S3. These permissions are
- * then added to the access control list (ACL) on the object. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Using
- * ACLs</a>. With this operation, you can grant access permissions using one of the
- * following two methods:</p> <ul> <li> <p>Specify a canned ACL
- * (<code>x-amz-acl</code>) — Amazon S3 supports a set of predefined ACLs, known as
- * <i>canned ACLs</i>. Each canned ACL has a predefined set of grantees and
- * permissions. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly — To explicitly
- * grant access permissions to specific AWS accounts or groups, use the following
- * headers. Each header maps to specific permissions that Amazon S3 supports in an
- * ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>. In the header, you specify a list of grantees
- * who get the specific permission. To grant permissions explicitly, use:</p> <ul>
- * <li> <p>x-amz-grant-read</p> </li> <li> <p>x-amz-grant-write</p> </li> <li>
- * <p>x-amz-grant-read-acp</p> </li> <li> <p>x-amz-grant-write-acp</p> </li> <li>
- * <p>x-amz-grant-full-control</p> </li> </ul> <p>You specify each grantee as a
- * type=value pair, where the type is one of the following:</p> <ul> <li> <p>
- * <code>id</code> – if the value specified is the canonical user ID of an AWS
- * account</p> </li> <li> <p> <code>uri</code> – if you are granting permissions to
- * a predefined group</p> </li> <li> <p> <code>emailAddress</code> – if the value
- * specified is the email address of an AWS account</p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants the AWS
- * accounts identified by account IDs permissions to read object data and its
- * metadata:</p> <p> <code>x-amz-grant-read: id="11112222333", id="444455556666"
- * </code> </p> </li> </ul> </dd> </dl> <p>The following operations are related to
- * <code>CreateMultipartUpload</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateMultipartUpload">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for CreateMultipartUpload that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void CreateMultipartUploadAsync(const Model::CreateMultipartUploadRequest& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename CreateMultipartUploadRequestT = Model::CreateMultipartUploadRequest>
+ void CreateMultipartUploadAsync(const CreateMultipartUploadRequestT& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::CreateMultipartUpload, request, handler, context);
+ }
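A sketch of the Async wrapper above, assuming the conventional four-argument ResponseReceivedHandler shape (client, request, outcome, caller context) used by these handlers throughout the SDK; bucket and key are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <iostream>
#include <memory>

void StartUploadAsync(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::CreateMultipartUploadRequest request;
    request.SetBucket("my-bucket");        // placeholder
    request.SetKey("logs/2023-12-03.gz");  // placeholder

    client.CreateMultipartUploadAsync(
        request,
        [](const Aws::S3::S3Client* /*client*/,
           const Aws::S3::Model::CreateMultipartUploadRequest& /*req*/,
           const Aws::S3::Model::CreateMultipartUploadOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/) {
            if (outcome.IsSuccess()) {
                std::cout << "UploadId: " << outcome.GetResult().GetUploadId() << "\n";
            } else {
                std::cerr << outcome.GetError().GetMessage() << "\n";
            }
        });
    // Returns immediately; the handler runs on the client's executor thread
    // once the HTTP round trip completes.
}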
/**
* <p>Deletes the S3 bucket. All objects (including all object versions and delete
@@ -1898,34 +698,22 @@ namespace Aws
virtual Model::DeleteBucketOutcome DeleteBucket(const Model::DeleteBucketRequest& request) const;
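A short sketch of the synchronous DeleteBucket overload above. The bucket is assumed to already be empty, as the comment requires, and the client is assumed to be initialized elsewhere:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketRequest.h>
#include <iostream>

// Returns true if the (already emptied) bucket was deleted.
bool DeleteEmptyBucket(const Aws::S3::S3Client& client, const Aws::String& bucketName)
{
    Aws::S3::Model::DeleteBucketRequest request;
    request.SetBucket(bucketName);

    auto outcome = client.DeleteBucket(request);
    if (!outcome.IsSuccess()) {
        // A common failure is BucketNotEmpty when objects or delete markers remain.
        std::cerr << "DeleteBucket failed: " << outcome.GetError().GetMessage() << "\n";
        return false;
    }
    return true;
}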
/**
- * <p>Deletes the S3 bucket. All objects (including all object versions and delete
- * markers) in the bucket must be deleted before the bucket itself can be
- * deleted.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucket that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketOutcomeCallable DeleteBucketCallable(const Model::DeleteBucketRequest& request) const;
+ template<typename DeleteBucketRequestT = Model::DeleteBucketRequest>
+ Model::DeleteBucketOutcomeCallable DeleteBucketCallable(const DeleteBucketRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucket, request);
+ }
/**
- * <p>Deletes the S3 bucket. All objects (including all object versions and delete
- * markers) in the bucket must be deleted before the bucket itself can be
- * deleted.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucket">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucket that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketAsync(const Model::DeleteBucketRequest& request, const DeleteBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketRequestT = Model::DeleteBucketRequest>
+ void DeleteBucketAsync(const DeleteBucketRequestT& request, const DeleteBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucket, request, handler, context);
+ }
/**
* <p>Deletes an analytics configuration for the bucket (specified by the analytics
@@ -1953,58 +741,22 @@ namespace Aws
virtual Model::DeleteBucketAnalyticsConfigurationOutcome DeleteBucketAnalyticsConfiguration(const Model::DeleteBucketAnalyticsConfigurationRequest& request) const;
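A sketch of the analytics-configuration delete above; the same SetBucket/SetId shape also applies to the inventory and Intelligent-Tiering configuration deletes later in this header. The bucket name and configuration ID are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h>

void DeleteAnalyticsConfig(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteBucketAnalyticsConfigurationRequest request;
    request.SetBucket("my-bucket");         // placeholder bucket
    request.SetId("storage-class-report");  // placeholder analytics configuration ID

    // Requires the s3:PutAnalyticsConfiguration permission, as noted above.
    auto outcome = client.DeleteBucketAnalyticsConfiguration(request);
    if (!outcome.IsSuccess()) {
        // inspect outcome.GetError() for the failure reason
    }
}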
/**
- * <p>Deletes an analytics configuration for the bucket (specified by the analytics
- * configuration ID).</p> <p>To use this operation, you must have permissions to
- * perform the <code>s3:PutAnalyticsConfiguration</code> action. The bucket owner
- * has this permission by default. The bucket owner can grant this permission to
- * others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 analytics feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a>. </p> <p>The following operations are
- * related to <code>DeleteBucketAnalyticsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html">GetBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html">ListBucketAnalyticsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html">PutBucketAnalyticsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketAnalyticsConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketAnalyticsConfigurationOutcomeCallable DeleteBucketAnalyticsConfigurationCallable(const Model::DeleteBucketAnalyticsConfigurationRequest& request) const;
+ template<typename DeleteBucketAnalyticsConfigurationRequestT = Model::DeleteBucketAnalyticsConfigurationRequest>
+ Model::DeleteBucketAnalyticsConfigurationOutcomeCallable DeleteBucketAnalyticsConfigurationCallable(const DeleteBucketAnalyticsConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketAnalyticsConfiguration, request);
+ }
/**
- * <p>Deletes an analytics configuration for the bucket (specified by the analytics
- * configuration ID).</p> <p>To use this operation, you must have permissions to
- * perform the <code>s3:PutAnalyticsConfiguration</code> action. The bucket owner
- * has this permission by default. The bucket owner can grant this permission to
- * others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 analytics feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a>. </p> <p>The following operations are
- * related to <code>DeleteBucketAnalyticsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html">GetBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html">ListBucketAnalyticsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html">PutBucketAnalyticsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketAnalyticsConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketAnalyticsConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketAnalyticsConfigurationAsync(const Model::DeleteBucketAnalyticsConfigurationRequest& request, const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketAnalyticsConfigurationRequestT = Model::DeleteBucketAnalyticsConfigurationRequest>
+ void DeleteBucketAnalyticsConfigurationAsync(const DeleteBucketAnalyticsConfigurationRequestT& request, const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketAnalyticsConfiguration, request, handler, context);
+ }
/**
* <p>Deletes the <code>cors</code> configuration information set for the
@@ -2025,44 +777,22 @@ namespace Aws
virtual Model::DeleteBucketCorsOutcome DeleteBucketCors(const Model::DeleteBucketCorsRequest& request) const;
/**
- * <p>Deletes the <code>cors</code> configuration information set for the
- * bucket.</p> <p>To use this operation, you must have permission to perform the
- * <code>s3:PutBucketCORS</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. </p> <p>For information about
- * <code>cors</code>, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html">Enabling
- * Cross-Origin Resource Sharing</a> in the <i>Amazon S3 User Guide</i>.</p> <p
- * class="title"> <b>Related Resources:</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html">PutBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html">RESTOPTIONSobject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketCors that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketCorsOutcomeCallable DeleteBucketCorsCallable(const Model::DeleteBucketCorsRequest& request) const;
+ template<typename DeleteBucketCorsRequestT = Model::DeleteBucketCorsRequest>
+ Model::DeleteBucketCorsOutcomeCallable DeleteBucketCorsCallable(const DeleteBucketCorsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketCors, request);
+ }
/**
- * <p>Deletes the <code>cors</code> configuration information set for the
- * bucket.</p> <p>To use this operation, you must have permission to perform the
- * <code>s3:PutBucketCORS</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. </p> <p>For information about
- * <code>cors</code>, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html">Enabling
- * Cross-Origin Resource Sharing</a> in the <i>Amazon S3 User Guide</i>.</p> <p
- * class="title"> <b>Related Resources:</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html">PutBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html">RESTOPTIONSobject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketCors">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketCors that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketCorsAsync(const Model::DeleteBucketCorsRequest& request, const DeleteBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketCorsRequestT = Model::DeleteBucketCorsRequest>
+ void DeleteBucketCorsAsync(const DeleteBucketCorsRequestT& request, const DeleteBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketCors, request, handler, context);
+ }
/**
* <p>This implementation of the DELETE action removes default encryption from the
@@ -2088,68 +818,38 @@ namespace Aws
virtual Model::DeleteBucketEncryptionOutcome DeleteBucketEncryption(const Model::DeleteBucketEncryptionRequest& request) const;
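The same template wrappers exist for this operation, so default encryption can be removed without blocking the calling thread and the future joined later; the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketEncryptionRequest.h>

void RemoveDefaultEncryption(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteBucketEncryptionRequest request;
    request.SetBucket("my-bucket");  // placeholder

    // The Callable wrapper returns a future-like OutcomeCallable.
    auto pending = client.DeleteBucketEncryptionCallable(request);

    // ... other work can proceed here ...

    auto outcome = pending.get();    // blocks until the request completes
    if (!outcome.IsSuccess()) {
        // inspect outcome.GetError()
    }
}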
/**
- * <p>This implementation of the DELETE action removes default encryption from the
- * bucket. For information about the Amazon S3 default encryption feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
- * S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p> <p>To
- * use this operation, you must have permissions to perform the
- * <code>s3:PutEncryptionConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html">PutBucketEncryption</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html">GetBucketEncryption</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketEncryption that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketEncryptionOutcomeCallable DeleteBucketEncryptionCallable(const Model::DeleteBucketEncryptionRequest& request) const;
+ template<typename DeleteBucketEncryptionRequestT = Model::DeleteBucketEncryptionRequest>
+ Model::DeleteBucketEncryptionOutcomeCallable DeleteBucketEncryptionCallable(const DeleteBucketEncryptionRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketEncryption, request);
+ }
/**
- * <p>This implementation of the DELETE action removes default encryption from the
- * bucket. For information about the Amazon S3 default encryption feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
- * S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p> <p>To
- * use this operation, you must have permissions to perform the
- * <code>s3:PutEncryptionConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html">PutBucketEncryption</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html">GetBucketEncryption</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketEncryption">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketEncryption that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketEncryptionAsync(const Model::DeleteBucketEncryptionRequest& request, const DeleteBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketEncryptionRequestT = Model::DeleteBucketEncryptionRequest>
+ void DeleteBucketEncryptionAsync(const DeleteBucketEncryptionRequestT& request, const DeleteBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketEncryption, request, handler, context);
+ }
/**
* <p>Deletes the S3 Intelligent-Tiering configuration from the specified
* bucket.</p> <p>The S3 Intelligent-Tiering storage class is designed to optimize
* storage costs by automatically moving data to the most cost-effective storage
- * access tier, without additional operational overhead. S3 Intelligent-Tiering
- * delivers automatic cost savings by moving data between access tiers, when access
- * patterns change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for
- * objects larger than 128 KB that you plan to store for at least 30 days. If the
- * size of an object is less than 128 KB, it is not eligible for auto-tiering.
- * Smaller objects can be stored, but they are always charged at the frequent
- * access tier rates in the S3 Intelligent-Tiering storage class. </p> <p>If you
- * delete an object before the end of the 30-day minimum storage duration period,
- * you are charged for 30 days. For more information, see <a
+ * access tier, without performance impact or operational overhead. S3
+ * Intelligent-Tiering delivers automatic cost savings in three low latency and
+ * high throughput access tiers. To get the lowest storage cost on data that can be
+ * accessed in minutes to hours, you can choose to activate additional archiving
+ * capabilities.</p> <p>The S3 Intelligent-Tiering storage class is the ideal
+ * storage class for data with unknown, changing, or unpredictable access patterns,
+ * independent of object size or retention period. If the size of an object is less
+ * than 128 KB, it is not monitored and not eligible for auto-tiering. Smaller
+ * objects can be stored, but they are always charged at the Frequent Access tier
+ * rates in the S3 Intelligent-Tiering storage class.</p> <p>For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
* class for automatically optimizing frequently and infrequently accessed
* objects</a>.</p> <p>Operations related to
@@ -2167,66 +867,22 @@ namespace Aws
virtual Model::DeleteBucketIntelligentTieringConfigurationOutcome DeleteBucketIntelligentTieringConfiguration(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request) const;
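A sketch of the call above; in addition to the bucket, the request carries the Id of the S3 Intelligent-Tiering configuration to remove. Both values below are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h>

void DeleteTieringConfig(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteBucketIntelligentTieringConfigurationRequest request;
    request.SetBucket("my-bucket");          // placeholder bucket
    request.SetId("archive-after-90-days");  // placeholder configuration ID

    auto outcome = client.DeleteBucketIntelligentTieringConfiguration(request);
    if (!outcome.IsSuccess()) {
        // inspect outcome.GetError() for details
    }
}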
/**
- * <p>Deletes the S3 Intelligent-Tiering configuration from the specified
- * bucket.</p> <p>The S3 Intelligent-Tiering storage class is designed to optimize
- * storage costs by automatically moving data to the most cost-effective storage
- * access tier, without additional operational overhead. S3 Intelligent-Tiering
- * delivers automatic cost savings by moving data between access tiers, when access
- * patterns change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for
- * objects larger than 128 KB that you plan to store for at least 30 days. If the
- * size of an object is less than 128 KB, it is not eligible for auto-tiering.
- * Smaller objects can be stored, but they are always charged at the frequent
- * access tier rates in the S3 Intelligent-Tiering storage class. </p> <p>If you
- * delete an object before the end of the 30-day minimum storage duration period,
- * you are charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>DeleteBucketIntelligentTieringConfiguration</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html">GetBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html">PutBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html">ListBucketIntelligentTieringConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketIntelligentTieringConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketIntelligentTieringConfigurationOutcomeCallable DeleteBucketIntelligentTieringConfigurationCallable(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request) const;
+ template<typename DeleteBucketIntelligentTieringConfigurationRequestT = Model::DeleteBucketIntelligentTieringConfigurationRequest>
+ Model::DeleteBucketIntelligentTieringConfigurationOutcomeCallable DeleteBucketIntelligentTieringConfigurationCallable(const DeleteBucketIntelligentTieringConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketIntelligentTieringConfiguration, request);
+ }
/**
- * <p>Deletes the S3 Intelligent-Tiering configuration from the specified
- * bucket.</p> <p>The S3 Intelligent-Tiering storage class is designed to optimize
- * storage costs by automatically moving data to the most cost-effective storage
- * access tier, without additional operational overhead. S3 Intelligent-Tiering
- * delivers automatic cost savings by moving data between access tiers, when access
- * patterns change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for
- * objects larger than 128 KB that you plan to store for at least 30 days. If the
- * size of an object is less than 128 KB, it is not eligible for auto-tiering.
- * Smaller objects can be stored, but they are always charged at the frequent
- * access tier rates in the S3 Intelligent-Tiering storage class. </p> <p>If you
- * delete an object before the end of the 30-day minimum storage duration period,
- * you are charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>DeleteBucketIntelligentTieringConfiguration</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html">GetBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html">PutBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html">ListBucketIntelligentTieringConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketIntelligentTieringConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketIntelligentTieringConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketIntelligentTieringConfigurationAsync(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request, const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketIntelligentTieringConfigurationRequestT = Model::DeleteBucketIntelligentTieringConfigurationRequest>
+ void DeleteBucketIntelligentTieringConfigurationAsync(const DeleteBucketIntelligentTieringConfigurationRequestT& request, const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketIntelligentTieringConfiguration, request, handler, context);
+ }
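For context, a minimal sketch of how the new future-based Callable wrapper could be driven from application code. It is illustrative only: the client construction, bucket name, and configuration id below are assumptions, not part of this diff.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        Aws::S3::Model::DeleteBucketIntelligentTieringConfigurationRequest req;
        req.SetBucket("example-bucket");      // placeholder bucket name
        req.SetId("example-config-id");       // placeholder configuration id

        // The Callable wrapper returns a std::future-style OutcomeCallable,
        // so the request runs in parallel with other work until get().
        auto fut = client.DeleteBucketIntelligentTieringConfigurationCallable(req);
        auto outcome = fut.get();
        if (!outcome.IsSuccess()) {
            // Inspect outcome.GetError() here.
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}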
/**
* <p>Deletes an inventory configuration (identified by the inventory ID) from the
@@ -2254,58 +910,22 @@ namespace Aws
virtual Model::DeleteBucketInventoryConfigurationOutcome DeleteBucketInventoryConfiguration(const Model::DeleteBucketInventoryConfigurationRequest& request) const;
/**
- * <p>Deletes an inventory configuration (identified by the inventory ID) from the
- * bucket.</p> <p>To use this operation, you must have permissions to perform the
- * <code>s3:PutInventoryConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 inventory feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a>.</p> <p>Operations related to
- * <code>DeleteBucketInventoryConfiguration</code> include: </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html">GetBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html">PutBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html">ListBucketInventoryConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketInventoryConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketInventoryConfigurationOutcomeCallable DeleteBucketInventoryConfigurationCallable(const Model::DeleteBucketInventoryConfigurationRequest& request) const;
+ template<typename DeleteBucketInventoryConfigurationRequestT = Model::DeleteBucketInventoryConfigurationRequest>
+ Model::DeleteBucketInventoryConfigurationOutcomeCallable DeleteBucketInventoryConfigurationCallable(const DeleteBucketInventoryConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketInventoryConfiguration, request);
+ }
/**
- * <p>Deletes an inventory configuration (identified by the inventory ID) from the
- * bucket.</p> <p>To use this operation, you must have permissions to perform the
- * <code>s3:PutInventoryConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 inventory feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a>.</p> <p>Operations related to
- * <code>DeleteBucketInventoryConfiguration</code> include: </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html">GetBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html">PutBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html">ListBucketInventoryConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketInventoryConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketInventoryConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketInventoryConfigurationAsync(const Model::DeleteBucketInventoryConfigurationRequest& request, const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketInventoryConfigurationRequestT = Model::DeleteBucketInventoryConfigurationRequest>
+ void DeleteBucketInventoryConfigurationAsync(const DeleteBucketInventoryConfigurationRequestT& request, const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketInventoryConfiguration, request, handler, context);
+ }
/**
* <p>Deletes the lifecycle configuration from the specified bucket. Amazon S3
@@ -2331,54 +951,22 @@ namespace Aws
virtual Model::DeleteBucketLifecycleOutcome DeleteBucketLifecycle(const Model::DeleteBucketLifecycleRequest& request) const;
/**
- * <p>Deletes the lifecycle configuration from the specified bucket. Amazon S3
- * removes all the lifecycle configuration rules in the lifecycle subresource
- * associated with the bucket. Your objects never expire, and Amazon S3 no longer
- * automatically deletes any objects on the basis of rules contained in the deleted
- * lifecycle configuration.</p> <p>To use this operation, you must have permission
- * to perform the <code>s3:PutLifecycleConfiguration</code> action. By default, the
- * bucket owner has this permission and the bucket owner can grant this permission
- * to others.</p> <p>There is usually some time lag before lifecycle configuration
- * deletion is fully propagated to all the Amazon S3 systems.</p> <p>For more
- * information about the object expiration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions">Elements
- * to Describe Lifecycle Actions</a>.</p> <p>Related actions include:</p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html">GetBucketLifecycleConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketLifecycle that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketLifecycleOutcomeCallable DeleteBucketLifecycleCallable(const Model::DeleteBucketLifecycleRequest& request) const;
+ template<typename DeleteBucketLifecycleRequestT = Model::DeleteBucketLifecycleRequest>
+ Model::DeleteBucketLifecycleOutcomeCallable DeleteBucketLifecycleCallable(const DeleteBucketLifecycleRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketLifecycle, request);
+ }
/**
- * <p>Deletes the lifecycle configuration from the specified bucket. Amazon S3
- * removes all the lifecycle configuration rules in the lifecycle subresource
- * associated with the bucket. Your objects never expire, and Amazon S3 no longer
- * automatically deletes any objects on the basis of rules contained in the deleted
- * lifecycle configuration.</p> <p>To use this operation, you must have permission
- * to perform the <code>s3:PutLifecycleConfiguration</code> action. By default, the
- * bucket owner has this permission and the bucket owner can grant this permission
- * to others.</p> <p>There is usually some time lag before lifecycle configuration
- * deletion is fully propagated to all the Amazon S3 systems.</p> <p>For more
- * information about the object expiration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#intro-lifecycle-rules-actions">Elements
- * to Describe Lifecycle Actions</a>.</p> <p>Related actions include:</p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html">GetBucketLifecycleConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketLifecycle">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketLifecycle that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketLifecycleAsync(const Model::DeleteBucketLifecycleRequest& request, const DeleteBucketLifecycleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketLifecycleRequestT = Model::DeleteBucketLifecycleRequest>
+ void DeleteBucketLifecycleAsync(const DeleteBucketLifecycleRequestT& request, const DeleteBucketLifecycleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketLifecycle, request, handler, context);
+ }
/**
* <p>Deletes a metrics configuration for the Amazon CloudWatch request metrics
@@ -2409,64 +997,22 @@ namespace Aws
virtual Model::DeleteBucketMetricsConfigurationOutcome DeleteBucketMetricsConfiguration(const Model::DeleteBucketMetricsConfigurationRequest& request) const;
/**
- * <p>Deletes a metrics configuration for the Amazon CloudWatch request metrics
- * (specified by the metrics configuration ID) from the bucket. Note that this
- * doesn't include the daily storage metrics.</p> <p> To use this operation, you
- * must have permissions to perform the <code>s3:PutMetricsConfiguration</code>
- * action. The bucket owner has this permission by default. The bucket owner can
- * grant this permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * CloudWatch request metrics for Amazon S3, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>. </p> <p>The following operations are related
- * to <code>DeleteBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html">GetBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketMetricsConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketMetricsConfigurationOutcomeCallable DeleteBucketMetricsConfigurationCallable(const Model::DeleteBucketMetricsConfigurationRequest& request) const;
+ template<typename DeleteBucketMetricsConfigurationRequestT = Model::DeleteBucketMetricsConfigurationRequest>
+ Model::DeleteBucketMetricsConfigurationOutcomeCallable DeleteBucketMetricsConfigurationCallable(const DeleteBucketMetricsConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketMetricsConfiguration, request);
+ }
/**
- * <p>Deletes a metrics configuration for the Amazon CloudWatch request metrics
- * (specified by the metrics configuration ID) from the bucket. Note that this
- * doesn't include the daily storage metrics.</p> <p> To use this operation, you
- * must have permissions to perform the <code>s3:PutMetricsConfiguration</code>
- * action. The bucket owner has this permission by default. The bucket owner can
- * grant this permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * CloudWatch request metrics for Amazon S3, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>. </p> <p>The following operations are related
- * to <code>DeleteBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html">GetBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketMetricsConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketMetricsConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketMetricsConfigurationAsync(const Model::DeleteBucketMetricsConfigurationRequest& request, const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketMetricsConfigurationRequestT = Model::DeleteBucketMetricsConfigurationRequest>
+ void DeleteBucketMetricsConfigurationAsync(const DeleteBucketMetricsConfigurationRequestT& request, const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketMetricsConfiguration, request, handler, context);
+ }
/**
* <p>Removes <code>OwnershipControls</code> for an Amazon S3 bucket. To use this
@@ -2486,56 +1032,37 @@ namespace Aws
virtual Model::DeleteBucketOwnershipControlsOutcome DeleteBucketOwnershipControls(const Model::DeleteBucketOwnershipControlsRequest& request) const;
/**
- * <p>Removes <code>OwnershipControls</code> for an Amazon S3 bucket. To use this
- * operation, you must have the <code>s3:PutBucketOwnershipControls</code>
- * permission. For more information about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p>For information about Amazon S3 Object
- * Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
- * <code>DeleteBucketOwnershipControls</code>:</p> <ul> <li> <p>
- * <a>GetBucketOwnershipControls</a> </p> </li> <li> <p>
- * <a>PutBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketOwnershipControls that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketOwnershipControlsOutcomeCallable DeleteBucketOwnershipControlsCallable(const Model::DeleteBucketOwnershipControlsRequest& request) const;
+ template<typename DeleteBucketOwnershipControlsRequestT = Model::DeleteBucketOwnershipControlsRequest>
+ Model::DeleteBucketOwnershipControlsOutcomeCallable DeleteBucketOwnershipControlsCallable(const DeleteBucketOwnershipControlsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketOwnershipControls, request);
+ }
/**
- * <p>Removes <code>OwnershipControls</code> for an Amazon S3 bucket. To use this
- * operation, you must have the <code>s3:PutBucketOwnershipControls</code>
- * permission. For more information about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p>For information about Amazon S3 Object
- * Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
- * <code>DeleteBucketOwnershipControls</code>:</p> <ul> <li> <p>
- * <a>GetBucketOwnershipControls</a> </p> </li> <li> <p>
- * <a>PutBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketOwnershipControls">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketOwnershipControls that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketOwnershipControlsAsync(const Model::DeleteBucketOwnershipControlsRequest& request, const DeleteBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketOwnershipControlsRequestT = Model::DeleteBucketOwnershipControlsRequest>
+ void DeleteBucketOwnershipControlsAsync(const DeleteBucketOwnershipControlsRequestT& request, const DeleteBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketOwnershipControls, request, handler, context);
+ }
/**
* <p>This implementation of the DELETE action uses the policy subresource to
* delete the policy of a specified bucket. If you are using an identity other than
- * the root user of the AWS account that owns the bucket, the calling identity must
- * have the <code>DeleteBucketPolicy</code> permissions on the specified bucket and
- * belong to the bucket owner's account to use this operation. </p> <p>If you don't
- * have <code>DeleteBucketPolicy</code> permissions, Amazon S3 returns a <code>403
- * Access Denied</code> error. If you have the correct permissions, but you're not
- * using an identity that belongs to the bucket owner's account, Amazon S3 returns
- * a <code>405 Method Not Allowed</code> error. </p> <p>As a security
- * precaution, the root user of the AWS account that owns a bucket can always use
- * this operation, even if the policy explicitly denies the root user the ability
- * to perform this action.</p> <p>For more information about bucket
+ * the root user of the Amazon Web Services account that owns the bucket, the
+ * calling identity must have the <code>DeleteBucketPolicy</code> permissions on
+ * the specified bucket and belong to the bucket owner's account to use this
+ * operation. </p> <p>If you don't have <code>DeleteBucketPolicy</code>
+ * permissions, Amazon S3 returns a <code>403 Access Denied</code> error. If you
+ * have the correct permissions, but you're not using an identity that belongs to
+ * the bucket owner's account, Amazon S3 returns a <code>405 Method Not
+ * Allowed</code> error. </p> <p>As a security precaution, the root
+ * user of the Amazon Web Services account that owns a bucket can always use this
+ * operation, even if the policy explicitly denies the root user the ability to
+ * perform this action.</p> <p>For more information about bucket
* policies, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
* Bucket Policies and UserPolicies</a>. </p> <p>The following operations are
@@ -2550,60 +1077,22 @@ namespace Aws
virtual Model::DeleteBucketPolicyOutcome DeleteBucketPolicy(const Model::DeleteBucketPolicyRequest& request) const;
/**
- * <p>This implementation of the DELETE action uses the policy subresource to
- * delete the policy of a specified bucket. If you are using an identity other than
- * the root user of the AWS account that owns the bucket, the calling identity must
- * have the <code>DeleteBucketPolicy</code> permissions on the specified bucket and
- * belong to the bucket owner's account to use this operation. </p> <p>If you don't
- * have <code>DeleteBucketPolicy</code> permissions, Amazon S3 returns a <code>403
- * Access Denied</code> error. If you have the correct permissions, but you're not
- * using an identity that belongs to the bucket owner's account, Amazon S3 returns
- * a <code>405 Method Not Allowed</code> error. </p> <p>As a security
- * precaution, the root user of the AWS account that owns a bucket can always use
- * this operation, even if the policy explicitly denies the root user the ability
- * to perform this action.</p> <p>For more information about bucket
- * policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and UserPolicies</a>. </p> <p>The following operations are
- * related to <code>DeleteBucketPolicy</code> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketPolicy that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketPolicyOutcomeCallable DeleteBucketPolicyCallable(const Model::DeleteBucketPolicyRequest& request) const;
+ template<typename DeleteBucketPolicyRequestT = Model::DeleteBucketPolicyRequest>
+ Model::DeleteBucketPolicyOutcomeCallable DeleteBucketPolicyCallable(const DeleteBucketPolicyRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketPolicy, request);
+ }
/**
- * <p>This implementation of the DELETE action uses the policy subresource to
- * delete the policy of a specified bucket. If you are using an identity other than
- * the root user of the AWS account that owns the bucket, the calling identity must
- * have the <code>DeleteBucketPolicy</code> permissions on the specified bucket and
- * belong to the bucket owner's account to use this operation. </p> <p>If you don't
- * have <code>DeleteBucketPolicy</code> permissions, Amazon S3 returns a <code>403
- * Access Denied</code> error. If you have the correct permissions, but you're not
- * using an identity that belongs to the bucket owner's account, Amazon S3 returns
- * a <code>405 Method Not Allowed</code> error. </p> <p>As a security
- * precaution, the root user of the AWS account that owns a bucket can always use
- * this operation, even if the policy explicitly denies the root user the ability
- * to perform this action.</p> <p>For more information about bucket
- * policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and UserPolicies</a>. </p> <p>The following operations are
- * related to <code>DeleteBucketPolicy</code> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketPolicy">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketPolicy that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketPolicyAsync(const Model::DeleteBucketPolicyRequest& request, const DeleteBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketPolicyRequestT = Model::DeleteBucketPolicyRequest>
+ void DeleteBucketPolicyAsync(const DeleteBucketPolicyRequestT& request, const DeleteBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketPolicy, request, handler, context);
+ }
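Likewise, a small sketch of the handler-based Async wrapper. The std::promise used to wait for completion and the bucket name are assumptions made only for this example; the handler signature itself is the existing DeleteBucketPolicyResponseReceivedHandler.

#include <future>
#include <aws/core/client/AsyncCallerContext.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketPolicyRequest.h>

void DeletePolicyAsyncExample(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteBucketPolicyRequest req;
    req.SetBucket("example-bucket");  // placeholder bucket name

    std::promise<bool> done;
    client.DeleteBucketPolicyAsync(
        req,
        [&done](const Aws::S3::S3Client* /*client*/,
                const Aws::S3::Model::DeleteBucketPolicyRequest& /*request*/,
                const Aws::S3::Model::DeleteBucketPolicyOutcome& outcome,
                const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/)
        {
            // Runs on the client's executor thread once the operation
            // finishes; hand the result back to the waiting caller.
            done.set_value(outcome.IsSuccess());
        });

    bool ok = done.get_future().get();
    (void)ok;
}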
/**
* <p> Deletes the replication configuration from the bucket.</p> <p>To use this
@@ -2618,8 +1107,8 @@ namespace Aws
* while for the deletion of a replication configuration to fully propagate.</p>
* <p> For information about replication configuration, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 Developer Guide</i>. </p> <p>The following operations are
- * related to <code>DeleteBucketReplication</code>:</p> <ul> <li> <p> <a
+ * in the <i>Amazon S3 User Guide</i>.</p> <p>The following operations are related
+ * to <code>DeleteBucketReplication</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html">PutBucketReplication</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html">GetBucketReplication</a>
@@ -2630,56 +1119,22 @@ namespace Aws
virtual Model::DeleteBucketReplicationOutcome DeleteBucketReplication(const Model::DeleteBucketReplicationRequest& request) const;
/**
- * <p> Deletes the replication configuration from the bucket.</p> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:PutReplicationConfiguration</code> action. The bucket owner has these
- * permissions by default and can grant it to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>. </p> <p>It can take a
- * while for the deletion of a replication configuration to fully propagate.</p>
- * <p> For information about replication configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 Developer Guide</i>. </p> <p>The following operations are
- * related to <code>DeleteBucketReplication</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html">PutBucketReplication</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html">GetBucketReplication</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketReplication that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketReplicationOutcomeCallable DeleteBucketReplicationCallable(const Model::DeleteBucketReplicationRequest& request) const;
+ template<typename DeleteBucketReplicationRequestT = Model::DeleteBucketReplicationRequest>
+ Model::DeleteBucketReplicationOutcomeCallable DeleteBucketReplicationCallable(const DeleteBucketReplicationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketReplication, request);
+ }
/**
- * <p> Deletes the replication configuration from the bucket.</p> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:PutReplicationConfiguration</code> action. The bucket owner has these
- * permissions by default and can grant it to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>. </p> <p>It can take a
- * while for the deletion of a replication configuration to fully propagate.</p>
- * <p> For information about replication configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 Developer Guide</i>. </p> <p>The following operations are
- * related to <code>DeleteBucketReplication</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html">PutBucketReplication</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html">GetBucketReplication</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketReplication">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketReplication that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketReplicationAsync(const Model::DeleteBucketReplicationRequest& request, const DeleteBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketReplicationRequestT = Model::DeleteBucketReplicationRequest>
+ void DeleteBucketReplicationAsync(const DeleteBucketReplicationRequestT& request, const DeleteBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketReplication, request, handler, context);
+ }
/**
* <p>Deletes the tags from the bucket.</p> <p>To use this operation, you must have
@@ -2697,38 +1152,22 @@ namespace Aws
virtual Model::DeleteBucketTaggingOutcome DeleteBucketTagging(const Model::DeleteBucketTaggingRequest& request) const;
/**
- * <p>Deletes the tags from the bucket.</p> <p>To use this operation, you must have
- * permission to perform the <code>s3:PutBucketTagging</code> action. By default,
- * the bucket owner has this permission and can grant this permission to others.
- * </p> <p>The following operations are related to
- * <code>DeleteBucketTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html">GetBucketTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html">PutBucketTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketTagging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketTaggingOutcomeCallable DeleteBucketTaggingCallable(const Model::DeleteBucketTaggingRequest& request) const;
+ template<typename DeleteBucketTaggingRequestT = Model::DeleteBucketTaggingRequest>
+ Model::DeleteBucketTaggingOutcomeCallable DeleteBucketTaggingCallable(const DeleteBucketTaggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketTagging, request);
+ }
/**
- * <p>Deletes the tags from the bucket.</p> <p>To use this operation, you must have
- * permission to perform the <code>s3:PutBucketTagging</code> action. By default,
- * the bucket owner has this permission and can grant this permission to others.
- * </p> <p>The following operations are related to
- * <code>DeleteBucketTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html">GetBucketTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html">PutBucketTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketTagging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketTagging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketTaggingAsync(const Model::DeleteBucketTaggingRequest& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketTaggingRequestT = Model::DeleteBucketTaggingRequest>
+ void DeleteBucketTaggingAsync(const DeleteBucketTaggingRequestT& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketTagging, request, handler, context);
+ }
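The optional AsyncCallerContext argument is unchanged by this update and can be used to correlate completions when one handler serves several in-flight requests. A hedged sketch follows; the allocation tag, UUID value, and bucket name are invented for illustration.

#include <aws/core/client/AsyncCallerContext.h>
#include <aws/core/utils/memory/stl/AWSAllocator.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketTaggingRequest.h>

void DeleteTaggingWithContext(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteBucketTaggingRequest req;
    req.SetBucket("example-bucket");  // placeholder bucket name

    auto ctx = Aws::MakeShared<Aws::Client::AsyncCallerContext>("DeleteTaggingExample");
    ctx->SetUUID("bucket-cleanup-42");  // arbitrary correlation id

    client.DeleteBucketTaggingAsync(
        req,
        [](const Aws::S3::S3Client*,
           const Aws::S3::Model::DeleteBucketTaggingRequest&,
           const Aws::S3::Model::DeleteBucketTaggingOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context)
        {
            // context->GetUUID() returns the id set above, identifying
            // which logical task this completion belongs to.
            (void)outcome;
            (void)context;
        },
        ctx);
}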
/**
* <p>This action removes the website configuration for a bucket. Amazon S3 returns
@@ -2755,56 +1194,22 @@ namespace Aws
virtual Model::DeleteBucketWebsiteOutcome DeleteBucketWebsite(const Model::DeleteBucketWebsiteRequest& request) const;
/**
- * <p>This action removes the website configuration for a bucket. Amazon S3 returns
- * a <code>200 OK</code> response upon successfully deleting a website
- * configuration on the specified bucket. You will get a <code>200 OK</code>
- * response if the website configuration you are trying to delete does not exist on
- * the bucket. Amazon S3 returns a <code>404</code> response if the bucket
- * specified in the request does not exist.</p> <p>This DELETE action requires the
- * <code>S3:DeleteBucketWebsite</code> permission. By default, only the bucket
- * owner can delete the website configuration attached to a bucket. However, bucket
- * owners can grant other users permission to delete the website configuration by
- * writing a bucket policy granting them the <code>S3:DeleteBucketWebsite</code>
- * permission. </p> <p>For more information about hosting websites, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html">Hosting
- * Websites on Amazon S3</a>. </p> <p>The following operations are related to
- * <code>DeleteBucketWebsite</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html">GetBucketWebsite</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html">PutBucketWebsite</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteBucketWebsite that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteBucketWebsiteOutcomeCallable DeleteBucketWebsiteCallable(const Model::DeleteBucketWebsiteRequest& request) const;
+ template<typename DeleteBucketWebsiteRequestT = Model::DeleteBucketWebsiteRequest>
+ Model::DeleteBucketWebsiteOutcomeCallable DeleteBucketWebsiteCallable(const DeleteBucketWebsiteRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteBucketWebsite, request);
+ }
/**
- * <p>This action removes the website configuration for a bucket. Amazon S3 returns
- * a <code>200 OK</code> response upon successfully deleting a website
- * configuration on the specified bucket. You will get a <code>200 OK</code>
- * response if the website configuration you are trying to delete does not exist on
- * the bucket. Amazon S3 returns a <code>404</code> response if the bucket
- * specified in the request does not exist.</p> <p>This DELETE action requires the
- * <code>S3:DeleteBucketWebsite</code> permission. By default, only the bucket
- * owner can delete the website configuration attached to a bucket. However, bucket
- * owners can grant other users permission to delete the website configuration by
- * writing a bucket policy granting them the <code>S3:DeleteBucketWebsite</code>
- * permission. </p> <p>For more information about hosting websites, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html">Hosting
- * Websites on Amazon S3</a>. </p> <p>The following operations are related to
- * <code>DeleteBucketWebsite</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketWebsite.html">GetBucketWebsite</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html">PutBucketWebsite</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteBucketWebsite">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteBucketWebsite that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteBucketWebsiteAsync(const Model::DeleteBucketWebsiteRequest& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteBucketWebsiteRequestT = Model::DeleteBucketWebsiteRequest>
+ void DeleteBucketWebsiteAsync(const DeleteBucketWebsiteRequestT& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteBucketWebsite, request, handler, context);
+ }
/**
* <p>Removes the null version (if there is one) of an object and inserts a delete
@@ -2838,70 +1243,22 @@ namespace Aws
virtual Model::DeleteObjectOutcome DeleteObject(const Model::DeleteObjectRequest& request) const;
/**
- * <p>Removes the null version (if there is one) of an object and inserts a delete
- * marker, which becomes the latest version of the object. If there isn't a null
- * version, Amazon S3 does not remove any objects but will still respond that the
- * command was successful.</p> <p>To remove a specific version, you must be the
- * bucket owner and you must use the version Id subresource. Using this subresource
- * permanently deletes the version. If the object deleted is a delete marker,
- * Amazon S3 sets the response header, <code>x-amz-delete-marker</code>, to true.
- * </p> <p>If the object you want to delete is in a bucket where the bucket
- * versioning configuration is MFA Delete enabled, you must include the
- * <code>x-amz-mfa</code> request header in the DELETE <code>versionId</code>
- * request. Requests that include <code>x-amz-mfa</code> must use HTTPS. </p> <p>
- * For more information about MFA Delete, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html">Using
- * MFA Delete</a>. To see sample requests that use versioning, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete">Sample
- * Request</a>. </p> <p>You can delete objects by explicitly calling DELETE Object
- * or configure its lifecycle (<a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>)
- * to enable Amazon S3 to remove them for you. If you want to block users or
- * accounts from removing or deleting objects from your bucket, you must deny them
- * the <code>s3:DeleteObject</code>, <code>s3:DeleteObjectVersion</code>, and
- * <code>s3:PutLifeCycleConfiguration</code> actions. </p> <p>The following action
- * is related to <code>DeleteObject</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteObject that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteObjectOutcomeCallable DeleteObjectCallable(const Model::DeleteObjectRequest& request) const;
+ template<typename DeleteObjectRequestT = Model::DeleteObjectRequest>
+ Model::DeleteObjectOutcomeCallable DeleteObjectCallable(const DeleteObjectRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteObject, request);
+ }
/**
- * <p>Removes the null version (if there is one) of an object and inserts a delete
- * marker, which becomes the latest version of the object. If there isn't a null
- * version, Amazon S3 does not remove any objects but will still respond that the
- * command was successful.</p> <p>To remove a specific version, you must be the
- * bucket owner and you must use the version Id subresource. Using this subresource
- * permanently deletes the version. If the object deleted is a delete marker,
- * Amazon S3 sets the response header, <code>x-amz-delete-marker</code>, to true.
- * </p> <p>If the object you want to delete is in a bucket where the bucket
- * versioning configuration is MFA Delete enabled, you must include the
- * <code>x-amz-mfa</code> request header in the DELETE <code>versionId</code>
- * request. Requests that include <code>x-amz-mfa</code> must use HTTPS. </p> <p>
- * For more information about MFA Delete, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMFADelete.html">Using
- * MFA Delete</a>. To see sample requests that use versioning, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html#ExampleVersionObjectDelete">Sample
- * Request</a>. </p> <p>You can delete objects by explicitly calling DELETE Object
- * or configure its lifecycle (<a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>)
- * to enable Amazon S3 to remove them for you. If you want to block users or
- * accounts from removing or deleting objects from your bucket, you must deny them
- * the <code>s3:DeleteObject</code>, <code>s3:DeleteObjectVersion</code>, and
- * <code>s3:PutLifeCycleConfiguration</code> actions. </p> <p>The following action
- * is related to <code>DeleteObject</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObject">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteObject that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteObjectAsync(const Model::DeleteObjectRequest& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteObjectRequestT = Model::DeleteObjectRequest>
+ void DeleteObjectAsync(const DeleteObjectRequestT& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteObject, request, handler, context);
+ }
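Because the new template parameter defaults to the concrete Model request type, existing call sites keep compiling unchanged; the synchronous form below is shown for comparison. Bucket and key values are placeholders.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteObjectRequest.h>

bool DeleteOneObject(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteObjectRequest req;
    req.SetBucket("example-bucket");       // placeholder bucket name
    req.SetKey("tmp/object-to-remove");    // placeholder object key

    // Synchronous call; the Callable/Async wrappers above accept the
    // same request type and differ only in how the result is delivered.
    auto outcome = client.DeleteObject(req);
    return outcome.IsSuccess();
}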
/**
* <p>Removes the entire tag set from the specified object. For more information
@@ -2924,48 +1281,22 @@ namespace Aws
virtual Model::DeleteObjectTaggingOutcome DeleteObjectTagging(const Model::DeleteObjectTaggingRequest& request) const;
/**
- * <p>Removes the entire tag set from the specified object. For more information
- * about managing object tags, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">
- * Object Tagging</a>.</p> <p>To use this operation, you must have permission to
- * perform the <code>s3:DeleteObjectTagging</code> action.</p> <p>To delete tags of
- * a specific object version, add the <code>versionId</code> query parameter in the
- * request. You will need permission for the
- * <code>s3:DeleteObjectVersionTagging</code> action.</p> <p>The following
- * operations are related to <code>DeleteBucketMetricsConfiguration</code>:</p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html">PutObjectTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteObjectTagging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteObjectTaggingOutcomeCallable DeleteObjectTaggingCallable(const Model::DeleteObjectTaggingRequest& request) const;
+ template<typename DeleteObjectTaggingRequestT = Model::DeleteObjectTaggingRequest>
+ Model::DeleteObjectTaggingOutcomeCallable DeleteObjectTaggingCallable(const DeleteObjectTaggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteObjectTagging, request);
+ }
/**
- * <p>Removes the entire tag set from the specified object. For more information
- * about managing object tags, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">
- * Object Tagging</a>.</p> <p>To use this operation, you must have permission to
- * perform the <code>s3:DeleteObjectTagging</code> action.</p> <p>To delete tags of
- * a specific object version, add the <code>versionId</code> query parameter in the
- * request. You will need permission for the
- * <code>s3:DeleteObjectVersionTagging</code> action.</p> <p>The following
- * operations are related to <code>DeleteBucketMetricsConfiguration</code>:</p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html">PutObjectTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjectTagging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteObjectTagging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteObjectTaggingAsync(const Model::DeleteObjectTaggingRequest& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteObjectTaggingRequestT = Model::DeleteObjectTaggingRequest>
+ void DeleteObjectTaggingAsync(const DeleteObjectTaggingRequestT& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteObjectTagging, request, handler, context);
+ }
/**
* <p>This action enables you to delete multiple objects from a bucket using a
@@ -3010,92 +1341,22 @@ namespace Aws
virtual Model::DeleteObjectsOutcome DeleteObjects(const Model::DeleteObjectsRequest& request) const;
/**
- * <p>This action enables you to delete multiple objects from a bucket using a
- * single HTTP request. If you know the object keys that you want to delete, then
- * this action provides a suitable alternative to sending individual delete
- * requests, reducing per-request overhead.</p> <p>The request contains a list of
- * up to 1000 keys that you want to delete. In the XML, you provide the object key
- * names, and optionally, version IDs if you want to delete a specific version of
- * the object from a versioning-enabled bucket. For each key, Amazon S3 performs a
- * delete action and returns the result of that delete, success, or failure, in the
- * response. Note that if the object specified in the request is not found, Amazon
- * S3 returns the result as deleted.</p> <p> The action supports two modes for the
- * response: verbose and quiet. By default, the action uses verbose mode in which
- * the response includes the result of deletion of each key in your request. In
- * quiet mode the response includes only keys where the delete action encountered
- * an error. For a successful deletion, the action does not return any information
- * about the delete in the response body.</p> <p>When performing this action on an
- * MFA Delete enabled bucket, that attempts to delete any versioned objects, you
- * must include an MFA token. If you do not provide one, the entire request will
- * fail, even if there are non-versioned objects you are trying to delete. If you
- * provide an invalid token, whether there are versioned keys in the request or
- * not, the entire Multi-Object Delete request will fail. For information about MFA
- * Delete, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete">
- * MFA Delete</a>.</p> <p>Finally, the Content-MD5 header is required for all
- * Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
- * your request body has not been altered in transit.</p> <p>The following
- * operations are related to <code>DeleteObjects</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeleteObjects that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeleteObjectsOutcomeCallable DeleteObjectsCallable(const Model::DeleteObjectsRequest& request) const;
+ template<typename DeleteObjectsRequestT = Model::DeleteObjectsRequest>
+ Model::DeleteObjectsOutcomeCallable DeleteObjectsCallable(const DeleteObjectsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeleteObjects, request);
+ }
/**
- * <p>This action enables you to delete multiple objects from a bucket using a
- * single HTTP request. If you know the object keys that you want to delete, then
- * this action provides a suitable alternative to sending individual delete
- * requests, reducing per-request overhead.</p> <p>The request contains a list of
- * up to 1000 keys that you want to delete. In the XML, you provide the object key
- * names, and optionally, version IDs if you want to delete a specific version of
- * the object from a versioning-enabled bucket. For each key, Amazon S3 performs a
- * delete action and returns the result of that delete, success, or failure, in the
- * response. Note that if the object specified in the request is not found, Amazon
- * S3 returns the result as deleted.</p> <p> The action supports two modes for the
- * response: verbose and quiet. By default, the action uses verbose mode in which
- * the response includes the result of deletion of each key in your request. In
- * quiet mode the response includes only keys where the delete action encountered
- * an error. For a successful deletion, the action does not return any information
- * about the delete in the response body.</p> <p>When performing this action on an
- * MFA Delete enabled bucket, that attempts to delete any versioned objects, you
- * must include an MFA token. If you do not provide one, the entire request will
- * fail, even if there are non-versioned objects you are trying to delete. If you
- * provide an invalid token, whether there are versioned keys in the request or
- * not, the entire Multi-Object Delete request will fail. For information about MFA
- * Delete, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html#MultiFactorAuthenticationDelete">
- * MFA Delete</a>.</p> <p>Finally, the Content-MD5 header is required for all
- * Multi-Object Delete requests. Amazon S3 uses the header value to ensure that
- * your request body has not been altered in transit.</p> <p>The following
- * operations are related to <code>DeleteObjects</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteObjects">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeleteObjects that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeleteObjectsAsync(const Model::DeleteObjectsRequest& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeleteObjectsRequestT = Model::DeleteObjectsRequest>
+ void DeleteObjectsAsync(const DeleteObjectsRequestT& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeleteObjects, request, handler, context);
+ }
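A minimal usage sketch for these wrappers, assuming an already-configured S3Client; the bucket and key names are placeholders, and the setters are the standard DeleteObjectsRequest/Delete/ObjectIdentifier accessors from the S3 model headers:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteObjectsRequest.h>
#include <aws/s3/model/Delete.h>
#include <aws/s3/model/ObjectIdentifier.h>

void DeleteTwoObjects(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::DeleteObjectsRequest request;
    request.SetBucket("example-bucket");                                  // placeholder bucket name

    Aws::S3::Model::Delete del;
    del.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("a.txt"));  // placeholder keys
    del.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("b.txt"));
    request.SetDelete(del);

    // Callable form: the call is queued and a future to the outcome is returned,
    // so other requests can run in parallel before .get() is called.
    auto outcomeFuture = s3.DeleteObjectsCallable(request);
    auto outcome = outcomeFuture.get();
    if (!outcome.IsSuccess()) {
        // inspect outcome.GetError() here
    }
}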
/**
* <p>Removes the <code>PublicAccessBlock</code> configuration for an Amazon S3
@@ -3122,56 +1383,22 @@ namespace Aws
virtual Model::DeletePublicAccessBlockOutcome DeletePublicAccessBlock(const Model::DeletePublicAccessBlockRequest& request) const;
/**
- * <p>Removes the <code>PublicAccessBlock</code> configuration for an Amazon S3
- * bucket. To use this operation, you must have the
- * <code>s3:PutBucketPublicAccessBlock</code> permission. For more information
- * about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following
- * operations are related to <code>DeletePublicAccessBlock</code>:</p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html">PutPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html">GetBucketPolicyStatus</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for DeletePublicAccessBlock that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::DeletePublicAccessBlockOutcomeCallable DeletePublicAccessBlockCallable(const Model::DeletePublicAccessBlockRequest& request) const;
+ template<typename DeletePublicAccessBlockRequestT = Model::DeletePublicAccessBlockRequest>
+ Model::DeletePublicAccessBlockOutcomeCallable DeletePublicAccessBlockCallable(const DeletePublicAccessBlockRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::DeletePublicAccessBlock, request);
+ }
/**
- * <p>Removes the <code>PublicAccessBlock</code> configuration for an Amazon S3
- * bucket. To use this operation, you must have the
- * <code>s3:PutBucketPublicAccessBlock</code> permission. For more information
- * about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following
- * operations are related to <code>DeletePublicAccessBlock</code>:</p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html">PutPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html">GetBucketPolicyStatus</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletePublicAccessBlock">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for DeletePublicAccessBlock that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void DeletePublicAccessBlockAsync(const Model::DeletePublicAccessBlockRequest& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename DeletePublicAccessBlockRequestT = Model::DeletePublicAccessBlockRequest>
+ void DeletePublicAccessBlockAsync(const DeletePublicAccessBlockRequestT& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::DeletePublicAccessBlock, request, handler, context);
+ }
/**
* <p>This implementation of the GET action uses the <code>accelerate</code>
@@ -3204,68 +1431,22 @@ namespace Aws
virtual Model::GetBucketAccelerateConfigurationOutcome GetBucketAccelerateConfiguration(const Model::GetBucketAccelerateConfigurationRequest& request) const;
/**
- * <p>This implementation of the GET action uses the <code>accelerate</code>
- * subresource to return the Transfer Acceleration state of a bucket, which is
- * either <code>Enabled</code> or <code>Suspended</code>. Amazon S3 Transfer
- * Acceleration is a bucket-level feature that enables you to perform faster data
- * transfers to and from Amazon S3.</p> <p>To use this operation, you must have
- * permission to perform the <code>s3:GetAccelerateConfiguration</code> action. The
- * bucket owner has this permission by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p>You set the Transfer Acceleration state of an existing bucket
- * to <code>Enabled</code> or <code>Suspended</code> by using the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html">PutBucketAccelerateConfiguration</a>
- * operation. </p> <p>A GET <code>accelerate</code> request does not return a state
- * value for a bucket that has no transfer acceleration state. A bucket has no
- * Transfer Acceleration state if a state has never been set on the bucket. </p>
- * <p>For more information about transfer acceleration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html">Transfer
- * Acceleration</a> in the Amazon S3 User Guide.</p> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html">PutBucketAccelerateConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketAccelerateConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketAccelerateConfigurationOutcomeCallable GetBucketAccelerateConfigurationCallable(const Model::GetBucketAccelerateConfigurationRequest& request) const;
+ template<typename GetBucketAccelerateConfigurationRequestT = Model::GetBucketAccelerateConfigurationRequest>
+ Model::GetBucketAccelerateConfigurationOutcomeCallable GetBucketAccelerateConfigurationCallable(const GetBucketAccelerateConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketAccelerateConfiguration, request);
+ }
/**
- * <p>This implementation of the GET action uses the <code>accelerate</code>
- * subresource to return the Transfer Acceleration state of a bucket, which is
- * either <code>Enabled</code> or <code>Suspended</code>. Amazon S3 Transfer
- * Acceleration is a bucket-level feature that enables you to perform faster data
- * transfers to and from Amazon S3.</p> <p>To use this operation, you must have
- * permission to perform the <code>s3:GetAccelerateConfiguration</code> action. The
- * bucket owner has this permission by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p>You set the Transfer Acceleration state of an existing bucket
- * to <code>Enabled</code> or <code>Suspended</code> by using the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html">PutBucketAccelerateConfiguration</a>
- * operation. </p> <p>A GET <code>accelerate</code> request does not return a state
- * value for a bucket that has no transfer acceleration state. A bucket has no
- * Transfer Acceleration state if a state has never been set on the bucket. </p>
- * <p>For more information about transfer acceleration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html">Transfer
- * Acceleration</a> in the Amazon S3 User Guide.</p> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAccelerateConfiguration.html">PutBucketAccelerateConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAccelerateConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketAccelerateConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketAccelerateConfigurationAsync(const Model::GetBucketAccelerateConfigurationRequest& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketAccelerateConfigurationRequestT = Model::GetBucketAccelerateConfigurationRequest>
+ void GetBucketAccelerateConfigurationAsync(const GetBucketAccelerateConfigurationRequestT& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketAccelerateConfiguration, request, handler, context);
+ }
/**
* <p>This implementation of the <code>GET</code> action uses the <code>acl</code>
@@ -3273,8 +1454,14 @@ namespace Aws
* <code>GET</code> to return the ACL of the bucket, you must have
* <code>READ_ACP</code> access to the bucket. If <code>READ_ACP</code> permission
* is granted to the anonymous user, you can return the ACL of the bucket without
- * using an authorization header.</p> <p class="title"> <b>Related Resources</b>
- * </p> <ul> <li> <p> <a
+ * using an authorization header.</p> <p>If your bucket uses the bucket
+ * owner enforced setting for S3 Object Ownership, requests to read ACLs are still
+ * supported and return the <code>bucket-owner-full-control</code> ACL with the
+ * owner being the account that created the bucket. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">
+ * Controlling object ownership and disabling ACLs</a> in the <i>Amazon S3 User
+ * Guide</i>.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li>
+ * <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>
* </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl">AWS API
@@ -3283,38 +1470,22 @@ namespace Aws
virtual Model::GetBucketAclOutcome GetBucketAcl(const Model::GetBucketAclRequest& request) const;
/**
- * <p>This implementation of the <code>GET</code> action uses the <code>acl</code>
- * subresource to return the access control list (ACL) of a bucket. To use
- * <code>GET</code> to return the ACL of the bucket, you must have
- * <code>READ_ACP</code> access to the bucket. If <code>READ_ACP</code> permission
- * is granted to the anonymous user, you can return the ACL of the bucket without
- * using an authorization header.</p> <p class="title"> <b>Related Resources</b>
- * </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketAcl that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketAclOutcomeCallable GetBucketAclCallable(const Model::GetBucketAclRequest& request) const;
+ template<typename GetBucketAclRequestT = Model::GetBucketAclRequest>
+ Model::GetBucketAclOutcomeCallable GetBucketAclCallable(const GetBucketAclRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketAcl, request);
+ }
/**
- * <p>This implementation of the <code>GET</code> action uses the <code>acl</code>
- * subresource to return the access control list (ACL) of a bucket. To use
- * <code>GET</code> to return the ACL of the bucket, you must have
- * <code>READ_ACP</code> access to the bucket. If <code>READ_ACP</code> permission
- * is granted to the anonymous user, you can return the ACL of the bucket without
- * using an authorization header.</p> <p class="title"> <b>Related Resources</b>
- * </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAcl">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketAcl that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketAclAsync(const Model::GetBucketAclRequest& request, const GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketAclRequestT = Model::GetBucketAclRequest>
+ void GetBucketAclAsync(const GetBucketAclRequestT& request, const GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketAcl, request, handler, context);
+ }
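A corresponding sketch for the Async form, again assuming a configured client and a placeholder bucket name; the lambda mirrors the GetBucketAclResponseReceivedHandler signature and is invoked once the operation completes:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketAclRequest.h>
#include <iostream>
#include <memory>

void PrintGrantCountAsync(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetBucketAclRequest request;
    request.SetBucket("example-bucket");                                  // placeholder bucket name

    // The handler runs on the client's executor after GetBucketAcl finishes.
    s3.GetBucketAclAsync(request,
        [](const Aws::S3::S3Client* /*client*/,
           const Aws::S3::Model::GetBucketAclRequest& /*req*/,
           const Aws::S3::Model::GetBucketAclOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/)
        {
            if (outcome.IsSuccess()) {
                std::cout << "grants: " << outcome.GetResult().GetGrants().size() << std::endl;
            }
        });
}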
/**
* <p>This implementation of the GET action returns an analytics configuration
@@ -3343,66 +1514,29 @@ namespace Aws
virtual Model::GetBucketAnalyticsConfigurationOutcome GetBucketAnalyticsConfiguration(const Model::GetBucketAnalyticsConfigurationRequest& request) const;
/**
- * <p>This implementation of the GET action returns an analytics configuration
- * (identified by the analytics configuration ID) from the bucket.</p> <p>To use
- * this operation, you must have permissions to perform the
- * <code>s3:GetAnalyticsConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">
- * Permissions Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>. </p> <p>For information about Amazon S3 analytics feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html">DeleteBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html">ListBucketAnalyticsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html">PutBucketAnalyticsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketAnalyticsConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketAnalyticsConfigurationOutcomeCallable GetBucketAnalyticsConfigurationCallable(const Model::GetBucketAnalyticsConfigurationRequest& request) const;
+ template<typename GetBucketAnalyticsConfigurationRequestT = Model::GetBucketAnalyticsConfigurationRequest>
+ Model::GetBucketAnalyticsConfigurationOutcomeCallable GetBucketAnalyticsConfigurationCallable(const GetBucketAnalyticsConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketAnalyticsConfiguration, request);
+ }
/**
- * <p>This implementation of the GET action returns an analytics configuration
- * (identified by the analytics configuration ID) from the bucket.</p> <p>To use
- * this operation, you must have permissions to perform the
- * <code>s3:GetAnalyticsConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">
- * Permissions Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>. </p> <p>For information about Amazon S3 analytics feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html">DeleteBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html">ListBucketAnalyticsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html">PutBucketAnalyticsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketAnalyticsConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketAnalyticsConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketAnalyticsConfigurationAsync(const Model::GetBucketAnalyticsConfigurationRequest& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketAnalyticsConfigurationRequestT = Model::GetBucketAnalyticsConfigurationRequest>
+ void GetBucketAnalyticsConfigurationAsync(const GetBucketAnalyticsConfigurationRequestT& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketAnalyticsConfiguration, request, handler, context);
+ }
/**
- * <p>Returns the cors configuration information set for the bucket.</p> <p> To use
- * this operation, you must have permission to perform the s3:GetBucketCORS action.
- * By default, the bucket owner has this permission and can grant it to others.</p>
- * <p> For more information about cors, see <a
+ * <p>Returns the Cross-Origin Resource Sharing (CORS) configuration information
+ * set for the bucket.</p> <p> To use this operation, you must have permission to
+ * perform the <code>s3:GetBucketCORS</code> action. By default, the bucket owner
+ * has this permission and can grant it to others.</p> <p> For more information
+ * about CORS, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html"> Enabling
* Cross-Origin Resource Sharing</a>.</p> <p>The following operations are related
* to <code>GetBucketCors</code>:</p> <ul> <li> <p> <a
@@ -3416,42 +1550,22 @@ namespace Aws
virtual Model::GetBucketCorsOutcome GetBucketCors(const Model::GetBucketCorsRequest& request) const;
/**
- * <p>Returns the cors configuration information set for the bucket.</p> <p> To use
- * this operation, you must have permission to perform the s3:GetBucketCORS action.
- * By default, the bucket owner has this permission and can grant it to others.</p>
- * <p> For more information about cors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html"> Enabling
- * Cross-Origin Resource Sharing</a>.</p> <p>The following operations are related
- * to <code>GetBucketCors</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html">PutBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html">DeleteBucketCors</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketCors that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketCorsOutcomeCallable GetBucketCorsCallable(const Model::GetBucketCorsRequest& request) const;
+ template<typename GetBucketCorsRequestT = Model::GetBucketCorsRequest>
+ Model::GetBucketCorsOutcomeCallable GetBucketCorsCallable(const GetBucketCorsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketCors, request);
+ }
/**
- * <p>Returns the cors configuration information set for the bucket.</p> <p> To use
- * this operation, you must have permission to perform the s3:GetBucketCORS action.
- * By default, the bucket owner has this permission and can grant it to others.</p>
- * <p> For more information about cors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html"> Enabling
- * Cross-Origin Resource Sharing</a>.</p> <p>The following operations are related
- * to <code>GetBucketCors</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketCors.html">PutBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html">DeleteBucketCors</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketCors">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketCors that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketCorsAsync(const Model::GetBucketCorsRequest& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketCorsRequestT = Model::GetBucketCorsRequest>
+ void GetBucketCorsAsync(const GetBucketCorsRequestT& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketCors, request, handler, context);
+ }
/**
* <p>Returns the default encryption configuration for an Amazon S3 bucket. If the
@@ -3478,70 +1592,37 @@ namespace Aws
virtual Model::GetBucketEncryptionOutcome GetBucketEncryption(const Model::GetBucketEncryptionRequest& request) const;
/**
- * <p>Returns the default encryption configuration for an Amazon S3 bucket. If the
- * bucket does not have a default encryption configuration, GetBucketEncryption
- * returns <code>ServerSideEncryptionConfigurationNotFoundError</code>. </p> <p>For
- * information about the Amazon S3 default encryption feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
- * S3 Default Bucket Encryption</a>.</p> <p> To use this operation, you must have
- * permission to perform the <code>s3:GetEncryptionConfiguration</code> action. The
- * bucket owner has this permission by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following
- * operations are related to <code>GetBucketEncryption</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html">PutBucketEncryption</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html">DeleteBucketEncryption</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketEncryption that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketEncryptionOutcomeCallable GetBucketEncryptionCallable(const Model::GetBucketEncryptionRequest& request) const;
+ template<typename GetBucketEncryptionRequestT = Model::GetBucketEncryptionRequest>
+ Model::GetBucketEncryptionOutcomeCallable GetBucketEncryptionCallable(const GetBucketEncryptionRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketEncryption, request);
+ }
/**
- * <p>Returns the default encryption configuration for an Amazon S3 bucket. If the
- * bucket does not have a default encryption configuration, GetBucketEncryption
- * returns <code>ServerSideEncryptionConfigurationNotFoundError</code>. </p> <p>For
- * information about the Amazon S3 default encryption feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
- * S3 Default Bucket Encryption</a>.</p> <p> To use this operation, you must have
- * permission to perform the <code>s3:GetEncryptionConfiguration</code> action. The
- * bucket owner has this permission by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following
- * operations are related to <code>GetBucketEncryption</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketEncryption.html">PutBucketEncryption</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html">DeleteBucketEncryption</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketEncryption">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketEncryption that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketEncryptionAsync(const Model::GetBucketEncryptionRequest& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketEncryptionRequestT = Model::GetBucketEncryptionRequest>
+ void GetBucketEncryptionAsync(const GetBucketEncryptionRequestT& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketEncryption, request, handler, context);
+ }
/**
* <p>Gets the S3 Intelligent-Tiering configuration from the specified bucket.</p>
* <p>The S3 Intelligent-Tiering storage class is designed to optimize storage
* costs by automatically moving data to the most cost-effective storage access
- * tier, without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
+ * tier, without performance impact or operational overhead. S3 Intelligent-Tiering
+ * delivers automatic cost savings in three low latency and high throughput access
+ * tiers. To get the lowest storage cost on data that can be accessed in minutes to
+ * hours, you can choose to activate additional archiving capabilities.</p> <p>The
+ * S3 Intelligent-Tiering storage class is the ideal storage class for data with
+ * unknown, changing, or unpredictable access patterns, independent of object size
+ * or retention period. If the size of an object is less than 128 KB, it is not
+ * monitored and not eligible for auto-tiering. Smaller objects can be stored, but
+ * they are always charged at the Frequent Access tier rates in the S3
+ * Intelligent-Tiering storage class.</p> <p>For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
* class for automatically optimizing frequently and infrequently accessed
* objects</a>.</p> <p>Operations related to
@@ -3559,66 +1640,22 @@ namespace Aws
virtual Model::GetBucketIntelligentTieringConfigurationOutcome GetBucketIntelligentTieringConfiguration(const Model::GetBucketIntelligentTieringConfigurationRequest& request) const;
/**
- * <p>Gets the S3 Intelligent-Tiering configuration from the specified bucket.</p>
- * <p>The S3 Intelligent-Tiering storage class is designed to optimize storage
- * costs by automatically moving data to the most cost-effective storage access
- * tier, without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>GetBucketIntelligentTieringConfiguration</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html">DeleteBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html">PutBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html">ListBucketIntelligentTieringConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketIntelligentTieringConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketIntelligentTieringConfigurationOutcomeCallable GetBucketIntelligentTieringConfigurationCallable(const Model::GetBucketIntelligentTieringConfigurationRequest& request) const;
+ template<typename GetBucketIntelligentTieringConfigurationRequestT = Model::GetBucketIntelligentTieringConfigurationRequest>
+ Model::GetBucketIntelligentTieringConfigurationOutcomeCallable GetBucketIntelligentTieringConfigurationCallable(const GetBucketIntelligentTieringConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketIntelligentTieringConfiguration, request);
+ }
/**
- * <p>Gets the S3 Intelligent-Tiering configuration from the specified bucket.</p>
- * <p>The S3 Intelligent-Tiering storage class is designed to optimize storage
- * costs by automatically moving data to the most cost-effective storage access
- * tier, without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>GetBucketIntelligentTieringConfiguration</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html">DeleteBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html">PutBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html">ListBucketIntelligentTieringConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketIntelligentTieringConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketIntelligentTieringConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketIntelligentTieringConfigurationAsync(const Model::GetBucketIntelligentTieringConfigurationRequest& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketIntelligentTieringConfigurationRequestT = Model::GetBucketIntelligentTieringConfigurationRequest>
+ void GetBucketIntelligentTieringConfigurationAsync(const GetBucketIntelligentTieringConfigurationRequestT& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketIntelligentTieringConfiguration, request, handler, context);
+ }
/**
* <p>Returns an inventory configuration (identified by the inventory configuration
@@ -3646,58 +1683,22 @@ namespace Aws
virtual Model::GetBucketInventoryConfigurationOutcome GetBucketInventoryConfiguration(const Model::GetBucketInventoryConfigurationRequest& request) const;
/**
- * <p>Returns an inventory configuration (identified by the inventory configuration
- * ID) from the bucket.</p> <p>To use this operation, you must have permissions to
- * perform the <code>s3:GetInventoryConfiguration</code> action. The bucket owner
- * has this permission by default and can grant this permission to others. For more
- * information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 inventory feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a>.</p> <p>The following operations are related to
- * <code>GetBucketInventoryConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html">DeleteBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html">ListBucketInventoryConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html">PutBucketInventoryConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketInventoryConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketInventoryConfigurationOutcomeCallable GetBucketInventoryConfigurationCallable(const Model::GetBucketInventoryConfigurationRequest& request) const;
+ template<typename GetBucketInventoryConfigurationRequestT = Model::GetBucketInventoryConfigurationRequest>
+ Model::GetBucketInventoryConfigurationOutcomeCallable GetBucketInventoryConfigurationCallable(const GetBucketInventoryConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketInventoryConfiguration, request);
+ }
/**
- * <p>Returns an inventory configuration (identified by the inventory configuration
- * ID) from the bucket.</p> <p>To use this operation, you must have permissions to
- * perform the <code>s3:GetInventoryConfiguration</code> action. The bucket owner
- * has this permission by default and can grant this permission to others. For more
- * information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 inventory feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a>.</p> <p>The following operations are related to
- * <code>GetBucketInventoryConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html">DeleteBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html">ListBucketInventoryConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html">PutBucketInventoryConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketInventoryConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketInventoryConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketInventoryConfigurationAsync(const Model::GetBucketInventoryConfigurationRequest& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketInventoryConfigurationRequestT = Model::GetBucketInventoryConfigurationRequest>
+ void GetBucketInventoryConfigurationAsync(const GetBucketInventoryConfigurationRequestT& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketInventoryConfiguration, request, handler, context);
+ }
/**
* <p>Bucket lifecycle configuration now supports specifying a lifecycle
@@ -3737,91 +1738,32 @@ namespace Aws
virtual Model::GetBucketLifecycleConfigurationOutcome GetBucketLifecycleConfiguration(const Model::GetBucketLifecycleConfigurationRequest& request) const;
/**
- * <p>Bucket lifecycle configuration now supports specifying a lifecycle
- * rule using an object key name prefix, one or more object tags, or a combination
- * of both. Accordingly, this section describes the latest API. The response
- * describes the new filter element that you can use to specify a filter to select
- * a subset of objects to which the rule applies. If you are using a previous
- * version of the lifecycle configuration, it still works. For the earlier action,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html">GetBucketLifecycle</a>.</p>
- * <p>Returns the lifecycle configuration information set on the bucket.
- * For information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html">Object
- * Lifecycle Management</a>.</p> <p>To use this operation, you must have permission
- * to perform the <code>s3:GetLifecycleConfiguration</code> action. The bucket
- * owner has this permission, by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>
- * <code>GetBucketLifecycleConfiguration</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>NoSuchLifecycleConfiguration</code>
- * </p> <ul> <li> <p>Description: The lifecycle configuration does not exist.</p>
- * </li> <li> <p>HTTP Status Code: 404 Not Found</p> </li> <li> <p>SOAP Fault Code
- * Prefix: Client</p> </li> </ul> </li> </ul> <p>The following operations are
- * related to <code>GetBucketLifecycleConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html">GetBucketLifecycle</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html">DeleteBucketLifecycle</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketLifecycleConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketLifecycleConfigurationOutcomeCallable GetBucketLifecycleConfigurationCallable(const Model::GetBucketLifecycleConfigurationRequest& request) const;
+ template<typename GetBucketLifecycleConfigurationRequestT = Model::GetBucketLifecycleConfigurationRequest>
+ Model::GetBucketLifecycleConfigurationOutcomeCallable GetBucketLifecycleConfigurationCallable(const GetBucketLifecycleConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketLifecycleConfiguration, request);
+ }
/**
- * <p>Bucket lifecycle configuration now supports specifying a lifecycle
- * rule using an object key name prefix, one or more object tags, or a combination
- * of both. Accordingly, this section describes the latest API. The response
- * describes the new filter element that you can use to specify a filter to select
- * a subset of objects to which the rule applies. If you are using a previous
- * version of the lifecycle configuration, it still works. For the earlier action,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html">GetBucketLifecycle</a>.</p>
- * <p>Returns the lifecycle configuration information set on the bucket.
- * For information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html">Object
- * Lifecycle Management</a>.</p> <p>To use this operation, you must have permission
- * to perform the <code>s3:GetLifecycleConfiguration</code> action. The bucket
- * owner has this permission, by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>
- * <code>GetBucketLifecycleConfiguration</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>NoSuchLifecycleConfiguration</code>
- * </p> <ul> <li> <p>Description: The lifecycle configuration does not exist.</p>
- * </li> <li> <p>HTTP Status Code: 404 Not Found</p> </li> <li> <p>SOAP Fault Code
- * Prefix: Client</p> </li> </ul> </li> </ul> <p>The following operations are
- * related to <code>GetBucketLifecycleConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycle.html">GetBucketLifecycle</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html">DeleteBucketLifecycle</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLifecycleConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketLifecycleConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketLifecycleConfigurationAsync(const Model::GetBucketLifecycleConfigurationRequest& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketLifecycleConfigurationRequestT = Model::GetBucketLifecycleConfigurationRequest>
+ void GetBucketLifecycleConfigurationAsync(const GetBucketLifecycleConfigurationRequestT& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketLifecycleConfiguration, request, handler, context);
+ }
/**
* <p>Returns the Region the bucket resides in. You set the bucket's Region using
* the <code>LocationConstraint</code> request parameter in a
* <code>CreateBucket</code> request. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>.</p>
- * <p> To use this implementation of the operation, you must be the bucket
- * owner.</p> <p>The following operations are related to
- * <code>GetBucketLocation</code>:</p> <ul> <li> <p> <a
+ * <p>To use this implementation of the operation, you must be the bucket
+ * owner.</p> <p>To use this API against an access point, provide the alias of the
+ * access point in place of the bucket name.</p> <p>The following operations are
+ * related to <code>GetBucketLocation</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
@@ -3832,42 +1774,22 @@ namespace Aws
virtual Model::GetBucketLocationOutcome GetBucketLocation(const Model::GetBucketLocationRequest& request) const;
/**
- * <p>Returns the Region the bucket resides in. You set the bucket's Region using
- * the <code>LocationConstraint</code> request parameter in a
- * <code>CreateBucket</code> request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>.</p>
- * <p> To use this implementation of the operation, you must be the bucket
- * owner.</p> <p>The following operations are related to
- * <code>GetBucketLocation</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketLocation that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketLocationOutcomeCallable GetBucketLocationCallable(const Model::GetBucketLocationRequest& request) const;
+ template<typename GetBucketLocationRequestT = Model::GetBucketLocationRequest>
+ Model::GetBucketLocationOutcomeCallable GetBucketLocationCallable(const GetBucketLocationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketLocation, request);
+ }
/**
- * <p>Returns the Region the bucket resides in. You set the bucket's Region using
- * the <code>LocationConstraint</code> request parameter in a
- * <code>CreateBucket</code> request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>.</p>
- * <p> To use this implementation of the operation, you must be the bucket
- * owner.</p> <p>The following operations are related to
- * <code>GetBucketLocation</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLocation">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketLocation that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketLocationAsync(const Model::GetBucketLocationRequest& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketLocationRequestT = Model::GetBucketLocationRequest>
+ void GetBucketLocationAsync(const GetBucketLocationRequestT& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketLocation, request, handler, context);
+ }
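
The Callable form returns a std::future of the outcome, so several lookups can be started in parallel and joined later. A sketch for GetBucketLocation, assuming an initialized SDK and a placeholder bucket name (per the note above, an access point alias could be supplied in place of the bucket name); the enum mapper follows the SDK's usual codegen pattern:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketLocationRequest.h>
#include <aws/s3/model/GetBucketLocationResult.h>
#include <aws/s3/model/BucketLocationConstraint.h>
#include <iostream>

void PrintBucketRegion(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketLocationRequest request;
    request.SetBucket("my-bucket");   // placeholder; an access point alias also works here

    auto future = client.GetBucketLocationCallable(request);  // starts the request
    auto outcome = future.get();                              // blocks until it completes

    if (outcome.IsSuccess())
    {
        // us-east-1 is typically reported as an empty (NOT_SET) location constraint.
        auto region = outcome.GetResult().GetLocationConstraint();
        std::cout << Aws::S3::Model::BucketLocationConstraintMapper::
                         GetNameForBucketLocationConstraint(region) << "\n";
    }
    else
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
    }
}
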
/**
* <p>Returns the logging status of a bucket and the permissions users have to view
@@ -3884,36 +1806,22 @@ namespace Aws
virtual Model::GetBucketLoggingOutcome GetBucketLogging(const Model::GetBucketLoggingRequest& request) const;
/**
- * <p>Returns the logging status of a bucket and the permissions users have to view
- * and modify that status. To use GET, you must be the bucket owner.</p> <p>The
- * following operations are related to <code>GetBucketLogging</code>:</p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html">PutBucketLogging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketLogging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketLoggingOutcomeCallable GetBucketLoggingCallable(const Model::GetBucketLoggingRequest& request) const;
+ template<typename GetBucketLoggingRequestT = Model::GetBucketLoggingRequest>
+ Model::GetBucketLoggingOutcomeCallable GetBucketLoggingCallable(const GetBucketLoggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketLogging, request);
+ }
/**
- * <p>Returns the logging status of a bucket and the permissions users have to view
- * and modify that status. To use GET, you must be the bucket owner.</p> <p>The
- * following operations are related to <code>GetBucketLogging</code>:</p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLogging.html">PutBucketLogging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketLogging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketLogging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketLoggingAsync(const Model::GetBucketLoggingRequest& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketLoggingRequestT = Model::GetBucketLoggingRequest>
+ void GetBucketLoggingAsync(const GetBucketLoggingRequestT& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketLogging, request, handler, context);
+ }
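
A synchronous sketch for reading a bucket's logging target, assuming an initialized SDK; the result accessors (GetLoggingEnabled, GetTargetBucket, GetTargetPrefix) are taken from the generated model and the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketLoggingRequest.h>
#include <aws/s3/model/GetBucketLoggingResult.h>
#include <iostream>

void PrintLoggingTarget(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketLoggingRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketLogging(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
        return;
    }

    // When logging is disabled, the LoggingEnabled element is absent and these fields stay empty.
    const auto& logging = outcome.GetResult().GetLoggingEnabled();
    std::cout << "target: " << logging.GetTargetBucket()
              << " prefix: " << logging.GetTargetPrefix() << "\n";
}
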
/**
* <p>Gets a metrics configuration (specified by the metrics configuration ID) from
@@ -3944,64 +1852,22 @@ namespace Aws
virtual Model::GetBucketMetricsConfigurationOutcome GetBucketMetricsConfiguration(const Model::GetBucketMetricsConfigurationRequest& request) const;
/**
- * <p>Gets a metrics configuration (specified by the metrics configuration ID) from
- * the bucket. Note that this doesn't include the daily storage metrics.</p> <p> To
- * use this operation, you must have permissions to perform the
- * <code>s3:GetMetricsConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> For information
- * about CloudWatch request metrics for Amazon S3, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>.</p> <p>The following operations are related
- * to <code>GetBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketMetricsConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketMetricsConfigurationOutcomeCallable GetBucketMetricsConfigurationCallable(const Model::GetBucketMetricsConfigurationRequest& request) const;
+ template<typename GetBucketMetricsConfigurationRequestT = Model::GetBucketMetricsConfigurationRequest>
+ Model::GetBucketMetricsConfigurationOutcomeCallable GetBucketMetricsConfigurationCallable(const GetBucketMetricsConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketMetricsConfiguration, request);
+ }
/**
- * <p>Gets a metrics configuration (specified by the metrics configuration ID) from
- * the bucket. Note that this doesn't include the daily storage metrics.</p> <p> To
- * use this operation, you must have permissions to perform the
- * <code>s3:GetMetricsConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> For information
- * about CloudWatch request metrics for Amazon S3, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>.</p> <p>The following operations are related
- * to <code>GetBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketMetricsConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketMetricsConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketMetricsConfigurationAsync(const Model::GetBucketMetricsConfigurationRequest& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketMetricsConfigurationRequestT = Model::GetBucketMetricsConfigurationRequest>
+ void GetBucketMetricsConfigurationAsync(const GetBucketMetricsConfigurationRequestT& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketMetricsConfiguration, request, handler, context);
+ }
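
A metrics configuration is addressed by bucket plus configuration ID. A hedged synchronous sketch, assuming the SDK is initialized and that both the bucket name and the ID below are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketMetricsConfigurationRequest.h>
#include <aws/s3/model/GetBucketMetricsConfigurationResult.h>
#include <iostream>

void PrintMetricsConfigId(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketMetricsConfigurationRequest request;
    request.SetBucket("my-bucket");         // placeholder bucket name
    request.SetId("EntireBucketMetrics");   // placeholder metrics configuration ID

    auto outcome = client.GetBucketMetricsConfiguration(request);
    if (outcome.IsSuccess())
        std::cout << outcome.GetResult().GetMetricsConfiguration().GetId() << "\n";
    else
        std::cerr << outcome.GetError().GetMessage() << "\n";
}
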
/**
* <p>Returns the notification configuration of a bucket.</p> <p>If notifications
@@ -4026,61 +1892,31 @@ namespace Aws
virtual Model::GetBucketNotificationConfigurationOutcome GetBucketNotificationConfiguration(const Model::GetBucketNotificationConfigurationRequest& request) const;
/**
- * <p>Returns the notification configuration of a bucket.</p> <p>If notifications
- * are not enabled on the bucket, the action returns an empty
- * <code>NotificationConfiguration</code> element.</p> <p>By default, you must be
- * the bucket owner to read the notification configuration of a bucket. However,
- * the bucket owner can use a bucket policy to grant permission to other users to
- * read this configuration with the <code>s3:GetBucketNotification</code>
- * permission.</p> <p>For more information about setting and reading the
- * notification configuration on a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Setting
- * Up Notification of Bucket Events</a>. For more information about bucket
- * policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies</a>.</p> <p>The following action is related to
- * <code>GetBucketNotification</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html">PutBucketNotification</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketNotificationConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketNotificationConfigurationOutcomeCallable GetBucketNotificationConfigurationCallable(const Model::GetBucketNotificationConfigurationRequest& request) const;
+ template<typename GetBucketNotificationConfigurationRequestT = Model::GetBucketNotificationConfigurationRequest>
+ Model::GetBucketNotificationConfigurationOutcomeCallable GetBucketNotificationConfigurationCallable(const GetBucketNotificationConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketNotificationConfiguration, request);
+ }
/**
- * <p>Returns the notification configuration of a bucket.</p> <p>If notifications
- * are not enabled on the bucket, the action returns an empty
- * <code>NotificationConfiguration</code> element.</p> <p>By default, you must be
- * the bucket owner to read the notification configuration of a bucket. However,
- * the bucket owner can use a bucket policy to grant permission to other users to
- * read this configuration with the <code>s3:GetBucketNotification</code>
- * permission.</p> <p>For more information about setting and reading the
- * notification configuration on a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Setting
- * Up Notification of Bucket Events</a>. For more information about bucket
- * policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies</a>.</p> <p>The following action is related to
- * <code>GetBucketNotification</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketNotification.html">PutBucketNotification</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketNotificationConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketNotificationConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketNotificationConfigurationAsync(const Model::GetBucketNotificationConfigurationRequest& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketNotificationConfigurationRequestT = Model::GetBucketNotificationConfigurationRequest>
+ void GetBucketNotificationConfigurationAsync(const GetBucketNotificationConfigurationRequestT& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketNotificationConfiguration, request, handler, context);
+ }
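
Because a bucket with no notifications returns an empty NotificationConfiguration, a caller typically just checks whether any of the destination lists are populated. A sketch; the accessor names are assumed from the generated result model and the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketNotificationConfigurationRequest.h>
#include <aws/s3/model/GetBucketNotificationConfigurationResult.h>
#include <iostream>

void CheckNotifications(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketNotificationConfigurationRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketNotificationConfiguration(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
        return;
    }

    // An empty configuration simply means no SNS, SQS, or Lambda destinations are set.
    const auto& result = outcome.GetResult();
    bool empty = result.GetTopicConfigurations().empty()
              && result.GetQueueConfigurations().empty()
              && result.GetLambdaFunctionConfigurations().empty();
    std::cout << (empty ? "no notifications configured" : "notifications present") << "\n";
}
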
/**
* <p>Retrieves <code>OwnershipControls</code> for an Amazon S3 bucket. To use this
* operation, you must have the <code>s3:GetBucketOwnershipControls</code>
* permission. For more information about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. </p> <p>For information about Amazon S3 Object
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html">Specifying
+ * permissions in a policy</a>. </p> <p>For information about Amazon S3 Object
* Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">Using
* Object Ownership</a>. </p> <p>The following operations are related to
* <code>GetBucketOwnershipControls</code>:</p> <ul> <li> <p>
* <a>PutBucketOwnershipControls</a> </p> </li> <li> <p>
@@ -4091,56 +1927,36 @@ namespace Aws
virtual Model::GetBucketOwnershipControlsOutcome GetBucketOwnershipControls(const Model::GetBucketOwnershipControlsRequest& request) const;
/**
- * <p>Retrieves <code>OwnershipControls</code> for an Amazon S3 bucket. To use this
- * operation, you must have the <code>s3:GetBucketOwnershipControls</code>
- * permission. For more information about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. </p> <p>For information about Amazon S3 Object
- * Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
- * <code>GetBucketOwnershipControls</code>:</p> <ul> <li> <p>
- * <a>PutBucketOwnershipControls</a> </p> </li> <li> <p>
- * <a>DeleteBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketOwnershipControls that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketOwnershipControlsOutcomeCallable GetBucketOwnershipControlsCallable(const Model::GetBucketOwnershipControlsRequest& request) const;
+ template<typename GetBucketOwnershipControlsRequestT = Model::GetBucketOwnershipControlsRequest>
+ Model::GetBucketOwnershipControlsOutcomeCallable GetBucketOwnershipControlsCallable(const GetBucketOwnershipControlsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketOwnershipControls, request);
+ }
/**
- * <p>Retrieves <code>OwnershipControls</code> for an Amazon S3 bucket. To use this
- * operation, you must have the <code>s3:GetBucketOwnershipControls</code>
- * permission. For more information about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. </p> <p>For information about Amazon S3 Object
- * Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
- * <code>GetBucketOwnershipControls</code>:</p> <ul> <li> <p>
- * <a>PutBucketOwnershipControls</a> </p> </li> <li> <p>
- * <a>DeleteBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketOwnershipControls">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketOwnershipControls that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketOwnershipControlsAsync(const Model::GetBucketOwnershipControlsRequest& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketOwnershipControlsRequestT = Model::GetBucketOwnershipControlsRequest>
+ void GetBucketOwnershipControlsAsync(const GetBucketOwnershipControlsRequestT& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketOwnershipControls, request, handler, context);
+ }
/**
* <p>Returns the policy of a specified bucket. If you are using an identity other
- * than the root user of the AWS account that owns the bucket, the calling identity
- * must have the <code>GetBucketPolicy</code> permissions on the specified bucket
- * and belong to the bucket owner's account in order to use this operation.</p>
- * <p>If you don't have <code>GetBucketPolicy</code> permissions, Amazon S3 returns
- * a <code>403 Access Denied</code> error. If you have the correct permissions, but
- * you're not using an identity that belongs to the bucket owner's account, Amazon
- * S3 returns a <code>405 Method Not Allowed</code> error.</p> <p>As a
- * security precaution, the root user of the AWS account that owns a bucket can
- * always use this operation, even if the policy explicitly denies the root user
- * the ability to perform this action.</p> <p>For more information
- * about bucket policies, see <a
+ * than the root user of the Amazon Web Services account that owns the bucket, the
+ * calling identity must have the <code>GetBucketPolicy</code> permissions on the
+ * specified bucket and belong to the bucket owner's account in order to use this
+ * operation.</p> <p>If you don't have <code>GetBucketPolicy</code> permissions,
+ * Amazon S3 returns a <code>403 Access Denied</code> error. If you have the
+ * correct permissions, but you're not using an identity that belongs to the bucket
+ * owner's account, Amazon S3 returns a <code>405 Method Not Allowed</code>
+ * error.</p> <p>As a security precaution, the root user of the Amazon
+ * Web Services account that owns a bucket can always use this operation, even if
+ * the policy explicitly denies the root user the ability to perform this
+ * action.</p> <p>For more information about bucket policies, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
* Bucket Policies and User Policies</a>.</p> <p>The following action is related to
* <code>GetBucketPolicy</code>:</p> <ul> <li> <p> <a
@@ -4152,54 +1968,22 @@ namespace Aws
virtual Model::GetBucketPolicyOutcome GetBucketPolicy(const Model::GetBucketPolicyRequest& request) const;
/**
- * <p>Returns the policy of a specified bucket. If you are using an identity other
- * than the root user of the AWS account that owns the bucket, the calling identity
- * must have the <code>GetBucketPolicy</code> permissions on the specified bucket
- * and belong to the bucket owner's account in order to use this operation.</p>
- * <p>If you don't have <code>GetBucketPolicy</code> permissions, Amazon S3 returns
- * a <code>403 Access Denied</code> error. If you have the correct permissions, but
- * you're not using an identity that belongs to the bucket owner's account, Amazon
- * S3 returns a <code>405 Method Not Allowed</code> error.</p> <p>As a
- * security precaution, the root user of the AWS account that owns a bucket can
- * always use this operation, even if the policy explicitly denies the root user
- * the ability to perform this action.</p> <p>For more information
- * about bucket policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>The following action is related to
- * <code>GetBucketPolicy</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketPolicy that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketPolicyOutcomeCallable GetBucketPolicyCallable(const Model::GetBucketPolicyRequest& request) const;
+ template<typename GetBucketPolicyRequestT = Model::GetBucketPolicyRequest>
+ Model::GetBucketPolicyOutcomeCallable GetBucketPolicyCallable(const GetBucketPolicyRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketPolicy, request);
+ }
/**
- * <p>Returns the policy of a specified bucket. If you are using an identity other
- * than the root user of the AWS account that owns the bucket, the calling identity
- * must have the <code>GetBucketPolicy</code> permissions on the specified bucket
- * and belong to the bucket owner's account in order to use this operation.</p>
- * <p>If you don't have <code>GetBucketPolicy</code> permissions, Amazon S3 returns
- * a <code>403 Access Denied</code> error. If you have the correct permissions, but
- * you're not using an identity that belongs to the bucket owner's account, Amazon
- * S3 returns a <code>405 Method Not Allowed</code> error.</p> <p>As a
- * security precaution, the root user of the AWS account that owns a bucket can
- * always use this operation, even if the policy explicitly denies the root user
- * the ability to perform this action.</p> <p>For more information
- * about bucket policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>The following action is related to
- * <code>GetBucketPolicy</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicy">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketPolicy that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketPolicyAsync(const Model::GetBucketPolicyRequest& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketPolicyRequestT = Model::GetBucketPolicyRequest>
+ void GetBucketPolicyAsync(const GetBucketPolicyRequestT& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketPolicy, request, handler, context);
+ }
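
The 403 versus 405 distinction described above is visible on the error object of a failed outcome (the policy document itself is available from the result on success). A sketch of handling the two permission cases, assuming an initialized SDK and a placeholder bucket name:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketPolicyRequest.h>
#include <iostream>

void FetchBucketPolicy(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketPolicyRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketPolicy(request);
    if (outcome.IsSuccess())
    {
        std::cout << "policy retrieved\n";
        return;
    }

    // 403: missing GetBucketPolicy permission; 405: caller is not in the bucket owner's account.
    auto code = outcome.GetError().GetResponseCode();
    std::cerr << "HTTP " << static_cast<int>(code) << ": "
              << outcome.GetError().GetMessage() << "\n";
}
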
/**
* <p>Retrieves the policy status for an Amazon S3 bucket, indicating whether the
@@ -4226,56 +2010,22 @@ namespace Aws
virtual Model::GetBucketPolicyStatusOutcome GetBucketPolicyStatus(const Model::GetBucketPolicyStatusRequest& request) const;
/**
- * <p>Retrieves the policy status for an Amazon S3 bucket, indicating whether the
- * bucket is public. In order to use this operation, you must have the
- * <code>s3:GetBucketPolicyStatus</code> permission. For more information about
- * Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p> For more information about when Amazon S3
- * considers a bucket public, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
- * Meaning of "Public"</a>. </p> <p>The following operations are related to
- * <code>GetBucketPolicyStatus</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html">PutPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html">DeletePublicAccessBlock</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketPolicyStatus that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketPolicyStatusOutcomeCallable GetBucketPolicyStatusCallable(const Model::GetBucketPolicyStatusRequest& request) const;
+ template<typename GetBucketPolicyStatusRequestT = Model::GetBucketPolicyStatusRequest>
+ Model::GetBucketPolicyStatusOutcomeCallable GetBucketPolicyStatusCallable(const GetBucketPolicyStatusRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketPolicyStatus, request);
+ }
/**
- * <p>Retrieves the policy status for an Amazon S3 bucket, indicating whether the
- * bucket is public. In order to use this operation, you must have the
- * <code>s3:GetBucketPolicyStatus</code> permission. For more information about
- * Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p> For more information about when Amazon S3
- * considers a bucket public, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
- * Meaning of "Public"</a>. </p> <p>The following operations are related to
- * <code>GetBucketPolicyStatus</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html">PutPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html">DeletePublicAccessBlock</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketPolicyStatus">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketPolicyStatus that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketPolicyStatusAsync(const Model::GetBucketPolicyStatusRequest& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketPolicyStatusRequestT = Model::GetBucketPolicyStatusRequest>
+ void GetBucketPolicyStatusAsync(const GetBucketPolicyStatusRequestT& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketPolicyStatus, request, handler, context);
+ }
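
The policy status boils down to a single public/not-public flag. A sketch; GetPolicyStatus and GetIsPublic are assumed from the generated result model and the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketPolicyStatusRequest.h>
#include <aws/s3/model/GetBucketPolicyStatusResult.h>
#include <iostream>

void CheckIfBucketIsPublic(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketPolicyStatusRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketPolicyStatus(request);
    if (outcome.IsSuccess())
        std::cout << (outcome.GetResult().GetPolicyStatus().GetIsPublic()
                          ? "bucket is public" : "bucket is not public") << "\n";
    else
        std::cerr << outcome.GetError().GetMessage() << "\n";
}
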
/**
* <p>Returns the replication configuration of a bucket.</p> <p> It can take
@@ -4305,62 +2055,22 @@ namespace Aws
virtual Model::GetBucketReplicationOutcome GetBucketReplication(const Model::GetBucketReplicationRequest& request) const;
/**
- * <p>Returns the replication configuration of a bucket.</p> <p> It can take
- * a while to propagate the put or delete a replication configuration to all Amazon
- * S3 systems. Therefore, a get request soon after put or delete can return a wrong
- * result. </p> <p> For information about replication configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 User Guide</i>.</p> <p>This action requires permissions for
- * the <code>s3:GetReplicationConfiguration</code> action. For more information
- * about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>If you include the
- * <code>Filter</code> element in a replication configuration, you must also
- * include the <code>DeleteMarkerReplication</code> and <code>Priority</code>
- * elements. The response also returns those elements.</p> <p>For information about
- * <code>GetBucketReplication</code> errors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList">List
- * of replication-related error codes</a> </p> <p>The following operations are
- * related to <code>GetBucketReplication</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html">PutBucketReplication</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html">DeleteBucketReplication</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketReplication that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketReplicationOutcomeCallable GetBucketReplicationCallable(const Model::GetBucketReplicationRequest& request) const;
+ template<typename GetBucketReplicationRequestT = Model::GetBucketReplicationRequest>
+ Model::GetBucketReplicationOutcomeCallable GetBucketReplicationCallable(const GetBucketReplicationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketReplication, request);
+ }
/**
- * <p>Returns the replication configuration of a bucket.</p> <p> It can take
- * a while to propagate the put or delete a replication configuration to all Amazon
- * S3 systems. Therefore, a get request soon after put or delete can return a wrong
- * result. </p> <p> For information about replication configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 User Guide</i>.</p> <p>This action requires permissions for
- * the <code>s3:GetReplicationConfiguration</code> action. For more information
- * about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>If you include the
- * <code>Filter</code> element in a replication configuration, you must also
- * include the <code>DeleteMarkerReplication</code> and <code>Priority</code>
- * elements. The response also returns those elements.</p> <p>For information about
- * <code>GetBucketReplication</code> errors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList">List
- * of replication-related error codes</a> </p> <p>The following operations are
- * related to <code>GetBucketReplication</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketReplication.html">PutBucketReplication</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html">DeleteBucketReplication</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketReplication">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketReplication that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketReplicationAsync(const Model::GetBucketReplicationRequest& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketReplicationRequestT = Model::GetBucketReplicationRequest>
+ void GetBucketReplicationAsync(const GetBucketReplicationRequestT& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketReplication, request, handler, context);
+ }
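
Since a replication configuration read shortly after a put or delete can be stale, callers usually treat the result as eventually consistent. A minimal sketch that just reports how many rules are currently visible; accessor names are assumed from the generated model and the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketReplicationRequest.h>
#include <aws/s3/model/GetBucketReplicationResult.h>
#include <iostream>

void PrintReplicationRuleCount(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketReplicationRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketReplication(request);
    if (outcome.IsSuccess())
    {
        // May lag a recent PutBucketReplication or DeleteBucketReplication call.
        const auto& rules = outcome.GetResult().GetReplicationConfiguration().GetRules();
        std::cout << rules.size() << " replication rule(s)\n";
    }
    else
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
    }
}
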
/**
* <p>Returns the request payment configuration of a bucket. To use this version of
@@ -4376,43 +2086,31 @@ namespace Aws
virtual Model::GetBucketRequestPaymentOutcome GetBucketRequestPayment(const Model::GetBucketRequestPaymentRequest& request) const;
/**
- * <p>Returns the request payment configuration of a bucket. To use this version of
- * the operation, you must be the bucket owner. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html">Requester
- * Pays Buckets</a>.</p> <p>The following operations are related to
- * <code>GetBucketRequestPayment</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketRequestPayment that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketRequestPaymentOutcomeCallable GetBucketRequestPaymentCallable(const Model::GetBucketRequestPaymentRequest& request) const;
+ template<typename GetBucketRequestPaymentRequestT = Model::GetBucketRequestPaymentRequest>
+ Model::GetBucketRequestPaymentOutcomeCallable GetBucketRequestPaymentCallable(const GetBucketRequestPaymentRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketRequestPayment, request);
+ }
/**
- * <p>Returns the request payment configuration of a bucket. To use this version of
- * the operation, you must be the bucket owner. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html">Requester
- * Pays Buckets</a>.</p> <p>The following operations are related to
- * <code>GetBucketRequestPayment</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketRequestPayment">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketRequestPayment that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketRequestPaymentAsync(const Model::GetBucketRequestPaymentRequest& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketRequestPaymentRequestT = Model::GetBucketRequestPaymentRequest>
+ void GetBucketRequestPaymentAsync(const GetBucketRequestPaymentRequestT& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketRequestPayment, request, handler, context);
+ }
/**
* <p>Returns the tag set associated with the bucket.</p> <p>To use this operation,
* you must have permission to perform the <code>s3:GetBucketTagging</code> action.
* By default, the bucket owner has this permission and can grant this permission
* to others.</p> <p> <code>GetBucketTagging</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>NoSuchTagSetError</code> </p> <ul>
- * <li> <p>Description: There is no tag set associated with the bucket.</p> </li>
- * </ul> </li> </ul> <p>The following operations are related to
+ * error:</p> <ul> <li> <p>Error code: <code>NoSuchTagSet</code> </p> <ul> <li>
+ * <p>Description: There is no tag set associated with the bucket.</p> </li> </ul>
+ * </li> </ul> <p>The following operations are related to
* <code>GetBucketTagging</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html">PutBucketTagging</a>
* </p> </li> <li> <p> <a
@@ -4424,44 +2122,22 @@ namespace Aws
virtual Model::GetBucketTaggingOutcome GetBucketTagging(const Model::GetBucketTaggingRequest& request) const;
/**
- * <p>Returns the tag set associated with the bucket.</p> <p>To use this operation,
- * you must have permission to perform the <code>s3:GetBucketTagging</code> action.
- * By default, the bucket owner has this permission and can grant this permission
- * to others.</p> <p> <code>GetBucketTagging</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>NoSuchTagSetError</code> </p> <ul>
- * <li> <p>Description: There is no tag set associated with the bucket.</p> </li>
- * </ul> </li> </ul> <p>The following operations are related to
- * <code>GetBucketTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html">PutBucketTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html">DeleteBucketTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketTagging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketTaggingOutcomeCallable GetBucketTaggingCallable(const Model::GetBucketTaggingRequest& request) const;
+ template<typename GetBucketTaggingRequestT = Model::GetBucketTaggingRequest>
+ Model::GetBucketTaggingOutcomeCallable GetBucketTaggingCallable(const GetBucketTaggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketTagging, request);
+ }
/**
- * <p>Returns the tag set associated with the bucket.</p> <p>To use this operation,
- * you must have permission to perform the <code>s3:GetBucketTagging</code> action.
- * By default, the bucket owner has this permission and can grant this permission
- * to others.</p> <p> <code>GetBucketTagging</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>NoSuchTagSetError</code> </p> <ul>
- * <li> <p>Description: There is no tag set associated with the bucket.</p> </li>
- * </ul> </li> </ul> <p>The following operations are related to
- * <code>GetBucketTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketTagging.html">PutBucketTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html">DeleteBucketTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketTagging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketTagging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketTaggingAsync(const Model::GetBucketTaggingRequest& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketTaggingRequestT = Model::GetBucketTaggingRequest>
+ void GetBucketTaggingAsync(const GetBucketTaggingRequestT& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketTagging, request, handler, context);
+ }
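
A bucket with no tags surfaces the NoSuchTagSet error rather than an empty set, so that error name is worth handling explicitly. A sketch, assuming an initialized SDK and a placeholder bucket name; matching on the exception name string is one illustrative way to classify the error, not the only one:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketTaggingRequest.h>
#include <aws/s3/model/GetBucketTaggingResult.h>
#include <iostream>

void PrintBucketTags(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketTaggingRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketTagging(request);
    if (outcome.IsSuccess())
    {
        for (const auto& tag : outcome.GetResult().GetTagSet())
            std::cout << tag.GetKey() << "=" << tag.GetValue() << "\n";
    }
    else if (outcome.GetError().GetExceptionName() == "NoSuchTagSet")
    {
        std::cout << "bucket has no tags\n";   // the special error described above
    }
    else
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
    }
}
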
/**
* <p>Returns the versioning state of a bucket.</p> <p>To retrieve the versioning
@@ -4482,44 +2158,22 @@ namespace Aws
virtual Model::GetBucketVersioningOutcome GetBucketVersioning(const Model::GetBucketVersioningRequest& request) const;
/**
- * <p>Returns the versioning state of a bucket.</p> <p>To retrieve the versioning
- * state of a bucket, you must be the bucket owner.</p> <p>This implementation also
- * returns the MFA Delete status of the versioning state. If the MFA Delete status
- * is <code>enabled</code>, the bucket owner must use an authentication device to
- * change the versioning state of the bucket.</p> <p>The following operations are
- * related to <code>GetBucketVersioning</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketVersioning that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketVersioningOutcomeCallable GetBucketVersioningCallable(const Model::GetBucketVersioningRequest& request) const;
+ template<typename GetBucketVersioningRequestT = Model::GetBucketVersioningRequest>
+ Model::GetBucketVersioningOutcomeCallable GetBucketVersioningCallable(const GetBucketVersioningRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketVersioning, request);
+ }
/**
- * <p>Returns the versioning state of a bucket.</p> <p>To retrieve the versioning
- * state of a bucket, you must be the bucket owner.</p> <p>This implementation also
- * returns the MFA Delete status of the versioning state. If the MFA Delete status
- * is <code>enabled</code>, the bucket owner must use an authentication device to
- * change the versioning state of the bucket.</p> <p>The following operations are
- * related to <code>GetBucketVersioning</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketVersioning">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketVersioning that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketVersioningAsync(const Model::GetBucketVersioningRequest& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketVersioningRequestT = Model::GetBucketVersioningRequest>
+ void GetBucketVersioningAsync(const GetBucketVersioningRequestT& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketVersioning, request, handler, context);
+ }
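
Both the versioning state and the MFA Delete status come back on the same result. A sketch; the enum names and accessors below are assumed from the SDK's usual codegen pattern and the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketVersioningRequest.h>
#include <aws/s3/model/GetBucketVersioningResult.h>
#include <aws/s3/model/BucketVersioningStatus.h>
#include <aws/s3/model/MFADeleteStatus.h>
#include <iostream>

void PrintVersioningState(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketVersioningRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketVersioning(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
        return;
    }

    const auto& result = outcome.GetResult();
    bool enabled   = result.GetStatus()    == Aws::S3::Model::BucketVersioningStatus::Enabled;
    bool mfaDelete = result.GetMFADelete() == Aws::S3::Model::MFADeleteStatus::Enabled;
    std::cout << "versioning " << (enabled ? "enabled" : "not enabled")
              << ", MFA delete " << (mfaDelete ? "enabled" : "not enabled") << "\n";
}
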
/**
* <p>Returns the website configuration for a bucket. To host website on Amazon S3,
@@ -4543,50 +2197,22 @@ namespace Aws
virtual Model::GetBucketWebsiteOutcome GetBucketWebsite(const Model::GetBucketWebsiteRequest& request) const;
/**
- * <p>Returns the website configuration for a bucket. To host website on Amazon S3,
- * you can configure a bucket as website by adding a website configuration. For
- * more information about hosting websites, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html">Hosting
- * Websites on Amazon S3</a>. </p> <p>This GET action requires the
- * <code>S3:GetBucketWebsite</code> permission. By default, only the bucket owner
- * can read the bucket website configuration. However, bucket owners can allow
- * other users to read the website configuration by writing a bucket policy
- * granting them the <code>S3:GetBucketWebsite</code> permission.</p> <p>The
- * following operations are related to <code>DeleteBucketWebsite</code>:</p> <ul>
- * <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html">DeleteBucketWebsite</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html">PutBucketWebsite</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetBucketWebsite that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetBucketWebsiteOutcomeCallable GetBucketWebsiteCallable(const Model::GetBucketWebsiteRequest& request) const;
+ template<typename GetBucketWebsiteRequestT = Model::GetBucketWebsiteRequest>
+ Model::GetBucketWebsiteOutcomeCallable GetBucketWebsiteCallable(const GetBucketWebsiteRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetBucketWebsite, request);
+ }
/**
- * <p>Returns the website configuration for a bucket. To host website on Amazon S3,
- * you can configure a bucket as website by adding a website configuration. For
- * more information about hosting websites, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html">Hosting
- * Websites on Amazon S3</a>. </p> <p>This GET action requires the
- * <code>S3:GetBucketWebsite</code> permission. By default, only the bucket owner
- * can read the bucket website configuration. However, bucket owners can allow
- * other users to read the website configuration by writing a bucket policy
- * granting them the <code>S3:GetBucketWebsite</code> permission.</p> <p>The
- * following operations are related to <code>DeleteBucketWebsite</code>:</p> <ul>
- * <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketWebsite.html">DeleteBucketWebsite</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketWebsite.html">PutBucketWebsite</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetBucketWebsite">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetBucketWebsite that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetBucketWebsiteAsync(const Model::GetBucketWebsiteRequest& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetBucketWebsiteRequestT = Model::GetBucketWebsiteRequest>
+ void GetBucketWebsiteAsync(const GetBucketWebsiteRequestT& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetBucketWebsite, request, handler, context);
+ }
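
A sketch for reading the website configuration's index and error documents; the accessor names are assumed from the generated result model and the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketWebsiteRequest.h>
#include <aws/s3/model/GetBucketWebsiteResult.h>
#include <iostream>

void PrintWebsiteConfig(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketWebsiteRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket name

    auto outcome = client.GetBucketWebsite(request);
    if (outcome.IsSuccess())
    {
        const auto& result = outcome.GetResult();
        std::cout << "index: " << result.GetIndexDocument().GetSuffix()
                  << ", error: " << result.GetErrorDocument().GetKey() << "\n";
    }
    else
    {
        std::cerr << outcome.GetError().GetMessage() << "\n";
    }
}
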
/**
* <p>Retrieves objects from Amazon S3. To use <code>GET</code>, you must have
@@ -4607,11 +2233,8 @@ namespace Aws
* <code>/examplebucket/photos/2006/February/sample.jpg</code>. For more
* information about request types, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket">HTTP
- * Host Header Bucket Specification</a>.</p> <p>To distribute large files to many
- * people, you can save bandwidth costs by using BitTorrent. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html">Amazon S3
- * Torrent</a>. For more information about returning the ACL of an object, see <a
+ * Host Header Bucket Specification</a>.</p> <p>For more information about
+ * returning the ACL of an object, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>.</p>
* <p>If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
* Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3
@@ -4623,9 +2246,9 @@ namespace Aws
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html">Restoring
* Archived Objects</a>.</p> <p>Encryption request headers, like
* <code>x-amz-server-side-encryption</code>, should not be sent for GET requests
- * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS)
- * or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If
- * your object does use these types of keys, you’ll get an HTTP 400 BadRequest
+ * if your object uses server-side encryption with KMS keys (SSE-KMS) or
+ * server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If your
+ * object does use these types of keys, you’ll get an HTTP 400 BadRequest
* error.</p> <p>If you encrypt an object by using server-side encryption with
* customer-provided encryption keys (SSE-C) when you store the object in Amazon
* S3, then when you GET the object, you must use the following headers:</p> <ul>
@@ -4635,14 +2258,13 @@ namespace Aws
* information about SSE-C, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
* Encryption (Using Customer-Provided Encryption Keys)</a>.</p> <p>Assuming you
- * have permission to read object tags (permission for the
- * <code>s3:GetObjectVersionTagging</code> action), the response also returns the
+ * have the relevant permission to read object tags, the response also returns the
* <code>x-amz-tagging-count</code> header that provides the count of number of
* tags associated with the object. You can use <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
* to retrieve the tag set associated with an object.</p> <p> <b>Permissions</b>
- * </p> <p>You need the <code>s3:GetObject</code> permission for this operation.
- * For more information, see <a
+ * </p> <p>You need the relevant read object (or version) permission for this
+ * operation. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
* Permissions in a Policy</a>. If the object you request does not exist, the error
* Amazon S3 returns depends on whether you also have the
@@ -4653,20 +2275,23 @@ namespace Aws
* code 403 ("access denied") error.</p> </li> </ul> <p> <b>Versioning</b> </p>
* <p>By default, the GET action returns the current version of an object. To
* return a different version, use the <code>versionId</code> subresource.</p>
- * <p>If the current version of the object is a delete marker, Amazon S3
- * behaves as if the object was deleted and includes <code>x-amz-delete-marker:
- * true</code> in the response.</p> <p>For more information about
- * versioning, see <a
+ * <ul> <li> <p> If you supply a <code>versionId</code>, you need the
+ * <code>s3:GetObjectVersion</code> permission to access a specific version of an
+ * object. If you request a specific version, you do not need to have the
+ * <code>s3:GetObject</code> permission. </p> </li> <li> <p>If the current version
+ * of the object is a delete marker, Amazon S3 behaves as if the object was deleted
+ * and includes <code>x-amz-delete-marker: true</code> in the response.</p> </li>
+ * </ul> <p>For more information about versioning, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html">PutBucketVersioning</a>.
* </p> <p> <b>Overriding Response Header Values</b> </p> <p>There are times when
* you want to override certain response header values in a GET response. For
- * example, you might override the Content-Disposition response header value in
- * your GET request.</p> <p>You can override values for a set of response headers
- * using the following query parameters. These response header values are sent only
- * on a successful request, that is, when status code 200 OK is returned. The set
- * of headers you can override using these parameters is a subset of the headers
- * that Amazon S3 accepts when you create an object. The response headers that you
- * can override for the GET response are <code>Content-Type</code>,
+ * example, you might override the <code>Content-Disposition</code> response header
+ * value in your GET request.</p> <p>You can override values for a set of response
+ * headers using the following query parameters. These response header values are
+ * sent only on a successful request, that is, when status code 200 OK is returned.
+ * The set of headers you can override using these parameters is a subset of the
+ * headers that Amazon S3 accepts when you create an object. The response headers
+ * that you can override for the GET response are <code>Content-Type</code>,
* <code>Content-Language</code>, <code>Expires</code>, <code>Cache-Control</code>,
* <code>Content-Disposition</code>, and <code>Content-Encoding</code>. To override
* these header values in the GET response, you use the following request
@@ -4701,243 +2326,37 @@ namespace Aws
virtual Model::GetObjectOutcome GetObject(const Model::GetObjectRequest& request) const;
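As a usage illustration of the GetObject documentation above, here is a minimal sketch that overrides a couple of response headers and streams the object body to a local file. It is not part of the diff itself: the bucket and key are the placeholder values from the comment text, and it assumes the default credential chain and region.

```cpp
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <fstream>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default credential provider chain and region

        Aws::S3::Model::GetObjectRequest request;
        request.SetBucket("examplebucket");                 // placeholder bucket
        request.SetKey("photos/2006/February/sample.jpg");  // placeholder key
        // Override selected response headers; these are honored only on a
        // signed (non-anonymous) request that returns 200 OK.
        request.SetResponseContentType("image/jpeg");
        request.SetResponseCacheControl("no-cache");

        auto outcome = client.GetObject(request);
        if (outcome.IsSuccess()) {
            // GetBody() exposes the object data as an Aws::IOStream.
            std::ofstream out("sample.jpg", std::ios::binary);
            out << outcome.GetResult().GetBody().rdbuf();
        } else {
            std::cerr << "GetObject failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```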
/**
- * <p>Retrieves objects from Amazon S3. To use <code>GET</code>, you must have
- * <code>READ</code> access to the object. If you grant <code>READ</code> access to
- * the anonymous user, you can return the object without using an authorization
- * header.</p> <p>An Amazon S3 bucket has no directory hierarchy such as you would
- * find in a typical computer file system. You can, however, create a logical
- * hierarchy by using object key names that imply a folder structure. For example,
- * instead of naming an object <code>sample.jpg</code>, you can name it
- * <code>photos/2006/February/sample.jpg</code>.</p> <p>To get an object from such
- * a logical hierarchy, specify the full key name for the object in the
- * <code>GET</code> operation. For a virtual hosted-style request example, if you
- * have the object <code>photos/2006/February/sample.jpg</code>, specify the
- * resource as <code>/photos/2006/February/sample.jpg</code>. For a path-style
- * request example, if you have the object
- * <code>photos/2006/February/sample.jpg</code> in the bucket named
- * <code>examplebucket</code>, specify the resource as
- * <code>/examplebucket/photos/2006/February/sample.jpg</code>. For more
- * information about request types, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket">HTTP
- * Host Header Bucket Specification</a>.</p> <p>To distribute large files to many
- * people, you can save bandwidth costs by using BitTorrent. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html">Amazon S3
- * Torrent</a>. For more information about returning the ACL of an object, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>.</p>
- * <p>If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
- * Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3
- * Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you
- * must first restore a copy using <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html">RestoreObject</a>.
- * Otherwise, this action returns an <code>InvalidObjectStateError</code> error.
- * For information about restoring archived objects, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html">Restoring
- * Archived Objects</a>.</p> <p>Encryption request headers, like
- * <code>x-amz-server-side-encryption</code>, should not be sent for GET requests
- * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS)
- * or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If
- * your object does use these types of keys, you’ll get an HTTP 400 BadRequest
- * error.</p> <p>If you encrypt an object by using server-side encryption with
- * customer-provided encryption keys (SSE-C) when you store the object in Amazon
- * S3, then when you GET the object, you must use the following headers:</p> <ul>
- * <li> <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about SSE-C, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys)</a>.</p> <p>Assuming you
- * have permission to read object tags (permission for the
- * <code>s3:GetObjectVersionTagging</code> action), the response also returns the
- * <code>x-amz-tagging-count</code> header that provides the count of number of
- * tags associated with the object. You can use <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
- * to retrieve the tag set associated with an object.</p> <p> <b>Permissions</b>
- * </p> <p>You need the <code>s3:GetObject</code> permission for this operation.
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. If the object you request does not exist, the error
- * Amazon S3 returns depends on whether you also have the
- * <code>s3:ListBucket</code> permission.</p> <ul> <li> <p>If you have the
- * <code>s3:ListBucket</code> permission on the bucket, Amazon S3 will return an
- * HTTP status code 404 ("no such key") error.</p> </li> <li> <p>If you don’t have
- * the <code>s3:ListBucket</code> permission, Amazon S3 will return an HTTP status
- * code 403 ("access denied") error.</p> </li> </ul> <p> <b>Versioning</b> </p>
- * <p>By default, the GET action returns the current version of an object. To
- * return a different version, use the <code>versionId</code> subresource.</p>
- * <p>If the current version of the object is a delete marker, Amazon S3
- * behaves as if the object was deleted and includes <code>x-amz-delete-marker:
- * true</code> in the response.</p> <p>For more information about
- * versioning, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html">PutBucketVersioning</a>.
- * </p> <p> <b>Overriding Response Header Values</b> </p> <p>There are times when
- * you want to override certain response header values in a GET response. For
- * example, you might override the Content-Disposition response header value in
- * your GET request.</p> <p>You can override values for a set of response headers
- * using the following query parameters. These response header values are sent only
- * on a successful request, that is, when status code 200 OK is returned. The set
- * of headers you can override using these parameters is a subset of the headers
- * that Amazon S3 accepts when you create an object. The response headers that you
- * can override for the GET response are <code>Content-Type</code>,
- * <code>Content-Language</code>, <code>Expires</code>, <code>Cache-Control</code>,
- * <code>Content-Disposition</code>, and <code>Content-Encoding</code>. To override
- * these header values in the GET response, you use the following request
- * parameters.</p> <p>You must sign the request, either using an
- * Authorization header or a presigned URL, when using these parameters. They
- * cannot be used with an unsigned (anonymous) request.</p> <ul> <li> <p>
- * <code>response-content-type</code> </p> </li> <li> <p>
- * <code>response-content-language</code> </p> </li> <li> <p>
- * <code>response-expires</code> </p> </li> <li> <p>
- * <code>response-cache-control</code> </p> </li> <li> <p>
- * <code>response-content-disposition</code> </p> </li> <li> <p>
- * <code>response-content-encoding</code> </p> </li> </ul> <p> <b>Additional
- * Considerations about Request Headers</b> </p> <p>If both of the
- * <code>If-Match</code> and <code>If-Unmodified-Since</code> headers are present
- * in the request as follows: <code>If-Match</code> condition evaluates to
- * <code>true</code>, and; <code>If-Unmodified-Since</code> condition evaluates to
- * <code>false</code>; then, S3 returns 200 OK and the data requested. </p> <p>If
- * both of the <code>If-None-Match</code> and <code>If-Modified-Since</code>
- * headers are present in the request as follows:<code> If-None-Match</code>
- * condition evaluates to <code>false</code>, and; <code>If-Modified-Since</code>
- * condition evaluates to <code>true</code>; then, S3 returns 304 Not Modified
- * response code.</p> <p>For more information about conditional requests, see <a
- * href="https://tools.ietf.org/html/rfc7232">RFC 7232</a>.</p> <p>The following
- * operations are related to <code>GetObject</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObject that returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::GetObjectOutcomeCallable GetObjectCallable(const Model::GetObjectRequest& request) const;
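A short sketch of the Callable form, assuming the initialized `client` from the previous example; the wrapper returns a std::future over the outcome, so several requests can be in flight at once.

```cpp
Aws::S3::Model::GetObjectRequest req;
req.SetBucket("examplebucket");                 // placeholder
req.SetKey("photos/2006/February/sample.jpg");  // placeholder

// The Callable wrapper returns immediately with a future.
auto futureOutcome = client.GetObjectCallable(req);
// ... issue other requests here while GetObject runs ...
auto outcome = futureOutcome.get();  // blocks until the request completes
if (outcome.IsSuccess()) {
    // consume outcome.GetResult().GetBody()
}
```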
/**
- * <p>Retrieves objects from Amazon S3. To use <code>GET</code>, you must have
- * <code>READ</code> access to the object. If you grant <code>READ</code> access to
- * the anonymous user, you can return the object without using an authorization
- * header.</p> <p>An Amazon S3 bucket has no directory hierarchy such as you would
- * find in a typical computer file system. You can, however, create a logical
- * hierarchy by using object key names that imply a folder structure. For example,
- * instead of naming an object <code>sample.jpg</code>, you can name it
- * <code>photos/2006/February/sample.jpg</code>.</p> <p>To get an object from such
- * a logical hierarchy, specify the full key name for the object in the
- * <code>GET</code> operation. For a virtual hosted-style request example, if you
- * have the object <code>photos/2006/February/sample.jpg</code>, specify the
- * resource as <code>/photos/2006/February/sample.jpg</code>. For a path-style
- * request example, if you have the object
- * <code>photos/2006/February/sample.jpg</code> in the bucket named
- * <code>examplebucket</code>, specify the resource as
- * <code>/examplebucket/photos/2006/February/sample.jpg</code>. For more
- * information about request types, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket">HTTP
- * Host Header Bucket Specification</a>.</p> <p>To distribute large files to many
- * people, you can save bandwidth costs by using BitTorrent. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html">Amazon S3
- * Torrent</a>. For more information about returning the ACL of an object, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>.</p>
- * <p>If the object you are retrieving is stored in the S3 Glacier or S3 Glacier
- * Deep Archive storage class, or S3 Intelligent-Tiering Archive or S3
- * Intelligent-Tiering Deep Archive tiers, before you can retrieve the object you
- * must first restore a copy using <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_RestoreObject.html">RestoreObject</a>.
- * Otherwise, this action returns an <code>InvalidObjectStateError</code> error.
- * For information about restoring archived objects, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html">Restoring
- * Archived Objects</a>.</p> <p>Encryption request headers, like
- * <code>x-amz-server-side-encryption</code>, should not be sent for GET requests
- * if your object uses server-side encryption with CMKs stored in AWS KMS (SSE-KMS)
- * or server-side encryption with Amazon S3–managed encryption keys (SSE-S3). If
- * your object does use these types of keys, you’ll get an HTTP 400 BadRequest
- * error.</p> <p>If you encrypt an object by using server-side encryption with
- * customer-provided encryption keys (SSE-C) when you store the object in Amazon
- * S3, then when you GET the object, you must use the following headers:</p> <ul>
- * <li> <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about SSE-C, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys)</a>.</p> <p>Assuming you
- * have permission to read object tags (permission for the
- * <code>s3:GetObjectVersionTagging</code> action), the response also returns the
- * <code>x-amz-tagging-count</code> header that provides the count of number of
- * tags associated with the object. You can use <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
- * to retrieve the tag set associated with an object.</p> <p> <b>Permissions</b>
- * </p> <p>You need the <code>s3:GetObject</code> permission for this operation.
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. If the object you request does not exist, the error
- * Amazon S3 returns depends on whether you also have the
- * <code>s3:ListBucket</code> permission.</p> <ul> <li> <p>If you have the
- * <code>s3:ListBucket</code> permission on the bucket, Amazon S3 will return an
- * HTTP status code 404 ("no such key") error.</p> </li> <li> <p>If you don’t have
- * the <code>s3:ListBucket</code> permission, Amazon S3 will return an HTTP status
- * code 403 ("access denied") error.</p> </li> </ul> <p> <b>Versioning</b> </p>
- * <p>By default, the GET action returns the current version of an object. To
- * return a different version, use the <code>versionId</code> subresource.</p>
- * <p>If the current version of the object is a delete marker, Amazon S3
- * behaves as if the object was deleted and includes <code>x-amz-delete-marker:
- * true</code> in the response.</p> <p>For more information about
- * versioning, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketVersioning.html">PutBucketVersioning</a>.
- * </p> <p> <b>Overriding Response Header Values</b> </p> <p>There are times when
- * you want to override certain response header values in a GET response. For
- * example, you might override the Content-Disposition response header value in
- * your GET request.</p> <p>You can override values for a set of response headers
- * using the following query parameters. These response header values are sent only
- * on a successful request, that is, when status code 200 OK is returned. The set
- * of headers you can override using these parameters is a subset of the headers
- * that Amazon S3 accepts when you create an object. The response headers that you
- * can override for the GET response are <code>Content-Type</code>,
- * <code>Content-Language</code>, <code>Expires</code>, <code>Cache-Control</code>,
- * <code>Content-Disposition</code>, and <code>Content-Encoding</code>. To override
- * these header values in the GET response, you use the following request
- * parameters.</p> <p>You must sign the request, either using an
- * Authorization header or a presigned URL, when using these parameters. They
- * cannot be used with an unsigned (anonymous) request.</p> <ul> <li> <p>
- * <code>response-content-type</code> </p> </li> <li> <p>
- * <code>response-content-language</code> </p> </li> <li> <p>
- * <code>response-expires</code> </p> </li> <li> <p>
- * <code>response-cache-control</code> </p> </li> <li> <p>
- * <code>response-content-disposition</code> </p> </li> <li> <p>
- * <code>response-content-encoding</code> </p> </li> </ul> <p> <b>Additional
- * Considerations about Request Headers</b> </p> <p>If both of the
- * <code>If-Match</code> and <code>If-Unmodified-Since</code> headers are present
- * in the request as follows: <code>If-Match</code> condition evaluates to
- * <code>true</code>, and; <code>If-Unmodified-Since</code> condition evaluates to
- * <code>false</code>; then, S3 returns 200 OK and the data requested. </p> <p>If
- * both of the <code>If-None-Match</code> and <code>If-Modified-Since</code>
- * headers are present in the request as follows:<code> If-None-Match</code>
- * condition evaluates to <code>false</code>, and; <code>If-Modified-Since</code>
- * condition evaluates to <code>true</code>; then, S3 returns 304 Not Modified
- * response code.</p> <p>For more information about conditional requests, see <a
- * href="https://tools.ietf.org/html/rfc7232">RFC 7232</a>.</p> <p>The following
- * operations are related to <code>GetObject</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObject">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetObject that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void GetObjectAsync(const Model::GetObjectRequest& request, const GetObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
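And the Async form, again assuming an initialized `client` and the populated `GetObjectRequest req` from the previous sketch. The lambda below follows the generated `GetObjectResponseReceivedHandler` typedef as I understand it (for this streaming operation the outcome is passed by value); the handler runs on an executor thread, not the calling thread.

```cpp
client.GetObjectAsync(req,
    [](const Aws::S3::S3Client* /*client*/,
       const Aws::S3::Model::GetObjectRequest& /*request*/,
       Aws::S3::Model::GetObjectOutcome outcome,
       const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*context*/)
    {
        if (outcome.IsSuccess()) {
            // Consume outcome.GetResult().GetBody() here.
        }
    });
```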
/**
* <p>Returns the access control list (ACL) of an object. To use this operation,
- * you must have <code>READ_ACP</code> access to the object.</p> <p>This action is
- * not supported by Amazon S3 on Outposts.</p> <p> <b>Versioning</b> </p> <p>By
- * default, GET returns ACL information about the current version of an object. To
- * return ACL information about a different version, use the versionId
- * subresource.</p> <p>The following operations are related to
+ * you must have <code>s3:GetObjectAcl</code> permissions or <code>READ_ACP</code>
+ * access to the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/acl-overview.html#acl-access-policy-permission-mapping">Mapping
+ * of ACL permissions and access policy permissions</a> in the <i>Amazon S3 User
+ * Guide</i> </p> <p>This action is not supported by Amazon S3 on Outposts.</p> <p>
+ * <b>Versioning</b> </p> <p>By default, GET returns ACL information about the
+ * current version of an object. To return ACL information about a different
+ * version, use the versionId subresource.</p> <p>If your bucket uses the
+ * bucket owner enforced setting for S3 Object Ownership, requests to read ACLs are
+ * still supported and return the <code>bucket-owner-full-control</code> ACL with
+ * the owner being the account that created the bucket. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">
+ * Controlling object ownership and disabling ACLs</a> in the <i>Amazon S3 User
+ * Guide</i>.</p> <p>The following operations are related to
* <code>GetObjectAcl</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
* </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
+ * </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
@@ -4948,151 +2367,213 @@ namespace Aws
virtual Model::GetObjectAclOutcome GetObjectAcl(const Model::GetObjectAclRequest& request) const;
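A minimal sketch of reading an object ACL as documented above, assuming the same initialized `client`; bucket and key are placeholders, and the grantee display name may be empty for canonical-ID grants.

```cpp
#include <aws/s3/model/GetObjectAclRequest.h>

Aws::S3::Model::GetObjectAclRequest aclReq;
aclReq.SetBucket("examplebucket");                 // placeholder
aclReq.SetKey("photos/2006/February/sample.jpg");  // placeholder

auto aclOutcome = client.GetObjectAcl(aclReq);
if (aclOutcome.IsSuccess()) {
    for (const auto& grant : aclOutcome.GetResult().GetGrants()) {
        std::cout << grant.GetGrantee().GetDisplayName() << " : "
                  << Aws::S3::Model::PermissionMapper::GetNameForPermission(grant.GetPermission())
                  << std::endl;
    }
}
```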
/**
- * <p>Returns the access control list (ACL) of an object. To use this operation,
- * you must have <code>READ_ACP</code> access to the object.</p> <p>This action is
- * not supported by Amazon S3 on Outposts.</p> <p> <b>Versioning</b> </p> <p>By
- * default, GET returns ACL information about the current version of an object. To
- * return ACL information about a different version, use the versionId
- * subresource.</p> <p>The following operations are related to
- * <code>GetObjectAcl</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObjectAcl that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetObjectAclOutcomeCallable GetObjectAclCallable(const Model::GetObjectAclRequest& request) const;
+ template<typename GetObjectAclRequestT = Model::GetObjectAclRequest>
+ Model::GetObjectAclOutcomeCallable GetObjectAclCallable(const GetObjectAclRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectAcl, request);
+ }
/**
- * <p>Returns the access control list (ACL) of an object. To use this operation,
- * you must have <code>READ_ACP</code> access to the object.</p> <p>This action is
- * not supported by Amazon S3 on Outposts.</p> <p> <b>Versioning</b> </p> <p>By
- * default, GET returns ACL information about the current version of an object. To
- * return ACL information about a different version, use the versionId
- * subresource.</p> <p>The following operations are related to
- * <code>GetObjectAcl</code>:</p> <ul> <li> <p> <a
+ * An Async wrapper for GetObjectAcl that queues the request into a thread executor and triggers associated callback when operation has finished.
+ */
+ template<typename GetObjectAclRequestT = Model::GetObjectAclRequest>
+ void GetObjectAclAsync(const GetObjectAclRequestT& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectAcl, request, handler, context);
+ }
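From the caller's side the new template wrappers above behave like the old virtuals: the request type is deduced (or defaults to the Model type) and the call is forwarded to SubmitCallable/SubmitAsync. A tiny sketch, reusing the assumed `client` and placeholder names from earlier:

```cpp
Aws::S3::Model::GetObjectAclRequest aclReq;
aclReq.SetBucket("examplebucket");   // placeholder
aclReq.SetKey("sample.jpg");         // placeholder

// GetObjectAclRequestT is deduced as Model::GetObjectAclRequest.
auto aclFuture = client.GetObjectAclCallable(aclReq);
auto aclOutcome = aclFuture.get();
```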
+
+ /**
+ * <p>Retrieves all the metadata from an object without returning the object
+ * itself. This action is useful if you're interested only in an object's metadata.
+ * To use <code>GetObjectAttributes</code>, you must have READ access to the
+ * object.</p> <p> <code>GetObjectAttributes</code> combines the functionality of
+ * <code>GetObjectAcl</code>, <code>GetObjectLegalHold</code>,
+ * <code>GetObjectLockConfiguration</code>, <code>GetObjectRetention</code>,
+ * <code>GetObjectTagging</code>, <code>HeadObject</code>, and
+ * <code>ListParts</code>. All of the data returned with each of those individual
+ * calls can be returned with a single call to
+ * <code>GetObjectAttributes</code>.</p> <p>If you encrypt an object by using
+ * server-side encryption with customer-provided encryption keys (SSE-C) when you
+ * store the object in Amazon S3, then when you retrieve the metadata from the
+ * object, you must use the following headers:</p> <ul> <li> <p>
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> </p> </li> <li> <p>
+ * <code>x-amz-server-side-encryption-customer-key</code> </p> </li> <li> <p>
+ * <code>x-amz-server-side-encryption-customer-key-MD5</code> </p> </li> </ul>
+ * <p>For more information about SSE-C, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
+ * Encryption (Using Customer-Provided Encryption Keys)</a> in the <i>Amazon S3
+ * User Guide</i>.</p> <ul> <li> <p>Encryption request headers, such as
+ * <code>x-amz-server-side-encryption</code>, should not be sent for GET requests
+ * if your object uses server-side encryption with Amazon Web Services KMS keys
+ * stored in Amazon Web Services Key Management Service (SSE-KMS) or server-side
+ * encryption with Amazon S3 managed encryption keys (SSE-S3). If your object does
+ * use these types of keys, you'll get an HTTP <code>400 Bad Request</code>
+ * error.</p> </li> <li> <p> The last modified property in this case is the
+ * creation date of the object.</p> </li> </ul> <p>Consider the following
+ * when using request headers:</p> <ul> <li> <p> If both of the
+ * <code>If-Match</code> and <code>If-Unmodified-Since</code> headers are present
+ * in the request as follows, then Amazon S3 returns the HTTP status code <code>200
+ * OK</code> and the data requested:</p> <ul> <li> <p> <code>If-Match</code>
+ * condition evaluates to <code>true</code>.</p> </li> <li> <p>
+ * <code>If-Unmodified-Since</code> condition evaluates to <code>false</code>.</p>
+ * </li> </ul> </li> <li> <p>If both of the <code>If-None-Match</code> and
+ * <code>If-Modified-Since</code> headers are present in the request as follows,
+ * then Amazon S3 returns the HTTP status code <code>304 Not Modified</code>:</p>
+ * <ul> <li> <p> <code>If-None-Match</code> condition evaluates to
+ * <code>false</code>.</p> </li> <li> <p> <code>If-Modified-Since</code> condition
+ * evaluates to <code>true</code>.</p> </li> </ul> </li> </ul> <p>For more
+ * information about conditional requests, see <a
+ * href="https://tools.ietf.org/html/rfc7232">RFC 7232</a>.</p> <p>
+ * <b>Permissions</b> </p> <p>The permissions that you need to use this operation
+ * depend on whether the bucket is versioned. If the bucket is versioned, you need
+ * both the <code>s3:GetObjectVersion</code> and
+ * <code>s3:GetObjectVersionAttributes</code> permissions for this operation. If
+ * the bucket is not versioned, you need the <code>s3:GetObject</code> and
+ * <code>s3:GetObjectAttributes</code> permissions. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
+ * Permissions in a Policy</a> in the <i>Amazon S3 User Guide</i>. If the object
+ * that you request does not exist, the error Amazon S3 returns depends on whether
+ * you also have the <code>s3:ListBucket</code> permission.</p> <ul> <li> <p>If you
+ * have the <code>s3:ListBucket</code> permission on the bucket, Amazon S3 returns
+ * an HTTP status code <code>404 Not Found</code> ("no such key") error.</p> </li>
+ * <li> <p>If you don't have the <code>s3:ListBucket</code> permission, Amazon S3
+ * returns an HTTP status code <code>403 Forbidden</code> ("access denied")
+ * error.</p> </li> </ul> <p>The following actions are related to
+ * <code>GetObjectAttributes</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
* </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>
* </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLegalHold.html">GetObjectLegalHold</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectLockConfiguration.html">GetObjectLockConfiguration</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectRetention.html">GetObjectRetention</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_HeadObject.html">HeadObject</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
* </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAcl">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributes">AWS
+ * API Reference</a></p>
*/
- virtual void GetObjectAclAsync(const Model::GetObjectAclRequest& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ virtual Model::GetObjectAttributesOutcome GetObjectAttributes(const Model::GetObjectAttributesRequest& request) const;
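GetObjectAttributes is one of the operations newly exposed by this SDK version. A sketch of requesting a few attributes in one round trip, assuming the `client` from earlier; the attribute enum values and result getters follow the generated model as best recalled, and the bucket/key are placeholders.

```cpp
#include <aws/s3/model/GetObjectAttributesRequest.h>

Aws::S3::Model::GetObjectAttributesRequest attrReq;
attrReq.SetBucket("examplebucket");                 // placeholder
attrReq.SetKey("photos/2006/February/sample.jpg");  // placeholder
attrReq.SetObjectAttributes({ Aws::S3::Model::ObjectAttributes::ETag,
                              Aws::S3::Model::ObjectAttributes::StorageClass,
                              Aws::S3::Model::ObjectAttributes::ObjectSize });

auto attrOutcome = client.GetObjectAttributes(attrReq);
if (attrOutcome.IsSuccess()) {
    const auto& result = attrOutcome.GetResult();
    std::cout << "ETag: " << result.GetETag()
              << ", size: " << result.GetObjectSize() << std::endl;
}
```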
/**
- * <p>Gets an object's current Legal Hold status. For more information, see <a
+ * A Callable wrapper for GetObjectAttributes that returns a future to the operation so that it can be executed in parallel to other requests.
+ */
+ template<typename GetObjectAttributesRequestT = Model::GetObjectAttributesRequest>
+ Model::GetObjectAttributesOutcomeCallable GetObjectAttributesCallable(const GetObjectAttributesRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectAttributes, request);
+ }
+
+ /**
+ * An Async wrapper for GetObjectAttributes that queues the request into a thread executor and triggers associated callback when operation has finished.
+ */
+ template<typename GetObjectAttributesRequestT = Model::GetObjectAttributesRequest>
+ void GetObjectAttributesAsync(const GetObjectAttributesRequestT& request, const GetObjectAttributesResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectAttributes, request, handler, context);
+ }
+
+ /**
+ * <p>Gets an object's current legal hold status. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
+ * Objects</a>.</p> <p>This action is not supported by Amazon S3 on Outposts.</p>
+ * <p>The following action is related to <code>GetObjectLegalHold</code>:</p> <ul>
+ * <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
+ * </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold">AWS
* API Reference</a></p>
*/
virtual Model::GetObjectLegalHoldOutcome GetObjectLegalHold(const Model::GetObjectLegalHoldRequest& request) const;
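A sketch of checking an object's legal hold status, assuming the `client` from earlier; the key is a placeholder and the bucket must have Object Lock enabled.

```cpp
#include <aws/s3/model/GetObjectLegalHoldRequest.h>

Aws::S3::Model::GetObjectLegalHoldRequest lhReq;
lhReq.SetBucket("examplebucket");   // placeholder
lhReq.SetKey("reports/q1.csv");     // placeholder

auto lhOutcome = client.GetObjectLegalHold(lhReq);
if (lhOutcome.IsSuccess()) {
    const bool held = lhOutcome.GetResult().GetLegalHold().GetStatus()
                      == Aws::S3::Model::ObjectLockLegalHoldStatus::ON;
    std::cout << "legal hold active: " << std::boolalpha << held << std::endl;
}
```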
/**
- * <p>Gets an object's current Legal Hold status. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObjectLegalHold that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetObjectLegalHoldOutcomeCallable GetObjectLegalHoldCallable(const Model::GetObjectLegalHoldRequest& request) const;
+ template<typename GetObjectLegalHoldRequestT = Model::GetObjectLegalHoldRequest>
+ Model::GetObjectLegalHoldOutcomeCallable GetObjectLegalHoldCallable(const GetObjectLegalHoldRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectLegalHold, request);
+ }
/**
- * <p>Gets an object's current Legal Hold status. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLegalHold">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetObjectLegalHold that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetObjectLegalHoldAsync(const Model::GetObjectLegalHoldRequest& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetObjectLegalHoldRequestT = Model::GetObjectLegalHoldRequest>
+ void GetObjectLegalHoldAsync(const GetObjectLegalHoldRequestT& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectLegalHold, request, handler, context);
+ }
/**
* <p>Gets the Object Lock configuration for a bucket. The rule specified in the
* Object Lock configuration will be applied by default to every new object placed
* in the specified bucket. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p><p><h3>See Also:</h3> <a
+ * Objects</a>.</p> <p>The following action is related to
+ * <code>GetObjectLockConfiguration</code>:</p> <ul> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
+ * </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration">AWS
* API Reference</a></p>
*/
virtual Model::GetObjectLockConfigurationOutcome GetObjectLockConfiguration(const Model::GetObjectLockConfigurationRequest& request) const;
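A sketch of reading a bucket's Object Lock configuration, with the same assumed `client` and placeholder bucket name:

```cpp
#include <aws/s3/model/GetObjectLockConfigurationRequest.h>

Aws::S3::Model::GetObjectLockConfigurationRequest lockReq;
lockReq.SetBucket("examplebucket");   // placeholder

auto lockOutcome = client.GetObjectLockConfiguration(lockReq);
if (lockOutcome.IsSuccess()) {
    const auto& cfg = lockOutcome.GetResult().GetObjectLockConfiguration();
    const bool enabled = cfg.GetObjectLockEnabled()
                         == Aws::S3::Model::ObjectLockEnabled::Enabled;
    std::cout << "object lock enabled: " << std::boolalpha << enabled << std::endl;
    // cfg.GetRule() carries the default retention rule, if one is configured.
}
```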
/**
- * <p>Gets the Object Lock configuration for a bucket. The rule specified in the
- * Object Lock configuration will be applied by default to every new object placed
- * in the specified bucket. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObjectLockConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetObjectLockConfigurationOutcomeCallable GetObjectLockConfigurationCallable(const Model::GetObjectLockConfigurationRequest& request) const;
+ template<typename GetObjectLockConfigurationRequestT = Model::GetObjectLockConfigurationRequest>
+ Model::GetObjectLockConfigurationOutcomeCallable GetObjectLockConfigurationCallable(const GetObjectLockConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectLockConfiguration, request);
+ }
/**
- * <p>Gets the Object Lock configuration for a bucket. The rule specified in the
- * Object Lock configuration will be applied by default to every new object placed
- * in the specified bucket. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectLockConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetObjectLockConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetObjectLockConfigurationAsync(const Model::GetObjectLockConfigurationRequest& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetObjectLockConfigurationRequestT = Model::GetObjectLockConfigurationRequest>
+ void GetObjectLockConfigurationAsync(const GetObjectLockConfigurationRequestT& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectLockConfiguration, request, handler, context);
+ }
/**
* <p>Retrieves an object's retention settings. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
+ * Objects</a>.</p> <p>This action is not supported by Amazon S3 on Outposts.</p>
+ * <p>The following action is related to <code>GetObjectRetention</code>:</p> <ul>
+ * <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
+ * </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention">AWS
* API Reference</a></p>
*/
virtual Model::GetObjectRetentionOutcome GetObjectRetention(const Model::GetObjectRetentionRequest& request) const;
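A sketch of reading an object's retention settings, under the same assumptions as the sketches above:

```cpp
#include <aws/core/utils/DateTime.h>
#include <aws/s3/model/GetObjectRetentionRequest.h>

Aws::S3::Model::GetObjectRetentionRequest retReq;
retReq.SetBucket("examplebucket");   // placeholder
retReq.SetKey("reports/q1.csv");     // placeholder

auto retOutcome = client.GetObjectRetention(retReq);
if (retOutcome.IsSuccess()) {
    const auto& retention = retOutcome.GetResult().GetRetention();
    // Mode is GOVERNANCE or COMPLIANCE; RetainUntilDate is an Aws::Utils::DateTime.
    std::cout << "retain until: "
              << retention.GetRetainUntilDate().ToGmtString(Aws::Utils::DateFormat::ISO_8601)
              << std::endl;
}
```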
/**
- * <p>Retrieves an object's retention settings. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObjectRetention that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetObjectRetentionOutcomeCallable GetObjectRetentionCallable(const Model::GetObjectRetentionRequest& request) const;
+ template<typename GetObjectRetentionRequestT = Model::GetObjectRetentionRequest>
+ Model::GetObjectRetentionOutcomeCallable GetObjectRetentionCallable(const GetObjectRetentionRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectRetention, request);
+ }
/**
- * <p>Retrieves an object's retention settings. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectRetention">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetObjectRetention that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetObjectRetentionAsync(const Model::GetObjectRetentionRequest& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetObjectRetentionRequestT = Model::GetObjectRetentionRequest>
+ void GetObjectRetentionAsync(const GetObjectRetentionRequestT& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectRetention, request, handler, context);
+ }
/**
* <p>Returns the tag-set of an object. You send the GET request against the
@@ -5106,11 +2587,13 @@ namespace Aws
* owner has this permission and can grant this permission to others.</p> <p> For
* information about the Amazon S3 object tagging feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</p> <p>The following action is related to
+ * Tagging</a>.</p> <p>The following actions are related to
* <code>GetObjectTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html">PutObjectTagging</a>
- * </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html">DeleteObjectTagging</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html">PutObjectTagging</a>
* </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging">AWS
* API Reference</a></p>
@@ -5118,54 +2601,22 @@ namespace Aws
virtual Model::GetObjectTaggingOutcome GetObjectTagging(const Model::GetObjectTaggingRequest& request) const;
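A sketch of listing an object's tag set, assuming the same `client`; uncomment the versionId line to read the tags of a non-current version (which requires the s3:GetObjectVersionTagging permission).

```cpp
#include <aws/s3/model/GetObjectTaggingRequest.h>

Aws::S3::Model::GetObjectTaggingRequest tagReq;
tagReq.SetBucket("examplebucket");                 // placeholder
tagReq.SetKey("photos/2006/February/sample.jpg");  // placeholder
// tagReq.SetVersionId("...");                     // tags of a specific version

auto tagOutcome = client.GetObjectTagging(tagReq);
if (tagOutcome.IsSuccess()) {
    for (const auto& tag : tagOutcome.GetResult().GetTagSet()) {
        std::cout << tag.GetKey() << "=" << tag.GetValue() << std::endl;
    }
}
```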
/**
- * <p>Returns the tag-set of an object. You send the GET request against the
- * tagging subresource associated with the object.</p> <p>To use this operation,
- * you must have permission to perform the <code>s3:GetObjectTagging</code> action.
- * By default, the GET action returns information about current version of an
- * object. For a versioned bucket, you can have multiple versions of an object in
- * your bucket. To retrieve tags of any other version, use the versionId query
- * parameter. You also need permission for the
- * <code>s3:GetObjectVersionTagging</code> action.</p> <p> By default, the bucket
- * owner has this permission and can grant this permission to others.</p> <p> For
- * information about the Amazon S3 object tagging feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</p> <p>The following action is related to
- * <code>GetObjectTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html">PutObjectTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html">DeleteObjectTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObjectTagging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetObjectTaggingOutcomeCallable GetObjectTaggingCallable(const Model::GetObjectTaggingRequest& request) const;
+ template<typename GetObjectTaggingRequestT = Model::GetObjectTaggingRequest>
+ Model::GetObjectTaggingOutcomeCallable GetObjectTaggingCallable(const GetObjectTaggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectTagging, request);
+ }
/**
- * <p>Returns the tag-set of an object. You send the GET request against the
- * tagging subresource associated with the object.</p> <p>To use this operation,
- * you must have permission to perform the <code>s3:GetObjectTagging</code> action.
- * By default, the GET action returns information about current version of an
- * object. For a versioned bucket, you can have multiple versions of an object in
- * your bucket. To retrieve tags of any other version, use the versionId query
- * parameter. You also need permission for the
- * <code>s3:GetObjectVersionTagging</code> action.</p> <p> By default, the bucket
- * owner has this permission and can grant this permission to others.</p> <p> For
- * information about the Amazon S3 object tagging feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</p> <p>The following action is related to
- * <code>GetObjectTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObjectTagging.html">PutObjectTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html">DeleteObjectTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTagging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetObjectTagging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetObjectTaggingAsync(const Model::GetObjectTaggingRequest& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetObjectTaggingRequestT = Model::GetObjectTaggingRequest>
+ void GetObjectTaggingAsync(const GetObjectTaggingRequestT& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectTagging, request, handler, context);
+ }
/**
* <p>Returns torrent files from a bucket. BitTorrent can save you bandwidth when
@@ -5185,42 +2636,22 @@ namespace Aws
virtual Model::GetObjectTorrentOutcome GetObjectTorrent(const Model::GetObjectTorrentRequest& request) const;
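A sketch of fetching a torrent file for an object, subject to the constraints documented for this operation (objects under 5 GB, not encrypted with SSE-C); same assumed `client`, placeholder names.

```cpp
#include <aws/s3/model/GetObjectTorrentRequest.h>
#include <fstream>

Aws::S3::Model::GetObjectTorrentRequest torReq;
torReq.SetBucket("examplebucket");      // placeholder
torReq.SetKey("large-download.bin");    // placeholder

auto torOutcome = client.GetObjectTorrent(torReq);
if (torOutcome.IsSuccess()) {
    std::ofstream torrent("large-download.torrent", std::ios::binary);
    torrent << torOutcome.GetResult().GetBody().rdbuf();
}
```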
/**
- * <p>Returns torrent files from a bucket. BitTorrent can save you bandwidth when
- * you're distributing large files. For more information about BitTorrent, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html">Using
- * BitTorrent with Amazon S3</a>.</p> <p>You can get torrent only for
- * objects that are less than 5 GB in size, and that are not encrypted using
- * server-side encryption with a customer-provided encryption key.</p>
- * <p>To use GET, you must have READ access to the object.</p> <p>This action is
- * not supported by Amazon S3 on Outposts.</p> <p>The following action is related
- * to <code>GetObjectTorrent</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetObjectTorrent that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetObjectTorrentOutcomeCallable GetObjectTorrentCallable(const Model::GetObjectTorrentRequest& request) const;
+ template<typename GetObjectTorrentRequestT = Model::GetObjectTorrentRequest>
+ Model::GetObjectTorrentOutcomeCallable GetObjectTorrentCallable(const GetObjectTorrentRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetObjectTorrent, request);
+ }
/**
- * <p>Returns torrent files from a bucket. BitTorrent can save you bandwidth when
- * you're distributing large files. For more information about BitTorrent, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3Torrent.html">Using
- * BitTorrent with Amazon S3</a>.</p> <p>You can get torrent only for
- * objects that are less than 5 GB in size, and that are not encrypted using
- * server-side encryption with a customer-provided encryption key.</p>
- * <p>To use GET, you must have READ access to the object.</p> <p>This action is
- * not supported by Amazon S3 on Outposts.</p> <p>The following action is related
- * to <code>GetObjectTorrent</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectTorrent">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetObjectTorrent that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetObjectTorrentAsync(const Model::GetObjectTorrentRequest& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetObjectTorrentRequestT = Model::GetObjectTorrentRequest>
+ void GetObjectTorrentAsync(const GetObjectTorrentRequestT& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetObjectTorrent, request, handler, context);
+ }
/**
* <p>Retrieves the <code>PublicAccessBlock</code> configuration for an Amazon S3
@@ -5253,68 +2684,22 @@ namespace Aws
virtual Model::GetPublicAccessBlockOutcome GetPublicAccessBlock(const Model::GetPublicAccessBlockRequest& request) const;
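A sketch of reading a bucket's PublicAccessBlock configuration, again assuming the `client` from earlier and a placeholder bucket name:

```cpp
#include <aws/s3/model/GetPublicAccessBlockRequest.h>

Aws::S3::Model::GetPublicAccessBlockRequest pabReq;
pabReq.SetBucket("examplebucket");   // placeholder

auto pabOutcome = client.GetPublicAccessBlock(pabReq);
if (pabOutcome.IsSuccess()) {
    const auto& cfg = pabOutcome.GetResult().GetPublicAccessBlockConfiguration();
    std::cout << std::boolalpha
              << "BlockPublicAcls="        << cfg.GetBlockPublicAcls()
              << " IgnorePublicAcls="      << cfg.GetIgnorePublicAcls()
              << " BlockPublicPolicy="     << cfg.GetBlockPublicPolicy()
              << " RestrictPublicBuckets=" << cfg.GetRestrictPublicBuckets()
              << std::endl;
}
```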
/**
- * <p>Retrieves the <code>PublicAccessBlock</code> configuration for an Amazon S3
- * bucket. To use this operation, you must have the
- * <code>s3:GetBucketPublicAccessBlock</code> permission. For more information
- * about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p>When Amazon S3 evaluates the
- * <code>PublicAccessBlock</code> configuration for a bucket or an object, it
- * checks the <code>PublicAccessBlock</code> configuration for both the bucket (or
- * the bucket that contains the object) and the bucket owner's account. If the
- * <code>PublicAccessBlock</code> settings are different between the bucket and the
- * account, Amazon S3 uses the most restrictive combination of the bucket-level and
- * account-level settings.</p> <p>For more information about when
- * Amazon S3 considers a bucket or an object public, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
- * Meaning of "Public"</a>.</p> <p>The following operations are related to
- * <code>GetPublicAccessBlock</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html">PutPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html">DeletePublicAccessBlock</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for GetPublicAccessBlock that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::GetPublicAccessBlockOutcomeCallable GetPublicAccessBlockCallable(const Model::GetPublicAccessBlockRequest& request) const;
+ template<typename GetPublicAccessBlockRequestT = Model::GetPublicAccessBlockRequest>
+ Model::GetPublicAccessBlockOutcomeCallable GetPublicAccessBlockCallable(const GetPublicAccessBlockRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::GetPublicAccessBlock, request);
+ }
/**
- * <p>Retrieves the <code>PublicAccessBlock</code> configuration for an Amazon S3
- * bucket. To use this operation, you must have the
- * <code>s3:GetBucketPublicAccessBlock</code> permission. For more information
- * about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p>When Amazon S3 evaluates the
- * <code>PublicAccessBlock</code> configuration for a bucket or an object, it
- * checks the <code>PublicAccessBlock</code> configuration for both the bucket (or
- * the bucket that contains the object) and the bucket owner's account. If the
- * <code>PublicAccessBlock</code> settings are different between the bucket and the
- * account, Amazon S3 uses the most restrictive combination of the bucket-level and
- * account-level settings.</p> <p>For more information about when
- * Amazon S3 considers a bucket or an object public, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
- * Meaning of "Public"</a>.</p> <p>The following operations are related to
- * <code>GetPublicAccessBlock</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutPublicAccessBlock.html">PutPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html">DeletePublicAccessBlock</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetPublicAccessBlock">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for GetPublicAccessBlock that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void GetPublicAccessBlockAsync(const Model::GetPublicAccessBlockRequest& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename GetPublicAccessBlockRequestT = Model::GetPublicAccessBlockRequest>
+ void GetPublicAccessBlockAsync(const GetPublicAccessBlockRequestT& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::GetPublicAccessBlock, request, handler, context);
+ }
/**
* <p>This action is useful to determine if a bucket exists and you have permission
@@ -5330,55 +2715,38 @@ namespace Aws
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
* Related to Bucket Subresource Operations</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p><p><h3>See Also:</h3>
- * <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket">AWS
- * API Reference</a></p>
+ * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>To use this API
+ * against an access point, you must provide the alias of the access point in place
+ * of the bucket name or specify the access point ARN. When using the access point
+ * ARN, you must direct requests to the access point hostname. The access point
+ * hostname takes the form
+ * AccessPointName-AccountId.s3-accesspoint.Region.amazonaws.com. When using the
+ * Amazon Web Services SDKs, you provide the ARN in place of the bucket name. For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a>.</p><p><h3>See Also:</h3> <a
+ * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket">AWS API
+ * Reference</a></p>
*/
virtual Model::HeadBucketOutcome HeadBucket(const Model::HeadBucketRequest& request) const;
/**
- * <p>This action is useful to determine if a bucket exists and you have permission
- * to access it. The action returns a <code>200 OK</code> if the bucket exists and
- * you have permission to access it.</p> <p>If the bucket does not exist or you do
- * not have permission to access it, the <code>HEAD</code> request returns a
- * generic <code>404 Not Found</code> or <code>403 Forbidden</code> code. A message
- * body is not included, so you cannot determine the exception beyond these error
- * codes.</p> <p>To use this operation, you must have permissions to perform the
- * <code>s3:ListBucket</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p><p><h3>See Also:</h3>
- * <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for HeadBucket that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::HeadBucketOutcomeCallable HeadBucketCallable(const Model::HeadBucketRequest& request) const;
+ template<typename HeadBucketRequestT = Model::HeadBucketRequest>
+ Model::HeadBucketOutcomeCallable HeadBucketCallable(const HeadBucketRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::HeadBucket, request);
+ }
/**
- * <p>This action is useful to determine if a bucket exists and you have permission
- * to access it. The action returns a <code>200 OK</code> if the bucket exists and
- * you have permission to access it.</p> <p>If the bucket does not exist or you do
- * not have permission to access it, the <code>HEAD</code> request returns a
- * generic <code>404 Not Found</code> or <code>403 Forbidden</code> code. A message
- * body is not included, so you cannot determine the exception beyond these error
- * codes.</p> <p>To use this operation, you must have permissions to perform the
- * <code>s3:ListBucket</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p><p><h3>See Also:</h3>
- * <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadBucket">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for HeadBucket that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void HeadBucketAsync(const Model::HeadBucketRequest& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename HeadBucketRequestT = Model::HeadBucketRequest>
+ void HeadBucketAsync(const HeadBucketRequestT& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::HeadBucket, request, handler, context);
+ }
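
A hedged sketch of the synchronous HeadBucket call described above; the bucket name is a placeholder and, as the documentation notes, only a generic 403/404 status is available on failure:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/HeadBucketRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::HeadBucketRequest request;
            request.SetBucket("my-example-bucket");  // placeholder

            auto outcome = client.HeadBucket(request);
            if (outcome.IsSuccess()) {
                std::cout << "Bucket exists and is accessible\n";
            } else {
                // Only a generic status is available: 404 (not found) or 403 (forbidden).
                std::cout << "HeadBucket failed, HTTP "
                          << static_cast<int>(outcome.GetError().GetResponseCode()) << "\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }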
/**
* <p>The HEAD action retrieves metadata from an object without returning the
@@ -5401,12 +2769,11 @@ namespace Aws
* Encryption (Using Customer-Provided Encryption Keys)</a>.</p> <ul> <li>
* <p>Encryption request headers, like <code>x-amz-server-side-encryption</code>,
* should not be sent for GET requests if your object uses server-side encryption
- * with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon
- * S3–managed encryption keys (SSE-S3). If your object does use these types of
- * keys, you’ll get an HTTP 400 BadRequest error.</p> </li> <li> <p> The last
- * modified property in this case is the creation date of the object.</p> </li>
- * </ul> <p>Request headers are limited to 8 KB in size. For more
- * information, see <a
+ * with KMS keys (SSE-KMS) or server-side encryption with Amazon S3–managed
+ * encryption keys (SSE-S3). If your object does use these types of keys, you’ll
+ * get an HTTP 400 BadRequest error.</p> </li> <li> <p> The last modified property
+ * in this case is the creation date of the object.</p> </li> </ul>
+ * <p>Request headers are limited to 8 KB in size. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html">Common
* Request Headers</a>.</p> <p>Consider the following when using request
* headers:</p> <ul> <li> <p> Consideration 1 – If both of the
@@ -5423,8 +2790,8 @@ namespace Aws
* </li> </ul> <p>Then Amazon S3 returns the <code>304 Not Modified</code> response
* code.</p> </li> </ul> <p>For more information about conditional requests, see <a
* href="https://tools.ietf.org/html/rfc7232">RFC 7232</a>.</p> <p>
- * <b>Permissions</b> </p> <p>You need the <code>s3:GetObject</code> permission for
- * this operation. For more information, see <a
+ * <b>Permissions</b> </p> <p>You need the relevant read object (or version)
+ * permission for this operation. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
* Permissions in a Policy</a>. If the object you request does not exist, the error
* Amazon S3 returns depends on whether you also have the s3:ListBucket
@@ -5432,9 +2799,11 @@ namespace Aws
* permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such
* key") error.</p> </li> <li> <p>If you don’t have the <code>s3:ListBucket</code>
* permission, Amazon S3 returns an HTTP status code 403 ("access denied")
- * error.</p> </li> </ul> <p>The following action is related to
+ * error.</p> </li> </ul> <p>The following actions are related to
* <code>HeadObject</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
+ * </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
* </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject">AWS API
* Reference</a></p>
@@ -5442,130 +2811,22 @@ namespace Aws
virtual Model::HeadObjectOutcome HeadObject(const Model::HeadObjectRequest& request) const;
/**
- * <p>The HEAD action retrieves metadata from an object without returning the
- * object itself. This action is useful if you're only interested in an object's
- * metadata. To use HEAD, you must have READ access to the object.</p> <p>A
- * <code>HEAD</code> request has the same options as a <code>GET</code> action on
- * an object. The response is identical to the <code>GET</code> response except
- * that there is no response body. Because of this, if the <code>HEAD</code>
- * request generates an error, it returns a generic <code>404 Not Found</code> or
- * <code>403 Forbidden</code> code. It is not possible to retrieve the exact
- * exception beyond these error codes.</p> <p>If you encrypt an object by using
- * server-side encryption with customer-provided encryption keys (SSE-C) when you
- * store the object in Amazon S3, then when you retrieve the metadata from the
- * object, you must use the following headers:</p> <ul> <li>
- * <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about SSE-C, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys)</a>.</p> <ul> <li>
- * <p>Encryption request headers, like <code>x-amz-server-side-encryption</code>,
- * should not be sent for GET requests if your object uses server-side encryption
- * with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon
- * S3–managed encryption keys (SSE-S3). If your object does use these types of
- * keys, you’ll get an HTTP 400 BadRequest error.</p> </li> <li> <p> The last
- * modified property in this case is the creation date of the object.</p> </li>
- * </ul> <p>Request headers are limited to 8 KB in size. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html">Common
- * Request Headers</a>.</p> <p>Consider the following when using request
- * headers:</p> <ul> <li> <p> Consideration 1 – If both of the
- * <code>If-Match</code> and <code>If-Unmodified-Since</code> headers are present
- * in the request as follows:</p> <ul> <li> <p> <code>If-Match</code> condition
- * evaluates to <code>true</code>, and;</p> </li> <li> <p>
- * <code>If-Unmodified-Since</code> condition evaluates to <code>false</code>;</p>
- * </li> </ul> <p>Then Amazon S3 returns <code>200 OK</code> and the data
- * requested.</p> </li> <li> <p> Consideration 2 – If both of the
- * <code>If-None-Match</code> and <code>If-Modified-Since</code> headers are
- * present in the request as follows:</p> <ul> <li> <p> <code>If-None-Match</code>
- * condition evaluates to <code>false</code>, and;</p> </li> <li> <p>
- * <code>If-Modified-Since</code> condition evaluates to <code>true</code>;</p>
- * </li> </ul> <p>Then Amazon S3 returns the <code>304 Not Modified</code> response
- * code.</p> </li> </ul> <p>For more information about conditional requests, see <a
- * href="https://tools.ietf.org/html/rfc7232">RFC 7232</a>.</p> <p>
- * <b>Permissions</b> </p> <p>You need the <code>s3:GetObject</code> permission for
- * this operation. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. If the object you request does not exist, the error
- * Amazon S3 returns depends on whether you also have the s3:ListBucket
- * permission.</p> <ul> <li> <p>If you have the <code>s3:ListBucket</code>
- * permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such
- * key") error.</p> </li> <li> <p>If you don’t have the <code>s3:ListBucket</code>
- * permission, Amazon S3 returns an HTTP status code 403 ("access denied")
- * error.</p> </li> </ul> <p>The following action is related to
- * <code>HeadObject</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for HeadObject that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::HeadObjectOutcomeCallable HeadObjectCallable(const Model::HeadObjectRequest& request) const;
+ template<typename HeadObjectRequestT = Model::HeadObjectRequest>
+ Model::HeadObjectOutcomeCallable HeadObjectCallable(const HeadObjectRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::HeadObject, request);
+ }
/**
- * <p>The HEAD action retrieves metadata from an object without returning the
- * object itself. This action is useful if you're only interested in an object's
- * metadata. To use HEAD, you must have READ access to the object.</p> <p>A
- * <code>HEAD</code> request has the same options as a <code>GET</code> action on
- * an object. The response is identical to the <code>GET</code> response except
- * that there is no response body. Because of this, if the <code>HEAD</code>
- * request generates an error, it returns a generic <code>404 Not Found</code> or
- * <code>403 Forbidden</code> code. It is not possible to retrieve the exact
- * exception beyond these error codes.</p> <p>If you encrypt an object by using
- * server-side encryption with customer-provided encryption keys (SSE-C) when you
- * store the object in Amazon S3, then when you retrieve the metadata from the
- * object, you must use the following headers:</p> <ul> <li>
- * <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p>For more
- * information about SSE-C, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys)</a>.</p> <ul> <li>
- * <p>Encryption request headers, like <code>x-amz-server-side-encryption</code>,
- * should not be sent for GET requests if your object uses server-side encryption
- * with CMKs stored in AWS KMS (SSE-KMS) or server-side encryption with Amazon
- * S3–managed encryption keys (SSE-S3). If your object does use these types of
- * keys, you’ll get an HTTP 400 BadRequest error.</p> </li> <li> <p> The last
- * modified property in this case is the creation date of the object.</p> </li>
- * </ul> <p>Request headers are limited to 8 KB in size. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTCommonRequestHeaders.html">Common
- * Request Headers</a>.</p> <p>Consider the following when using request
- * headers:</p> <ul> <li> <p> Consideration 1 – If both of the
- * <code>If-Match</code> and <code>If-Unmodified-Since</code> headers are present
- * in the request as follows:</p> <ul> <li> <p> <code>If-Match</code> condition
- * evaluates to <code>true</code>, and;</p> </li> <li> <p>
- * <code>If-Unmodified-Since</code> condition evaluates to <code>false</code>;</p>
- * </li> </ul> <p>Then Amazon S3 returns <code>200 OK</code> and the data
- * requested.</p> </li> <li> <p> Consideration 2 – If both of the
- * <code>If-None-Match</code> and <code>If-Modified-Since</code> headers are
- * present in the request as follows:</p> <ul> <li> <p> <code>If-None-Match</code>
- * condition evaluates to <code>false</code>, and;</p> </li> <li> <p>
- * <code>If-Modified-Since</code> condition evaluates to <code>true</code>;</p>
- * </li> </ul> <p>Then Amazon S3 returns the <code>304 Not Modified</code> response
- * code.</p> </li> </ul> <p>For more information about conditional requests, see <a
- * href="https://tools.ietf.org/html/rfc7232">RFC 7232</a>.</p> <p>
- * <b>Permissions</b> </p> <p>You need the <code>s3:GetObject</code> permission for
- * this operation. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. If the object you request does not exist, the error
- * Amazon S3 returns depends on whether you also have the s3:ListBucket
- * permission.</p> <ul> <li> <p>If you have the <code>s3:ListBucket</code>
- * permission on the bucket, Amazon S3 returns an HTTP status code 404 ("no such
- * key") error.</p> </li> <li> <p>If you don’t have the <code>s3:ListBucket</code>
- * permission, Amazon S3 returns an HTTP status code 403 ("access denied")
- * error.</p> </li> </ul> <p>The following action is related to
- * <code>HeadObject</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/HeadObject">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for HeadObject that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void HeadObjectAsync(const Model::HeadObjectRequest& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename HeadObjectRequestT = Model::HeadObjectRequest>
+ void HeadObjectAsync(const HeadObjectRequestT& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::HeadObject, request, handler, context);
+ }
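
A sketch of the Callable wrapper for HeadObject, assuming placeholder bucket and key names; the returned OutcomeCallable behaves like a std::future, so .get() blocks until the metadata arrives, while other work can run in between:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/HeadObjectRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::HeadObjectRequest request;
            request.SetBucket("my-example-bucket");  // placeholder
            request.SetKey("path/to/object.bin");    // placeholder

            // The Callable wrapper submits the request to the client's executor.
            auto future = client.HeadObjectCallable(request);
            auto outcome = future.get();  // block here; other requests could run in parallel

            if (outcome.IsSuccess()) {
                const auto& result = outcome.GetResult();
                std::cout << "Size: " << result.GetContentLength()
                          << "  ETag: " << result.GetETag() << "\n";
            } else {
                std::cerr << "HeadObject failed: " << outcome.GetError().GetMessage() << "\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }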
/**
* <p>Lists the analytics configurations for the bucket. You can have up to 1,000
@@ -5601,88 +2862,37 @@ namespace Aws
virtual Model::ListBucketAnalyticsConfigurationsOutcome ListBucketAnalyticsConfigurations(const Model::ListBucketAnalyticsConfigurationsRequest& request) const;
/**
- * <p>Lists the analytics configurations for the bucket. You can have up to 1,000
- * analytics configurations per bucket.</p> <p>This action supports list pagination
- * and does not return more than 100 configurations at a time. You should always
- * check the <code>IsTruncated</code> element in the response. If there are no more
- * configurations to list, <code>IsTruncated</code> is set to false. If there are
- * more configurations to list, <code>IsTruncated</code> is set to true, and there
- * will be a value in <code>NextContinuationToken</code>. You use the
- * <code>NextContinuationToken</code> value to continue the pagination of the list
- * by passing the value in continuation-token in the request to <code>GET</code>
- * the next page.</p> <p>To use this operation, you must have permissions to
- * perform the <code>s3:GetAnalyticsConfiguration</code> action. The bucket owner
- * has this permission by default. The bucket owner can grant this permission to
- * others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * Amazon S3 analytics feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a>. </p> <p>The following operations are
- * related to <code>ListBucketAnalyticsConfigurations</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html">GetBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html">DeleteBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html">PutBucketAnalyticsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListBucketAnalyticsConfigurations that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListBucketAnalyticsConfigurationsOutcomeCallable ListBucketAnalyticsConfigurationsCallable(const Model::ListBucketAnalyticsConfigurationsRequest& request) const;
+ template<typename ListBucketAnalyticsConfigurationsRequestT = Model::ListBucketAnalyticsConfigurationsRequest>
+ Model::ListBucketAnalyticsConfigurationsOutcomeCallable ListBucketAnalyticsConfigurationsCallable(const ListBucketAnalyticsConfigurationsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListBucketAnalyticsConfigurations, request);
+ }
/**
- * <p>Lists the analytics configurations for the bucket. You can have up to 1,000
- * analytics configurations per bucket.</p> <p>This action supports list pagination
- * and does not return more than 100 configurations at a time. You should always
- * check the <code>IsTruncated</code> element in the response. If there are no more
- * configurations to list, <code>IsTruncated</code> is set to false. If there are
- * more configurations to list, <code>IsTruncated</code> is set to true, and there
- * will be a value in <code>NextContinuationToken</code>. You use the
- * <code>NextContinuationToken</code> value to continue the pagination of the list
- * by passing the value in continuation-token in the request to <code>GET</code>
- * the next page.</p> <p>To use this operation, you must have permissions to
- * perform the <code>s3:GetAnalyticsConfiguration</code> action. The bucket owner
- * has this permission by default. The bucket owner can grant this permission to
- * others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * Amazon S3 analytics feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a>. </p> <p>The following operations are
- * related to <code>ListBucketAnalyticsConfigurations</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html">GetBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html">DeleteBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketAnalyticsConfiguration.html">PutBucketAnalyticsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketAnalyticsConfigurations">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListBucketAnalyticsConfigurations that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListBucketAnalyticsConfigurationsAsync(const Model::ListBucketAnalyticsConfigurationsRequest& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListBucketAnalyticsConfigurationsRequestT = Model::ListBucketAnalyticsConfigurationsRequest>
+ void ListBucketAnalyticsConfigurationsAsync(const ListBucketAnalyticsConfigurationsRequestT& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListBucketAnalyticsConfigurations, request, handler, context);
+ }
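
A sketch of the pagination loop the ListBucketAnalyticsConfigurations description calls for (check IsTruncated, feed NextContinuationToken back as the continuation-token of the next request); the bucket name is a placeholder:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListBucketAnalyticsConfigurationsRequest request;
            request.SetBucket("my-example-bucket");  // placeholder

            bool truncated = true;
            while (truncated) {
                auto outcome = client.ListBucketAnalyticsConfigurations(request);
                if (!outcome.IsSuccess()) {
                    std::cerr << outcome.GetError().GetMessage() << "\n";
                    break;
                }
                const auto& result = outcome.GetResult();
                for (const auto& cfg : result.GetAnalyticsConfigurationList()) {
                    std::cout << "analytics configuration: " << cfg.GetId() << "\n";
                }
                truncated = result.GetIsTruncated();
                if (truncated) {
                    // Feed NextContinuationToken back to fetch the next page (<=100 per page).
                    request.SetContinuationToken(result.GetNextContinuationToken());
                }
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }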
/**
* <p>Lists the S3 Intelligent-Tiering configuration from the specified bucket.</p>
* <p>The S3 Intelligent-Tiering storage class is designed to optimize storage
* costs by automatically moving data to the most cost-effective storage access
- * tier, without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
+ * tier, without performance impact or operational overhead. S3 Intelligent-Tiering
+ * delivers automatic cost savings in three low latency and high throughput access
+ * tiers. To get the lowest storage cost on data that can be accessed in minutes to
+ * hours, you can choose to activate additional archiving capabilities.</p> <p>The
+ * S3 Intelligent-Tiering storage class is the ideal storage class for data with
+ * unknown, changing, or unpredictable access patterns, independent of object size
+ * or retention period. If the size of an object is less than 128 KB, it is not
+ * monitored and not eligible for auto-tiering. Smaller objects can be stored, but
+ * they are always charged at the Frequent Access tier rates in the S3
+ * Intelligent-Tiering storage class.</p> <p>For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
* class for automatically optimizing frequently and infrequently accessed
* objects</a>.</p> <p>Operations related to
@@ -5700,66 +2910,22 @@ namespace Aws
virtual Model::ListBucketIntelligentTieringConfigurationsOutcome ListBucketIntelligentTieringConfigurations(const Model::ListBucketIntelligentTieringConfigurationsRequest& request) const;
/**
- * <p>Lists the S3 Intelligent-Tiering configuration from the specified bucket.</p>
- * <p>The S3 Intelligent-Tiering storage class is designed to optimize storage
- * costs by automatically moving data to the most cost-effective storage access
- * tier, without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>ListBucketIntelligentTieringConfigurations</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html">DeleteBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html">PutBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html">GetBucketIntelligentTieringConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListBucketIntelligentTieringConfigurations that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListBucketIntelligentTieringConfigurationsOutcomeCallable ListBucketIntelligentTieringConfigurationsCallable(const Model::ListBucketIntelligentTieringConfigurationsRequest& request) const;
+ template<typename ListBucketIntelligentTieringConfigurationsRequestT = Model::ListBucketIntelligentTieringConfigurationsRequest>
+ Model::ListBucketIntelligentTieringConfigurationsOutcomeCallable ListBucketIntelligentTieringConfigurationsCallable(const ListBucketIntelligentTieringConfigurationsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListBucketIntelligentTieringConfigurations, request);
+ }
/**
- * <p>Lists the S3 Intelligent-Tiering configuration from the specified bucket.</p>
- * <p>The S3 Intelligent-Tiering storage class is designed to optimize storage
- * costs by automatically moving data to the most cost-effective storage access
- * tier, without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>ListBucketIntelligentTieringConfigurations</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html">DeleteBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketIntelligentTieringConfiguration.html">PutBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html">GetBucketIntelligentTieringConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketIntelligentTieringConfigurations">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListBucketIntelligentTieringConfigurations that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListBucketIntelligentTieringConfigurationsAsync(const Model::ListBucketIntelligentTieringConfigurationsRequest& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListBucketIntelligentTieringConfigurationsRequestT = Model::ListBucketIntelligentTieringConfigurationsRequest>
+ void ListBucketIntelligentTieringConfigurationsAsync(const ListBucketIntelligentTieringConfigurationsRequestT& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListBucketIntelligentTieringConfigurations, request, handler, context);
+ }
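
A minimal sketch of listing S3 Intelligent-Tiering configurations synchronously; the bucket name is a placeholder, and the result accessor name (GetIntelligentTieringConfigurationList) is assumed to follow the generated model:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListBucketIntelligentTieringConfigurationsRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListBucketIntelligentTieringConfigurationsRequest request;
            request.SetBucket("my-example-bucket");  // placeholder

            auto outcome = client.ListBucketIntelligentTieringConfigurations(request);
            if (outcome.IsSuccess()) {
                for (const auto& cfg : outcome.GetResult().GetIntelligentTieringConfigurationList()) {
                    std::cout << "intelligent-tiering configuration: " << cfg.GetId() << "\n";
                }
            } else {
                std::cerr << outcome.GetError().GetMessage() << "\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }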
/**
* <p>Returns a list of inventory configurations for the bucket. You can have up to
@@ -5795,74 +2961,22 @@ namespace Aws
virtual Model::ListBucketInventoryConfigurationsOutcome ListBucketInventoryConfigurations(const Model::ListBucketInventoryConfigurationsRequest& request) const;
/**
- * <p>Returns a list of inventory configurations for the bucket. You can have up to
- * 1,000 analytics configurations per bucket.</p> <p>This action supports list
- * pagination and does not return more than 100 configurations at a time. Always
- * check the <code>IsTruncated</code> element in the response. If there are no more
- * configurations to list, <code>IsTruncated</code> is set to false. If there are
- * more configurations to list, <code>IsTruncated</code> is set to true, and there
- * is a value in <code>NextContinuationToken</code>. You use the
- * <code>NextContinuationToken</code> value to continue the pagination of the list
- * by passing the value in continuation-token in the request to <code>GET</code>
- * the next page.</p> <p> To use this operation, you must have permissions to
- * perform the <code>s3:GetInventoryConfiguration</code> action. The bucket owner
- * has this permission by default. The bucket owner can grant this permission to
- * others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 inventory feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a> </p> <p>The following operations are related to
- * <code>ListBucketInventoryConfigurations</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html">GetBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html">DeleteBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html">PutBucketInventoryConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListBucketInventoryConfigurations that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListBucketInventoryConfigurationsOutcomeCallable ListBucketInventoryConfigurationsCallable(const Model::ListBucketInventoryConfigurationsRequest& request) const;
+ template<typename ListBucketInventoryConfigurationsRequestT = Model::ListBucketInventoryConfigurationsRequest>
+ Model::ListBucketInventoryConfigurationsOutcomeCallable ListBucketInventoryConfigurationsCallable(const ListBucketInventoryConfigurationsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListBucketInventoryConfigurations, request);
+ }
/**
- * <p>Returns a list of inventory configurations for the bucket. You can have up to
- * 1,000 analytics configurations per bucket.</p> <p>This action supports list
- * pagination and does not return more than 100 configurations at a time. Always
- * check the <code>IsTruncated</code> element in the response. If there are no more
- * configurations to list, <code>IsTruncated</code> is set to false. If there are
- * more configurations to list, <code>IsTruncated</code> is set to true, and there
- * is a value in <code>NextContinuationToken</code>. You use the
- * <code>NextContinuationToken</code> value to continue the pagination of the list
- * by passing the value in continuation-token in the request to <code>GET</code>
- * the next page.</p> <p> To use this operation, you must have permissions to
- * perform the <code>s3:GetInventoryConfiguration</code> action. The bucket owner
- * has this permission by default. The bucket owner can grant this permission to
- * others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * the Amazon S3 inventory feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a> </p> <p>The following operations are related to
- * <code>ListBucketInventoryConfigurations</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html">GetBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html">DeleteBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketInventoryConfiguration.html">PutBucketInventoryConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketInventoryConfigurations">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListBucketInventoryConfigurations that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListBucketInventoryConfigurationsAsync(const Model::ListBucketInventoryConfigurationsRequest& request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListBucketInventoryConfigurationsRequestT = Model::ListBucketInventoryConfigurationsRequest>
+ void ListBucketInventoryConfigurationsAsync(const ListBucketInventoryConfigurationsRequestT& request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListBucketInventoryConfigurations, request, handler, context);
+ }
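
A sketch using the Callable wrapper for ListBucketInventoryConfigurations, assuming a placeholder bucket; other work can run between submitting the call and calling .get() on the future:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListBucketInventoryConfigurationsRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListBucketInventoryConfigurationsRequest request;
            request.SetBucket("my-example-bucket");  // placeholder

            auto future = client.ListBucketInventoryConfigurationsCallable(request);
            // ... other work could run here while the request is in flight ...
            auto outcome = future.get();

            if (outcome.IsSuccess()) {
                for (const auto& cfg : outcome.GetResult().GetInventoryConfigurationList()) {
                    std::cout << "inventory configuration: " << cfg.GetId() << "\n";
                }
            } else {
                std::cerr << outcome.GetError().GetMessage() << "\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }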
/**
* <p>Lists the metrics configurations for the bucket. The metrics configurations
@@ -5900,106 +3014,49 @@ namespace Aws
virtual Model::ListBucketMetricsConfigurationsOutcome ListBucketMetricsConfigurations(const Model::ListBucketMetricsConfigurationsRequest& request) const;
/**
- * <p>Lists the metrics configurations for the bucket. The metrics configurations
- * are only for the request metrics of the bucket and do not provide information on
- * daily storage metrics. You can have up to 1,000 configurations per bucket.</p>
- * <p>This action supports list pagination and does not return more than 100
- * configurations at a time. Always check the <code>IsTruncated</code> element in
- * the response. If there are no more configurations to list,
- * <code>IsTruncated</code> is set to false. If there are more configurations to
- * list, <code>IsTruncated</code> is set to true, and there is a value in
- * <code>NextContinuationToken</code>. You use the
- * <code>NextContinuationToken</code> value to continue the pagination of the list
- * by passing the value in <code>continuation-token</code> in the request to
- * <code>GET</code> the next page.</p> <p>To use this operation, you must have
- * permissions to perform the <code>s3:GetMetricsConfiguration</code> action. The
- * bucket owner has this permission by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For more information
- * about metrics configurations and CloudWatch request metrics, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>.</p> <p>The following operations are related
- * to <code>ListBucketMetricsConfigurations</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html">GetBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListBucketMetricsConfigurations that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListBucketMetricsConfigurationsOutcomeCallable ListBucketMetricsConfigurationsCallable(const Model::ListBucketMetricsConfigurationsRequest& request) const;
+ template<typename ListBucketMetricsConfigurationsRequestT = Model::ListBucketMetricsConfigurationsRequest>
+ Model::ListBucketMetricsConfigurationsOutcomeCallable ListBucketMetricsConfigurationsCallable(const ListBucketMetricsConfigurationsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListBucketMetricsConfigurations, request);
+ }
/**
- * <p>Lists the metrics configurations for the bucket. The metrics configurations
- * are only for the request metrics of the bucket and do not provide information on
- * daily storage metrics. You can have up to 1,000 configurations per bucket.</p>
- * <p>This action supports list pagination and does not return more than 100
- * configurations at a time. Always check the <code>IsTruncated</code> element in
- * the response. If there are no more configurations to list,
- * <code>IsTruncated</code> is set to false. If there are more configurations to
- * list, <code>IsTruncated</code> is set to true, and there is a value in
- * <code>NextContinuationToken</code>. You use the
- * <code>NextContinuationToken</code> value to continue the pagination of the list
- * by passing the value in <code>continuation-token</code> in the request to
- * <code>GET</code> the next page.</p> <p>To use this operation, you must have
- * permissions to perform the <code>s3:GetMetricsConfiguration</code> action. The
- * bucket owner has this permission by default. The bucket owner can grant this
- * permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For more information
- * about metrics configurations and CloudWatch request metrics, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>.</p> <p>The following operations are related
- * to <code>ListBucketMetricsConfigurations</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html">GetBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBucketMetricsConfigurations">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListBucketMetricsConfigurations that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListBucketMetricsConfigurationsAsync(const Model::ListBucketMetricsConfigurationsRequest& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListBucketMetricsConfigurationsRequestT = Model::ListBucketMetricsConfigurationsRequest>
+ void ListBucketMetricsConfigurationsAsync(const ListBucketMetricsConfigurationsRequestT& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListBucketMetricsConfigurations, request, handler, context);
+ }
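
A sketch of the Async wrapper for ListBucketMetricsConfigurations with a lambda matching the ResponseReceivedHandler signature; the bucket name is a placeholder and the promise only keeps the process alive until the callback fires:

    #include <aws/core/Aws.h>
    #include <aws/core/client/AsyncCallerContext.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListBucketMetricsConfigurationsRequest.h>
    #include <future>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListBucketMetricsConfigurationsRequest request;
            request.SetBucket("my-example-bucket");  // placeholder

            std::promise<void> done;
            client.ListBucketMetricsConfigurationsAsync(
                request,
                [&done](const Aws::S3::S3Client*,
                        const Aws::S3::Model::ListBucketMetricsConfigurationsRequest&,
                        const Aws::S3::Model::ListBucketMetricsConfigurationsOutcome& outcome,
                        const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) {
                    if (outcome.IsSuccess()) {
                        for (const auto& cfg : outcome.GetResult().GetMetricsConfigurationList()) {
                            std::cout << "metrics configuration: " << cfg.GetId() << "\n";
                        }
                    } else {
                        std::cerr << outcome.GetError().GetMessage() << "\n";
                    }
                    done.set_value();  // signal completion to main()
                });
            done.get_future().wait();
        }
        Aws::ShutdownAPI(options);
        return 0;
    }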
/**
* <p>Returns a list of all buckets owned by the authenticated sender of the
- * request.</p><p><h3>See Also:</h3> <a
+ * request. To use this operation, you must have the
+ * <code>s3:ListAllMyBuckets</code> permission.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets">AWS API
* Reference</a></p>
*/
virtual Model::ListBucketsOutcome ListBuckets() const;
/**
- * <p>Returns a list of all buckets owned by the authenticated sender of the
- * request.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListBuckets that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListBucketsOutcomeCallable ListBucketsCallable() const;
+ template<typename = void>
+ Model::ListBucketsOutcomeCallable ListBucketsCallable() const
+ {
+ return SubmitCallable(&S3Client::ListBuckets);
+ }
/**
- * <p>Returns a list of all buckets owned by the authenticated sender of the
- * request.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListBuckets">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListBuckets that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListBucketsAsync(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename = void>
+ void ListBucketsAsync(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListBuckets, handler, context);
+ }
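
A minimal sketch of ListBuckets, which takes no request object in this overload; credentials and region come from the default provider chain:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListBucketsResult.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            auto outcome = client.ListBuckets();  // requires s3:ListAllMyBuckets
            if (outcome.IsSuccess()) {
                for (const auto& bucket : outcome.GetResult().GetBuckets()) {
                    std::cout << bucket.GetName() << "\n";
                }
            } else {
                std::cerr << "ListBuckets failed: " << outcome.GetError().GetMessage() << "\n";
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }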
/**
* <p>This action lists in-progress multipart uploads. An in-progress multipart
* upload is a multipart upload that has been initiated using the Initiate
@@ -6039,84 +3096,22 @@ namespace Aws
virtual Model::ListMultipartUploadsOutcome ListMultipartUploads(const Model::ListMultipartUploadsRequest& request) const;
/**
- * <p>This action lists in-progress multipart uploads. An in-progress multipart
- * upload is a multipart upload that has been initiated using the Initiate
- * Multipart Upload request, but has not yet been completed or aborted.</p> <p>This
- * action returns at most 1,000 multipart uploads in the response. 1,000 multipart
- * uploads is the maximum number of uploads a response can include, which is also
- * the default value. You can further limit the number of uploads in a response by
- * specifying the <code>max-uploads</code> parameter in the response. If additional
- * multipart uploads satisfy the list criteria, the response will contain an
- * <code>IsTruncated</code> element with the value true. To list the additional
- * multipart uploads, use the <code>key-marker</code> and
- * <code>upload-id-marker</code> request parameters.</p> <p>In the response, the
- * uploads are sorted by key. If your application has initiated more than one
- * multipart upload using the same object key, then uploads in the response are
- * first sorted by key. Additionally, uploads are sorted in ascending order within
- * each key by the upload initiation time.</p> <p>For more information on multipart
- * uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a>.</p> <p>For information on permissions
- * required to use the multipart upload API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>The following operations are related to
- * <code>ListMultipartUploads</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListMultipartUploads that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListMultipartUploadsOutcomeCallable ListMultipartUploadsCallable(const Model::ListMultipartUploadsRequest& request) const;
+ template<typename ListMultipartUploadsRequestT = Model::ListMultipartUploadsRequest>
+ Model::ListMultipartUploadsOutcomeCallable ListMultipartUploadsCallable(const ListMultipartUploadsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListMultipartUploads, request);
+ }
/**
- * <p>This action lists in-progress multipart uploads. An in-progress multipart
- * upload is a multipart upload that has been initiated using the Initiate
- * Multipart Upload request, but has not yet been completed or aborted.</p> <p>This
- * action returns at most 1,000 multipart uploads in the response. 1,000 multipart
- * uploads is the maximum number of uploads a response can include, which is also
- * the default value. You can further limit the number of uploads in a response by
- * specifying the <code>max-uploads</code> parameter in the response. If additional
- * multipart uploads satisfy the list criteria, the response will contain an
- * <code>IsTruncated</code> element with the value true. To list the additional
- * multipart uploads, use the <code>key-marker</code> and
- * <code>upload-id-marker</code> request parameters.</p> <p>In the response, the
- * uploads are sorted by key. If your application has initiated more than one
- * multipart upload using the same object key, then uploads in the response are
- * first sorted by key. Additionally, uploads are sorted in ascending order within
- * each key by the upload initiation time.</p> <p>For more information on multipart
- * uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a>.</p> <p>For information on permissions
- * required to use the multipart upload API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>The following operations are related to
- * <code>ListMultipartUploads</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListMultipartUploads">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListMultipartUploads that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListMultipartUploadsAsync(const Model::ListMultipartUploadsRequest& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListMultipartUploadsRequestT = Model::ListMultipartUploadsRequest>
+ void ListMultipartUploadsAsync(const ListMultipartUploadsRequestT& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListMultipartUploads, request, handler, context);
+ }
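
A sketch of paging through in-progress multipart uploads as described above, using max-uploads together with the key-marker/upload-id-marker continuation pair; the bucket name and page size are illustrative:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListMultipartUploadsRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListMultipartUploadsRequest request;
            request.SetBucket("my-example-bucket");  // placeholder
            request.SetMaxUploads(100);              // page size; 1,000 is the maximum and default

            bool truncated = true;
            while (truncated) {
                auto outcome = client.ListMultipartUploads(request);
                if (!outcome.IsSuccess()) {
                    std::cerr << outcome.GetError().GetMessage() << "\n";
                    break;
                }
                const auto& result = outcome.GetResult();
                for (const auto& upload : result.GetUploads()) {
                    std::cout << upload.GetKey() << " " << upload.GetUploadId() << "\n";
                }
                truncated = result.GetIsTruncated();
                if (truncated) {
                    // Continue from where the previous page stopped.
                    request.SetKeyMarker(result.GetNextKeyMarker());
                    request.SetUploadIdMarker(result.GetNextUploadIdMarker());
                }
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }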
/**
* <p>Returns metadata about all versions of the objects in a bucket. You can also
@@ -6143,56 +3138,22 @@ namespace Aws
virtual Model::ListObjectVersionsOutcome ListObjectVersions(const Model::ListObjectVersionsRequest& request) const;
/**
- * <p>Returns metadata about all versions of the objects in a bucket. You can also
- * use request parameters as selection criteria to return metadata about a subset
- * of all the object versions.</p> <p> To use this operation, you must
- * have permissions to perform the <code>s3:ListBucketVersions</code> action. Be
- * aware of the name difference. </p> <p> A 200 OK response can
- * contain valid or invalid XML. Make sure to design your application to parse the
- * contents of the response and handle it appropriately.</p> <p>To use this
- * operation, you must have READ access to the bucket.</p> <p>This action is not
- * supported by Amazon S3 on Outposts.</p> <p>The following operations are related
- * to <code>ListObjectVersions</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListObjectVersions that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListObjectVersionsOutcomeCallable ListObjectVersionsCallable(const Model::ListObjectVersionsRequest& request) const;
+ template<typename ListObjectVersionsRequestT = Model::ListObjectVersionsRequest>
+ Model::ListObjectVersionsOutcomeCallable ListObjectVersionsCallable(const ListObjectVersionsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListObjectVersions, request);
+ }
/**
- * <p>Returns metadata about all versions of the objects in a bucket. You can also
- * use request parameters as selection criteria to return metadata about a subset
- * of all the object versions.</p> <p> To use this operation, you must
- * have permissions to perform the <code>s3:ListBucketVersions</code> action. Be
- * aware of the name difference. </p> <p> A 200 OK response can
- * contain valid or invalid XML. Make sure to design your application to parse the
- * contents of the response and handle it appropriately.</p> <p>To use this
- * operation, you must have READ access to the bucket.</p> <p>This action is not
- * supported by Amazon S3 on Outposts.</p> <p>The following operations are related
- * to <code>ListObjectVersions</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectVersions">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListObjectVersions that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListObjectVersionsAsync(const Model::ListObjectVersionsRequest& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListObjectVersionsRequestT = Model::ListObjectVersionsRequest>
+ void ListObjectVersionsAsync(const ListObjectVersionsRequestT& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListObjectVersions, request, handler, context);
+ }
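A sketch of the Callable pattern exposed by the ListObjectVersions wrapper above: the returned future lets the request run in parallel with other work and be joined later with get(). The bucket name is a placeholder and error handling is reduced to a success check.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListObjectVersionsRequest.h>
#include <iostream>

void ListVersions(const Aws::S3::S3Client& client) {
    Aws::S3::Model::ListObjectVersionsRequest request;
    request.SetBucket("example-versioned-bucket");  // hypothetical bucket

    // Callable wrappers return a future; other requests can be issued while it runs.
    auto futureOutcome = client.ListObjectVersionsCallable(request);
    auto outcome = futureOutcome.get();
    if (outcome.IsSuccess()) {
        for (const auto& version : outcome.GetResult().GetVersions()) {
            std::cout << version.GetKey() << " @ " << version.GetVersionId()
                      << (version.GetIsLatest() ? " (latest)" : "") << "\n";
        }
    }
}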
/**
* <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the
@@ -6221,70 +3182,35 @@ namespace Aws
virtual Model::ListObjectsOutcome ListObjects(const Model::ListObjectsRequest& request) const;
/**
- * <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the
- * request parameters as selection criteria to return a subset of the objects in a
- * bucket. A 200 OK response can contain valid or invalid XML. Be sure to design
- * your application to parse the contents of the response and handle it
- * appropriately.</p> <p>This action has been revised. We recommend
- * that you use the newer version, <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>,
- * when developing applications. For backward compatibility, Amazon S3 continues to
- * support <code>ListObjects</code>.</p> <p>The following operations
- * are related to <code>ListObjects</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListObjects that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListObjectsOutcomeCallable ListObjectsCallable(const Model::ListObjectsRequest& request) const;
+ template<typename ListObjectsRequestT = Model::ListObjectsRequest>
+ Model::ListObjectsOutcomeCallable ListObjectsCallable(const ListObjectsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListObjects, request);
+ }
/**
- * <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the
- * request parameters as selection criteria to return a subset of the objects in a
- * bucket. A 200 OK response can contain valid or invalid XML. Be sure to design
- * your application to parse the contents of the response and handle it
- * appropriately.</p> <p>This action has been revised. We recommend
- * that you use the newer version, <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>,
- * when developing applications. For backward compatibility, Amazon S3 continues to
- * support <code>ListObjects</code>.</p> <p>The following operations
- * are related to <code>ListObjects</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjectsV2.html">ListObjectsV2</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjects">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListObjects that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListObjectsAsync(const Model::ListObjectsRequest& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListObjectsRequestT = Model::ListObjectsRequest>
+ void ListObjectsAsync(const ListObjectsRequestT& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListObjects, request, handler, context);
+ }
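For comparison, a plain synchronous call to the legacy ListObjects API; the bucket name and prefix are placeholders, and new code should normally prefer ListObjectsV2 as the documentation above recommends.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListObjectsRequest.h>
#include <iostream>

// Legacy ListObjects; kept by Amazon S3 for backward compatibility.
void ListWithLegacyApi(const Aws::S3::S3Client& client) {
    Aws::S3::Model::ListObjectsRequest request;
    request.SetBucket("example-bucket");   // hypothetical bucket
    request.SetPrefix("logs/");            // optional key prefix filter

    auto outcome = client.ListObjects(request);
    if (outcome.IsSuccess()) {
        for (const auto& object : outcome.GetResult().GetContents()) {
            std::cout << object.GetKey() << " (" << object.GetSize() << " bytes)\n";
        }
    }
}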
/**
- * <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the
- * request parameters as selection criteria to return a subset of the objects in a
- * bucket. A <code>200 OK</code> response can contain valid or invalid XML. Make
- * sure to design your application to parse the contents of the response and handle
- * it appropriately. Objects are returned sorted in an ascending order of the
- * respective key names in the list.</p> <p>To use this operation, you must have
- * READ access to the bucket.</p> <p>To use this action in an AWS Identity and
- * Access Management (IAM) policy, you must have permissions to perform the
+ * <p>Returns some or all (up to 1,000) of the objects in a bucket with each
+ * request. You can use the request parameters as selection criteria to return a
+ * subset of the objects in a bucket. A <code>200 OK</code> response can contain
+ * valid or invalid XML. Make sure to design your application to parse the contents
+ * of the response and handle it appropriately. Objects are returned sorted in an
+ * ascending order of the respective key names in the list. For more information
+ * about listing objects, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/ListingKeysUsingAPIs.html">Listing
+ * object keys programmatically</a> </p> <p>To use this operation, you must have
+ * READ access to the bucket.</p> <p>To use this action in an Identity and Access
+ * Management (IAM) policy, you must have permissions to perform the
* <code>s3:ListBucket</code> action. The bucket owner has this permission by
* default and can grant this permission to others. For more information about
* permissions, see <a
@@ -6312,78 +3238,22 @@ namespace Aws
virtual Model::ListObjectsV2Outcome ListObjectsV2(const Model::ListObjectsV2Request& request) const;
/**
- * <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the
- * request parameters as selection criteria to return a subset of the objects in a
- * bucket. A <code>200 OK</code> response can contain valid or invalid XML. Make
- * sure to design your application to parse the contents of the response and handle
- * it appropriately. Objects are returned sorted in an ascending order of the
- * respective key names in the list.</p> <p>To use this operation, you must have
- * READ access to the bucket.</p> <p>To use this action in an AWS Identity and
- * Access Management (IAM) policy, you must have permissions to perform the
- * <code>s3:ListBucket</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>This
- * section describes the latest revision of this action. We recommend that you use
- * this revised API for application development. For backward compatibility, Amazon
- * S3 continues to support the prior version of this API, <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>.</p>
- * <p>To get a list of your buckets, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>.</p>
- * <p>The following operations are related to <code>ListObjectsV2</code>:</p> <ul>
- * <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListObjectsV2 that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListObjectsV2OutcomeCallable ListObjectsV2Callable(const Model::ListObjectsV2Request& request) const;
+ template<typename ListObjectsV2RequestT = Model::ListObjectsV2Request>
+ Model::ListObjectsV2OutcomeCallable ListObjectsV2Callable(const ListObjectsV2RequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListObjectsV2, request);
+ }
/**
- * <p>Returns some or all (up to 1,000) of the objects in a bucket. You can use the
- * request parameters as selection criteria to return a subset of the objects in a
- * bucket. A <code>200 OK</code> response can contain valid or invalid XML. Make
- * sure to design your application to parse the contents of the response and handle
- * it appropriately. Objects are returned sorted in an ascending order of the
- * respective key names in the list.</p> <p>To use this operation, you must have
- * READ access to the bucket.</p> <p>To use this action in an AWS Identity and
- * Access Management (IAM) policy, you must have permissions to perform the
- * <code>s3:ListBucket</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>This
- * section describes the latest revision of this action. We recommend that you use
- * this revised API for application development. For backward compatibility, Amazon
- * S3 continues to support the prior version of this API, <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListObjects.html">ListObjects</a>.</p>
- * <p>To get a list of your buckets, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBuckets.html">ListBuckets</a>.</p>
- * <p>The following operations are related to <code>ListObjectsV2</code>:</p> <ul>
- * <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListObjectsV2">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListObjectsV2 that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListObjectsV2Async(const Model::ListObjectsV2Request& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListObjectsV2RequestT = Model::ListObjectsV2Request>
+ void ListObjectsV2Async(const ListObjectsV2RequestT& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListObjectsV2, request, handler, context);
+ }
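Because each ListObjectsV2 response carries at most 1,000 keys, a typical caller follows the continuation token. A sketch, assuming a configured client; error handling is reduced to printing the message.

#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListObjectsV2Request.h>
#include <iostream>

// Pages through all keys in a bucket, 1,000 at a time, using the continuation token.
void ListAllKeys(const Aws::S3::S3Client& client, const Aws::String& bucket) {
    Aws::S3::Model::ListObjectsV2Request request;
    request.SetBucket(bucket);

    bool truncated = true;
    while (truncated) {
        auto outcome = client.ListObjectsV2(request);
        if (!outcome.IsSuccess()) {
            std::cerr << outcome.GetError().GetMessage() << "\n";
            return;
        }
        for (const auto& object : outcome.GetResult().GetContents()) {
            std::cout << object.GetKey() << "\n";
        }
        truncated = outcome.GetResult().GetIsTruncated();
        if (truncated) {
            request.SetContinuationToken(outcome.GetResult().GetNextContinuationToken());
        }
    }
}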
/**
* <p>Lists the parts that have been uploaded for a specific multipart upload. This
@@ -6398,7 +3268,9 @@ namespace Aws
* <code>NextPartNumberMarker</code> element. In subsequent <code>ListParts</code>
* requests you can include the part-number-marker query string parameter and set
* its value to the <code>NextPartNumberMarker</code> field value from the previous
- * response.</p> <p>For more information on multipart uploads, see <a
+ * response.</p> <p>If the upload was created using a checksum algorithm, you will
+ * need to have permission to the <code>kms:Decrypt</code> action for the request
+ * to succeed. </p> <p>For more information on multipart uploads, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
* Objects Using Multipart Upload</a>.</p> <p>For information on permissions
* required to use the multipart upload API, see <a
@@ -6413,6 +3285,8 @@ namespace Aws
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
* </p> </li> <li> <p> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAttributes.html">GetObjectAttributes</a>
+ * </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
* </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts">AWS API
@@ -6421,86 +3295,30 @@ namespace Aws
virtual Model::ListPartsOutcome ListParts(const Model::ListPartsRequest& request) const;
/**
- * <p>Lists the parts that have been uploaded for a specific multipart upload. This
- * operation must include the upload ID, which you obtain by sending the initiate
- * multipart upload request (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>).
- * This request returns a maximum of 1,000 uploaded parts. The default number of
- * parts returned is 1,000 parts. You can restrict the number of parts returned by
- * specifying the <code>max-parts</code> request parameter. If your multipart
- * upload consists of more than 1,000 parts, the response returns an
- * <code>IsTruncated</code> field with the value of true, and a
- * <code>NextPartNumberMarker</code> element. In subsequent <code>ListParts</code>
- * requests you can include the part-number-marker query string parameter and set
- * its value to the <code>NextPartNumberMarker</code> field value from the previous
- * response.</p> <p>For more information on multipart uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a>.</p> <p>For information on permissions
- * required to use the multipart upload API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>The following operations are related to
- * <code>ListParts</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for ListParts that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::ListPartsOutcomeCallable ListPartsCallable(const Model::ListPartsRequest& request) const;
+ template<typename ListPartsRequestT = Model::ListPartsRequest>
+ Model::ListPartsOutcomeCallable ListPartsCallable(const ListPartsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::ListParts, request);
+ }
/**
- * <p>Lists the parts that have been uploaded for a specific multipart upload. This
- * operation must include the upload ID, which you obtain by sending the initiate
- * multipart upload request (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>).
- * This request returns a maximum of 1,000 uploaded parts. The default number of
- * parts returned is 1,000 parts. You can restrict the number of parts returned by
- * specifying the <code>max-parts</code> request parameter. If your multipart
- * upload consists of more than 1,000 parts, the response returns an
- * <code>IsTruncated</code> field with the value of true, and a
- * <code>NextPartNumberMarker</code> element. In subsequent <code>ListParts</code>
- * requests you can include the part-number-marker query string parameter and set
- * its value to the <code>NextPartNumberMarker</code> field value from the previous
- * response.</p> <p>For more information on multipart uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a>.</p> <p>For information on permissions
- * required to use the multipart upload API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a>.</p> <p>The following operations are related to
- * <code>ListParts</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ListParts">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for ListParts that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void ListPartsAsync(const Model::ListPartsRequest& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename ListPartsRequestT = Model::ListPartsRequest>
+ void ListPartsAsync(const ListPartsRequestT& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::ListParts, request, handler, context);
+ }
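A sketch of paging through the parts of an in-progress multipart upload via NextPartNumberMarker, as the documentation above describes; the bucket, key, and upload ID are caller-supplied placeholders.

#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListPartsRequest.h>
#include <iostream>

// Lists every uploaded part of a multipart upload, following the part-number marker.
void ListUploadedParts(const Aws::S3::S3Client& client,
                       const Aws::String& bucket,
                       const Aws::String& key,
                       const Aws::String& uploadId) {
    Aws::S3::Model::ListPartsRequest request;
    request.SetBucket(bucket);
    request.SetKey(key);
    request.SetUploadId(uploadId);  // obtained from CreateMultipartUpload

    while (true) {
        auto outcome = client.ListParts(request);
        if (!outcome.IsSuccess()) {
            std::cerr << outcome.GetError().GetMessage() << "\n";
            return;
        }
        for (const auto& part : outcome.GetResult().GetParts()) {
            std::cout << "part " << part.GetPartNumber() << " etag " << part.GetETag() << "\n";
        }
        if (!outcome.GetResult().GetIsTruncated()) {
            break;
        }
        request.SetPartNumberMarker(outcome.GetResult().GetNextPartNumberMarker());
    }
}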
/**
* <p>Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
* Acceleration is a bucket-level feature that enables you to perform faster data
* transfers to Amazon S3.</p> <p> To use this operation, you must have permission
- * to perform the s3:PutAccelerateConfiguration action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
+ * to perform the <code>s3:PutAccelerateConfiguration</code> action. The bucket
+ * owner has this permission by default. The bucket owner can grant this permission
+ * to others. For more information about permissions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
* Related to Bucket Subresource Operations</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
@@ -6529,76 +3347,22 @@ namespace Aws
virtual Model::PutBucketAccelerateConfigurationOutcome PutBucketAccelerateConfiguration(const Model::PutBucketAccelerateConfigurationRequest& request) const;
/**
- * <p>Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
- * Acceleration is a bucket-level feature that enables you to perform faster data
- * transfers to Amazon S3.</p> <p> To use this operation, you must have permission
- * to perform the s3:PutAccelerateConfiguration action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> The Transfer
- * Acceleration state of a bucket can be set to one of the following two
- * values:</p> <ul> <li> <p> Enabled – Enables accelerated data transfers to the
- * bucket.</p> </li> <li> <p> Suspended – Disables accelerated data transfers to
- * the bucket.</p> </li> </ul> <p>The <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html">GetBucketAccelerateConfiguration</a>
- * action returns the transfer acceleration state of a bucket.</p> <p>After setting
- * the Transfer Acceleration state of a bucket to Enabled, it might take up to
- * thirty minutes before the data transfer rates to the bucket increase.</p> <p>
- * The name of the bucket used for Transfer Acceleration must be DNS-compliant and
- * must not contain periods (".").</p> <p> For more information about transfer
- * acceleration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html">Transfer
- * Acceleration</a>.</p> <p>The following operations are related to
- * <code>PutBucketAccelerateConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html">GetBucketAccelerateConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketAccelerateConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketAccelerateConfigurationOutcomeCallable PutBucketAccelerateConfigurationCallable(const Model::PutBucketAccelerateConfigurationRequest& request) const;
+ template<typename PutBucketAccelerateConfigurationRequestT = Model::PutBucketAccelerateConfigurationRequest>
+ Model::PutBucketAccelerateConfigurationOutcomeCallable PutBucketAccelerateConfigurationCallable(const PutBucketAccelerateConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketAccelerateConfiguration, request);
+ }
/**
- * <p>Sets the accelerate configuration of an existing bucket. Amazon S3 Transfer
- * Acceleration is a bucket-level feature that enables you to perform faster data
- * transfers to Amazon S3.</p> <p> To use this operation, you must have permission
- * to perform the s3:PutAccelerateConfiguration action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> The Transfer
- * Acceleration state of a bucket can be set to one of the following two
- * values:</p> <ul> <li> <p> Enabled – Enables accelerated data transfers to the
- * bucket.</p> </li> <li> <p> Suspended – Disables accelerated data transfers to
- * the bucket.</p> </li> </ul> <p>The <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html">GetBucketAccelerateConfiguration</a>
- * action returns the transfer acceleration state of a bucket.</p> <p>After setting
- * the Transfer Acceleration state of a bucket to Enabled, it might take up to
- * thirty minutes before the data transfer rates to the bucket increase.</p> <p>
- * The name of the bucket used for Transfer Acceleration must be DNS-compliant and
- * must not contain periods (".").</p> <p> For more information about transfer
- * acceleration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/transfer-acceleration.html">Transfer
- * Acceleration</a>.</p> <p>The following operations are related to
- * <code>PutBucketAccelerateConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAccelerateConfiguration.html">GetBucketAccelerateConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAccelerateConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketAccelerateConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketAccelerateConfigurationAsync(const Model::PutBucketAccelerateConfigurationRequest& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketAccelerateConfigurationRequestT = Model::PutBucketAccelerateConfigurationRequest>
+ void PutBucketAccelerateConfigurationAsync(const PutBucketAccelerateConfigurationRequestT& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketAccelerateConfiguration, request, handler, context);
+ }
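A sketch of enabling Transfer Acceleration through this operation; the bucket name is a placeholder and, per the notes above, must be DNS-compliant and contain no periods.

#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/AccelerateConfiguration.h>
#include <aws/s3/model/PutBucketAccelerateConfigurationRequest.h>

// Sets the bucket's accelerate status to Enabled (use Suspended to turn it off).
bool EnableTransferAcceleration(const Aws::S3::S3Client& client, const Aws::String& bucket) {
    Aws::S3::Model::AccelerateConfiguration config;
    config.SetStatus(Aws::S3::Model::BucketAccelerateStatus::Enabled);

    Aws::S3::Model::PutBucketAccelerateConfigurationRequest request;
    request.SetBucket(bucket);
    request.SetAccelerateConfiguration(config);

    return client.PutBucketAccelerateConfiguration(request).IsSuccess();
}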
/**
* <p>Sets the permissions on an existing bucket using access control lists (ACL).
@@ -6612,9 +3376,16 @@ namespace Aws
* headers.</p> <p>Depending on your application needs, you may choose to
* set the ACL on a bucket using either the request body or the headers. For
* example, if you have an existing application that updates a bucket ACL using the
- * request body, then you can continue to use that approach.</p> <p> <b>Access
- * Permissions</b> </p> <p>You can set access permissions using one of the
- * following methods:</p> <ul> <li> <p>Specify a canned ACL with the
+ * request body, then you can continue to use that approach.</p> <p>If
+ * your bucket uses the bucket owner enforced setting for S3 Object Ownership, ACLs
+ * are disabled and no longer affect permissions. You must use policies to grant
+ * access to your bucket and the objects in it. Requests to set ACLs or update ACLs
+ * fail and return the <code>AccessControlListNotSupported</code> error code.
+ * Requests to read ACLs are still supported. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">Controlling
+ * object ownership</a> in the <i>Amazon S3 User Guide</i>.</p> <p>
+ * <b>Access Permissions</b> </p> <p>You can set access permissions using one of
+ * the following methods:</p> <ul> <li> <p>Specify a canned ACL with the
* <code>x-amz-acl</code> request header. Amazon S3 supports a set of predefined
* ACLs, known as <i>canned ACLs</i>. Each canned ACL has a predefined set of
* grantees and permissions. Specify the canned ACL name as the value of
@@ -6625,34 +3396,37 @@ namespace Aws
* <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
* <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
* headers. When using these headers, you specify explicit access permissions and
- * grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
- * you use these ACL-specific headers, you cannot use the <code>x-amz-acl</code>
- * header to set a canned ACL. These parameters map to the set of permissions that
- * Amazon S3 supports in an ACL. For more information, see <a
+ * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+ * permission. If you use these ACL-specific headers, you cannot use the
+ * <code>x-amz-acl</code> header to set a canned ACL. These parameters map to the
+ * set of permissions that Amazon S3 supports in an ACL. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
* Control List (ACL) Overview</a>.</p> <p>You specify each grantee as a type=value
* pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * – if the value specified is the canonical user ID of an Amazon Web Services
+ * account</p> </li> <li> <p> <code>uri</code> – if you are granting permissions to
+ * a predefined group</p> </li> <li> <p> <code>emailAddress</code> – if the value
+ * specified is the email address of an Amazon Web Services account</p>
+ * <p>Using email addresses to specify a grantee is only supported in the following
+ * Amazon Web Services Regions: </p> <ul> <li> <p>US East (N. Virginia)</p> </li>
+ * <li> <p>US West (N. California)</p> </li> <li> <p> US West (Oregon)</p> </li>
+ * <li> <p> Asia Pacific (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p>
+ * </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li>
+ * <li> <p>South America (São Paulo)</p> </li> </ul> <p>For a list of all the
+ * Amazon S3 supported Regions and endpoints, see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-write</code> header grants create,
- * overwrite, and delete objects permission to LogDelivery group predefined by
- * Amazon S3 and two AWS accounts identified by their email addresses.</p> <p>
- * <code>x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
- * id="111122223333", id="555566667777" </code> </p> </li> </ul> <p>You can use
- * either a canned ACL or specify access permissions explicitly. You cannot do
- * both.</p> <p> <b>Grantee Values</b> </p> <p>You can specify the person (grantee)
- * to whom you're assigning access rights (using request elements) in the following
- * ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
+ * </li> </ul> <p>For example, the following <code>x-amz-grant-write</code> header
+ * grants create, overwrite, and delete objects permission to LogDelivery group
+ * predefined by Amazon S3 and two Amazon Web Services accounts identified by their
+ * email addresses.</p> <p> <code>x-amz-grant-write:
+ * uri="http://acs.amazonaws.com/groups/s3/LogDelivery", id="111122223333",
+ * id="555566667777" </code> </p> </li> </ul> <p>You can use either a canned ACL or
+ * specify access permissions explicitly. You cannot do both.</p> <p> <b>Grantee
+ * Values</b> </p> <p>You can specify the person (grantee) to whom you're assigning
+ * access rights (using request elements) in the following ways:</p> <ul> <li>
+ * <p>By the person's ID:</p> <p> <code>&lt;Grantee
* xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
* xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
* &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
@@ -6664,16 +3438,16 @@ namespace Aws
* xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;lt;/Grantee&gt;</code>
* </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
* Object acl request, appears as the CanonicalUser. </p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
+ * addresses to specify a grantee is only supported in the following Amazon Web
+ * Services Regions: </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US
+ * West (N. California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia
+ * Pacific (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li>
+ * <p>Asia Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li>
+ * <p>South America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3
+ * supported Regions and endpoints, see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
+ * </li> </ul> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
@@ -6686,178 +3460,22 @@ namespace Aws
virtual Model::PutBucketAclOutcome PutBucketAcl(const Model::PutBucketAclRequest& request) const;
/**
- * <p>Sets the permissions on an existing bucket using access control lists (ACL).
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Using
- * ACLs</a>. To set the ACL of a bucket, you must have <code>WRITE_ACP</code>
- * permission.</p> <p>You can use one of the following two ways to set a bucket's
- * permissions:</p> <ul> <li> <p>Specify the ACL in the request body</p> </li> <li>
- * <p>Specify permissions using request headers</p> </li> </ul> <p>You
- * cannot specify access permission using both the body and the request
- * headers.</p> <p>Depending on your application needs, you may choose to
- * set the ACL on a bucket using either the request body or the headers. For
- * example, if you have an existing application that updates a bucket ACL using the
- * request body, then you can continue to use that approach.</p> <p> <b>Access
- * Permissions</b> </p> <p>You can set access permissions using one of the
- * following methods:</p> <ul> <li> <p>Specify a canned ACL with the
- * <code>x-amz-acl</code> request header. Amazon S3 supports a set of predefined
- * ACLs, known as <i>canned ACLs</i>. Each canned ACL has a predefined set of
- * grantees and permissions. Specify the canned ACL name as the value of
- * <code>x-amz-acl</code>. If you use this header, you cannot use other access
- * control-specific headers in your request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly with the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
- * <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
- * headers. When using these headers, you specify explicit access permissions and
- * grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
- * you use these ACL-specific headers, you cannot use the <code>x-amz-acl</code>
- * header to set a canned ACL. These parameters map to the set of permissions that
- * Amazon S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>.</p> <p>You specify each grantee as a type=value
- * pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-write</code> header grants create,
- * overwrite, and delete objects permission to LogDelivery group predefined by
- * Amazon S3 and two AWS accounts identified by their email addresses.</p> <p>
- * <code>x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
- * id="111122223333", id="555566667777" </code> </p> </li> </ul> <p>You can use
- * either a canned ACL or specify access permissions explicitly. You cannot do
- * both.</p> <p> <b>Grantee Values</b> </p> <p>You can specify the person (grantee)
- * to whom you're assigning access rights (using request elements) in the following
- * ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
- * &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
- * request</p> </li> <li> <p>By URI:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="Group"&gt;&lt;URI&gt;&lt;&gt;http://acs.amazonaws.com/groups/global/AuthenticatedUsers&lt;&gt;&lt;/URI&gt;&lt;/Grantee&gt;</code>
- * </p> </li> <li> <p>By Email address:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;lt;/Grantee&gt;</code>
- * </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
- * Object acl request, appears as the CanonicalUser. </p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketAcl that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketAclOutcomeCallable PutBucketAclCallable(const Model::PutBucketAclRequest& request) const;
+ template<typename PutBucketAclRequestT = Model::PutBucketAclRequest>
+ Model::PutBucketAclOutcomeCallable PutBucketAclCallable(const PutBucketAclRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketAcl, request);
+ }
/**
- * <p>Sets the permissions on an existing bucket using access control lists (ACL).
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Using
- * ACLs</a>. To set the ACL of a bucket, you must have <code>WRITE_ACP</code>
- * permission.</p> <p>You can use one of the following two ways to set a bucket's
- * permissions:</p> <ul> <li> <p>Specify the ACL in the request body</p> </li> <li>
- * <p>Specify permissions using request headers</p> </li> </ul> <p>You
- * cannot specify access permission using both the body and the request
- * headers.</p> <p>Depending on your application needs, you may choose to
- * set the ACL on a bucket using either the request body or the headers. For
- * example, if you have an existing application that updates a bucket ACL using the
- * request body, then you can continue to use that approach.</p> <p> <b>Access
- * Permissions</b> </p> <p>You can set access permissions using one of the
- * following methods:</p> <ul> <li> <p>Specify a canned ACL with the
- * <code>x-amz-acl</code> request header. Amazon S3 supports a set of predefined
- * ACLs, known as <i>canned ACLs</i>. Each canned ACL has a predefined set of
- * grantees and permissions. Specify the canned ACL name as the value of
- * <code>x-amz-acl</code>. If you use this header, you cannot use other access
- * control-specific headers in your request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly with the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
- * <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
- * headers. When using these headers, you specify explicit access permissions and
- * grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
- * you use these ACL-specific headers, you cannot use the <code>x-amz-acl</code>
- * header to set a canned ACL. These parameters map to the set of permissions that
- * Amazon S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>.</p> <p>You specify each grantee as a type=value
- * pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-write</code> header grants create,
- * overwrite, and delete objects permission to LogDelivery group predefined by
- * Amazon S3 and two AWS accounts identified by their email addresses.</p> <p>
- * <code>x-amz-grant-write: uri="http://acs.amazonaws.com/groups/s3/LogDelivery",
- * id="111122223333", id="555566667777" </code> </p> </li> </ul> <p>You can use
- * either a canned ACL or specify access permissions explicitly. You cannot do
- * both.</p> <p> <b>Grantee Values</b> </p> <p>You can specify the person (grantee)
- * to whom you're assigning access rights (using request elements) in the following
- * ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
- * &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
- * request</p> </li> <li> <p>By URI:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="Group"&gt;&lt;URI&gt;&lt;&gt;http://acs.amazonaws.com/groups/global/AuthenticatedUsers&lt;&gt;&lt;/URI&gt;&lt;/Grantee&gt;</code>
- * </p> </li> <li> <p>By Email address:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;lt;/Grantee&gt;</code>
- * </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
- * Object acl request, appears as the CanonicalUser. </p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectAcl.html">GetObjectAcl</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAcl">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketAcl that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketAclAsync(const Model::PutBucketAclRequest& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketAclRequestT = Model::PutBucketAclRequest>
+ void PutBucketAclAsync(const PutBucketAclRequestT& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketAcl, request, handler, context);
+ }
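A sketch of the canned-ACL path (the x-amz-acl header) described above; the bucket name and the choice of the private canned ACL are illustrative, and buckets using the bucket owner enforced Object Ownership setting will reject the call.

#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketAclRequest.h>

// Applies a canned ACL; a canned ACL and explicit grant headers cannot be combined.
bool MakeBucketPrivate(const Aws::S3::S3Client& client, const Aws::String& bucket) {
    Aws::S3::Model::PutBucketAclRequest request;
    request.SetBucket(bucket);
    request.SetACL(Aws::S3::Model::BucketCannedACL::private_);  // canned ACL, not explicit grants

    return client.PutBucketAcl(request).IsSuccess();
}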
/**
* <p>Sets an analytics configuration for the bucket (specified by the analytics
@@ -6910,108 +3528,22 @@ namespace Aws
virtual Model::PutBucketAnalyticsConfigurationOutcome PutBucketAnalyticsConfiguration(const Model::PutBucketAnalyticsConfigurationRequest& request) const;
/**
- * <p>Sets an analytics configuration for the bucket (specified by the analytics
- * configuration ID). You can have up to 1,000 analytics configurations per
- * bucket.</p> <p>You can choose to have storage class analysis export analysis
- * reports sent to a comma-separated values (CSV) flat file. See the
- * <code>DataExport</code> request element. Reports are updated daily and are based
- * on the object filters that you configure. When selecting data export, you
- * specify a destination bucket and an optional destination prefix where the file
- * is written. You can export the data to a destination bucket in a different
- * account. However, the destination bucket must be in the same Region as the
- * bucket that you are making the PUT analytics configuration to. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a>. </p> <p>You must create a
- * bucket policy on the destination bucket where the exported file is written to
- * grant permissions to Amazon S3 to write objects to the bucket. For an example
- * policy, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9">Granting
- * Permissions for Amazon S3 Inventory and Storage Class Analysis</a>.</p>
- * <p>To use this operation, you must have permissions to perform the
- * <code>s3:PutAnalyticsConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p class="title">
- * <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p> <i>HTTP Error: HTTP 400 Bad
- * Request</i> </p> </li> <li> <p> <i>Code: InvalidArgument</i> </p> </li> <li> <p>
- * <i>Cause: Invalid argument.</i> </p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>HTTP Error: HTTP 400 Bad Request</i> </p> </li> <li> <p> <i>Code:
- * TooManyConfigurations</i> </p> </li> <li> <p> <i>Cause: You are attempting to
- * create a new configuration but have already reached the 1,000-configuration
- * limit.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>HTTP Error: HTTP 403
- * Forbidden</i> </p> </li> <li> <p> <i>Code: AccessDenied</i> </p> </li> <li> <p>
- * <i>Cause: You are not the owner of the specified bucket, or you do not have the
- * s3:PutAnalyticsConfiguration bucket permission to set the configuration on the
- * bucket.</i> </p> </li> </ul> </li> </ul> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html">GetBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html">DeleteBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html">ListBucketAnalyticsConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketAnalyticsConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketAnalyticsConfigurationOutcomeCallable PutBucketAnalyticsConfigurationCallable(const Model::PutBucketAnalyticsConfigurationRequest& request) const;
+ template<typename PutBucketAnalyticsConfigurationRequestT = Model::PutBucketAnalyticsConfigurationRequest>
+ Model::PutBucketAnalyticsConfigurationOutcomeCallable PutBucketAnalyticsConfigurationCallable(const PutBucketAnalyticsConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketAnalyticsConfiguration, request);
+ }
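      // A minimal usage sketch for the templated Callable wrapper above: it queues
      // the call on the client's executor and returns the operation's
      // OutcomeCallable (a std::future over the outcome). The bucket name and
      // configuration ID are illustrative assumptions, and the analytics payload
      // itself is left out.
      //
      //   #include <aws/s3/S3Client.h>
      //   #include <aws/s3/model/PutBucketAnalyticsConfigurationRequest.h>
      //
      //   void PutAnalyticsConfigurationInParallel(const Aws::S3::S3Client& client)
      //   {
      //       Aws::S3::Model::PutBucketAnalyticsConfigurationRequest request;
      //       request.SetBucket("example-source-bucket");   // hypothetical bucket
      //       request.SetId("example-analytics-id");        // analytics configuration ID
      //       // request.SetAnalyticsConfiguration(...) would carry the storage class
      //       // analysis / DataExport settings described in the comment above.
      //
      //       auto pending = client.PutBucketAnalyticsConfigurationCallable(request);
      //
      //       // ... other requests can be issued here while this one is in flight ...
      //
      //       auto outcome = pending.get();                 // blocks until the PUT completes
      //       if (!outcome.IsSuccess()) {
      //           // e.g. InvalidArgument, TooManyConfigurations or AccessDenied
      //           auto message = outcome.GetError().GetMessage();
      //           (void)message;
      //       }
      //   }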
/**
- * <p>Sets an analytics configuration for the bucket (specified by the analytics
- * configuration ID). You can have up to 1,000 analytics configurations per
- * bucket.</p> <p>You can choose to have storage class analysis export analysis
- * reports sent to a comma-separated values (CSV) flat file. See the
- * <code>DataExport</code> request element. Reports are updated daily and are based
- * on the object filters that you configure. When selecting data export, you
- * specify a destination bucket and an optional destination prefix where the file
- * is written. You can export the data to a destination bucket in a different
- * account. However, the destination bucket must be in the same Region as the
- * bucket that you are making the PUT analytics configuration to. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/analytics-storage-class.html">Amazon
- * S3 Analytics – Storage Class Analysis</a>. </p> <p>You must create a
- * bucket policy on the destination bucket where the exported file is written to
- * grant permissions to Amazon S3 to write objects to the bucket. For an example
- * policy, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9">Granting
- * Permissions for Amazon S3 Inventory and Storage Class Analysis</a>.</p>
- * <p>To use this operation, you must have permissions to perform the
- * <code>s3:PutAnalyticsConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p class="title">
- * <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p> <i>HTTP Error: HTTP 400 Bad
- * Request</i> </p> </li> <li> <p> <i>Code: InvalidArgument</i> </p> </li> <li> <p>
- * <i>Cause: Invalid argument.</i> </p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>HTTP Error: HTTP 400 Bad Request</i> </p> </li> <li> <p> <i>Code:
- * TooManyConfigurations</i> </p> </li> <li> <p> <i>Cause: You are attempting to
- * create a new configuration but have already reached the 1,000-configuration
- * limit.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>HTTP Error: HTTP 403
- * Forbidden</i> </p> </li> <li> <p> <i>Code: AccessDenied</i> </p> </li> <li> <p>
- * <i>Cause: You are not the owner of the specified bucket, or you do not have the
- * s3:PutAnalyticsConfiguration bucket permission to set the configuration on the
- * bucket.</i> </p> </li> </ul> </li> </ul> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketAnalyticsConfiguration.html">GetBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketAnalyticsConfiguration.html">DeleteBucketAnalyticsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketAnalyticsConfigurations.html">ListBucketAnalyticsConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketAnalyticsConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketAnalyticsConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketAnalyticsConfigurationAsync(const Model::PutBucketAnalyticsConfigurationRequest& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketAnalyticsConfigurationRequestT = Model::PutBucketAnalyticsConfigurationRequest>
+ void PutBucketAnalyticsConfigurationAsync(const PutBucketAnalyticsConfigurationRequestT& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketAnalyticsConfiguration, request, handler, context);
+ }
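      // A minimal sketch of the Async wrapper above, assuming a caller-supplied
      // request; the lambda matches the shape of
      // PutBucketAnalyticsConfigurationResponseReceivedHandler and is invoked on
      // the client's executor once the operation finishes.
      //
      //   #include <aws/s3/S3Client.h>
      //   #include <aws/s3/model/PutBucketAnalyticsConfigurationRequest.h>
      //   #include <aws/core/client/AsyncCallerContext.h>
      //   #include <aws/core/utils/memory/stl/AWSAllocator.h>
      //
      //   void PutAnalyticsConfigurationAsyncExample(
      //       const Aws::S3::S3Client& client,
      //       const Aws::S3::Model::PutBucketAnalyticsConfigurationRequest& request)
      //   {
      //       // Optional context object, handy for correlating the callback with the request.
      //       auto context = Aws::MakeShared<Aws::Client::AsyncCallerContext>("PutAnalyticsExample");
      //       context->SetUUID(request.GetId());
      //
      //       client.PutBucketAnalyticsConfigurationAsync(
      //           request,
      //           [](const Aws::S3::S3Client* /*client*/,
      //              const Aws::S3::Model::PutBucketAnalyticsConfigurationRequest& /*req*/,
      //              const Aws::S3::Model::PutBucketAnalyticsConfigurationOutcome& outcome,
      //              const std::shared_ptr<const Aws::Client::AsyncCallerContext>& ctx)
      //           {
      //               if (!outcome.IsSuccess()) {
      //                   // handle the error, e.g. log outcome.GetError().GetMessage()
      //               }
      //               (void)ctx;
      //           },
      //           context);
      //   }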
/**
* <p>Sets the <code>cors</code> configuration for your bucket. If the
@@ -7055,107 +3587,43 @@ namespace Aws
virtual Model::PutBucketCorsOutcome PutBucketCors(const Model::PutBucketCorsRequest& request) const;
/**
- * <p>Sets the <code>cors</code> configuration for your bucket. If the
- * configuration exists, Amazon S3 replaces it.</p> <p>To use this operation, you
- * must be allowed to perform the <code>s3:PutBucketCORS</code> action. By default,
- * the bucket owner has this permission and can grant it to others.</p> <p>You set
- * this configuration on a bucket so that the bucket can service cross-origin
- * requests. For example, you might want to enable a request whose origin is
- * <code>http://www.example.com</code> to access your Amazon S3 bucket at
- * <code>my.example.bucket.com</code> by using the browser's
- * <code>XMLHttpRequest</code> capability.</p> <p>To enable cross-origin resource
- * sharing (CORS) on a bucket, you add the <code>cors</code> subresource to the
- * bucket. The <code>cors</code> subresource is an XML document in which you
- * configure rules that identify origins and the HTTP methods that can be executed
- * on your bucket. The document is limited to 64 KB in size. </p> <p>When Amazon S3
- * receives a cross-origin request (or a pre-flight OPTIONS request) against a
- * bucket, it evaluates the <code>cors</code> configuration on the bucket and uses
- * the first <code>CORSRule</code> rule that matches the incoming browser request
- * to enable a cross-origin request. For a rule to match, the following conditions
- * must be met:</p> <ul> <li> <p>The request's <code>Origin</code> header must
- * match <code>AllowedOrigin</code> elements.</p> </li> <li> <p>The request method
- * (for example, GET, PUT, HEAD, and so on) or the
- * <code>Access-Control-Request-Method</code> header in case of a pre-flight
- * <code>OPTIONS</code> request must be one of the <code>AllowedMethod</code>
- * elements. </p> </li> <li> <p>Every header specified in the
- * <code>Access-Control-Request-Headers</code> request header of a pre-flight
- * request must match an <code>AllowedHeader</code> element. </p> </li> </ul> <p>
- * For more information about CORS, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html">Enabling
- * Cross-Origin Resource Sharing</a> in the <i>Amazon S3 User Guide</i>.</p> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html">GetBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html">DeleteBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html">RESTOPTIONSobject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketCors that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketCorsOutcomeCallable PutBucketCorsCallable(const Model::PutBucketCorsRequest& request) const;
+ template<typename PutBucketCorsRequestT = Model::PutBucketCorsRequest>
+ Model::PutBucketCorsOutcomeCallable PutBucketCorsCallable(const PutBucketCorsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketCors, request);
+ }
/**
- * <p>Sets the <code>cors</code> configuration for your bucket. If the
- * configuration exists, Amazon S3 replaces it.</p> <p>To use this operation, you
- * must be allowed to perform the <code>s3:PutBucketCORS</code> action. By default,
- * the bucket owner has this permission and can grant it to others.</p> <p>You set
- * this configuration on a bucket so that the bucket can service cross-origin
- * requests. For example, you might want to enable a request whose origin is
- * <code>http://www.example.com</code> to access your Amazon S3 bucket at
- * <code>my.example.bucket.com</code> by using the browser's
- * <code>XMLHttpRequest</code> capability.</p> <p>To enable cross-origin resource
- * sharing (CORS) on a bucket, you add the <code>cors</code> subresource to the
- * bucket. The <code>cors</code> subresource is an XML document in which you
- * configure rules that identify origins and the HTTP methods that can be executed
- * on your bucket. The document is limited to 64 KB in size. </p> <p>When Amazon S3
- * receives a cross-origin request (or a pre-flight OPTIONS request) against a
- * bucket, it evaluates the <code>cors</code> configuration on the bucket and uses
- * the first <code>CORSRule</code> rule that matches the incoming browser request
- * to enable a cross-origin request. For a rule to match, the following conditions
- * must be met:</p> <ul> <li> <p>The request's <code>Origin</code> header must
- * match <code>AllowedOrigin</code> elements.</p> </li> <li> <p>The request method
- * (for example, GET, PUT, HEAD, and so on) or the
- * <code>Access-Control-Request-Method</code> header in case of a pre-flight
- * <code>OPTIONS</code> request must be one of the <code>AllowedMethod</code>
- * elements. </p> </li> <li> <p>Every header specified in the
- * <code>Access-Control-Request-Headers</code> request header of a pre-flight
- * request must match an <code>AllowedHeader</code> element. </p> </li> </ul> <p>
- * For more information about CORS, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cors.html">Enabling
- * Cross-Origin Resource Sharing</a> in the <i>Amazon S3 User Guide</i>.</p> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketCors.html">GetBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketCors.html">DeleteBucketCors</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTOPTIONSobject.html">RESTOPTIONSobject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketCors">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketCors that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketCorsAsync(const Model::PutBucketCorsRequest& request, const PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketCorsRequestT = Model::PutBucketCorsRequest>
+ void PutBucketCorsAsync(const PutBucketCorsRequestT& request, const PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketCors, request, handler, context);
+ }
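      // A minimal sketch of putting the cors subresource described above through
      // the synchronous entry point. The origin, methods, header wildcard and
      // bucket name are illustrative, and the setters are assumed to follow the
      // SDK's generated Set/Add naming.
      //
      //   #include <aws/s3/S3Client.h>
      //   #include <aws/s3/model/PutBucketCorsRequest.h>
      //   #include <aws/s3/model/CORSConfiguration.h>
      //   #include <aws/s3/model/CORSRule.h>
      //
      //   bool PutCorsExample(const Aws::S3::S3Client& client)
      //   {
      //       Aws::S3::Model::CORSRule rule;
      //       rule.AddAllowedOrigins("http://www.example.com");  // matched against the Origin header
      //       rule.AddAllowedMethods("GET");                     // or Access-Control-Request-Method
      //       rule.AddAllowedMethods("PUT");
      //       rule.AddAllowedHeaders("*");                       // matched against Access-Control-Request-Headers
      //
      //       Aws::S3::Model::CORSConfiguration config;
      //       config.AddCORSRules(rule);
      //
      //       Aws::S3::Model::PutBucketCorsRequest request;
      //       request.SetBucket("my.example.bucket.com");        // hypothetical bucket
      //       request.SetCORSConfiguration(config);
      //
      //       return client.PutBucketCors(request).IsSuccess();
      //   }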
/**
* <p>This action uses the <code>encryption</code> subresource to configure default
* encryption and Amazon S3 Bucket Key for an existing bucket.</p> <p>Default
* encryption for a bucket can use server-side encryption with Amazon S3-managed
- * keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default
- * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For
- * information about default encryption, see <a
+ * keys (SSE-S3) or customer managed keys (SSE-KMS). If you specify default
+ * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. When the
+ * default encryption is SSE-KMS, if you upload an object to the bucket and do not
+ * specify the KMS key to use for encryption, Amazon S3 uses the default Amazon Web
+ * Services managed KMS key for your account. For information about default
+ * encryption, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 default bucket encryption</a> in the <i>Amazon S3 User Guide</i>. For more
* information about S3 Bucket Keys, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
* Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p>This
- * action requires AWS Signature Version 4. For more information, see <a
+ * action requires Amazon Web Services Signature Version 4. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">
- * Authenticating Requests (AWS Signature Version 4)</a>. </p> <p>To
- * use this operation, you must have permissions to perform the
+ * Authenticating Requests (Amazon Web Services Signature Version 4)</a>. </p>
+ * <p>To use this operation, you must have permissions to perform the
* <code>s3:PutEncryptionConfiguration</code> action. The bucket owner has this
* permission by default. The bucket owner can grant this permission to others. For
* more information about permissions, see <a
@@ -7174,89 +3642,38 @@ namespace Aws
virtual Model::PutBucketEncryptionOutcome PutBucketEncryption(const Model::PutBucketEncryptionRequest& request) const;
/**
- * <p>This action uses the <code>encryption</code> subresource to configure default
- * encryption and Amazon S3 Bucket Key for an existing bucket.</p> <p>Default
- * encryption for a bucket can use server-side encryption with Amazon S3-managed
- * keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default
- * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For
- * information about default encryption, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
- * S3 default bucket encryption</a> in the <i>Amazon S3 User Guide</i>. For more
- * information about S3 Bucket Keys, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
- * Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p>This
- * action requires AWS Signature Version 4. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">
- * Authenticating Requests (AWS Signature Version 4)</a>. </p> <p>To
- * use this operation, you must have permissions to perform the
- * <code>s3:PutEncryptionConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the Amazon S3 User Guide.
- * </p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html">GetBucketEncryption</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html">DeleteBucketEncryption</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketEncryption that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketEncryptionOutcomeCallable PutBucketEncryptionCallable(const Model::PutBucketEncryptionRequest& request) const;
+ template<typename PutBucketEncryptionRequestT = Model::PutBucketEncryptionRequest>
+ Model::PutBucketEncryptionOutcomeCallable PutBucketEncryptionCallable(const PutBucketEncryptionRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketEncryption, request);
+ }
/**
- * <p>This action uses the <code>encryption</code> subresource to configure default
- * encryption and Amazon S3 Bucket Key for an existing bucket.</p> <p>Default
- * encryption for a bucket can use server-side encryption with Amazon S3-managed
- * keys (SSE-S3) or AWS KMS customer master keys (SSE-KMS). If you specify default
- * encryption using SSE-KMS, you can also configure Amazon S3 Bucket Key. For
- * information about default encryption, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
- * S3 default bucket encryption</a> in the <i>Amazon S3 User Guide</i>. For more
- * information about S3 Bucket Keys, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
- * Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p>This
- * action requires AWS Signature Version 4. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/sig-v4-authenticating-requests.html">
- * Authenticating Requests (AWS Signature Version 4)</a>. </p> <p>To
- * use this operation, you must have permissions to perform the
- * <code>s3:PutEncryptionConfiguration</code> action. The bucket owner has this
- * permission by default. The bucket owner can grant this permission to others. For
- * more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the Amazon S3 User Guide.
- * </p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketEncryption.html">GetBucketEncryption</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketEncryption.html">DeleteBucketEncryption</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketEncryption">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketEncryption that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketEncryptionAsync(const Model::PutBucketEncryptionRequest& request, const PutBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketEncryptionRequestT = Model::PutBucketEncryptionRequest>
+ void PutBucketEncryptionAsync(const PutBucketEncryptionRequestT& request, const PutBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketEncryption, request, handler, context);
+ }
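      // A minimal sketch of configuring SSE-KMS default encryption with an S3
      // Bucket Key, as described above. The KMS key ARN and bucket name are
      // placeholders, and the model setter names are assumed to follow the SDK's
      // generated pattern.
      //
      //   #include <aws/s3/S3Client.h>
      //   #include <aws/s3/model/PutBucketEncryptionRequest.h>
      //   #include <aws/s3/model/ServerSideEncryptionConfiguration.h>
      //   #include <aws/s3/model/ServerSideEncryptionRule.h>
      //   #include <aws/s3/model/ServerSideEncryptionByDefault.h>
      //
      //   bool PutDefaultEncryptionExample(const Aws::S3::S3Client& client)
      //   {
      //       Aws::S3::Model::ServerSideEncryptionByDefault byDefault;
      //       byDefault.SetSSEAlgorithm(Aws::S3::Model::ServerSideEncryption::aws_kms);
      //       byDefault.SetKMSMasterKeyID("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE"); // placeholder
      //
      //       Aws::S3::Model::ServerSideEncryptionRule rule;
      //       rule.SetApplyServerSideEncryptionByDefault(byDefault);
      //       rule.SetBucketKeyEnabled(true);   // enable the S3 Bucket Key for SSE-KMS
      //
      //       Aws::S3::Model::ServerSideEncryptionConfiguration config;
      //       config.AddRules(rule);
      //
      //       Aws::S3::Model::PutBucketEncryptionRequest request;
      //       request.SetBucket("example-bucket");
      //       request.SetServerSideEncryptionConfiguration(config);
      //
      //       return client.PutBucketEncryption(request).IsSuccess();
      //   }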
/**
* <p>Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can
* have up to 1,000 S3 Intelligent-Tiering configurations per bucket.</p> <p>The S3
* Intelligent-Tiering storage class is designed to optimize storage costs by
* automatically moving data to the most cost-effective storage access tier,
- * without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
+ * without performance impact or operational overhead. S3 Intelligent-Tiering
+ * delivers automatic cost savings in three low latency and high throughput access
+ * tiers. To get the lowest storage cost on data that can be accessed in minutes to
+ * hours, you can choose to activate additional archiving capabilities.</p> <p>The
+ * S3 Intelligent-Tiering storage class is the ideal storage class for data with
+ * unknown, changing, or unpredictable access patterns, independent of object size
+ * or retention period. If the size of an object is less than 128 KB, it is not
+ * monitored and not eligible for auto-tiering. Smaller objects can be stored, but
+ * they are always charged at the Frequent Access tier rates in the S3
+ * Intelligent-Tiering storage class.</p> <p>For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
* class for automatically optimizing frequently and infrequently accessed
* objects</a>.</p> <p>Operations related to
@@ -7288,96 +3705,22 @@ namespace Aws
virtual Model::PutBucketIntelligentTieringConfigurationOutcome PutBucketIntelligentTieringConfiguration(const Model::PutBucketIntelligentTieringConfigurationRequest& request) const;
/**
- * <p>Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can
- * have up to 1,000 S3 Intelligent-Tiering configurations per bucket.</p> <p>The S3
- * Intelligent-Tiering storage class is designed to optimize storage costs by
- * automatically moving data to the most cost-effective storage access tier,
- * without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>PutBucketIntelligentTieringConfiguration</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html">DeleteBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html">GetBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html">ListBucketIntelligentTieringConfigurations</a>
- * </p> </li> </ul> <p>You only need S3 Intelligent-Tiering enabled on a
- * bucket if you want to automatically move objects stored in the S3
- * Intelligent-Tiering storage class to the Archive Access or Deep Archive Access
- * tier.</p> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <p
- * class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul> <li> <p> <i>Code:</i>
- * InvalidArgument</p> </li> <li> <p> <i>Cause:</i> Invalid Argument</p> </li>
- * </ul> </li> <li> <p class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul>
- * <li> <p> <i>Code:</i> TooManyConfigurations</p> </li> <li> <p> <i>Cause:</i> You
- * are attempting to create a new configuration but have already reached the
- * 1,000-configuration limit. </p> </li> </ul> </li> <li> <p class="title"> <b>HTTP
- * 403 Forbidden Error</b> </p> <ul> <li> <p> <i>Code:</i> AccessDenied</p> </li>
- * <li> <p> <i>Cause:</i> You are not the owner of the specified bucket, or you do
- * not have the <code>s3:PutIntelligentTieringConfiguration</code> bucket
- * permission to set the configuration on the bucket. </p> </li> </ul> </li>
- * </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketIntelligentTieringConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketIntelligentTieringConfigurationOutcomeCallable PutBucketIntelligentTieringConfigurationCallable(const Model::PutBucketIntelligentTieringConfigurationRequest& request) const;
+ template<typename PutBucketIntelligentTieringConfigurationRequestT = Model::PutBucketIntelligentTieringConfigurationRequest>
+ Model::PutBucketIntelligentTieringConfigurationOutcomeCallable PutBucketIntelligentTieringConfigurationCallable(const PutBucketIntelligentTieringConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketIntelligentTieringConfiguration, request);
+ }
/**
- * <p>Puts a S3 Intelligent-Tiering configuration to the specified bucket. You can
- * have up to 1,000 S3 Intelligent-Tiering configurations per bucket.</p> <p>The S3
- * Intelligent-Tiering storage class is designed to optimize storage costs by
- * automatically moving data to the most cost-effective storage access tier,
- * without additional operational overhead. S3 Intelligent-Tiering delivers
- * automatic cost savings by moving data between access tiers, when access patterns
- * change.</p> <p>The S3 Intelligent-Tiering storage class is suitable for objects
- * larger than 128 KB that you plan to store for at least 30 days. If the size of
- * an object is less than 128 KB, it is not eligible for auto-tiering. Smaller
- * objects can be stored, but they are always charged at the frequent access tier
- * rates in the S3 Intelligent-Tiering storage class. </p> <p>If you delete an
- * object before the end of the 30-day minimum storage duration period, you are
- * charged for 30 days. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html#sc-dynamic-data-access">Storage
- * class for automatically optimizing frequently and infrequently accessed
- * objects</a>.</p> <p>Operations related to
- * <code>PutBucketIntelligentTieringConfiguration</code> include: </p> <ul> <li>
- * <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketIntelligentTieringConfiguration.html">DeleteBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketIntelligentTieringConfiguration.html">GetBucketIntelligentTieringConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketIntelligentTieringConfigurations.html">ListBucketIntelligentTieringConfigurations</a>
- * </p> </li> </ul> <p>You only need S3 Intelligent-Tiering enabled on a
- * bucket if you want to automatically move objects stored in the S3
- * Intelligent-Tiering storage class to the Archive Access or Deep Archive Access
- * tier.</p> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <p
- * class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul> <li> <p> <i>Code:</i>
- * InvalidArgument</p> </li> <li> <p> <i>Cause:</i> Invalid Argument</p> </li>
- * </ul> </li> <li> <p class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul>
- * <li> <p> <i>Code:</i> TooManyConfigurations</p> </li> <li> <p> <i>Cause:</i> You
- * are attempting to create a new configuration but have already reached the
- * 1,000-configuration limit. </p> </li> </ul> </li> <li> <p class="title"> <b>HTTP
- * 403 Forbidden Error</b> </p> <ul> <li> <p> <i>Code:</i> AccessDenied</p> </li>
- * <li> <p> <i>Cause:</i> You are not the owner of the specified bucket, or you do
- * not have the <code>s3:PutIntelligentTieringConfiguration</code> bucket
- * permission to set the configuration on the bucket. </p> </li> </ul> </li>
- * </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketIntelligentTieringConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketIntelligentTieringConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketIntelligentTieringConfigurationAsync(const Model::PutBucketIntelligentTieringConfigurationRequest& request, const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketIntelligentTieringConfigurationRequestT = Model::PutBucketIntelligentTieringConfigurationRequest>
+ void PutBucketIntelligentTieringConfigurationAsync(const PutBucketIntelligentTieringConfigurationRequestT& request, const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketIntelligentTieringConfiguration, request, handler, context);
+ }
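      // A minimal sketch of attaching an S3 Intelligent-Tiering configuration that
      // opts objects into the Archive Access tier after 90 days of no access, per
      // the comment above. The IDs, bucket name, tiering values and exact model
      // class names are illustrative assumptions.
      //
      //   #include <aws/s3/S3Client.h>
      //   #include <aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h>
      //   #include <aws/s3/model/IntelligentTieringConfiguration.h>
      //   #include <aws/s3/model/Tiering.h>
      //
      //   bool PutIntelligentTieringExample(const Aws::S3::S3Client& client)
      //   {
      //       Aws::S3::Model::Tiering archiveTier;
      //       archiveTier.SetAccessTier(Aws::S3::Model::IntelligentTieringAccessTier::ARCHIVE_ACCESS);
      //       archiveTier.SetDays(90);   // days without access before moving to Archive Access
      //
      //       Aws::S3::Model::IntelligentTieringConfiguration config;
      //       config.SetId("example-tiering-id");
      //       config.SetStatus(Aws::S3::Model::IntelligentTieringStatus::Enabled);
      //       config.AddTierings(archiveTier);
      //
      //       Aws::S3::Model::PutBucketIntelligentTieringConfigurationRequest request;
      //       request.SetBucket("example-bucket");
      //       request.SetId("example-tiering-id");   // must match the configuration ID
      //       request.SetIntelligentTieringConfiguration(config);
      //
      //       return client.PutBucketIntelligentTieringConfiguration(request).IsSuccess();
      //   }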
/**
* <p>This implementation of the <code>PUT</code> action adds an inventory
@@ -7387,12 +3730,12 @@ namespace Aws
* results are published to a flat file. The bucket that is inventoried is called
* the <i>source</i> bucket, and the bucket where the inventory flat file is stored
* is called the <i>destination</i> bucket. The <i>destination</i> bucket must be
- * in the same AWS Region as the <i>source</i> bucket. </p> <p>When you configure
- * an inventory for a <i>source</i> bucket, you specify the <i>destination</i>
- * bucket where you want the inventory to be stored, and whether to generate the
- * inventory daily or weekly. You can also configure what object metadata to
- * include and whether to inventory all object versions or only current versions.
- * For more information, see <a
+ * in the same Amazon Web Services Region as the <i>source</i> bucket. </p> <p>When
+ * you configure an inventory for a <i>source</i> bucket, you specify the
+ * <i>destination</i> bucket where you want the inventory to be stored, and whether
+ * to generate the inventory daily or weekly. You can also configure what object
+ * metadata to include and whether to inventory all object versions or only current
+ * versions. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
* S3 Inventory</a> in the Amazon S3 User Guide.</p> <p>You must create
* a bucket policy on the <i>destination</i> bucket to grant permissions to Amazon
@@ -7432,132 +3775,46 @@ namespace Aws
virtual Model::PutBucketInventoryConfigurationOutcome PutBucketInventoryConfiguration(const Model::PutBucketInventoryConfigurationRequest& request) const;
/**
- * <p>This implementation of the <code>PUT</code> action adds an inventory
- * configuration (identified by the inventory ID) to the bucket. You can have up to
- * 1,000 inventory configurations per bucket. </p> <p>Amazon S3 inventory generates
- * inventories of the objects in the bucket on a daily or weekly basis, and the
- * results are published to a flat file. The bucket that is inventoried is called
- * the <i>source</i> bucket, and the bucket where the inventory flat file is stored
- * is called the <i>destination</i> bucket. The <i>destination</i> bucket must be
- * in the same AWS Region as the <i>source</i> bucket. </p> <p>When you configure
- * an inventory for a <i>source</i> bucket, you specify the <i>destination</i>
- * bucket where you want the inventory to be stored, and whether to generate the
- * inventory daily or weekly. You can also configure what object metadata to
- * include and whether to inventory all object versions or only current versions.
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a> in the Amazon S3 User Guide.</p> <p>You must create
- * a bucket policy on the <i>destination</i> bucket to grant permissions to Amazon
- * S3 to write objects to the bucket in the defined location. For an example
- * policy, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9">
- * Granting Permissions for Amazon S3 Inventory and Storage Class Analysis</a>.</p>
- * <p>To use this operation, you must have permissions to perform the
- * <code>s3:PutInventoryConfiguration</code> action. The bucket owner has this
- * permission by default and can grant this permission to others. For more
- * information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the Amazon S3 User
- * Guide.</p> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <p
- * class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul> <li> <p> <i>Code:</i>
- * InvalidArgument</p> </li> <li> <p> <i>Cause:</i> Invalid Argument</p> </li>
- * </ul> </li> <li> <p class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul>
- * <li> <p> <i>Code:</i> TooManyConfigurations</p> </li> <li> <p> <i>Cause:</i> You
- * are attempting to create a new configuration but have already reached the
- * 1,000-configuration limit. </p> </li> </ul> </li> <li> <p class="title"> <b>HTTP
- * 403 Forbidden Error</b> </p> <ul> <li> <p> <i>Code:</i> AccessDenied</p> </li>
- * <li> <p> <i>Cause:</i> You are not the owner of the specified bucket, or you do
- * not have the <code>s3:PutInventoryConfiguration</code> bucket permission to set
- * the configuration on the bucket. </p> </li> </ul> </li> </ul> <p class="title">
- * <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html">GetBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html">DeleteBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html">ListBucketInventoryConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketInventoryConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketInventoryConfigurationOutcomeCallable PutBucketInventoryConfigurationCallable(const Model::PutBucketInventoryConfigurationRequest& request) const;
+ template<typename PutBucketInventoryConfigurationRequestT = Model::PutBucketInventoryConfigurationRequest>
+ Model::PutBucketInventoryConfigurationOutcomeCallable PutBucketInventoryConfigurationCallable(const PutBucketInventoryConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketInventoryConfiguration, request);
+ }
/**
- * <p>This implementation of the <code>PUT</code> action adds an inventory
- * configuration (identified by the inventory ID) to the bucket. You can have up to
- * 1,000 inventory configurations per bucket. </p> <p>Amazon S3 inventory generates
- * inventories of the objects in the bucket on a daily or weekly basis, and the
- * results are published to a flat file. The bucket that is inventoried is called
- * the <i>source</i> bucket, and the bucket where the inventory flat file is stored
- * is called the <i>destination</i> bucket. The <i>destination</i> bucket must be
- * in the same AWS Region as the <i>source</i> bucket. </p> <p>When you configure
- * an inventory for a <i>source</i> bucket, you specify the <i>destination</i>
- * bucket where you want the inventory to be stored, and whether to generate the
- * inventory daily or weekly. You can also configure what object metadata to
- * include and whether to inventory all object versions or only current versions.
- * For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-inventory.html">Amazon
- * S3 Inventory</a> in the Amazon S3 User Guide.</p> <p>You must create
- * a bucket policy on the <i>destination</i> bucket to grant permissions to Amazon
- * S3 to write objects to the bucket in the defined location. For an example
- * policy, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/example-bucket-policies.html#example-bucket-policies-use-case-9">
- * Granting Permissions for Amazon S3 Inventory and Storage Class Analysis</a>.</p>
- * <p>To use this operation, you must have permissions to perform the
- * <code>s3:PutInventoryConfiguration</code> action. The bucket owner has this
- * permission by default and can grant this permission to others. For more
- * information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the Amazon S3 User
- * Guide.</p> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <p
- * class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul> <li> <p> <i>Code:</i>
- * InvalidArgument</p> </li> <li> <p> <i>Cause:</i> Invalid Argument</p> </li>
- * </ul> </li> <li> <p class="title"> <b>HTTP 400 Bad Request Error</b> </p> <ul>
- * <li> <p> <i>Code:</i> TooManyConfigurations</p> </li> <li> <p> <i>Cause:</i> You
- * are attempting to create a new configuration but have already reached the
- * 1,000-configuration limit. </p> </li> </ul> </li> <li> <p class="title"> <b>HTTP
- * 403 Forbidden Error</b> </p> <ul> <li> <p> <i>Code:</i> AccessDenied</p> </li>
- * <li> <p> <i>Cause:</i> You are not the owner of the specified bucket, or you do
- * not have the <code>s3:PutInventoryConfiguration</code> bucket permission to set
- * the configuration on the bucket. </p> </li> </ul> </li> </ul> <p class="title">
- * <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketInventoryConfiguration.html">GetBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketInventoryConfiguration.html">DeleteBucketInventoryConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketInventoryConfigurations.html">ListBucketInventoryConfigurations</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketInventoryConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketInventoryConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketInventoryConfigurationAsync(const Model::PutBucketInventoryConfigurationRequest& request, const PutBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketInventoryConfigurationRequestT = Model::PutBucketInventoryConfigurationRequest>
+ void PutBucketInventoryConfigurationAsync(const PutBucketInventoryConfigurationRequestT& request, const PutBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketInventoryConfiguration, request, handler, context);
+ }
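      // A minimal sketch of adding a daily CSV inventory delivered to a destination
      // bucket in the same Region, as the comment above requires. The ARN, IDs and
      // prefix are placeholders, and the model class and setter names are assumed
      // to follow the SDK's generated pattern.
      //
      //   #include <aws/s3/S3Client.h>
      //   #include <aws/s3/model/PutBucketInventoryConfigurationRequest.h>
      //   #include <aws/s3/model/InventoryConfiguration.h>
      //   #include <aws/s3/model/InventoryDestination.h>
      //   #include <aws/s3/model/InventoryS3BucketDestination.h>
      //   #include <aws/s3/model/InventorySchedule.h>
      //
      //   bool PutInventoryConfigurationExample(const Aws::S3::S3Client& client)
      //   {
      //       Aws::S3::Model::InventoryS3BucketDestination s3Destination;
      //       s3Destination.SetBucket("arn:aws:s3:::example-destination-bucket"); // destination bucket ARN
      //       s3Destination.SetPrefix("inventory/");                              // optional key prefix
      //       s3Destination.SetFormat(Aws::S3::Model::InventoryFormat::CSV);
      //
      //       Aws::S3::Model::InventoryDestination destination;
      //       destination.SetS3BucketDestination(s3Destination);
      //
      //       Aws::S3::Model::InventorySchedule schedule;
      //       schedule.SetFrequency(Aws::S3::Model::InventoryFrequency::Daily);
      //
      //       Aws::S3::Model::InventoryConfiguration config;
      //       config.SetId("example-inventory-id");
      //       config.SetIsEnabled(true);
      //       config.SetIncludedObjectVersions(Aws::S3::Model::InventoryIncludedObjectVersions::Current);
      //       config.SetDestination(destination);
      //       config.SetSchedule(schedule);
      //
      //       Aws::S3::Model::PutBucketInventoryConfigurationRequest request;
      //       request.SetBucket("example-source-bucket");
      //       request.SetId("example-inventory-id");   // must match the configuration ID
      //       request.SetInventoryConfiguration(config);
      //
      //       return client.PutBucketInventoryConfiguration(request).IsSuccess();
      //   }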
/**
* <p>Creates a new lifecycle configuration for the bucket or replaces an existing
- * lifecycle configuration. For information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>Bucket
- * lifecycle configuration now supports specifying a lifecycle rule using an object
- * key name prefix, one or more object tags, or a combination of both. Accordingly,
- * this section describes the latest API. The previous version of the API supported
- * filtering based only on an object key name prefix, which is supported for
- * backward compatibility. For the related API description, see <a
+ * lifecycle configuration. Keep in mind that this will overwrite an existing
+ * lifecycle configuration, so if you want to retain any configuration details,
+ * they must be included in the new lifecycle configuration. For information about
+ * lifecycle configuration, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/object-lifecycle-mgmt.html">Managing
+ * your storage lifecycle</a>.</p> <p>Bucket lifecycle configuration now
+ * supports specifying a lifecycle rule using an object key name prefix, one or
+ * more object tags, or a combination of both. Accordingly, this section describes
+ * the latest API. The previous version of the API supported filtering based only
+ * on an object key name prefix, which is supported for backward compatibility. For
+ * the related API description, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>.</p>
* <p> <b>Rules</b> </p> <p>You specify the lifecycle configuration in your
* request body. The lifecycle configuration is specified as XML consisting of one
- * or more rules. Each rule consists of the following:</p> <ul> <li> <p>Filter
- * identifying a subset of objects to which the rule applies. The filter can be
- * based on a key name prefix, object tags, or a combination of both.</p> </li>
- * <li> <p>Status whether the rule is in effect.</p> </li> <li> <p>One or more
- * lifecycle transition and expiration actions that you want Amazon S3 to perform
- * on the objects identified by the filter. If the state of your bucket is
+ * or more rules. An Amazon S3 Lifecycle configuration can have up to 1,000 rules.
+ * This limit is not adjustable. Each rule consists of the following:</p> <ul> <li>
+ * <p>Filter identifying a subset of objects to which the rule applies. The filter
+ * can be based on a key name prefix, object tags, or a combination of both.</p>
+ * </li> <li> <p>Status whether the rule is in effect.</p> </li> <li> <p>One or
+ * more lifecycle transition and expiration actions that you want Amazon S3 to
+ * perform on the objects identified by the filter. If the state of your bucket is
* versioning-enabled or versioning-suspended, you can have many versions of the
* same object (one current version and zero or more noncurrent versions). Amazon
* S3 provides predefined actions that you can specify for current and noncurrent
@@ -7568,16 +3825,17 @@ namespace Aws
* Configuration Elements</a>.</p> <p> <b>Permissions</b> </p> <p>By default, all
* Amazon S3 resources are private, including buckets, objects, and related
* subresources (for example, lifecycle configuration and website configuration).
- * Only the resource owner (that is, the AWS account that created it) can access
- * the resource. The resource owner can optionally grant access permissions to
- * others by writing an access policy. For this operation, a user must get the
- * s3:PutLifecycleConfiguration permission.</p> <p>You can also explicitly deny
- * permissions. Explicit deny also supersedes any other permissions. If you want to
- * block users or accounts from removing or deleting objects from your bucket, you
- * must deny them permissions for the following actions:</p> <ul> <li>
- * <p>s3:DeleteObject</p> </li> <li> <p>s3:DeleteObjectVersion</p> </li> <li>
- * <p>s3:PutLifecycleConfiguration</p> </li> </ul> <p>For more information about
- * permissions, see <a
+ * Only the resource owner (that is, the Amazon Web Services account that created
+ * it) can access the resource. The resource owner can optionally grant access
+ * permissions to others by writing an access policy. For this operation, a user
+ * must get the <code>s3:PutLifecycleConfiguration</code> permission.</p> <p>You
+ * can also explicitly deny permissions. Explicit deny also supersedes any other
+ * permissions. If you want to block users or accounts from removing or deleting
+ * objects from your bucket, you must deny them permissions for the following
+ * actions:</p> <ul> <li> <p> <code>s3:DeleteObject</code> </p> </li> <li> <p>
+ * <code>s3:DeleteObjectVersion</code> </p> </li> <li> <p>
+ * <code>s3:PutLifecycleConfiguration</code> </p> </li> </ul> <p>For more
+ * information about permissions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
* Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following are
* related to <code>PutBucketLifecycleConfiguration</code>:</p> <ul> <li> <p> <a
@@ -7593,126 +3851,40 @@ namespace Aws
virtual Model::PutBucketLifecycleConfigurationOutcome PutBucketLifecycleConfiguration(const Model::PutBucketLifecycleConfigurationRequest& request) const;
/**
- * <p>Creates a new lifecycle configuration for the bucket or replaces an existing
- * lifecycle configuration. For information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>Bucket
- * lifecycle configuration now supports specifying a lifecycle rule using an object
- * key name prefix, one or more object tags, or a combination of both. Accordingly,
- * this section describes the latest API. The previous version of the API supported
- * filtering based only on an object key name prefix, which is supported for
- * backward compatibility. For the related API description, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>.</p>
- * <p> <b>Rules</b> </p> <p>You specify the lifecycle configuration in your
- * request body. The lifecycle configuration is specified as XML consisting of one
- * or more rules. Each rule consists of the following:</p> <ul> <li> <p>Filter
- * identifying a subset of objects to which the rule applies. The filter can be
- * based on a key name prefix, object tags, or a combination of both.</p> </li>
- * <li> <p>Status whether the rule is in effect.</p> </li> <li> <p>One or more
- * lifecycle transition and expiration actions that you want Amazon S3 to perform
- * on the objects identified by the filter. If the state of your bucket is
- * versioning-enabled or versioning-suspended, you can have many versions of the
- * same object (one current version and zero or more noncurrent versions). Amazon
- * S3 provides predefined actions that you can specify for current and noncurrent
- * object versions.</p> </li> </ul> <p>For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html">Object
- * Lifecycle Management</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html">Lifecycle
- * Configuration Elements</a>.</p> <p> <b>Permissions</b> </p> <p>By default, all
- * Amazon S3 resources are private, including buckets, objects, and related
- * subresources (for example, lifecycle configuration and website configuration).
- * Only the resource owner (that is, the AWS account that created it) can access
- * the resource. The resource owner can optionally grant access permissions to
- * others by writing an access policy. For this operation, a user must get the
- * s3:PutLifecycleConfiguration permission.</p> <p>You can also explicitly deny
- * permissions. Explicit deny also supersedes any other permissions. If you want to
- * block users or accounts from removing or deleting objects from your bucket, you
- * must deny them permissions for the following actions:</p> <ul> <li>
- * <p>s3:DeleteObject</p> </li> <li> <p>s3:DeleteObjectVersion</p> </li> <li>
- * <p>s3:PutLifecycleConfiguration</p> </li> </ul> <p>For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following are
- * related to <code>PutBucketLifecycleConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html">Examples
- * of Lifecycle Configuration</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html">GetBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html">DeleteBucketLifecycle</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketLifecycleConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketLifecycleConfigurationOutcomeCallable PutBucketLifecycleConfigurationCallable(const Model::PutBucketLifecycleConfigurationRequest& request) const;
+ template<typename PutBucketLifecycleConfigurationRequestT = Model::PutBucketLifecycleConfigurationRequest>
+ Model::PutBucketLifecycleConfigurationOutcomeCallable PutBucketLifecycleConfigurationCallable(const PutBucketLifecycleConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketLifecycleConfiguration, request);
+ }
/**
- * <p>Creates a new lifecycle configuration for the bucket or replaces an existing
- * lifecycle configuration. For information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>Bucket
- * lifecycle configuration now supports specifying a lifecycle rule using an object
- * key name prefix, one or more object tags, or a combination of both. Accordingly,
- * this section describes the latest API. The previous version of the API supported
- * filtering based only on an object key name prefix, which is supported for
- * backward compatibility. For the related API description, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycle.html">PutBucketLifecycle</a>.</p>
- * <p> <b>Rules</b> </p> <p>You specify the lifecycle configuration in your
- * request body. The lifecycle configuration is specified as XML consisting of one
- * or more rules. Each rule consists of the following:</p> <ul> <li> <p>Filter
- * identifying a subset of objects to which the rule applies. The filter can be
- * based on a key name prefix, object tags, or a combination of both.</p> </li>
- * <li> <p>Status whether the rule is in effect.</p> </li> <li> <p>One or more
- * lifecycle transition and expiration actions that you want Amazon S3 to perform
- * on the objects identified by the filter. If the state of your bucket is
- * versioning-enabled or versioning-suspended, you can have many versions of the
- * same object (one current version and zero or more noncurrent versions). Amazon
- * S3 provides predefined actions that you can specify for current and noncurrent
- * object versions.</p> </li> </ul> <p>For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html">Object
- * Lifecycle Management</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html">Lifecycle
- * Configuration Elements</a>.</p> <p> <b>Permissions</b> </p> <p>By default, all
- * Amazon S3 resources are private, including buckets, objects, and related
- * subresources (for example, lifecycle configuration and website configuration).
- * Only the resource owner (that is, the AWS account that created it) can access
- * the resource. The resource owner can optionally grant access permissions to
- * others by writing an access policy. For this operation, a user must get the
- * s3:PutLifecycleConfiguration permission.</p> <p>You can also explicitly deny
- * permissions. Explicit deny also supersedes any other permissions. If you want to
- * block users or accounts from removing or deleting objects from your bucket, you
- * must deny them permissions for the following actions:</p> <ul> <li>
- * <p>s3:DeleteObject</p> </li> <li> <p>s3:DeleteObjectVersion</p> </li> <li>
- * <p>s3:PutLifecycleConfiguration</p> </li> </ul> <p>For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>The following are
- * related to <code>PutBucketLifecycleConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/lifecycle-configuration-examples.html">Examples
- * of Lifecycle Configuration</a> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html">GetBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketLifecycle.html">DeleteBucketLifecycle</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLifecycleConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketLifecycleConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketLifecycleConfigurationAsync(const Model::PutBucketLifecycleConfigurationRequest& request, const PutBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketLifecycleConfigurationRequestT = Model::PutBucketLifecycleConfigurationRequest>
+ void PutBucketLifecycleConfigurationAsync(const PutBucketLifecycleConfigurationRequestT& request, const PutBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketLifecycleConfiguration, request, handler, context);
+ }
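The new templated Async wrappers replace the old per-operation virtual methods with a single SubmitAsync-based implementation. As a minimal sketch of how the wrapper above might be called, assuming an already-initialized Aws::S3::S3Client; the bucket name, lifecycle rule, and handler lambda are hypothetical placeholders, not part of the SDK:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketLifecycleConfigurationRequest.h>
    #include <aws/s3/model/BucketLifecycleConfiguration.h>
    #include <aws/s3/model/LifecycleRule.h>
    #include <aws/s3/model/LifecycleRuleFilter.h>
    #include <aws/s3/model/LifecycleExpiration.h>

    // Hypothetical example: expire objects under "logs/" after 30 days.
    void PutLifecycleAsyncExample(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::LifecycleExpiration expiration;
        expiration.SetDays(30);

        Aws::S3::Model::LifecycleRuleFilter filter;
        filter.SetPrefix("logs/");                      // placeholder prefix

        Aws::S3::Model::LifecycleRule rule;
        rule.SetID("expire-logs");
        rule.SetStatus(Aws::S3::Model::ExpirationStatus::Enabled);
        rule.SetFilter(filter);
        rule.SetExpiration(expiration);

        Aws::S3::Model::BucketLifecycleConfiguration config;
        config.AddRules(rule);

        Aws::S3::Model::PutBucketLifecycleConfigurationRequest request;
        request.SetBucket("example-bucket");            // placeholder bucket name
        request.SetLifecycleConfiguration(config);

        // The handler runs on the client's executor once the operation finishes.
        client.PutBucketLifecycleConfigurationAsync(request,
            [](const Aws::S3::S3Client*,
               const Aws::S3::Model::PutBucketLifecycleConfigurationRequest&,
               const Aws::S3::Model::PutBucketLifecycleConfigurationOutcome& outcome,
               const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
            {
                if (!outcome.IsSuccess()) {
                    // Inspect outcome.GetError() and handle/log as appropriate.
                }
            });
    }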
/**
* <p>Set the logging parameters for a bucket and specify permissions for who
* can view and modify the logging parameters. All logs are saved to buckets in the
- * same AWS Region as the source bucket. To set the logging status of a bucket, you
- * must be the bucket owner.</p> <p>The bucket owner is automatically granted
- * FULL_CONTROL to all logs. You use the <code>Grantee</code> request element to
- * grant access to other people. The <code>Permissions</code> request element
- * specifies the kind of access the grantee has to the logs.</p> <p> <b>Grantee
- * Values</b> </p> <p>You can specify the person (grantee) to whom you're assigning
- * access rights (using request elements) in the following ways:</p> <ul> <li>
- * <p>By the person's ID:</p> <p> <code>&lt;Grantee
+ * same Amazon Web Services Region as the source bucket. To set the logging status
+ * of a bucket, you must be the bucket owner.</p> <p>The bucket owner is
+ * automatically granted FULL_CONTROL to all logs. You use the <code>Grantee</code>
+ * request element to grant access to other people. The <code>Permissions</code>
+ * request element specifies the kind of access the grantee has to the logs.</p>
+ * <p>If the target bucket for log delivery uses the bucket owner
+ * enforced setting for S3 Object Ownership, you can't use the <code>Grantee</code>
+ * request element to grant access to others. Permissions can only be granted using
+ * policies. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * <p> <b>Grantee Values</b> </p> <p>You can specify the person
+ * (grantee) to whom you're assigning access rights (using request elements) in the
+ * following ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
* xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
* xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
* &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
@@ -7728,8 +3900,9 @@ namespace Aws
* request element:</p> <p> <code>&lt;BucketLoggingStatus
* xmlns="http://doc.s3.amazonaws.com/2006-03-01" /&gt;</code> </p> <p>For more
* information about server access logging, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html">Server
- * Access Logging</a>. </p> <p>For more information about creating a bucket, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerLogs.html">Server
+ * Access Logging</a> in the <i>Amazon S3 User Guide</i>. </p> <p>For more
+ * information about creating a bucket, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>.
* For more information about returning the logging status of a bucket, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html">GetBucketLogging</a>.</p>
@@ -7749,100 +3922,22 @@ namespace Aws
virtual Model::PutBucketLoggingOutcome PutBucketLogging(const Model::PutBucketLoggingRequest& request) const;
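A minimal sketch of how the Grantee and LoggingEnabled request elements described above map onto the C++ request model, assuming an initialized client; the bucket names and canonical user ID below are placeholders. Note that if the target bucket uses the bucket owner enforced setting for Object Ownership, target grants cannot be used and permissions must be granted via policy instead.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketLoggingRequest.h>
    #include <aws/s3/model/BucketLoggingStatus.h>
    #include <aws/s3/model/LoggingEnabled.h>
    #include <aws/s3/model/TargetGrant.h>
    #include <aws/s3/model/Grantee.h>

    // Hypothetical example: write access logs for "source-bucket" into
    // "log-bucket" under the "logs/" prefix and grant READ on the logs
    // to a canonical user.
    Aws::S3::Model::PutBucketLoggingOutcome EnableLogging(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::Grantee grantee;
        grantee.SetType(Aws::S3::Model::Type::CanonicalUser);
        grantee.SetID("example-canonical-user-id");     // placeholder ID

        Aws::S3::Model::TargetGrant grant;
        grant.SetGrantee(grantee);
        grant.SetPermission(Aws::S3::Model::BucketLogsPermission::READ);

        Aws::S3::Model::LoggingEnabled loggingEnabled;
        loggingEnabled.SetTargetBucket("log-bucket");   // placeholder bucket
        loggingEnabled.SetTargetPrefix("logs/");
        loggingEnabled.AddTargetGrants(grant);

        Aws::S3::Model::BucketLoggingStatus status;
        status.SetLoggingEnabled(loggingEnabled);

        Aws::S3::Model::PutBucketLoggingRequest request;
        request.SetBucket("source-bucket");             // placeholder bucket
        request.SetBucketLoggingStatus(status);

        // Sending a BucketLoggingStatus without LoggingEnabled disables logging.
        return client.PutBucketLogging(request);
    }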
/**
- * <p>Set the logging parameters for a bucket and to specify permissions for who
- * can view and modify the logging parameters. All logs are saved to buckets in the
- * same AWS Region as the source bucket. To set the logging status of a bucket, you
- * must be the bucket owner.</p> <p>The bucket owner is automatically granted
- * FULL_CONTROL to all logs. You use the <code>Grantee</code> request element to
- * grant access to other people. The <code>Permissions</code> request element
- * specifies the kind of access the grantee has to the logs.</p> <p> <b>Grantee
- * Values</b> </p> <p>You can specify the person (grantee) to whom you're assigning
- * access rights (using request elements) in the following ways:</p> <ul> <li>
- * <p>By the person's ID:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
- * &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
- * request.</p> </li> <li> <p>By Email address:</p> <p> <code> &lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;&lt;/Grantee&gt;</code>
- * </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
- * Object acl request, appears as the CanonicalUser.</p> </li> <li> <p>By URI:</p>
- * <p> <code>&lt;Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="Group"&gt;&lt;URI&gt;&lt;&gt;http://acs.amazonaws.com/groups/global/AuthenticatedUsers&lt;&gt;&lt;/URI&gt;&lt;/Grantee&gt;</code>
- * </p> </li> </ul> <p>To enable logging, you use LoggingEnabled and its children
- * request elements. To disable logging, you use an empty BucketLoggingStatus
- * request element:</p> <p> <code>&lt;BucketLoggingStatus
- * xmlns="http://doc.s3.amazonaws.com/2006-03-01" /&gt;</code> </p> <p>For more
- * information about server access logging, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html">Server
- * Access Logging</a>. </p> <p>For more information about creating a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>.
- * For more information about returning the logging status of a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html">GetBucketLogging</a>.</p>
- * <p>The following operations are related to <code>PutBucketLogging</code>:</p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html">GetBucketLogging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketLogging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketLoggingOutcomeCallable PutBucketLoggingCallable(const Model::PutBucketLoggingRequest& request) const;
+ template<typename PutBucketLoggingRequestT = Model::PutBucketLoggingRequest>
+ Model::PutBucketLoggingOutcomeCallable PutBucketLoggingCallable(const PutBucketLoggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketLogging, request);
+ }
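A minimal usage sketch for the Callable wrapper above: the returned PutBucketLoggingOutcomeCallable is a future over the outcome, so the call returns immediately and the result can be collected later. The client and request are assumed to be set up elsewhere (for example as in the sketch further up).

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketLoggingRequest.h>

    // Hypothetical example: start PutBucketLogging on the client's executor
    // and block for the result only when it is needed.
    void PutLoggingInParallel(const Aws::S3::S3Client& client,
                              const Aws::S3::Model::PutBucketLoggingRequest& request)
    {
        // Returns immediately with a future-like OutcomeCallable.
        auto futureOutcome = client.PutBucketLoggingCallable(request);

        // ... do other work in parallel ...

        Aws::S3::Model::PutBucketLoggingOutcome outcome = futureOutcome.get();
        if (!outcome.IsSuccess()) {
            // Inspect outcome.GetError() for the error type and message.
        }
    }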
/**
- * <p>Set the logging parameters for a bucket and to specify permissions for who
- * can view and modify the logging parameters. All logs are saved to buckets in the
- * same AWS Region as the source bucket. To set the logging status of a bucket, you
- * must be the bucket owner.</p> <p>The bucket owner is automatically granted
- * FULL_CONTROL to all logs. You use the <code>Grantee</code> request element to
- * grant access to other people. The <code>Permissions</code> request element
- * specifies the kind of access the grantee has to the logs.</p> <p> <b>Grantee
- * Values</b> </p> <p>You can specify the person (grantee) to whom you're assigning
- * access rights (using request elements) in the following ways:</p> <ul> <li>
- * <p>By the person's ID:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
- * &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
- * request.</p> </li> <li> <p>By Email address:</p> <p> <code> &lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;&lt;/Grantee&gt;</code>
- * </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
- * Object acl request, appears as the CanonicalUser.</p> </li> <li> <p>By URI:</p>
- * <p> <code>&lt;Grantee xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="Group"&gt;&lt;URI&gt;&lt;&gt;http://acs.amazonaws.com/groups/global/AuthenticatedUsers&lt;&gt;&lt;/URI&gt;&lt;/Grantee&gt;</code>
- * </p> </li> </ul> <p>To enable logging, you use LoggingEnabled and its children
- * request elements. To disable logging, you use an empty BucketLoggingStatus
- * request element:</p> <p> <code>&lt;BucketLoggingStatus
- * xmlns="http://doc.s3.amazonaws.com/2006-03-01" /&gt;</code> </p> <p>For more
- * information about server access logging, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerLogs.html">Server
- * Access Logging</a>. </p> <p>For more information about creating a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>.
- * For more information about returning the logging status of a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html">GetBucketLogging</a>.</p>
- * <p>The following operations are related to <code>PutBucketLogging</code>:</p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLogging.html">GetBucketLogging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketLogging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketLogging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketLoggingAsync(const Model::PutBucketLoggingRequest& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketLoggingRequestT = Model::PutBucketLoggingRequest>
+ void PutBucketLoggingAsync(const PutBucketLoggingRequestT& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketLogging, request, handler, context);
+ }
/**
* <p>Sets a metrics configuration (specified by the metrics configuration ID) for
@@ -7863,7 +3958,7 @@ namespace Aws
* to <code>PutBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
* </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketMetricsConfiguration.html">GetBucketMetricsConfiguration</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
* </p> </li> </ul> <p> <code>PutBucketMetricsConfiguration</code> has the following special
@@ -7877,72 +3972,22 @@ namespace Aws
virtual Model::PutBucketMetricsConfigurationOutcome PutBucketMetricsConfiguration(const Model::PutBucketMetricsConfigurationRequest& request) const;
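A minimal sketch of a metrics-configuration request matching the description above; the call fully replaces any existing configuration with the same ID. The bucket name, configuration ID, and prefix are placeholders.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketMetricsConfigurationRequest.h>
    #include <aws/s3/model/MetricsConfiguration.h>
    #include <aws/s3/model/MetricsFilter.h>

    // Hypothetical example: request CloudWatch request metrics only for
    // objects under the "docs/" prefix.
    Aws::S3::Model::PutBucketMetricsConfigurationOutcome
    PutDocsMetrics(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::MetricsFilter filter;
        filter.SetPrefix("docs/");                      // placeholder prefix

        Aws::S3::Model::MetricsConfiguration config;
        config.SetId("docs-metrics");                   // placeholder configuration ID
        config.SetFilter(filter);

        Aws::S3::Model::PutBucketMetricsConfigurationRequest request;
        request.SetBucket("example-bucket");            // placeholder bucket
        request.SetId("docs-metrics");                  // must match the configuration ID
        request.SetMetricsConfiguration(config);

        return client.PutBucketMetricsConfiguration(request);
    }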
/**
- * <p>Sets a metrics configuration (specified by the metrics configuration ID) for
- * the bucket. You can have up to 1,000 metrics configurations per bucket. If
- * you're updating an existing metrics configuration, note that this is a full
- * replacement of the existing metrics configuration. If you don't include the
- * elements you want to keep, they are erased.</p> <p>To use this operation, you
- * must have permissions to perform the <code>s3:PutMetricsConfiguration</code>
- * action. The bucket owner has this permission by default. The bucket owner can
- * grant this permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * CloudWatch request metrics for Amazon S3, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>.</p> <p>The following operations are related
- * to <code>PutBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
- * </p> </li> </ul> <p> <code>GetBucketLifecycle</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>TooManyConfigurations</code> </p> <ul>
- * <li> <p>Description: You are attempting to create a new configuration but have
- * already reached the 1,000-configuration limit.</p> </li> <li> <p>HTTP Status
- * Code: HTTP 400 Bad Request</p> </li> </ul> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketMetricsConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketMetricsConfigurationOutcomeCallable PutBucketMetricsConfigurationCallable(const Model::PutBucketMetricsConfigurationRequest& request) const;
+ template<typename PutBucketMetricsConfigurationRequestT = Model::PutBucketMetricsConfigurationRequest>
+ Model::PutBucketMetricsConfigurationOutcomeCallable PutBucketMetricsConfigurationCallable(const PutBucketMetricsConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketMetricsConfiguration, request);
+ }
/**
- * <p>Sets a metrics configuration (specified by the metrics configuration ID) for
- * the bucket. You can have up to 1,000 metrics configurations per bucket. If
- * you're updating an existing metrics configuration, note that this is a full
- * replacement of the existing metrics configuration. If you don't include the
- * elements you want to keep, they are erased.</p> <p>To use this operation, you
- * must have permissions to perform the <code>s3:PutMetricsConfiguration</code>
- * action. The bucket owner has this permission by default. The bucket owner can
- * grant this permission to others. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>For information about
- * CloudWatch request metrics for Amazon S3, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/cloudwatch-monitoring.html">Monitoring
- * Metrics with Amazon CloudWatch</a>.</p> <p>The following operations are related
- * to <code>PutBucketMetricsConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketMetricsConfiguration.html">DeleteBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListBucketMetricsConfigurations.html">ListBucketMetricsConfigurations</a>
- * </p> </li> </ul> <p> <code>GetBucketLifecycle</code> has the following special
- * error:</p> <ul> <li> <p>Error code: <code>TooManyConfigurations</code> </p> <ul>
- * <li> <p>Description: You are attempting to create a new configuration but have
- * already reached the 1,000-configuration limit.</p> </li> <li> <p>HTTP Status
- * Code: HTTP 400 Bad Request</p> </li> </ul> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketMetricsConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketMetricsConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketMetricsConfigurationAsync(const Model::PutBucketMetricsConfigurationRequest& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketMetricsConfigurationRequestT = Model::PutBucketMetricsConfigurationRequest>
+ void PutBucketMetricsConfigurationAsync(const PutBucketMetricsConfigurationRequestT& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketMetricsConfiguration, request, handler, context);
+ }
/**
* <p>Enables notifications of specified events for a bucket. For more information
@@ -7961,27 +4006,32 @@ namespace Aws
* request body.</p> <p>After Amazon S3 receives this request, it first verifies
* that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue
* Service (Amazon SQS) destination exists, and that the bucket owner has
- * permission to publish to it by sending a test notification. In the case of AWS
+ * permission to publish to it by sending a test notification. In the case of
* Lambda destinations, Amazon S3 verifies that the Lambda function permissions
* grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
* Notifications for Amazon S3 Events</a>.</p> <p>You can disable notifications by
- * adding the empty NotificationConfiguration element.</p> <p>By default, only the
- * bucket owner can configure notifications on a bucket. However, bucket owners can
- * use a bucket policy to grant permission to other users to set this configuration
- * with <code>s3:PutBucketNotification</code> permission.</p> <p>The PUT
- * notification is an atomic operation. For example, suppose your notification
- * configuration includes SNS topic, SQS queue, and Lambda function configurations.
- * When you send a PUT request with this configuration, Amazon S3 sends test
- * messages to your SNS topic. If the message fails, the entire PUT action will
- * fail, and Amazon S3 will not add the configuration to your bucket.</p>
- * <p> <b>Responses</b> </p> <p>If the configuration in the request body includes
- * only one <code>TopicConfiguration</code> specifying only the
- * <code>s3:ReducedRedundancyLostObject</code> event type, the response will also
- * include the <code>x-amz-sns-test-message-id</code> header containing the message
- * ID of the test notification sent to the topic.</p> <p>The following action is
- * related to <code>PutBucketNotificationConfiguration</code>:</p> <ul> <li> <p> <a
+ * adding the empty NotificationConfiguration element.</p> <p>For more information
+ * about the number of event notification configurations that you can create per
+ * bucket, see <a
+ * href="https://docs.aws.amazon.com/general/latest/gr/s3.html#limits_s3">Amazon S3
+ * service quotas</a> in <i>Amazon Web Services General Reference</i>.</p> <p>By
+ * default, only the bucket owner can configure notifications on a bucket. However,
+ * bucket owners can use a bucket policy to grant permission to other users to set
+ * this configuration with <code>s3:PutBucketNotification</code> permission.</p>
+ * <p>The PUT notification is an atomic operation. For example, suppose your
+ * notification configuration includes SNS topic, SQS queue, and Lambda function
+ * configurations. When you send a PUT request with this configuration, Amazon S3
+ * sends test messages to your SNS topic. If the message fails, the entire PUT
+ * action will fail, and Amazon S3 will not add the configuration to your
+ * bucket.</p> <p> <b>Responses</b> </p> <p>If the configuration in the
+ * request body includes only one <code>TopicConfiguration</code> specifying only
+ * the <code>s3:ReducedRedundancyLostObject</code> event type, the response will
+ * also include the <code>x-amz-sns-test-message-id</code> header containing the
+ * message ID of the test notification sent to the topic.</p> <p>The following
+ * action is related to <code>PutBucketNotificationConfiguration</code>:</p> <ul>
+ * <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html">GetBucketNotificationConfiguration</a>
* </p> </li> </ul><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration">AWS
@@ -7990,109 +4040,33 @@ namespace Aws
virtual Model::PutBucketNotificationConfigurationOutcome PutBucketNotificationConfiguration(const Model::PutBucketNotificationConfigurationRequest& request) const;
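A minimal sketch of the "empty NotificationConfiguration" case mentioned above, which replaces the bucket's configuration and thereby turns off all event notifications; the bucket name is a placeholder.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketNotificationConfigurationRequest.h>
    #include <aws/s3/model/NotificationConfiguration.h>

    // Hypothetical example: send an empty <NotificationConfiguration/> to
    // disable all event notifications for the bucket.
    Aws::S3::Model::PutBucketNotificationConfigurationOutcome
    DisableNotifications(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::NotificationConfiguration emptyConfig;

        Aws::S3::Model::PutBucketNotificationConfigurationRequest request;
        request.SetBucket("example-bucket");            // placeholder bucket
        request.SetNotificationConfiguration(emptyConfig);

        return client.PutBucketNotificationConfiguration(request);
    }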
/**
- * <p>Enables notifications of specified events for a bucket. For more information
- * about event notifications, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
- * Event Notifications</a>.</p> <p>Using this API, you can replace an existing
- * notification configuration. The configuration is an XML file that defines the
- * event types that you want Amazon S3 to publish and the destination where you
- * want Amazon S3 to publish an event notification when it detects an event of the
- * specified type.</p> <p>By default, your bucket has no event notifications
- * configured. That is, the notification configuration will be an empty
- * <code>NotificationConfiguration</code>.</p> <p>
- * <code>&lt;NotificationConfiguration&gt;</code> </p> <p>
- * <code>&lt;/NotificationConfiguration&gt;</code> </p> <p>This action replaces the
- * existing notification configuration with the configuration you include in the
- * request body.</p> <p>After Amazon S3 receives this request, it first verifies
- * that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue
- * Service (Amazon SQS) destination exists, and that the bucket owner has
- * permission to publish to it by sending a test notification. In the case of AWS
- * Lambda destinations, Amazon S3 verifies that the Lambda function permissions
- * grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For
- * more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
- * Notifications for Amazon S3 Events</a>.</p> <p>You can disable notifications by
- * adding the empty NotificationConfiguration element.</p> <p>By default, only the
- * bucket owner can configure notifications on a bucket. However, bucket owners can
- * use a bucket policy to grant permission to other users to set this configuration
- * with <code>s3:PutBucketNotification</code> permission.</p> <p>The PUT
- * notification is an atomic operation. For example, suppose your notification
- * configuration includes SNS topic, SQS queue, and Lambda function configurations.
- * When you send a PUT request with this configuration, Amazon S3 sends test
- * messages to your SNS topic. If the message fails, the entire PUT action will
- * fail, and Amazon S3 will not add the configuration to your bucket.</p>
- * <p> <b>Responses</b> </p> <p>If the configuration in the request body includes
- * only one <code>TopicConfiguration</code> specifying only the
- * <code>s3:ReducedRedundancyLostObject</code> event type, the response will also
- * include the <code>x-amz-sns-test-message-id</code> header containing the message
- * ID of the test notification sent to the topic.</p> <p>The following action is
- * related to <code>PutBucketNotificationConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html">GetBucketNotificationConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketNotificationConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketNotificationConfigurationOutcomeCallable PutBucketNotificationConfigurationCallable(const Model::PutBucketNotificationConfigurationRequest& request) const;
+ template<typename PutBucketNotificationConfigurationRequestT = Model::PutBucketNotificationConfigurationRequest>
+ Model::PutBucketNotificationConfigurationOutcomeCallable PutBucketNotificationConfigurationCallable(const PutBucketNotificationConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketNotificationConfiguration, request);
+ }
/**
- * <p>Enables notifications of specified events for a bucket. For more information
- * about event notifications, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
- * Event Notifications</a>.</p> <p>Using this API, you can replace an existing
- * notification configuration. The configuration is an XML file that defines the
- * event types that you want Amazon S3 to publish and the destination where you
- * want Amazon S3 to publish an event notification when it detects an event of the
- * specified type.</p> <p>By default, your bucket has no event notifications
- * configured. That is, the notification configuration will be an empty
- * <code>NotificationConfiguration</code>.</p> <p>
- * <code>&lt;NotificationConfiguration&gt;</code> </p> <p>
- * <code>&lt;/NotificationConfiguration&gt;</code> </p> <p>This action replaces the
- * existing notification configuration with the configuration you include in the
- * request body.</p> <p>After Amazon S3 receives this request, it first verifies
- * that any Amazon Simple Notification Service (Amazon SNS) or Amazon Simple Queue
- * Service (Amazon SQS) destination exists, and that the bucket owner has
- * permission to publish to it by sending a test notification. In the case of AWS
- * Lambda destinations, Amazon S3 verifies that the Lambda function permissions
- * grant Amazon S3 permission to invoke the function from the Amazon S3 bucket. For
- * more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
- * Notifications for Amazon S3 Events</a>.</p> <p>You can disable notifications by
- * adding the empty NotificationConfiguration element.</p> <p>By default, only the
- * bucket owner can configure notifications on a bucket. However, bucket owners can
- * use a bucket policy to grant permission to other users to set this configuration
- * with <code>s3:PutBucketNotification</code> permission.</p> <p>The PUT
- * notification is an atomic operation. For example, suppose your notification
- * configuration includes SNS topic, SQS queue, and Lambda function configurations.
- * When you send a PUT request with this configuration, Amazon S3 sends test
- * messages to your SNS topic. If the message fails, the entire PUT action will
- * fail, and Amazon S3 will not add the configuration to your bucket.</p>
- * <p> <b>Responses</b> </p> <p>If the configuration in the request body includes
- * only one <code>TopicConfiguration</code> specifying only the
- * <code>s3:ReducedRedundancyLostObject</code> event type, the response will also
- * include the <code>x-amz-sns-test-message-id</code> header containing the message
- * ID of the test notification sent to the topic.</p> <p>The following action is
- * related to <code>PutBucketNotificationConfiguration</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html">GetBucketNotificationConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketNotificationConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketNotificationConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketNotificationConfigurationAsync(const Model::PutBucketNotificationConfigurationRequest& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketNotificationConfigurationRequestT = Model::PutBucketNotificationConfigurationRequest>
+ void PutBucketNotificationConfigurationAsync(const PutBucketNotificationConfigurationRequestT& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketNotificationConfiguration, request, handler, context);
+ }
/**
* <p>Creates or modifies <code>OwnershipControls</code> for an Amazon S3 bucket.
* To use this operation, you must have the
* <code>s3:PutBucketOwnershipControls</code> permission. For more information
* about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. </p> <p>For information about Amazon S3 Object
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/user-guide/using-with-s3-actions.html">Specifying
+ * permissions in a policy</a>. </p> <p>For information about Amazon S3 Object
* Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/user-guide/about-object-ownership.html">Using
+ * object ownership</a>. </p> <p>The following operations are related to
* <code>PutBucketOwnershipControls</code>:</p> <ul> <li> <p>
* <a>GetBucketOwnershipControls</a> </p> </li> <li> <p>
* <a>DeleteBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
@@ -8102,61 +4076,40 @@ namespace Aws
virtual Model::PutBucketOwnershipControlsOutcome PutBucketOwnershipControls(const Model::PutBucketOwnershipControlsRequest& request) const;
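A minimal sketch of setting OwnershipControls as described above; the bucket name is a placeholder and BucketOwnerEnforced is used only as an example value.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketOwnershipControlsRequest.h>
    #include <aws/s3/model/OwnershipControls.h>
    #include <aws/s3/model/OwnershipControlsRule.h>
    #include <aws/s3/model/ObjectOwnership.h>

    // Hypothetical example: enforce bucket-owner ownership on "example-bucket".
    Aws::S3::Model::PutBucketOwnershipControlsOutcome
    EnforceBucketOwnerOwnership(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::OwnershipControlsRule rule;
        rule.SetObjectOwnership(Aws::S3::Model::ObjectOwnership::BucketOwnerEnforced);

        Aws::S3::Model::OwnershipControls controls;
        controls.AddRules(rule);

        Aws::S3::Model::PutBucketOwnershipControlsRequest request;
        request.SetBucket("example-bucket");            // placeholder bucket
        request.SetOwnershipControls(controls);

        return client.PutBucketOwnershipControls(request);
    }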
/**
- * <p>Creates or modifies <code>OwnershipControls</code> for an Amazon S3 bucket.
- * To use this operation, you must have the
- * <code>s3:PutBucketOwnershipControls</code> permission. For more information
- * about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. </p> <p>For information about Amazon S3 Object
- * Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
- * <code>PutBucketOwnershipControls</code>:</p> <ul> <li> <p>
- * <a>GetBucketOwnershipControls</a> </p> </li> <li> <p>
- * <a>DeleteBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketOwnershipControls that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketOwnershipControlsOutcomeCallable PutBucketOwnershipControlsCallable(const Model::PutBucketOwnershipControlsRequest& request) const;
+ template<typename PutBucketOwnershipControlsRequestT = Model::PutBucketOwnershipControlsRequest>
+ Model::PutBucketOwnershipControlsOutcomeCallable PutBucketOwnershipControlsCallable(const PutBucketOwnershipControlsRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketOwnershipControls, request);
+ }
/**
- * <p>Creates or modifies <code>OwnershipControls</code> for an Amazon S3 bucket.
- * To use this operation, you must have the
- * <code>s3:PutBucketOwnershipControls</code> permission. For more information
- * about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>. </p> <p>For information about Amazon S3 Object
- * Ownership, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/about-object-ownership.html">Using
- * Object Ownership</a>. </p> <p>The following operations are related to
- * <code>PutBucketOwnershipControls</code>:</p> <ul> <li> <p>
- * <a>GetBucketOwnershipControls</a> </p> </li> <li> <p>
- * <a>DeleteBucketOwnershipControls</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketOwnershipControls">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketOwnershipControls that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketOwnershipControlsAsync(const Model::PutBucketOwnershipControlsRequest& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketOwnershipControlsRequestT = Model::PutBucketOwnershipControlsRequest>
+ void PutBucketOwnershipControlsAsync(const PutBucketOwnershipControlsRequestT& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketOwnershipControls, request, handler, context);
+ }
/**
* <p>Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using
- * an identity other than the root user of the AWS account that owns the bucket,
- * the calling identity must have the <code>PutBucketPolicy</code> permissions on
- * the specified bucket and belong to the bucket owner's account in order to use
- * this operation.</p> <p>If you don't have <code>PutBucketPolicy</code>
- * permissions, Amazon S3 returns a <code>403 Access Denied</code> error. If you
- * have the correct permissions, but you're not using an identity that belongs to
- * the bucket owner's account, Amazon S3 returns a <code>405 Method Not
- * Allowed</code> error.</p> <p> As a security precaution, the root
- * user of the AWS account that owns a bucket can always use this operation, even
- * if the policy explicitly denies the root user the ability to perform this
- * action. </p> <p>For more information about bucket policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>The following operations are
- * related to <code>PutBucketPolicy</code>:</p> <ul> <li> <p> <a
+ * an identity other than the root user of the Amazon Web Services account that
+ * owns the bucket, the calling identity must have the <code>PutBucketPolicy</code>
+ * permissions on the specified bucket and belong to the bucket owner's account in
+ * order to use this operation.</p> <p>If you don't have
+ * <code>PutBucketPolicy</code> permissions, Amazon S3 returns a <code>403 Access
+ * Denied</code> error. If you have the correct permissions, but you're not using
+ * an identity that belongs to the bucket owner's account, Amazon S3 returns a
+ * <code>405 Method Not Allowed</code> error.</p> <p> As a security
+ * precaution, the root user of the Amazon Web Services account that owns a bucket
+ * can always use this operation, even if the policy explicitly denies the root
+ * user the ability to perform this action. </p> <p>For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/example-bucket-policies.html">Bucket
+ * policy examples</a>.</p> <p>The following operations are related to
+ * <code>PutBucketPolicy</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
@@ -8167,107 +4120,74 @@ namespace Aws
virtual Model::PutBucketPolicyOutcome PutBucketPolicy(const Model::PutBucketPolicyRequest& request) const;
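A minimal sketch of applying a bucket policy as described above; in the C++ model the JSON policy document is streamed as the request body. The bucket name and policy string are supplied by the caller, and the allocation tag passed to MakeShared is arbitrary.

    #include <aws/core/utils/memory/stl/AWSStringStream.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketPolicyRequest.h>

    // Hypothetical example: the JSON policy document becomes the HTTP payload.
    Aws::S3::Model::PutBucketPolicyOutcome
    ApplyBucketPolicy(const Aws::S3::S3Client& client,
                      const Aws::String& bucketName,
                      const Aws::String& policyJson)
    {
        auto body = Aws::MakeShared<Aws::StringStream>("PutBucketPolicyExample");
        *body << policyJson;

        Aws::S3::Model::PutBucketPolicyRequest request;
        request.SetBucket(bucketName);
        request.SetBody(body);   // the policy document is sent as the request body

        return client.PutBucketPolicy(request);
    }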
/**
- * <p>Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using
- * an identity other than the root user of the AWS account that owns the bucket,
- * the calling identity must have the <code>PutBucketPolicy</code> permissions on
- * the specified bucket and belong to the bucket owner's account in order to use
- * this operation.</p> <p>If you don't have <code>PutBucketPolicy</code>
- * permissions, Amazon S3 returns a <code>403 Access Denied</code> error. If you
- * have the correct permissions, but you're not using an identity that belongs to
- * the bucket owner's account, Amazon S3 returns a <code>405 Method Not
- * Allowed</code> error.</p> <p> As a security precaution, the root
- * user of the AWS account that owns a bucket can always use this operation, even
- * if the policy explicitly denies the root user the ability to perform this
- * action. </p> <p>For more information about bucket policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>The following operations are
- * related to <code>PutBucketPolicy</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketPolicy that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketPolicyOutcomeCallable PutBucketPolicyCallable(const Model::PutBucketPolicyRequest& request) const;
+ template<typename PutBucketPolicyRequestT = Model::PutBucketPolicyRequest>
+ Model::PutBucketPolicyOutcomeCallable PutBucketPolicyCallable(const PutBucketPolicyRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketPolicy, request);
+ }
/**
- * <p>Applies an Amazon S3 bucket policy to an Amazon S3 bucket. If you are using
- * an identity other than the root user of the AWS account that owns the bucket,
- * the calling identity must have the <code>PutBucketPolicy</code> permissions on
- * the specified bucket and belong to the bucket owner's account in order to use
- * this operation.</p> <p>If you don't have <code>PutBucketPolicy</code>
- * permissions, Amazon S3 returns a <code>403 Access Denied</code> error. If you
- * have the correct permissions, but you're not using an identity that belongs to
- * the bucket owner's account, Amazon S3 returns a <code>405 Method Not
- * Allowed</code> error.</p> <p> As a security precaution, the root
- * user of the AWS account that owns a bucket can always use this operation, even
- * if the policy explicitly denies the root user the ability to perform this
- * action. </p> <p>For more information about bucket policies, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-iam-policies.html">Using
- * Bucket Policies and User Policies</a>.</p> <p>The following operations are
- * related to <code>PutBucketPolicy</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketPolicy">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketPolicy that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketPolicyAsync(const Model::PutBucketPolicyRequest& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketPolicyRequestT = Model::PutBucketPolicyRequest>
+ void PutBucketPolicyAsync(const PutBucketPolicyRequestT& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketPolicy, request, handler, context);
+ }
/**
* <p> Creates a replication configuration or replaces an existing one. For more
* information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 Developer Guide</i>. </p> <p>To perform this
- * operation, the user or role performing the action must have the <a
- * href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html">iam:PassRole</a>
- * permission.</p> <p>Specify the replication configuration in the request
- * body. In the replication configuration, you provide the name of the destination
- * bucket or buckets where you want Amazon S3 to replicate objects, the IAM role
- * that Amazon S3 can assume to replicate objects on your behalf, and other
- * relevant information.</p> <p>A replication configuration must include at least
- * one rule, and can contain a maximum of 1,000. Each rule identifies a subset of
- * objects to replicate by filtering the objects in the source bucket. To choose
- * additional subsets of objects to replicate, add a rule for each subset.</p>
- * <p>To specify a subset of the objects in the source bucket to apply a
- * replication rule to, add the Filter element as a child of the Rule element. You
- * can filter objects based on an object key prefix, one or more object tags, or
- * both. When you add the Filter element in the configuration, you must also add
- * the following elements: <code>DeleteMarkerReplication</code>,
- * <code>Status</code>, and <code>Priority</code>.</p> <p>If you are using
- * an earlier version of the replication configuration, Amazon S3 handles
- * replication of delete markers differently. For more information, see <a
+ * in the <i>Amazon S3 User Guide</i>. </p> <p>Specify the replication
+ * configuration in the request body. In the replication configuration, you provide
+ * the name of the destination bucket or buckets where you want Amazon S3 to
+ * replicate objects, the IAM role that Amazon S3 can assume to replicate objects
+ * on your behalf, and other relevant information.</p> <p>A replication
+ * configuration must include at least one rule, and can contain a maximum of
+ * 1,000. Each rule identifies a subset of objects to replicate by filtering the
+ * objects in the source bucket. To choose additional subsets of objects to
+ * replicate, add a rule for each subset.</p> <p>To specify a subset of the objects
+ * in the source bucket to apply a replication rule to, add the Filter element as a
+ * child of the Rule element. You can filter objects based on an object key prefix,
+ * one or more object tags, or both. When you add the Filter element in the
+ * configuration, you must also add the following elements:
+ * <code>DeleteMarkerReplication</code>, <code>Status</code>, and
+ * <code>Priority</code>.</p> <p>If you are using an earlier version of the
+ * replication configuration, Amazon S3 handles replication of delete markers
+ * differently. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations">Backward
* Compatibility</a>.</p> <p>For information about enabling versioning on a
* bucket, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html">Using
- * Versioning</a>.</p> <p>By default, a resource owner, in this case the AWS
- * account that created the bucket, can perform this operation. The resource owner
- * can also grant others permissions to perform the operation. For more information
- * about permissions, see <a
+ * Versioning</a>.</p> <p> <b>Handling Replication of Encrypted Objects</b> </p>
+ * <p>By default, Amazon S3 doesn't replicate objects that are stored at rest using
+ * server-side encryption with KMS keys. To replicate Amazon Web Services
+ * KMS-encrypted objects, add the following: <code>SourceSelectionCriteria</code>,
+ * <code>SseKmsEncryptedObjects</code>, <code>Status</code>,
+ * <code>EncryptionConfiguration</code>, and <code>ReplicaKmsKeyID</code>. For
+ * information about replication configuration, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html">Replicating
+ * Objects Created with SSE Using KMS keys</a>.</p> <p>For information on
+ * <code>PutBucketReplication</code> errors, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList">List
+ * of replication-related error codes</a> </p> <p> <b>Permissions</b> </p> <p>To
+ * create a <code>PutBucketReplication</code> request, you must have
+ * <code>s3:PutReplicationConfiguration</code> permissions for the bucket. </p>
+ * <p>By default, a resource owner, in this case the Amazon Web Services account
+ * that created the bucket, can perform this operation. The resource owner can also
+ * grant others permissions to perform the operation. For more information about
+ * permissions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
* Permissions in a Policy</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> <b>Handling
- * Replication of Encrypted Objects</b> </p> <p>By default, Amazon S3 doesn't
- * replicate objects that are stored at rest using server-side encryption with CMKs
- * stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following:
- * <code>SourceSelectionCriteria</code>, <code>SseKmsEncryptedObjects</code>,
- * <code>Status</code>, <code>EncryptionConfiguration</code>, and
- * <code>ReplicaKmsKeyID</code>. For information about replication configuration,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html">Replicating
- * Objects Created with SSE Using CMKs stored in AWS KMS</a>.</p> <p>For
- * information on <code>PutBucketReplication</code> errors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList">List
- * of replication-related error codes</a> </p> <p>The following operations are
- * related to <code>PutBucketReplication</code>:</p> <ul> <li> <p> <a
+ * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>To perform
+ * this operation, the user or role performing the action must have the <a
+ * href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html">iam:PassRole</a>
+ * permission.</p> <p>The following operations are related to
+ * <code>PutBucketReplication</code>:</p> <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html">GetBucketReplication</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html">DeleteBucketReplication</a>
@@ -8278,122 +4198,22 @@ namespace Aws
virtual Model::PutBucketReplicationOutcome PutBucketReplication(const Model::PutBucketReplicationRequest& request) const;
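A minimal sketch of a replication configuration matching the description above: because the rule uses a Filter, it also sets DeleteMarkerReplication, Status, and Priority as required. The bucket names, prefix, rule ID, and role ARN are placeholders, and the source bucket is assumed to have versioning enabled.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketReplicationRequest.h>
    #include <aws/s3/model/ReplicationConfiguration.h>
    #include <aws/s3/model/ReplicationRule.h>
    #include <aws/s3/model/ReplicationRuleFilter.h>
    #include <aws/s3/model/DeleteMarkerReplication.h>
    #include <aws/s3/model/Destination.h>

    // Hypothetical example: replicate objects under "reports/" to a
    // destination bucket using a pre-created IAM role.
    Aws::S3::Model::PutBucketReplicationOutcome
    PutReportsReplication(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::ReplicationRuleFilter filter;
        filter.SetPrefix("reports/");                   // placeholder prefix

        Aws::S3::Model::DeleteMarkerReplication dmr;
        dmr.SetStatus(Aws::S3::Model::DeleteMarkerReplicationStatus::Disabled);

        Aws::S3::Model::Destination destination;
        destination.SetBucket("arn:aws:s3:::example-destination-bucket"); // placeholder ARN

        Aws::S3::Model::ReplicationRule rule;
        rule.SetID("replicate-reports");                // placeholder rule ID
        rule.SetStatus(Aws::S3::Model::ReplicationRuleStatus::Enabled);
        rule.SetPriority(1);
        rule.SetFilter(filter);
        rule.SetDeleteMarkerReplication(dmr);
        rule.SetDestination(destination);

        Aws::S3::Model::ReplicationConfiguration config;
        config.SetRole("arn:aws:iam::123456789012:role/example-replication-role"); // placeholder
        config.AddRules(rule);

        Aws::S3::Model::PutBucketReplicationRequest request;
        request.SetBucket("example-source-bucket");     // placeholder bucket
        request.SetReplicationConfiguration(config);

        return client.PutBucketReplication(request);
    }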
/**
- * <p> Creates a replication configuration or replaces an existing one. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 Developer Guide</i>. </p> <p>To perform this
- * operation, the user or role performing the action must have the <a
- * href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html">iam:PassRole</a>
- * permission.</p> <p>Specify the replication configuration in the request
- * body. In the replication configuration, you provide the name of the destination
- * bucket or buckets where you want Amazon S3 to replicate objects, the IAM role
- * that Amazon S3 can assume to replicate objects on your behalf, and other
- * relevant information.</p> <p>A replication configuration must include at least
- * one rule, and can contain a maximum of 1,000. Each rule identifies a subset of
- * objects to replicate by filtering the objects in the source bucket. To choose
- * additional subsets of objects to replicate, add a rule for each subset.</p>
- * <p>To specify a subset of the objects in the source bucket to apply a
- * replication rule to, add the Filter element as a child of the Rule element. You
- * can filter objects based on an object key prefix, one or more object tags, or
- * both. When you add the Filter element in the configuration, you must also add
- * the following elements: <code>DeleteMarkerReplication</code>,
- * <code>Status</code>, and <code>Priority</code>.</p> <p>If you are using
- * an earlier version of the replication configuration, Amazon S3 handles
- * replication of delete markers differently. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations">Backward
- * Compatibility</a>.</p> <p>For information about enabling versioning on a
- * bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html">Using
- * Versioning</a>.</p> <p>By default, a resource owner, in this case the AWS
- * account that created the bucket, can perform this operation. The resource owner
- * can also grant others permissions to perform the operation. For more information
- * about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> <b>Handling
- * Replication of Encrypted Objects</b> </p> <p>By default, Amazon S3 doesn't
- * replicate objects that are stored at rest using server-side encryption with CMKs
- * stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following:
- * <code>SourceSelectionCriteria</code>, <code>SseKmsEncryptedObjects</code>,
- * <code>Status</code>, <code>EncryptionConfiguration</code>, and
- * <code>ReplicaKmsKeyID</code>. For information about replication configuration,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html">Replicating
- * Objects Created with SSE Using CMKs stored in AWS KMS</a>.</p> <p>For
- * information on <code>PutBucketReplication</code> errors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList">List
- * of replication-related error codes</a> </p> <p>The following operations are
- * related to <code>PutBucketReplication</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html">GetBucketReplication</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html">DeleteBucketReplication</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketReplication that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketReplicationOutcomeCallable PutBucketReplicationCallable(const Model::PutBucketReplicationRequest& request) const;
+ template<typename PutBucketReplicationRequestT = Model::PutBucketReplicationRequest>
+ Model::PutBucketReplicationOutcomeCallable PutBucketReplicationCallable(const PutBucketReplicationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketReplication, request);
+ }
/**
- * <p> Creates a replication configuration or replaces an existing one. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication.html">Replication</a>
- * in the <i>Amazon S3 Developer Guide</i>. </p> <p>To perform this
- * operation, the user or role performing the action must have the <a
- * href="https://docs.aws.amazon.com/IAM/latest/UserGuide/id_roles_use_passrole.html">iam:PassRole</a>
- * permission.</p> <p>Specify the replication configuration in the request
- * body. In the replication configuration, you provide the name of the destination
- * bucket or buckets where you want Amazon S3 to replicate objects, the IAM role
- * that Amazon S3 can assume to replicate objects on your behalf, and other
- * relevant information.</p> <p>A replication configuration must include at least
- * one rule, and can contain a maximum of 1,000. Each rule identifies a subset of
- * objects to replicate by filtering the objects in the source bucket. To choose
- * additional subsets of objects to replicate, add a rule for each subset.</p>
- * <p>To specify a subset of the objects in the source bucket to apply a
- * replication rule to, add the Filter element as a child of the Rule element. You
- * can filter objects based on an object key prefix, one or more object tags, or
- * both. When you add the Filter element in the configuration, you must also add
- * the following elements: <code>DeleteMarkerReplication</code>,
- * <code>Status</code>, and <code>Priority</code>.</p> <p>If you are using
- * an earlier version of the replication configuration, Amazon S3 handles
- * replication of delete markers differently. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-add-config.html#replication-backward-compat-considerations">Backward
- * Compatibility</a>.</p> <p>For information about enabling versioning on a
- * bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/Versioning.html">Using
- * Versioning</a>.</p> <p>By default, a resource owner, in this case the AWS
- * account that created the bucket, can perform this operation. The resource owner
- * can also grant others permissions to perform the operation. For more information
- * about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p> <b>Handling
- * Replication of Encrypted Objects</b> </p> <p>By default, Amazon S3 doesn't
- * replicate objects that are stored at rest using server-side encryption with CMKs
- * stored in AWS KMS. To replicate AWS KMS-encrypted objects, add the following:
- * <code>SourceSelectionCriteria</code>, <code>SseKmsEncryptedObjects</code>,
- * <code>Status</code>, <code>EncryptionConfiguration</code>, and
- * <code>ReplicaKmsKeyID</code>. For information about replication configuration,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-config-for-kms-objects.html">Replicating
- * Objects Created with SSE Using CMKs stored in AWS KMS</a>.</p> <p>For
- * information on <code>PutBucketReplication</code> errors, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#ReplicationErrorCodeList">List
- * of replication-related error codes</a> </p> <p>The following operations are
- * related to <code>PutBucketReplication</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketReplication.html">GetBucketReplication</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketReplication.html">DeleteBucketReplication</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketReplication">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketReplication that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketReplicationAsync(const Model::PutBucketReplicationRequest& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketReplicationRequestT = Model::PutBucketReplicationRequest>
+ void PutBucketReplicationAsync(const PutBucketReplicationRequestT& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketReplication, request, handler, context);
+ }
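Correspondingly, a sketch of the Async wrapper with an inline handler whose parameters follow the PutBucketReplicationResponseReceivedHandler signature; again client and the helper name are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketReplicationRequest.h>
#include <aws/core/client/AsyncCallerContext.h>

// Fire-and-forget submission; the lambda runs on the client's executor thread.
void PutReplicationAsync(const Aws::S3::S3Client& client,
                         const Aws::S3::Model::PutBucketReplicationRequest& request)
{
    client.PutBucketReplicationAsync(
        request,
        [](const Aws::S3::S3Client*,
           const Aws::S3::Model::PutBucketReplicationRequest&,
           const Aws::S3::Model::PutBucketReplicationOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
        {
            if (!outcome.IsSuccess())
            {
                // Handle or log outcome.GetError() here.
            }
        });
}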
/**
* <p>Sets the request payment configuration for a bucket. By default, the bucket
@@ -8413,61 +4233,42 @@ namespace Aws
virtual Model::PutBucketRequestPaymentOutcome PutBucketRequestPayment(const Model::PutBucketRequestPaymentRequest& request) const;
/**
- * <p>Sets the request payment configuration for a bucket. By default, the bucket
- * owner pays for downloads from the bucket. This configuration parameter enables
- * the bucket owner (only) to specify that the person requesting the download will
- * be charged for the download. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html">Requester
- * Pays Buckets</a>.</p> <p>The following operations are related to
- * <code>PutBucketRequestPayment</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html">GetBucketRequestPayment</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketRequestPayment that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketRequestPaymentOutcomeCallable PutBucketRequestPaymentCallable(const Model::PutBucketRequestPaymentRequest& request) const;
+ template<typename PutBucketRequestPaymentRequestT = Model::PutBucketRequestPaymentRequest>
+ Model::PutBucketRequestPaymentOutcomeCallable PutBucketRequestPaymentCallable(const PutBucketRequestPaymentRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketRequestPayment, request);
+ }
/**
- * <p>Sets the request payment configuration for a bucket. By default, the bucket
- * owner pays for downloads from the bucket. This configuration parameter enables
- * the bucket owner (only) to specify that the person requesting the download will
- * be charged for the download. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RequesterPaysBuckets.html">Requester
- * Pays Buckets</a>.</p> <p>The following operations are related to
- * <code>PutBucketRequestPayment</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketRequestPayment.html">GetBucketRequestPayment</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketRequestPayment">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketRequestPayment that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketRequestPaymentAsync(const Model::PutBucketRequestPaymentRequest& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketRequestPaymentRequestT = Model::PutBucketRequestPaymentRequest>
+ void PutBucketRequestPaymentAsync(const PutBucketRequestPaymentRequestT& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketRequestPayment, request, handler, context);
+ }
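To make the Requester Pays configuration described above concrete, a small sketch that builds the request payment configuration and calls the synchronous operation; it assumes the generated Payer enum and Set* accessors, and the bucket name and helper are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketRequestPaymentRequest.h>
#include <aws/s3/model/RequestPaymentConfiguration.h>

// Ask S3 to charge the requester (not the bucket owner) for downloads.
bool EnableRequesterPays(const Aws::S3::S3Client& client, const Aws::String& bucket)
{
    Aws::S3::Model::RequestPaymentConfiguration config;
    config.SetPayer(Aws::S3::Model::Payer::Requester);

    Aws::S3::Model::PutBucketRequestPaymentRequest request;
    request.SetBucket(bucket);
    request.SetRequestPaymentConfiguration(config);

    return client.PutBucketRequestPayment(request).IsSuccess();
}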
/**
- * <p>Sets the tags for a bucket.</p> <p>Use tags to organize your AWS bill to
- * reflect your own cost structure. To do this, sign up to get your AWS account
- * bill with tag key values included. Then, to see the cost of combined resources,
- * organize your billing information according to resources with the same tag key
- * values. For example, you can tag several resources with a specific application
- * name, and then organize your billing information to see the total cost of that
- * application across several services. For more information, see <a
+ * <p>Sets the tags for a bucket.</p> <p>Use tags to organize your Amazon Web
+ * Services bill to reflect your own cost structure. To do this, sign up to get
+ * your Amazon Web Services account bill with tag key values included. Then, to see
+ * the cost of combined resources, organize your billing information according to
+ * resources with the same tag key values. For example, you can tag several
+ * resources with a specific application name, and then organize your billing
+ * information to see the total cost of that application across several services.
+ * For more information, see <a
* href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Cost
- * Allocation and Tagging</a>.</p> <p>Within a bucket, if you add a tag that
- * has the same key as an existing tag, the new value overwrites the old value. For
- * more information, see <a
+ * Allocation and Tagging</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html">Using
- * Cost Allocation in Amazon S3 Bucket Tags</a>.</p> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:PutBucketTagging</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
+ * Cost Allocation in Amazon S3 Bucket Tags</a>.</p> <p> When this operation
+ * sets the tags for a bucket, it will overwrite any current tags the bucket
+ * already has. You cannot use this operation to add tags to an existing list of
+ * tags.</p> <p>To use this operation, you must have permissions to perform
+ * the <code>s3:PutBucketTagging</code> action. The bucket owner has this
+ * permission by default and can grant this permission to others. For more
+ * information about permissions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
* Related to Bucket Subresource Operations</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
@@ -8478,16 +4279,17 @@ namespace Aws
* input validation. For information about tag restrictions, see <a
* href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html">User-Defined
* Tag Restrictions</a> and <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html">AWS-Generated
- * Cost Allocation Tag Restrictions</a>.</p> </li> </ul> </li> <li> <p>Error code:
- * <code>MalformedXMLError</code> </p> <ul> <li> <p>Description: The XML provided
- * does not match the schema.</p> </li> </ul> </li> <li> <p>Error code:
- * <code>OperationAbortedError </code> </p> <ul> <li> <p>Description: A conflicting
- * conditional action is currently in progress against this resource. Please try
- * again.</p> </li> </ul> </li> <li> <p>Error code: <code>InternalError</code> </p>
- * <ul> <li> <p>Description: The service was unable to apply the provided tag to
- * the bucket.</p> </li> </ul> </li> </ul> <p>The following operations are related
- * to <code>PutBucketTagging</code>:</p> <ul> <li> <p> <a
+ * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html">Amazon
+ * Web Services-Generated Cost Allocation Tag Restrictions</a>.</p> </li> </ul>
+ * </li> <li> <p>Error code: <code>MalformedXMLError</code> </p> <ul> <li>
+ * <p>Description: The XML provided does not match the schema.</p> </li> </ul>
+ * </li> <li> <p>Error code: <code>OperationAbortedError </code> </p> <ul> <li>
+ * <p>Description: A conflicting conditional action is currently in progress
+ * against this resource. Please try again.</p> </li> </ul> </li> <li> <p>Error
+ * code: <code>InternalError</code> </p> <ul> <li> <p>Description: The service was
+ * unable to apply the provided tag to the bucket.</p> </li> </ul> </li> </ul>
+ * <p>The following operations are related to <code>PutBucketTagging</code>:</p>
+ * <ul> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html">GetBucketTagging</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html">DeleteBucketTagging</a>
@@ -8498,124 +4300,44 @@ namespace Aws
virtual Model::PutBucketTaggingOutcome PutBucketTagging(const Model::PutBucketTaggingRequest& request) const;
/**
- * <p>Sets the tags for a bucket.</p> <p>Use tags to organize your AWS bill to
- * reflect your own cost structure. To do this, sign up to get your AWS account
- * bill with tag key values included. Then, to see the cost of combined resources,
- * organize your billing information according to resources with the same tag key
- * values. For example, you can tag several resources with a specific application
- * name, and then organize your billing information to see the total cost of that
- * application across several services. For more information, see <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Cost
- * Allocation and Tagging</a>.</p> <p>Within a bucket, if you add a tag that
- * has the same key as an existing tag, the new value overwrites the old value. For
- * more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html">Using
- * Cost Allocation in Amazon S3 Bucket Tags</a>.</p> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:PutBucketTagging</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>
- * <code>PutBucketTagging</code> has the following special errors:</p> <ul> <li>
- * <p>Error code: <code>InvalidTagError</code> </p> <ul> <li> <p>Description: The
- * tag provided was not a valid tag. This error can occur if the tag did not pass
- * input validation. For information about tag restrictions, see <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html">User-Defined
- * Tag Restrictions</a> and <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html">AWS-Generated
- * Cost Allocation Tag Restrictions</a>.</p> </li> </ul> </li> <li> <p>Error code:
- * <code>MalformedXMLError</code> </p> <ul> <li> <p>Description: The XML provided
- * does not match the schema.</p> </li> </ul> </li> <li> <p>Error code:
- * <code>OperationAbortedError </code> </p> <ul> <li> <p>Description: A conflicting
- * conditional action is currently in progress against this resource. Please try
- * again.</p> </li> </ul> </li> <li> <p>Error code: <code>InternalError</code> </p>
- * <ul> <li> <p>Description: The service was unable to apply the provided tag to
- * the bucket.</p> </li> </ul> </li> </ul> <p>The following operations are related
- * to <code>PutBucketTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html">GetBucketTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html">DeleteBucketTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketTagging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketTaggingOutcomeCallable PutBucketTaggingCallable(const Model::PutBucketTaggingRequest& request) const;
+ template<typename PutBucketTaggingRequestT = Model::PutBucketTaggingRequest>
+ Model::PutBucketTaggingOutcomeCallable PutBucketTaggingCallable(const PutBucketTaggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketTagging, request);
+ }
/**
- * <p>Sets the tags for a bucket.</p> <p>Use tags to organize your AWS bill to
- * reflect your own cost structure. To do this, sign up to get your AWS account
- * bill with tag key values included. Then, to see the cost of combined resources,
- * organize your billing information according to resources with the same tag key
- * values. For example, you can tag several resources with a specific application
- * name, and then organize your billing information to see the total cost of that
- * application across several services. For more information, see <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/cost-alloc-tags.html">Cost
- * Allocation and Tagging</a>.</p> <p>Within a bucket, if you add a tag that
- * has the same key as an existing tag, the new value overwrites the old value. For
- * more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/CostAllocTagging.html">Using
- * Cost Allocation in Amazon S3 Bucket Tags</a>.</p> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:PutBucketTagging</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a>.</p> <p>
- * <code>PutBucketTagging</code> has the following special errors:</p> <ul> <li>
- * <p>Error code: <code>InvalidTagError</code> </p> <ul> <li> <p>Description: The
- * tag provided was not a valid tag. This error can occur if the tag did not pass
- * input validation. For information about tag restrictions, see <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html">User-Defined
- * Tag Restrictions</a> and <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/aws-tag-restrictions.html">AWS-Generated
- * Cost Allocation Tag Restrictions</a>.</p> </li> </ul> </li> <li> <p>Error code:
- * <code>MalformedXMLError</code> </p> <ul> <li> <p>Description: The XML provided
- * does not match the schema.</p> </li> </ul> </li> <li> <p>Error code:
- * <code>OperationAbortedError </code> </p> <ul> <li> <p>Description: A conflicting
- * conditional action is currently in progress against this resource. Please try
- * again.</p> </li> </ul> </li> <li> <p>Error code: <code>InternalError</code> </p>
- * <ul> <li> <p>Description: The service was unable to apply the provided tag to
- * the bucket.</p> </li> </ul> </li> </ul> <p>The following operations are related
- * to <code>PutBucketTagging</code>:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketTagging.html">GetBucketTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucketTagging.html">DeleteBucketTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketTagging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketTagging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketTaggingAsync(const Model::PutBucketTaggingRequest& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketTaggingRequestT = Model::PutBucketTaggingRequest>
+ void PutBucketTaggingAsync(const PutBucketTaggingRequestT& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketTagging, request, handler, context);
+ }
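A short sketch of the tagging call described above; note that, per the documentation, the tag set supplied here replaces whatever tags the bucket already carries. The Tag/Tagging accessors are the generated With*/Add* setters, and the key/value pairs are illustrative:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketTaggingRequest.h>
#include <aws/s3/model/Tagging.h>
#include <aws/s3/model/Tag.h>

// Replaces the bucket's entire tag set (PutBucketTagging overwrites existing tags).
bool TagBucket(const Aws::S3::S3Client& client, const Aws::String& bucket)
{
    Aws::S3::Model::Tagging tagging;
    tagging.AddTagSet(Aws::S3::Model::Tag().WithKey("project").WithValue("analytics"))
           .AddTagSet(Aws::S3::Model::Tag().WithKey("cost-center").WithValue("1234"));

    Aws::S3::Model::PutBucketTaggingRequest request;
    request.SetBucket(bucket);
    request.SetTagging(tagging);

    return client.PutBucketTagging(request).IsSuccess();
}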
/**
- * <p>Sets the versioning state of an existing bucket. To set the versioning state,
- * you must be the bucket owner.</p> <p>You can set the versioning state with one
- * of the following values:</p> <p> <b>Enabled</b>—Enables versioning for the
- * objects in the bucket. All objects added to the bucket receive a unique version
- * ID.</p> <p> <b>Suspended</b>—Disables versioning for the objects in the bucket.
- * All objects added to the bucket receive the version ID null.</p> <p>If the
- * versioning state has never been set on a bucket, it has no versioning state; a
- * <a
+ * <p>Sets the versioning state of an existing bucket.</p> <p>You can set the
+ * versioning state with one of the following values:</p> <p>
+ * <b>Enabled</b>—Enables versioning for the objects in the bucket. All objects
+ * added to the bucket receive a unique version ID.</p> <p>
+ * <b>Suspended</b>—Disables versioning for the objects in the bucket. All objects
+ * added to the bucket receive the version ID null.</p> <p>If the versioning state
+ * has never been set on a bucket, it has no versioning state; a <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>
- * request does not return a versioning state value.</p> <p>If the bucket owner
- * enables MFA Delete in the bucket versioning configuration, the bucket owner must
- * include the <code>x-amz-mfa request</code> header and the <code>Status</code>
- * and the <code>MfaDelete</code> request elements in a request to set the
- * versioning state of the bucket.</p> <p>If you have an object
- * expiration lifecycle policy in your non-versioned bucket and you want to
- * maintain the same permanent delete behavior when you enable versioning, you must
- * add a noncurrent expiration policy. The noncurrent expiration lifecycle policy
- * will manage the deletes of the noncurrent object versions in the version-enabled
- * bucket. (A version-enabled bucket maintains one current and zero or more
- * noncurrent object versions.) For more information, see <a
+ * request does not return a versioning state value.</p> <p>In order to enable MFA
+ * Delete, you must be the bucket owner. If you are the bucket owner and want to
+ * enable MFA Delete in the bucket versioning configuration, you must include the
+ * <code>x-amz-mfa</code> request header and the <code>Status</code> and the

+ * <code>MfaDelete</code> request elements in a request to set the versioning state
+ * of the bucket.</p> <p>If you have an object expiration lifecycle
+ * policy in your non-versioned bucket and you want to maintain the same permanent
+ * delete behavior when you enable versioning, you must add a noncurrent expiration
+ * policy. The noncurrent expiration lifecycle policy will manage the deletes of
+ * the noncurrent object versions in the version-enabled bucket. (A version-enabled
+ * bucket maintains one current and zero or more noncurrent object versions.) For
+ * more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config">Lifecycle
* and Versioning</a>.</p> <p class="title"> <b>Related Resources</b>
* </p> <ul> <li> <p> <a
@@ -8631,78 +4353,22 @@ namespace Aws
virtual Model::PutBucketVersioningOutcome PutBucketVersioning(const Model::PutBucketVersioningRequest& request) const;
/**
- * <p>Sets the versioning state of an existing bucket. To set the versioning state,
- * you must be the bucket owner.</p> <p>You can set the versioning state with one
- * of the following values:</p> <p> <b>Enabled</b>—Enables versioning for the
- * objects in the bucket. All objects added to the bucket receive a unique version
- * ID.</p> <p> <b>Suspended</b>—Disables versioning for the objects in the bucket.
- * All objects added to the bucket receive the version ID null.</p> <p>If the
- * versioning state has never been set on a bucket, it has no versioning state; a
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>
- * request does not return a versioning state value.</p> <p>If the bucket owner
- * enables MFA Delete in the bucket versioning configuration, the bucket owner must
- * include the <code>x-amz-mfa request</code> header and the <code>Status</code>
- * and the <code>MfaDelete</code> request elements in a request to set the
- * versioning state of the bucket.</p> <p>If you have an object
- * expiration lifecycle policy in your non-versioned bucket and you want to
- * maintain the same permanent delete behavior when you enable versioning, you must
- * add a noncurrent expiration policy. The noncurrent expiration lifecycle policy
- * will manage the deletes of the noncurrent object versions in the version-enabled
- * bucket. (A version-enabled bucket maintains one current and zero or more
- * noncurrent object versions.) For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config">Lifecycle
- * and Versioning</a>.</p> <p class="title"> <b>Related Resources</b>
- * </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketVersioning that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketVersioningOutcomeCallable PutBucketVersioningCallable(const Model::PutBucketVersioningRequest& request) const;
+ template<typename PutBucketVersioningRequestT = Model::PutBucketVersioningRequest>
+ Model::PutBucketVersioningOutcomeCallable PutBucketVersioningCallable(const PutBucketVersioningRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketVersioning, request);
+ }
/**
- * <p>Sets the versioning state of an existing bucket. To set the versioning state,
- * you must be the bucket owner.</p> <p>You can set the versioning state with one
- * of the following values:</p> <p> <b>Enabled</b>—Enables versioning for the
- * objects in the bucket. All objects added to the bucket receive a unique version
- * ID.</p> <p> <b>Suspended</b>—Disables versioning for the objects in the bucket.
- * All objects added to the bucket receive the version ID null.</p> <p>If the
- * versioning state has never been set on a bucket, it has no versioning state; a
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>
- * request does not return a versioning state value.</p> <p>If the bucket owner
- * enables MFA Delete in the bucket versioning configuration, the bucket owner must
- * include the <code>x-amz-mfa request</code> header and the <code>Status</code>
- * and the <code>MfaDelete</code> request elements in a request to set the
- * versioning state of the bucket.</p> <p>If you have an object
- * expiration lifecycle policy in your non-versioned bucket and you want to
- * maintain the same permanent delete behavior when you enable versioning, you must
- * add a noncurrent expiration policy. The noncurrent expiration lifecycle policy
- * will manage the deletes of the noncurrent object versions in the version-enabled
- * bucket. (A version-enabled bucket maintains one current and zero or more
- * noncurrent object versions.) For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html#lifecycle-and-other-bucket-config">Lifecycle
- * and Versioning</a>.</p> <p class="title"> <b>Related Resources</b>
- * </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateBucket.html">CreateBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteBucket.html">DeleteBucket</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketVersioning">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketVersioning that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketVersioningAsync(const Model::PutBucketVersioningRequest& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketVersioningRequestT = Model::PutBucketVersioningRequest>
+ void PutBucketVersioningAsync(const PutBucketVersioningRequestT& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketVersioning, request, handler, context);
+ }
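A minimal sketch of enabling versioning as described above, assuming the generated VersioningConfiguration model and BucketVersioningStatus enum; switching the status to Suspended would disable versioning for new objects:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketVersioningRequest.h>
#include <aws/s3/model/VersioningConfiguration.h>

// Turn on versioning so every new object gets a unique version ID.
bool EnableVersioning(const Aws::S3::S3Client& client, const Aws::String& bucket)
{
    Aws::S3::Model::VersioningConfiguration config;
    config.SetStatus(Aws::S3::Model::BucketVersioningStatus::Enabled);

    Aws::S3::Model::PutBucketVersioningRequest request;
    request.SetBucket(bucket);
    request.SetVersioningConfiguration(config);

    return client.PutBucketVersioning(request).IsSuccess();
}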
/**
* <p>Sets the configuration of the website that is specified in the
@@ -8748,94 +4414,22 @@ namespace Aws
virtual Model::PutBucketWebsiteOutcome PutBucketWebsite(const Model::PutBucketWebsiteRequest& request) const;
/**
- * <p>Sets the configuration of the website that is specified in the
- * <code>website</code> subresource. To configure a bucket as a website, you can
- * add this subresource on the bucket with website configuration information such
- * as the file name of the index document and any redirect rules. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html">Hosting
- * Websites on Amazon S3</a>.</p> <p>This PUT action requires the
- * <code>S3:PutBucketWebsite</code> permission. By default, only the bucket owner
- * can configure the website attached to a bucket; however, bucket owners can allow
- * other users to set the website configuration by writing a bucket policy that
- * grants them the <code>S3:PutBucketWebsite</code> permission.</p> <p>To redirect
- * all website requests sent to the bucket's website endpoint, you add a website
- * configuration with the following elements. Because all requests are sent to
- * another website, you don't need to provide index document name for the
- * bucket.</p> <ul> <li> <p> <code>WebsiteConfiguration</code> </p> </li> <li> <p>
- * <code>RedirectAllRequestsTo</code> </p> </li> <li> <p> <code>HostName</code>
- * </p> </li> <li> <p> <code>Protocol</code> </p> </li> </ul> <p>If you want
- * granular control over redirects, you can use the following elements to add
- * routing rules that describe conditions for redirecting requests and information
- * about the redirect destination. In this case, the website configuration must
- * provide an index document for the bucket, because some requests might not be
- * redirected. </p> <ul> <li> <p> <code>WebsiteConfiguration</code> </p> </li> <li>
- * <p> <code>IndexDocument</code> </p> </li> <li> <p> <code>Suffix</code> </p>
- * </li> <li> <p> <code>ErrorDocument</code> </p> </li> <li> <p> <code>Key</code>
- * </p> </li> <li> <p> <code>RoutingRules</code> </p> </li> <li> <p>
- * <code>RoutingRule</code> </p> </li> <li> <p> <code>Condition</code> </p> </li>
- * <li> <p> <code>HttpErrorCodeReturnedEquals</code> </p> </li> <li> <p>
- * <code>KeyPrefixEquals</code> </p> </li> <li> <p> <code>Redirect</code> </p>
- * </li> <li> <p> <code>Protocol</code> </p> </li> <li> <p> <code>HostName</code>
- * </p> </li> <li> <p> <code>ReplaceKeyPrefixWith</code> </p> </li> <li> <p>
- * <code>ReplaceKeyWith</code> </p> </li> <li> <p> <code>HttpRedirectCode</code>
- * </p> </li> </ul> <p>Amazon S3 has a limitation of 50 routing rules per website
- * configuration. If you require more than 50 routing rules, you can use object
- * redirect. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html">Configuring
- * an Object Redirect</a> in the <i>Amazon S3 User Guide</i>.</p><p><h3>See
- * Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutBucketWebsite that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutBucketWebsiteOutcomeCallable PutBucketWebsiteCallable(const Model::PutBucketWebsiteRequest& request) const;
+ template<typename PutBucketWebsiteRequestT = Model::PutBucketWebsiteRequest>
+ Model::PutBucketWebsiteOutcomeCallable PutBucketWebsiteCallable(const PutBucketWebsiteRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutBucketWebsite, request);
+ }
/**
- * <p>Sets the configuration of the website that is specified in the
- * <code>website</code> subresource. To configure a bucket as a website, you can
- * add this subresource on the bucket with website configuration information such
- * as the file name of the index document and any redirect rules. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/WebsiteHosting.html">Hosting
- * Websites on Amazon S3</a>.</p> <p>This PUT action requires the
- * <code>S3:PutBucketWebsite</code> permission. By default, only the bucket owner
- * can configure the website attached to a bucket; however, bucket owners can allow
- * other users to set the website configuration by writing a bucket policy that
- * grants them the <code>S3:PutBucketWebsite</code> permission.</p> <p>To redirect
- * all website requests sent to the bucket's website endpoint, you add a website
- * configuration with the following elements. Because all requests are sent to
- * another website, you don't need to provide index document name for the
- * bucket.</p> <ul> <li> <p> <code>WebsiteConfiguration</code> </p> </li> <li> <p>
- * <code>RedirectAllRequestsTo</code> </p> </li> <li> <p> <code>HostName</code>
- * </p> </li> <li> <p> <code>Protocol</code> </p> </li> </ul> <p>If you want
- * granular control over redirects, you can use the following elements to add
- * routing rules that describe conditions for redirecting requests and information
- * about the redirect destination. In this case, the website configuration must
- * provide an index document for the bucket, because some requests might not be
- * redirected. </p> <ul> <li> <p> <code>WebsiteConfiguration</code> </p> </li> <li>
- * <p> <code>IndexDocument</code> </p> </li> <li> <p> <code>Suffix</code> </p>
- * </li> <li> <p> <code>ErrorDocument</code> </p> </li> <li> <p> <code>Key</code>
- * </p> </li> <li> <p> <code>RoutingRules</code> </p> </li> <li> <p>
- * <code>RoutingRule</code> </p> </li> <li> <p> <code>Condition</code> </p> </li>
- * <li> <p> <code>HttpErrorCodeReturnedEquals</code> </p> </li> <li> <p>
- * <code>KeyPrefixEquals</code> </p> </li> <li> <p> <code>Redirect</code> </p>
- * </li> <li> <p> <code>Protocol</code> </p> </li> <li> <p> <code>HostName</code>
- * </p> </li> <li> <p> <code>ReplaceKeyPrefixWith</code> </p> </li> <li> <p>
- * <code>ReplaceKeyWith</code> </p> </li> <li> <p> <code>HttpRedirectCode</code>
- * </p> </li> </ul> <p>Amazon S3 has a limitation of 50 routing rules per website
- * configuration. If you require more than 50 routing rules, you can use object
- * redirect. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/how-to-page-redirect.html">Configuring
- * an Object Redirect</a> in the <i>Amazon S3 User Guide</i>.</p><p><h3>See
- * Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutBucketWebsite">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutBucketWebsite that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutBucketWebsiteAsync(const Model::PutBucketWebsiteRequest& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutBucketWebsiteRequestT = Model::PutBucketWebsiteRequest>
+ void PutBucketWebsiteAsync(const PutBucketWebsiteRequestT& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutBucketWebsite, request, handler, context);
+ }
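For the website configuration described above, a sketch using the IndexDocument/ErrorDocument form (rather than RedirectAllRequestsTo); the document names and helper are illustrative, and the accessors are the generated Set*/With* setters:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketWebsiteRequest.h>
#include <aws/s3/model/WebsiteConfiguration.h>
#include <aws/s3/model/IndexDocument.h>
#include <aws/s3/model/ErrorDocument.h>

// Serve the bucket as a static website with an index page and an error page.
bool ConfigureWebsite(const Aws::S3::S3Client& client, const Aws::String& bucket)
{
    Aws::S3::Model::WebsiteConfiguration config;
    config.SetIndexDocument(Aws::S3::Model::IndexDocument().WithSuffix("index.html"));
    config.SetErrorDocument(Aws::S3::Model::ErrorDocument().WithKey("error.html"));

    Aws::S3::Model::PutBucketWebsiteRequest request;
    request.SetBucket(bucket);
    request.SetWebsiteConfiguration(config);

    return client.PutBucketWebsite(request).IsSuccess();
}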
/**
* <p>Adds an object to a bucket. You must have WRITE permissions on a bucket to
@@ -8849,46 +4443,64 @@ namespace Aws
* header. When you use this header, Amazon S3 checks the object against the
* provided MD5 value and, if they do not match, returns an error. Additionally,
* you can calculate the MD5 while putting an object to Amazon S3 and compare the
- * returned ETag to the calculated MD5 value.</p> <p> The
- * <code>Content-MD5</code> header is required for any request to upload an object
- * with a retention period configured using Amazon S3 Object Lock. For more
- * information about Amazon S3 Object Lock, see <a
+ * returned ETag to the calculated MD5 value.</p> <ul> <li> <p>To
+ * successfully complete the <code>PutObject</code> request, you must have the
+ * <code>s3:PutObject</code> action in your IAM permissions.</p> </li> <li> <p>To
+ * successfully change the object's ACL with your <code>PutObject</code> request, you
+ * must have the <code>s3:PutObjectAcl</code> action in your IAM permissions.</p> </li>
+ * <li> <p> The <code>Content-MD5</code> header is required for any request to
+ * upload an object with a retention period configured using Amazon S3 Object Lock.
+ * For more information about Amazon S3 Object Lock, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html">Amazon
- * S3 Object Lock Overview</a> in the <i>Amazon S3 User Guide</i>. </p> <p>
- * <b>Server-side Encryption</b> </p> <p>You can optionally request server-side
- * encryption. With server-side encryption, Amazon S3 encrypts your data as it
- * writes it to disks in its data centers and decrypts the data when you access it.
- * You have the option to provide your own encryption key or use AWS managed
- * encryption keys (SSE-S3 or SSE-KMS). For more information, see <a
+ * S3 Object Lock Overview</a> in the <i>Amazon S3 User Guide</i>. </p> </li> </ul>
+ * <p> <b>Server-side Encryption</b> </p> <p>You can optionally request
+ * server-side encryption. With server-side encryption, Amazon S3 encrypts your
+ * data as it writes it to disks in its data centers and decrypts the data when you
+ * access it. You have the option to provide your own encryption key or use Amazon
+ * Web Services managed encryption keys (SSE-S3 or SSE-KMS). For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">Using
* Server-Side Encryption</a>.</p> <p>If you request server-side encryption using
- * AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the
- * object-level. For more information, see <a
+ * Amazon Web Services Key Management Service (SSE-KMS), you can enable an S3
+ * Bucket Key at the object-level. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
* Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Access Control
* List (ACL)-Specific Request Headers</b> </p> <p>You can use headers to grant
* ACL-based permissions. By default, all objects are private. Only the owner has
* full access control. When adding a new object, you can grant permissions to
- * individual AWS accounts or to predefined groups defined by Amazon S3. These
- * permissions are then added to the ACL on the object. For more information, see
- * <a
+ * individual Amazon Web Services accounts or to predefined groups defined by
+ * Amazon S3. These permissions are then added to the ACL on the object. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
* Control List (ACL) Overview</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html">Managing
- * ACLs Using the REST API</a>. </p> <p> <b>Storage Class Options</b> </p> <p>By
- * default, Amazon S3 uses the STANDARD Storage Class to store newly created
+ * ACLs Using the REST API</a>. </p> <p>If the bucket that you're uploading objects
+ * to uses the bucket owner enforced setting for S3 Object Ownership, ACLs are
+ * disabled and no longer affect permissions. Buckets that use this setting only
+ * accept PUT requests that don't specify an ACL or PUT requests that specify
+ * bucket owner full control ACLs, such as the
+ * <code>bucket-owner-full-control</code> canned ACL or an equivalent form of this
+ * ACL expressed in the XML format. PUT requests that contain other ACLs (for
+ * example, custom grants to certain Amazon Web Services accounts) fail and return
+ * a <code>400</code> error with the error code
+ * <code>AccessControlListNotSupported</code>.</p> <p>For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">
+ * Controlling ownership of objects and disabling ACLs</a> in the <i>Amazon S3 User
+ * Guide</i>.</p> <p>If your bucket uses the bucket owner enforced setting
+ * for Object Ownership, all objects written to the bucket by any account will be
+ * owned by the bucket owner.</p> <p> <b>Storage Class Options</b> </p>
+ * <p>By default, Amazon S3 uses the STANDARD Storage Class to store newly created
* objects. The STANDARD storage class provides high durability and high
* availability. Depending on performance needs, you can specify a different
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p> <p>
- * <b>Versioning</b> </p> <p>If you enable versioning for a bucket, Amazon S3
- * automatically generates a unique version ID for the object being stored. Amazon
- * S3 returns this ID in the response. When you enable versioning for a bucket, if
- * Amazon S3 receives multiple write requests for the same object simultaneously,
- * it stores all of the objects.</p> <p>For more information about versioning, see
- * <a
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Versioning</b> </p>
+ * <p>If you enable versioning for a bucket, Amazon S3 automatically generates a
+ * unique version ID for the object being stored. Amazon S3 returns this ID in the
+ * response. When you enable versioning for a bucket, if Amazon S3 receives
+ * multiple write requests for the same object simultaneously, it stores all of the
+ * objects.</p> <p>For more information about versioning, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html">Adding
* Objects to Versioning Enabled Buckets</a>. For information about returning the
* versioning state of a bucket, see <a
@@ -8904,138 +4516,12 @@ namespace Aws
virtual Model::PutObjectOutcome PutObject(const Model::PutObjectRequest& request) const;
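A compact sketch of the synchronous PutObject described above, uploading a small in-memory body; the bucket, key, and payload are placeholders, and the optional Content-MD5 and server-side encryption headers are omitted for brevity:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/core/utils/memory/AWSMemory.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <memory>

// Upload a small in-memory payload; S3 either stores the whole object or fails.
bool PutSmallObject(const Aws::S3::S3Client& client,
                    const Aws::String& bucket, const Aws::String& key)
{
    auto body = Aws::MakeShared<Aws::StringStream>("put-object-example");
    *body << "hello from the C++ SDK";

    Aws::S3::Model::PutObjectRequest request;
    request.SetBucket(bucket);
    request.SetKey(key);
    request.SetBody(body);
    request.SetContentType("text/plain");

    return client.PutObject(request).IsSuccess();
}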
/**
- * <p>Adds an object to a bucket. You must have WRITE permissions on a bucket to
- * add an object to it.</p> <p>Amazon S3 never adds partial objects; if you receive
- * a success response, Amazon S3 added the entire object to the bucket.</p>
- * <p>Amazon S3 is a distributed system. If it receives multiple write requests for
- * the same object simultaneously, it overwrites all but the last object written.
- * Amazon S3 does not provide object locking; if you need this, make sure to build
- * it into your application layer or use versioning instead.</p> <p>To ensure that
- * data is not corrupted traversing the network, use the <code>Content-MD5</code>
- * header. When you use this header, Amazon S3 checks the object against the
- * provided MD5 value and, if they do not match, returns an error. Additionally,
- * you can calculate the MD5 while putting an object to Amazon S3 and compare the
- * returned ETag to the calculated MD5 value.</p> <p> The
- * <code>Content-MD5</code> header is required for any request to upload an object
- * with a retention period configured using Amazon S3 Object Lock. For more
- * information about Amazon S3 Object Lock, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html">Amazon
- * S3 Object Lock Overview</a> in the <i>Amazon S3 User Guide</i>. </p> <p>
- * <b>Server-side Encryption</b> </p> <p>You can optionally request server-side
- * encryption. With server-side encryption, Amazon S3 encrypts your data as it
- * writes it to disks in its data centers and decrypts the data when you access it.
- * You have the option to provide your own encryption key or use AWS managed
- * encryption keys (SSE-S3 or SSE-KMS). For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">Using
- * Server-Side Encryption</a>.</p> <p>If you request server-side encryption using
- * AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the
- * object-level. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
- * Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Access Control
- * List (ACL)-Specific Request Headers</b> </p> <p>You can use headers to grant
- * ACL- based permissions. By default, all objects are private. Only the owner has
- * full access control. When adding a new object, you can grant permissions to
- * individual AWS accounts or to predefined groups defined by Amazon S3. These
- * permissions are then added to the ACL on the object. For more information, see
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html">Managing
- * ACLs Using the REST API</a>. </p> <p> <b>Storage Class Options</b> </p> <p>By
- * default, Amazon S3 uses the STANDARD Storage Class to store newly created
- * objects. The STANDARD storage class provides high durability and high
- * availability. Depending on performance needs, you can specify a different
- * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
- * more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p> <p>
- * <b>Versioning</b> </p> <p>If you enable versioning for a bucket, Amazon S3
- * automatically generates a unique version ID for the object being stored. Amazon
- * S3 returns this ID in the response. When you enable versioning for a bucket, if
- * Amazon S3 receives multiple write requests for the same object simultaneously,
- * it stores all of the objects.</p> <p>For more information about versioning, see
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html">Adding
- * Objects to Versioning Enabled Buckets</a>. For information about returning the
- * versioning state of a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>.
- * </p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutObject that returns a future to the operation so that it can be executed in parallel to other requests.
*/
virtual Model::PutObjectOutcomeCallable PutObjectCallable(const Model::PutObjectRequest& request) const;
/**
- * <p>Adds an object to a bucket. You must have WRITE permissions on a bucket to
- * add an object to it.</p> <p>Amazon S3 never adds partial objects; if you receive
- * a success response, Amazon S3 added the entire object to the bucket.</p>
- * <p>Amazon S3 is a distributed system. If it receives multiple write requests for
- * the same object simultaneously, it overwrites all but the last object written.
- * Amazon S3 does not provide object locking; if you need this, make sure to build
- * it into your application layer or use versioning instead.</p> <p>To ensure that
- * data is not corrupted traversing the network, use the <code>Content-MD5</code>
- * header. When you use this header, Amazon S3 checks the object against the
- * provided MD5 value and, if they do not match, returns an error. Additionally,
- * you can calculate the MD5 while putting an object to Amazon S3 and compare the
- * returned ETag to the calculated MD5 value.</p> <p> The
- * <code>Content-MD5</code> header is required for any request to upload an object
- * with a retention period configured using Amazon S3 Object Lock. For more
- * information about Amazon S3 Object Lock, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock-overview.html">Amazon
- * S3 Object Lock Overview</a> in the <i>Amazon S3 User Guide</i>. </p> <p>
- * <b>Server-side Encryption</b> </p> <p>You can optionally request server-side
- * encryption. With server-side encryption, Amazon S3 encrypts your data as it
- * writes it to disks in its data centers and decrypts the data when you access it.
- * You have the option to provide your own encryption key or use AWS managed
- * encryption keys (SSE-S3 or SSE-KMS). For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">Using
- * Server-Side Encryption</a>.</p> <p>If you request server-side encryption using
- * AWS Key Management Service (SSE-KMS), you can enable an S3 Bucket Key at the
- * object-level. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-key.html">Amazon S3
- * Bucket Keys</a> in the <i>Amazon S3 User Guide</i>.</p> <p> <b>Access Control
- * List (ACL)-Specific Request Headers</b> </p> <p>You can use headers to grant
- * ACL- based permissions. By default, all objects are private. Only the owner has
- * full access control. When adding a new object, you can grant permissions to
- * individual AWS accounts or to predefined groups defined by Amazon S3. These
- * permissions are then added to the ACL on the object. For more information, see
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-using-rest-api.html">Managing
- * ACLs Using the REST API</a>. </p> <p> <b>Storage Class Options</b> </p> <p>By
- * default, Amazon S3 uses the STANDARD Storage Class to store newly created
- * objects. The STANDARD storage class provides high durability and high
- * availability. Depending on performance needs, you can specify a different
- * Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
- * more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p> <p>
- * <b>Versioning</b> </p> <p>If you enable versioning for a bucket, Amazon S3
- * automatically generates a unique version ID for the object being stored. Amazon
- * S3 returns this ID in the response. When you enable versioning for a bucket, if
- * Amazon S3 receives multiple write requests for the same object simultaneously,
- * it stores all of the objects.</p> <p>For more information about versioning, see
- * <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/AddingObjectstoVersioningEnabledBuckets.html">Adding
- * Objects to Versioning Enabled Buckets</a>. For information about returning the
- * versioning state of a bucket, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketVersioning.html">GetBucketVersioning</a>.
- * </p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObject.html">DeleteObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObject">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutObject that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
virtual void PutObjectAsync(const Model::PutObjectRequest& request, const PutObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
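A sketch of the Async variant above that also passes an AsyncCallerContext, so concurrent uploads can be told apart inside the callback; the allocation tag, uploadId, and helper name are illustrative:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/core/client/AsyncCallerContext.h>
#include <aws/core/utils/memory/AWSMemory.h>

// Submit PutObject on the client's executor and tag the call so the callback
// can identify it via ctx->GetUUID().
void PutObjectWithContext(const Aws::S3::S3Client& client,
                          const Aws::S3::Model::PutObjectRequest& request,
                          const Aws::String& uploadId)
{
    auto context = Aws::MakeShared<Aws::Client::AsyncCallerContext>("put-object-async");
    context->SetUUID(uploadId);

    client.PutObjectAsync(
        request,
        [](const Aws::S3::S3Client*, const Aws::S3::Model::PutObjectRequest&,
           const Aws::S3::Model::PutObjectOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& ctx)
        {
            if (!outcome.IsSuccess())
            {
                // outcome.GetError() and ctx->GetUUID() identify the failed upload.
            }
        },
        context);
}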
@@ -9052,7 +4538,15 @@ namespace Aws
* that updates a bucket ACL using the request body, you can continue to use that
* approach. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> in the <i>Amazon S3 Developer Guide</i>.</p> <p>
+ * Control List (ACL) Overview</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * <p>If your bucket uses the bucket owner enforced setting for S3
+ * Object Ownership, ACLs are disabled and no longer affect permissions. You must
+ * use policies to grant access to your bucket and the objects in it. Requests to
+ * set ACLs or update ACLs fail and return the
+ * <code>AccessControlListNotSupported</code> error code. Requests to read ACLs are
+ * still supported. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/about-object-ownership.html">Controlling
+ * object ownership</a> in the <i>Amazon S3 User Guide</i>.</p> <p>
* <b>Access Permissions</b> </p> <p>You can set access permissions using one of
* the following methods:</p> <ul> <li> <p>Specify a canned ACL with the
* <code>x-amz-acl</code> request header. Amazon S3 supports a set of predefined
@@ -9065,33 +4559,36 @@ namespace Aws
* <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
* <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
* headers. When using these headers, you specify explicit access permissions and
- * grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
- * you use these ACL-specific headers, you cannot use <code>x-amz-acl</code> header
- * to set a canned ACL. These parameters map to the set of permissions that Amazon
- * S3 supports in an ACL. For more information, see <a
+ * grantees (Amazon Web Services accounts or Amazon S3 groups) who will receive the
+ * permission. If you use these ACL-specific headers, you cannot use
+ * <code>x-amz-acl</code> header to set a canned ACL. These parameters map to the
+ * set of permissions that Amazon S3 supports in an ACL. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
* Control List (ACL) Overview</a>.</p> <p>You specify each grantee as a type=value
* pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * – if the value specified is the canonical user ID of an Amazon Web Services
+ * account</p> </li> <li> <p> <code>uri</code> – if you are granting permissions to
+ * a predefined group</p> </li> <li> <p> <code>emailAddress</code> – if the value
+ * specified is the email address of an Amazon Web Services account</p>
+ * <p>Using email addresses to specify a grantee is only supported in the following
+ * Amazon Web Services Regions: </p> <ul> <li> <p>US East (N. Virginia)</p> </li>
+ * <li> <p>US West (N. California)</p> </li> <li> <p> US West (Oregon)</p> </li>
+ * <li> <p> Asia Pacific (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p>
+ * </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li>
+ * <li> <p>South America (São Paulo)</p> </li> </ul> <p>For a list of all the
+ * Amazon S3 supported Regions and endpoints, see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants list objects
- * permission to the two AWS accounts identified by their email addresses.</p> <p>
- * <code>x-amz-grant-read: emailAddress="xyz@amazon.com",
- * emailAddress="abc@amazon.com" </code> </p> </li> </ul> <p>You can use either a
- * canned ACL or specify access permissions explicitly. You cannot do both.</p> <p>
- * <b>Grantee Values</b> </p> <p>You can specify the person (grantee) to whom
- * you're assigning access rights (using request elements) in the following
- * ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
+ * </li> </ul> <p>For example, the following <code>x-amz-grant-read</code> header
+ * grants list objects permission to the two Amazon Web Services accounts
+ * identified by their email addresses.</p> <p> <code>x-amz-grant-read:
+ * emailAddress="xyz@amazon.com", emailAddress="abc@amazon.com" </code> </p> </li>
+ * </ul> <p>You can use either a canned ACL or specify access permissions
+ * explicitly. You cannot do both.</p> <p> <b>Grantee Values</b> </p> <p>You can
+ * specify the person (grantee) to whom you're assigning access rights (using
+ * request elements) in the following ways:</p> <ul> <li> <p>By the person's
+ * ID:</p> <p> <code>&lt;Grantee
* xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
* xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
* &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
@@ -9103,19 +4600,20 @@ namespace Aws
* xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;lt;/Grantee&gt;</code>
* </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
* Object acl request, appears as the CanonicalUser.</p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
+ * addresses to specify a grantee is only supported in the following Amazon Web
+ * Services Regions: </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US
+ * West (N. California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia
+ * Pacific (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li>
+ * <p>Asia Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li>
+ * <p>South America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3
+ * supported Regions and endpoints, see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>
- * <b>Versioning</b> </p> <p>The ACL of an object is set at the object version
- * level. By default, PUT sets the ACL of the current version of an object. To set
- * the ACL of a different version, use the <code>versionId</code> subresource.</p>
- * <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
+ * </li> </ul> <p> <b>Versioning</b> </p> <p>The ACL of an object is set at the
+ * object version level. By default, PUT sets the ACL of the current version of an
+ * object. To set the ACL of a different version, use the <code>versionId</code>
+ * subresource.</p> <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p>
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
* </p> </li> <li> <p> <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
@@ -9126,183 +4624,25 @@ namespace Aws
virtual Model::PutObjectAclOutcome PutObjectAcl(const Model::PutObjectAclRequest& request) const;
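      [Editor's note] As a rough illustration of the two request shapes the comment above describes (a canned ACL, or explicit grant headers), here is a sketch against the synchronous PutObjectAcl entry point. The function name, bucket, key and grantee values are placeholders, and only one of the two options should be set on a given request.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutObjectAclRequest.h>
      #include <iostream>

      // Sketch: apply a canned ACL (or, alternatively, explicit grants) to one object.
      void SetObjectAclExample(const Aws::S3::S3Client& client)
      {
          Aws::S3::Model::PutObjectAclRequest request;
          request.SetBucket("example-bucket");  // placeholder
          request.SetKey("example-key");        // placeholder

          // Option 1: canned ACL (cannot be combined with the grant headers below).
          request.SetACL(Aws::S3::Model::ObjectCannedACL::bucket_owner_full_control);

          // Option 2 (instead of the canned ACL): explicit grantees, e.g. by canonical user ID.
          // request.SetGrantRead("id=\"<canonical-user-id>\"");

          auto outcome = client.PutObjectAcl(request);
          if (!outcome.IsSuccess())
              std::cout << "PutObjectAcl failed: " << outcome.GetError().GetMessage() << "\n";
      }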
/**
- * <p>Uses the <code>acl</code> subresource to set the access control list (ACL)
- * permissions for a new or existing object in an S3 bucket. You must have
- * <code>WRITE_ACP</code> permission to set the ACL of an object. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions">What
- * permissions can I grant?</a> in the <i>Amazon S3 User Guide</i>.</p> <p>This
- * action is not supported by Amazon S3 on Outposts.</p> <p>Depending on your
- * application needs, you can choose to set the ACL on an object using either the
- * request body or the headers. For example, if you have an existing application
- * that updates a bucket ACL using the request body, you can continue to use that
- * approach. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> in the <i>Amazon S3 Developer Guide</i>.</p> <p>
- * <b>Access Permissions</b> </p> <p>You can set access permissions using one of
- * the following methods:</p> <ul> <li> <p>Specify a canned ACL with the
- * <code>x-amz-acl</code> request header. Amazon S3 supports a set of predefined
- * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and
- * permissions. Specify the canned ACL name as the value of <code>x-amz-ac</code>l.
- * If you use this header, you cannot use other access control-specific headers in
- * your request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly with the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
- * <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
- * headers. When using these headers, you specify explicit access permissions and
- * grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
- * you use these ACL-specific headers, you cannot use <code>x-amz-acl</code> header
- * to set a canned ACL. These parameters map to the set of permissions that Amazon
- * S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>.</p> <p>You specify each grantee as a type=value
- * pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants list objects
- * permission to the two AWS accounts identified by their email addresses.</p> <p>
- * <code>x-amz-grant-read: emailAddress="xyz@amazon.com",
- * emailAddress="abc@amazon.com" </code> </p> </li> </ul> <p>You can use either a
- * canned ACL or specify access permissions explicitly. You cannot do both.</p> <p>
- * <b>Grantee Values</b> </p> <p>You can specify the person (grantee) to whom
- * you're assigning access rights (using request elements) in the following
- * ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
- * &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
- * request.</p> </li> <li> <p>By URI:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="Group"&gt;&lt;URI&gt;&lt;&gt;http://acs.amazonaws.com/groups/global/AuthenticatedUsers&lt;&gt;&lt;/URI&gt;&lt;/Grantee&gt;</code>
- * </p> </li> <li> <p>By Email address:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;lt;/Grantee&gt;</code>
- * </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
- * Object acl request, appears as the CanonicalUser.</p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>
- * <b>Versioning</b> </p> <p>The ACL of an object is set at the object version
- * level. By default, PUT sets the ACL of the current version of an object. To set
- * the ACL of a different version, use the <code>versionId</code> subresource.</p>
- * <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutObjectAcl that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutObjectAclOutcomeCallable PutObjectAclCallable(const Model::PutObjectAclRequest& request) const;
+ template<typename PutObjectAclRequestT = Model::PutObjectAclRequest>
+ Model::PutObjectAclOutcomeCallable PutObjectAclCallable(const PutObjectAclRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutObjectAcl, request);
+ }
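      [Editor's note] A short sketch of the Callable form generated above: it submits the call to the client's executor and immediately returns a std::future for the outcome, which can be collected later. Names are placeholders and client construction is assumed to happen elsewhere.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutObjectAclRequest.h>
      #include <iostream>

      void PutObjectAclInParallel(const Aws::S3::S3Client& client)
      {
          Aws::S3::Model::PutObjectAclRequest request;
          request.SetBucket("example-bucket");  // placeholder
          request.SetKey("example-key");        // placeholder
          request.SetACL(Aws::S3::Model::ObjectCannedACL::bucket_owner_read);

          // The Callable wrapper returns a std::future<PutObjectAclOutcome> immediately.
          auto future = client.PutObjectAclCallable(request);

          // ... other work can run here in parallel ...

          auto outcome = future.get();  // blocks until the operation finishes
          if (!outcome.IsSuccess())
              std::cout << outcome.GetError().GetMessage() << "\n";
      }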
/**
- * <p>Uses the <code>acl</code> subresource to set the access control list (ACL)
- * permissions for a new or existing object in an S3 bucket. You must have
- * <code>WRITE_ACP</code> permission to set the ACL of an object. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#permissions">What
- * permissions can I grant?</a> in the <i>Amazon S3 User Guide</i>.</p> <p>This
- * action is not supported by Amazon S3 on Outposts.</p> <p>Depending on your
- * application needs, you can choose to set the ACL on an object using either the
- * request body or the headers. For example, if you have an existing application
- * that updates a bucket ACL using the request body, you can continue to use that
- * approach. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a> in the <i>Amazon S3 Developer Guide</i>.</p> <p>
- * <b>Access Permissions</b> </p> <p>You can set access permissions using one of
- * the following methods:</p> <ul> <li> <p>Specify a canned ACL with the
- * <code>x-amz-acl</code> request header. Amazon S3 supports a set of predefined
- * ACLs, known as canned ACLs. Each canned ACL has a predefined set of grantees and
- * permissions. Specify the canned ACL name as the value of <code>x-amz-ac</code>l.
- * If you use this header, you cannot use other access control-specific headers in
- * your request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#CannedACL">Canned
- * ACL</a>.</p> </li> <li> <p>Specify access permissions explicitly with the
- * <code>x-amz-grant-read</code>, <code>x-amz-grant-read-acp</code>,
- * <code>x-amz-grant-write-acp</code>, and <code>x-amz-grant-full-control</code>
- * headers. When using these headers, you specify explicit access permissions and
- * grantees (AWS accounts or Amazon S3 groups) who will receive the permission. If
- * you use these ACL-specific headers, you cannot use <code>x-amz-acl</code> header
- * to set a canned ACL. These parameters map to the set of permissions that Amazon
- * S3 supports in an ACL. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html">Access
- * Control List (ACL) Overview</a>.</p> <p>You specify each grantee as a type=value
- * pair, where the type is one of the following:</p> <ul> <li> <p> <code>id</code>
- * – if the value specified is the canonical user ID of an AWS account</p> </li>
- * <li> <p> <code>uri</code> – if you are granting permissions to a predefined
- * group</p> </li> <li> <p> <code>emailAddress</code> – if the value specified is
- * the email address of an AWS account</p> <p>Using email addresses to
- * specify a grantee is only supported in the following AWS Regions: </p> <ul> <li>
- * <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li>
- * <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li>
- * <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li>
- * <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li>
- * </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>For
- * example, the following <code>x-amz-grant-read</code> header grants list objects
- * permission to the two AWS accounts identified by their email addresses.</p> <p>
- * <code>x-amz-grant-read: emailAddress="xyz@amazon.com",
- * emailAddress="abc@amazon.com" </code> </p> </li> </ul> <p>You can use either a
- * canned ACL or specify access permissions explicitly. You cannot do both.</p> <p>
- * <b>Grantee Values</b> </p> <p>You can specify the person (grantee) to whom
- * you're assigning access rights (using request elements) in the following
- * ways:</p> <ul> <li> <p>By the person's ID:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="CanonicalUser"&gt;&lt;ID&gt;&lt;&gt;ID&lt;&gt;&lt;/ID&gt;&lt;DisplayName&gt;&lt;&gt;GranteesEmail&lt;&gt;&lt;/DisplayName&gt;
- * &lt;/Grantee&gt;</code> </p> <p>DisplayName is optional and ignored in the
- * request.</p> </li> <li> <p>By URI:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="Group"&gt;&lt;URI&gt;&lt;&gt;http://acs.amazonaws.com/groups/global/AuthenticatedUsers&lt;&gt;&lt;/URI&gt;&lt;/Grantee&gt;</code>
- * </p> </li> <li> <p>By Email address:</p> <p> <code>&lt;Grantee
- * xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- * xsi:type="AmazonCustomerByEmail"&gt;&lt;EmailAddress&gt;&lt;&gt;Grantees@email.com&lt;&gt;&lt;/EmailAddress&gt;lt;/Grantee&gt;</code>
- * </p> <p>The grantee is resolved to the CanonicalUser and, in a response to a GET
- * Object acl request, appears as the CanonicalUser.</p> <p>Using email
- * addresses to specify a grantee is only supported in the following AWS Regions:
- * </p> <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N.
- * California)</p> </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific
- * (Singapore)</p> </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia
- * Pacific (Tokyo)</p> </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South
- * America (São Paulo)</p> </li> </ul> <p>For a list of all the Amazon S3 supported
- * Regions and endpoints, see <a
- * href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p> </li> </ul> <p>
- * <b>Versioning</b> </p> <p>The ACL of an object is set at the object version
- * level. By default, PUT sets the ACL of the current version of an object. To set
- * the ACL of a different version, use the <code>versionId</code> subresource.</p>
- * <p class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectAcl">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutObjectAcl that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutObjectAclAsync(const Model::PutObjectAclRequest& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutObjectAclRequestT = Model::PutObjectAclRequest>
+ void PutObjectAclAsync(const PutObjectAclRequestT& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutObjectAcl, request, handler, context);
+ }
/**
- * <p>Applies a Legal Hold configuration to the specified object. For more
+ * <p>Applies a legal hold configuration to the specified object. For more
* information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
* Objects</a>.</p> <p>This action is not supported by Amazon S3 on
@@ -9313,30 +4653,22 @@ namespace Aws
virtual Model::PutObjectLegalHoldOutcome PutObjectLegalHold(const Model::PutObjectLegalHoldRequest& request) const;
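      [Editor's note] A hedged sketch of placing a legal hold, assuming the target bucket was created with Object Lock enabled; the function name, bucket and key are placeholders.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutObjectLegalHoldRequest.h>
      #include <aws/s3/model/ObjectLockLegalHold.h>
      #include <iostream>

      void PlaceLegalHoldExample(const Aws::S3::S3Client& client)
      {
          Aws::S3::Model::ObjectLockLegalHold hold;
          hold.SetStatus(Aws::S3::Model::ObjectLockLegalHoldStatus::ON);

          Aws::S3::Model::PutObjectLegalHoldRequest request;
          request.SetBucket("example-lock-bucket");  // placeholder; must have Object Lock enabled
          request.SetKey("example-key");             // placeholder
          request.SetLegalHold(hold);

          auto outcome = client.PutObjectLegalHold(request);
          if (!outcome.IsSuccess())
              std::cout << "PutObjectLegalHold failed: " << outcome.GetError().GetMessage() << "\n";
      }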
/**
- * <p>Applies a Legal Hold configuration to the specified object. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutObjectLegalHold that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutObjectLegalHoldOutcomeCallable PutObjectLegalHoldCallable(const Model::PutObjectLegalHoldRequest& request) const;
+ template<typename PutObjectLegalHoldRequestT = Model::PutObjectLegalHoldRequest>
+ Model::PutObjectLegalHoldOutcomeCallable PutObjectLegalHoldCallable(const PutObjectLegalHoldRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutObjectLegalHold, request);
+ }
/**
- * <p>Applies a Legal Hold configuration to the specified object. For more
- * information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>.</p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLegalHold">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutObjectLegalHold that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutObjectLegalHoldAsync(const Model::PutObjectLegalHoldRequest& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutObjectLegalHoldRequestT = Model::PutObjectLegalHoldRequest>
+ void PutObjectLegalHoldAsync(const PutObjectLegalHoldRequestT& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutObjectLegalHold, request, handler, context);
+ }
/**
* <p>Places an Object Lock configuration on the specified bucket. The rule
@@ -9349,86 +4681,62 @@ namespace Aws
* <code>Years</code> but you must select one. You cannot specify <code>Days</code>
* and <code>Years</code> at the same time.</p> </li> <li> <p>You can only enable
* Object Lock for new buckets. If you want to turn on Object Lock for an existing
- * bucket, contact AWS Support.</p> </li> </ul> <p><h3>See Also:</h3> <a
+ * bucket, contact Amazon Web Services Support.</p> </li> </ul> <p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration">AWS
* API Reference</a></p>
*/
virtual Model::PutObjectLockConfigurationOutcome PutObjectLockConfiguration(const Model::PutObjectLockConfigurationRequest& request) const;
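      [Editor's note] A sketch of the default-retention rule described above, assuming an Object Lock-enabled bucket: it sets GOVERNANCE mode with a Days value (Days and Years are mutually exclusive). All names are placeholders.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutObjectLockConfigurationRequest.h>
      #include <aws/s3/model/ObjectLockConfiguration.h>
      #include <iostream>

      void PutDefaultRetentionExample(const Aws::S3::S3Client& client)
      {
          // DefaultRetention needs a mode plus either Days or Years (not both).
          Aws::S3::Model::DefaultRetention retention;
          retention.SetMode(Aws::S3::Model::ObjectLockRetentionMode::GOVERNANCE);
          retention.SetDays(30);

          Aws::S3::Model::ObjectLockRule rule;
          rule.SetDefaultRetention(retention);

          Aws::S3::Model::ObjectLockConfiguration config;
          config.SetObjectLockEnabled(Aws::S3::Model::ObjectLockEnabled::Enabled);
          config.SetRule(rule);

          Aws::S3::Model::PutObjectLockConfigurationRequest request;
          request.SetBucket("example-lock-bucket");  // placeholder; bucket created with Object Lock
          request.SetObjectLockConfiguration(config);

          auto outcome = client.PutObjectLockConfiguration(request);
          if (!outcome.IsSuccess())
              std::cout << outcome.GetError().GetMessage() << "\n";
      }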
/**
- * <p>Places an Object Lock configuration on the specified bucket. The rule
- * specified in the Object Lock configuration will be applied by default to every
- * new object placed in the specified bucket. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>. </p> <ul> <li> <p>The <code>DefaultRetention</code> settings
- * require both a mode and a period.</p> </li> <li> <p>The
- * <code>DefaultRetention</code> period can be either <code>Days</code> or
- * <code>Years</code> but you must select one. You cannot specify <code>Days</code>
- * and <code>Years</code> at the same time.</p> </li> <li> <p>You can only enable
- * Object Lock for new buckets. If you want to turn on Object Lock for an existing
- * bucket, contact AWS Support.</p> </li> </ul> <p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutObjectLockConfiguration that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutObjectLockConfigurationOutcomeCallable PutObjectLockConfigurationCallable(const Model::PutObjectLockConfigurationRequest& request) const;
+ template<typename PutObjectLockConfigurationRequestT = Model::PutObjectLockConfigurationRequest>
+ Model::PutObjectLockConfigurationOutcomeCallable PutObjectLockConfigurationCallable(const PutObjectLockConfigurationRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutObjectLockConfiguration, request);
+ }
/**
- * <p>Places an Object Lock configuration on the specified bucket. The rule
- * specified in the Object Lock configuration will be applied by default to every
- * new object placed in the specified bucket. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>. </p> <ul> <li> <p>The <code>DefaultRetention</code> settings
- * require both a mode and a period.</p> </li> <li> <p>The
- * <code>DefaultRetention</code> period can be either <code>Days</code> or
- * <code>Years</code> but you must select one. You cannot specify <code>Days</code>
- * and <code>Years</code> at the same time.</p> </li> <li> <p>You can only enable
- * Object Lock for new buckets. If you want to turn on Object Lock for an existing
- * bucket, contact AWS Support.</p> </li> </ul> <p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectLockConfiguration">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutObjectLockConfiguration that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutObjectLockConfigurationAsync(const Model::PutObjectLockConfigurationRequest& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutObjectLockConfigurationRequestT = Model::PutObjectLockConfigurationRequest>
+ void PutObjectLockConfigurationAsync(const PutObjectLockConfigurationRequestT& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutObjectLockConfiguration, request, handler, context);
+ }
/**
* <p>Places an Object Retention configuration on an object. For more information,
* see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>. </p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
+ * Objects</a>. Users or accounts require the <code>s3:PutObjectRetention</code>
+ * permission in order to place an Object Retention configuration on objects.
+ * Bypassing a Governance Retention configuration requires the
+ * <code>s3:BypassGovernanceRetention</code> permission. </p> <p>This action is not
+ * supported by Amazon S3 on Outposts.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention">AWS
* API Reference</a></p>
*/
virtual Model::PutObjectRetentionOutcome PutObjectRetention(const Model::PutObjectRetentionRequest& request) const;
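      [Editor's note] A sketch of placing a retention date on a single object, assuming the caller holds s3:PutObjectRetention; the 30-day horizon, bucket and key are placeholders.

      #include <aws/core/utils/DateTime.h>
      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutObjectRetentionRequest.h>
      #include <aws/s3/model/ObjectLockRetention.h>
      #include <chrono>
      #include <iostream>

      void PutRetentionExample(const Aws::S3::S3Client& client)
      {
          // Retain the object for roughly 30 days under GOVERNANCE mode.
          Aws::S3::Model::ObjectLockRetention retention;
          retention.SetMode(Aws::S3::Model::ObjectLockRetentionMode::GOVERNANCE);
          retention.SetRetainUntilDate(Aws::Utils::DateTime(
              std::chrono::system_clock::now() + std::chrono::hours(24 * 30)));

          Aws::S3::Model::PutObjectRetentionRequest request;
          request.SetBucket("example-lock-bucket");  // placeholder
          request.SetKey("example-key");             // placeholder
          request.SetRetention(retention);

          auto outcome = client.PutObjectRetention(request);
          if (!outcome.IsSuccess())
              std::cout << outcome.GetError().GetMessage() << "\n";
      }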
/**
- * <p>Places an Object Retention configuration on an object. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>. </p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutObjectRetention that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutObjectRetentionOutcomeCallable PutObjectRetentionCallable(const Model::PutObjectRetentionRequest& request) const;
+ template<typename PutObjectRetentionRequestT = Model::PutObjectRetentionRequest>
+ Model::PutObjectRetentionOutcomeCallable PutObjectRetentionCallable(const PutObjectRetentionRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutObjectRetention, request);
+ }
/**
- * <p>Places an Object Retention configuration on an object. For more information,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lock.html">Locking
- * Objects</a>. </p> <p>This action is not supported by Amazon S3 on
- * Outposts.</p><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectRetention">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutObjectRetention that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutObjectRetentionAsync(const Model::PutObjectRetentionRequest& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutObjectRetentionRequestT = Model::PutObjectRetentionRequest>
+ void PutObjectRetentionAsync(const PutObjectRetentionRequestT& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutObjectRetention, request, handler, context);
+ }
/**
* <p>Sets the supplied tag-set to an object that already exists in a bucket.</p>
@@ -9470,86 +4778,22 @@ namespace Aws
virtual Model::PutObjectTaggingOutcome PutObjectTagging(const Model::PutObjectTaggingRequest& request) const;
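      [Editor's note] A small sketch of replacing an object's tag set, assuming s3:PutObjectTagging permission; the tag key/value, bucket and key are placeholders.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutObjectTaggingRequest.h>
      #include <aws/s3/model/Tagging.h>
      #include <aws/s3/model/Tag.h>
      #include <iostream>

      void TagObjectExample(const Aws::S3::S3Client& client)
      {
          Aws::S3::Model::Tag tag;
          tag.SetKey("project");  // placeholder tag key
          tag.SetValue("demo");   // placeholder tag value

          Aws::S3::Model::Tagging tagging;
          tagging.AddTagSet(tag);  // S3 allows at most 10 tags per object

          Aws::S3::Model::PutObjectTaggingRequest request;
          request.SetBucket("example-bucket");  // placeholder
          request.SetKey("example-key");        // placeholder
          request.SetTagging(tagging);

          auto outcome = client.PutObjectTagging(request);
          if (!outcome.IsSuccess())
              std::cout << outcome.GetError().GetMessage() << "\n";
      }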
/**
- * <p>Sets the supplied tag-set to an object that already exists in a bucket.</p>
- * <p>A tag is a key-value pair. You can associate tags with an object by sending a
- * PUT request against the tagging subresource that is associated with the object.
- * You can retrieve tags by sending a GET request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>.</p>
- * <p>For tagging-related restrictions related to characters and encodings, see <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html">Tag
- * Restrictions</a>. Note that Amazon S3 limits the maximum number of tags to 10
- * tags per object.</p> <p>To use this operation, you must have permission to
- * perform the <code>s3:PutObjectTagging</code> action. By default, the bucket
- * owner has this permission and can grant this permission to others.</p> <p>To put
- * tags of any other version, use the <code>versionId</code> query parameter. You
- * also need permission for the <code>s3:PutObjectVersionTagging</code> action.</p>
- * <p>For information about the Amazon S3 object tagging feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</p> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <ul>
- * <li> <p> <i>Code: InvalidTagError </i> </p> </li> <li> <p> <i>Cause: The tag
- * provided was not a valid tag. This error can occur if the tag did not pass input
- * validation. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:
- * MalformedXMLError </i> </p> </li> <li> <p> <i>Cause: The XML provided does not
- * match the schema.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:
- * OperationAbortedError </i> </p> </li> <li> <p> <i>Cause: A conflicting
- * conditional action is currently in progress against this resource. Please try
- * again.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code: InternalError</i>
- * </p> </li> <li> <p> <i>Cause: The service was unable to apply the provided tag
- * to the object.</i> </p> </li> </ul> </li> </ul> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html">DeleteObjectTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutObjectTagging that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutObjectTaggingOutcomeCallable PutObjectTaggingCallable(const Model::PutObjectTaggingRequest& request) const;
+ template<typename PutObjectTaggingRequestT = Model::PutObjectTaggingRequest>
+ Model::PutObjectTaggingOutcomeCallable PutObjectTaggingCallable(const PutObjectTaggingRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutObjectTagging, request);
+ }
/**
- * <p>Sets the supplied tag-set to an object that already exists in a bucket.</p>
- * <p>A tag is a key-value pair. You can associate tags with an object by sending a
- * PUT request against the tagging subresource that is associated with the object.
- * You can retrieve tags by sending a GET request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>.</p>
- * <p>For tagging-related restrictions related to characters and encodings, see <a
- * href="https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/allocation-tag-restrictions.html">Tag
- * Restrictions</a>. Note that Amazon S3 limits the maximum number of tags to 10
- * tags per object.</p> <p>To use this operation, you must have permission to
- * perform the <code>s3:PutObjectTagging</code> action. By default, the bucket
- * owner has this permission and can grant this permission to others.</p> <p>To put
- * tags of any other version, use the <code>versionId</code> query parameter. You
- * also need permission for the <code>s3:PutObjectVersionTagging</code> action.</p>
- * <p>For information about the Amazon S3 object tagging feature, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</p> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <ul>
- * <li> <p> <i>Code: InvalidTagError </i> </p> </li> <li> <p> <i>Cause: The tag
- * provided was not a valid tag. This error can occur if the tag did not pass input
- * validation. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-tagging.html">Object
- * Tagging</a>.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:
- * MalformedXMLError </i> </p> </li> <li> <p> <i>Cause: The XML provided does not
- * match the schema.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:
- * OperationAbortedError </i> </p> </li> <li> <p> <i>Cause: A conflicting
- * conditional action is currently in progress against this resource. Please try
- * again.</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code: InternalError</i>
- * </p> </li> <li> <p> <i>Cause: The service was unable to apply the provided tag
- * to the object.</i> </p> </li> </ul> </li> </ul> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObjectTagging.html">GetObjectTagging</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeleteObjectTagging.html">DeleteObjectTagging</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutObjectTagging">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutObjectTagging that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutObjectTaggingAsync(const Model::PutObjectTaggingRequest& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutObjectTaggingRequestT = Model::PutObjectTaggingRequest>
+ void PutObjectTaggingAsync(const PutObjectTaggingRequestT& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutObjectTagging, request, handler, context);
+ }
/**
* <p>Creates or modifies the <code>PublicAccessBlock</code> configuration for an
@@ -9582,68 +4826,22 @@ namespace Aws
virtual Model::PutPublicAccessBlockOutcome PutPublicAccessBlock(const Model::PutPublicAccessBlockRequest& request) const;
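      [Editor's note] A sketch of enabling all four Block Public Access settings on one bucket, assuming s3:PutBucketPublicAccessBlock permission; the bucket name is a placeholder.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/PutPublicAccessBlockRequest.h>
      #include <aws/s3/model/PublicAccessBlockConfiguration.h>
      #include <iostream>

      void BlockPublicAccessExample(const Aws::S3::S3Client& client)
      {
          // Turn on all four block-public-access settings for one bucket.
          Aws::S3::Model::PublicAccessBlockConfiguration config;
          config.SetBlockPublicAcls(true);
          config.SetIgnorePublicAcls(true);
          config.SetBlockPublicPolicy(true);
          config.SetRestrictPublicBuckets(true);

          Aws::S3::Model::PutPublicAccessBlockRequest request;
          request.SetBucket("example-bucket");  // placeholder
          request.SetPublicAccessBlockConfiguration(config);

          auto outcome = client.PutPublicAccessBlock(request);
          if (!outcome.IsSuccess())
              std::cout << outcome.GetError().GetMessage() << "\n";
      }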
/**
- * <p>Creates or modifies the <code>PublicAccessBlock</code> configuration for an
- * Amazon S3 bucket. To use this operation, you must have the
- * <code>s3:PutBucketPublicAccessBlock</code> permission. For more information
- * about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p>When Amazon S3 evaluates the
- * <code>PublicAccessBlock</code> configuration for a bucket or an object, it
- * checks the <code>PublicAccessBlock</code> configuration for both the bucket (or
- * the bucket that contains the object) and the bucket owner's account. If the
- * <code>PublicAccessBlock</code> configurations are different between the bucket
- * and the account, Amazon S3 uses the most restrictive combination of the
- * bucket-level and account-level settings.</p> <p>For more
- * information about when Amazon S3 considers a bucket or an object public, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
- * Meaning of "Public"</a>.</p> <p class="title"> <b>Related Resources</b> </p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html">DeletePublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html">GetBucketPolicyStatus</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for PutPublicAccessBlock that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::PutPublicAccessBlockOutcomeCallable PutPublicAccessBlockCallable(const Model::PutPublicAccessBlockRequest& request) const;
+ template<typename PutPublicAccessBlockRequestT = Model::PutPublicAccessBlockRequest>
+ Model::PutPublicAccessBlockOutcomeCallable PutPublicAccessBlockCallable(const PutPublicAccessBlockRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::PutPublicAccessBlock, request);
+ }
/**
- * <p>Creates or modifies the <code>PublicAccessBlock</code> configuration for an
- * Amazon S3 bucket. To use this operation, you must have the
- * <code>s3:PutBucketPublicAccessBlock</code> permission. For more information
- * about Amazon S3 permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a>.</p> <p>When Amazon S3 evaluates the
- * <code>PublicAccessBlock</code> configuration for a bucket or an object, it
- * checks the <code>PublicAccessBlock</code> configuration for both the bucket (or
- * the bucket that contains the object) and the bucket owner's account. If the
- * <code>PublicAccessBlock</code> configurations are different between the bucket
- * and the account, Amazon S3 uses the most restrictive combination of the
- * bucket-level and account-level settings.</p> <p>For more
- * information about when Amazon S3 considers a bucket or an object public, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html#access-control-block-public-access-policy-status">The
- * Meaning of "Public"</a>.</p> <p class="title"> <b>Related Resources</b> </p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetPublicAccessBlock.html">GetPublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_DeletePublicAccessBlock.html">DeletePublicAccessBlock</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketPolicyStatus.html">GetBucketPolicyStatus</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/access-control-block-public-access.html">Using
- * Amazon S3 Block Public Access</a> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PutPublicAccessBlock">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for PutPublicAccessBlock that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void PutPublicAccessBlockAsync(const Model::PutPublicAccessBlockRequest& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename PutPublicAccessBlockRequestT = Model::PutPublicAccessBlockRequest>
+ void PutPublicAccessBlockAsync(const PutPublicAccessBlockRequestT& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::PutPublicAccessBlock, request, handler, context);
+ }
/**
* <p>Restores an archived copy of an object back into Amazon S3</p> <p>This action
@@ -9668,11 +4866,12 @@ namespace Aws
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html">Querying
* Archived Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When making a
* select request, do the following:</p> <ul> <li> <p>Define an output location for
- * the select query's output. This must be an Amazon S3 bucket in the same AWS
- * Region as the bucket that contains the archive object that is being queried. The
- * AWS account that initiates the job must have permissions to write to the S3
- * bucket. You can specify the storage class and encryption for the output objects
- * stored in the bucket. For more information about output, see <a
+ * the select query's output. This must be an Amazon S3 bucket in the same Amazon
+ * Web Services Region as the bucket that contains the archive object that is being
+ * queried. The Amazon Web Services account that initiates the job must have
+ * permissions to write to the S3 bucket. You can specify the storage class and
+ * encryption for the output objects stored in the bucket. For more information
+ * about output, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html">Querying
* Archived Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more
* information about the <code>S3</code> structure in the request body, see the
@@ -9726,33 +4925,33 @@ namespace Aws
* version ID. If you don't provide a version ID, Amazon S3 restores the current
* version.</p> <p>When restoring an archived object (or using a select request),
* you can specify one of the following data access tier options in the
- * <code>Tier</code> element of the request body: </p> <ul> <li> <p> <b>
- * <code>Expedited</code> </b> - Expedited retrievals allow you to quickly access
- * your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering
- * Archive tier when occasional urgent requests for a subset of archives are
- * required. For all but the largest archived objects (250 MB+), data accessed
- * using Expedited retrievals is typically made available within 1–5 minutes.
- * Provisioned capacity ensures that retrieval capacity for Expedited retrievals is
- * available when you need it. Expedited retrievals and provisioned capacity are
- * not available for objects stored in the S3 Glacier Deep Archive storage class or
- * S3 Intelligent-Tiering Deep Archive tier.</p> </li> <li> <p> <b>
- * <code>Standard</code> </b> - Standard retrievals allow you to access any of your
- * archived objects within several hours. This is the default option for retrieval
- * requests that do not specify the retrieval option. Standard retrievals typically
- * finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3
+ * <code>Tier</code> element of the request body: </p> <ul> <li> <p>
+ * <code>Expedited</code> - Expedited retrievals allow you to quickly access your
+ * data stored in the S3 Glacier storage class or S3 Intelligent-Tiering Archive
+ * tier when occasional urgent requests for a subset of archives are required. For
+ * all but the largest archived objects (250 MB+), data accessed using Expedited
+ * retrievals is typically made available within 1–5 minutes. Provisioned capacity
+ * ensures that retrieval capacity for Expedited retrievals is available when you
+ * need it. Expedited retrievals and provisioned capacity are not available for
+ * objects stored in the S3 Glacier Deep Archive storage class or S3
+ * Intelligent-Tiering Deep Archive tier.</p> </li> <li> <p> <code>Standard</code>
+ * - Standard retrievals allow you to access any of your archived objects within
+ * several hours. This is the default option for retrieval requests that do not
+ * specify the retrieval option. Standard retrievals typically finish within 3–5
+ * hours for objects stored in the S3 Glacier storage class or S3
* Intelligent-Tiering Archive tier. They typically finish within 12 hours for
* objects stored in the S3 Glacier Deep Archive storage class or S3
* Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects
- * stored in S3 Intelligent-Tiering.</p> </li> <li> <p> <b> <code>Bulk</code> </b>
- * - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling
- * you to retrieve large amounts, even petabytes, of data inexpensively. Bulk
- * retrievals typically finish within 5–12 hours for objects stored in the S3
- * Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically
- * finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage
- * class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for
- * objects stored in S3 Intelligent-Tiering.</p> </li> </ul> <p>For more
- * information about archive retrieval options and provisioned capacity for
- * <code>Expedited</code> data access, see <a
+ * stored in S3 Intelligent-Tiering.</p> </li> <li> <p> <code>Bulk</code> - Bulk
+ * retrievals are the lowest-cost retrieval option in S3 Glacier, enabling you to
+ * retrieve large amounts, even petabytes, of data inexpensively. Bulk retrievals
+ * typically finish within 5–12 hours for objects stored in the S3 Glacier storage
+ * class or S3 Intelligent-Tiering Archive tier. They typically finish within 48
+ * hours for objects stored in the S3 Glacier Deep Archive storage class or S3
+ * Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for objects
+ * stored in S3 Intelligent-Tiering.</p> </li> </ul> <p>For more information about
+ * archive retrieval options and provisioned capacity for <code>Expedited</code>
+ * data access, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html">Restoring
* Archived Objects</a> in the <i>Amazon S3 User Guide</i>. </p> <p>You can use
* Amazon S3 restore speed upgrade to change the restore speed to a faster speed
@@ -9810,336 +5009,22 @@ namespace Aws
virtual Model::RestoreObjectOutcome RestoreObject(const Model::RestoreObjectRequest& request) const;
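      [Editor's note] A sketch of the "restore an archive" request type described above: it asks for a temporary 2-day copy at the Standard retrieval tier. The bucket, key, duration and tier choice are placeholders.

      #include <aws/s3/S3Client.h>
      #include <aws/s3/model/RestoreObjectRequest.h>
      #include <aws/s3/model/RestoreRequest.h>
      #include <aws/s3/model/GlacierJobParameters.h>
      #include <iostream>

      void RestoreArchivedObjectExample(const Aws::S3::S3Client& client)
      {
          // Request a temporary copy of an archived object for 2 days, Standard tier.
          Aws::S3::Model::GlacierJobParameters job;
          job.SetTier(Aws::S3::Model::Tier::Standard);

          Aws::S3::Model::RestoreRequest restore;
          restore.SetDays(2);
          restore.SetGlacierJobParameters(job);

          Aws::S3::Model::RestoreObjectRequest request;
          request.SetBucket("example-archive-bucket");  // placeholder
          request.SetKey("example-key");                // placeholder
          request.SetRestoreRequest(restore);

          auto outcome = client.RestoreObject(request);
          if (!outcome.IsSuccess())
              std::cout << outcome.GetError().GetMessage() << "\n";
      }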
/**
- * <p>Restores an archived copy of an object back into Amazon S3</p> <p>This action
- * is not supported by Amazon S3 on Outposts.</p> <p>This action performs the
- * following types of requests: </p> <ul> <li> <p> <code>select</code> - Perform a
- * select query on an archived object</p> </li> <li> <p> <code>restore an
- * archive</code> - Restore an archived object</p> </li> </ul> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:RestoreObject</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p> <b>Querying Archives with Select Requests</b> </p> <p>You use
- * a select type of request to perform SQL queries on archived objects. The
- * archived objects that are being queried by the select request must be formatted
- * as uncompressed comma-separated values (CSV) files. You can run queries and
- * custom analytics on your archived data without having to restore your data to a
- * hotter Amazon S3 tier. For an overview about select requests, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html">Querying
- * Archived Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When making a
- * select request, do the following:</p> <ul> <li> <p>Define an output location for
- * the select query's output. This must be an Amazon S3 bucket in the same AWS
- * Region as the bucket that contains the archive object that is being queried. The
- * AWS account that initiates the job must have permissions to write to the S3
- * bucket. You can specify the storage class and encryption for the output objects
- * stored in the bucket. For more information about output, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html">Querying
- * Archived Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more
- * information about the <code>S3</code> structure in the request body, see the
- * following:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Managing
- * Access with ACLs</a> in the <i>Amazon S3 User Guide</i> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
- * Data Using Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i> </p>
- * </li> </ul> </li> <li> <p>Define the SQL expression for the <code>SELECT</code>
- * type of restoration for your query in the request body's
- * <code>SelectParameters</code> structure. You can use expressions like the
- * following examples.</p> <ul> <li> <p>The following expression returns all
- * records from the specified object.</p> <p> <code>SELECT * FROM Object</code>
- * </p> </li> <li> <p>Assuming that you are not using any headers for data stored
- * in the object, you can specify columns with positional headers.</p> <p>
- * <code>SELECT s._1, s._2 FROM Object s WHERE s._3 &gt; 100</code> </p> </li> <li>
- * <p>If you have headers and you set the <code>fileHeaderInfo</code> in the
- * <code>CSV</code> structure in the request body to <code>USE</code>, you can
- * specify headers in the query. (If you set the <code>fileHeaderInfo</code> field
- * to <code>IGNORE</code>, the first row is skipped for the query.) You cannot mix
- * ordinal positions with header column names. </p> <p> <code>SELECT s.Id,
- * s.FirstName, s.SSN FROM S3Object s</code> </p> </li> </ul> </li> </ul> <p>For
- * more information about using SQL with S3 Glacier Select restore, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">SQL
- * Reference for Amazon S3 Select and S3 Glacier Select</a> in the <i>Amazon S3
- * User Guide</i>. </p> <p>When making a select request, you can also do the
- * following:</p> <ul> <li> <p>To expedite your queries, specify the
- * <code>Expedited</code> tier. For more information about tiers, see "Restoring
- * Archives," later in this topic.</p> </li> <li> <p>Specify details about the data
- * serialization format of both the input object that is being queried and the
- * serialization of the CSV-encoded query results.</p> </li> </ul> <p>The following
- * are additional important facts about the select feature:</p> <ul> <li> <p>The
- * output results are new Amazon S3 objects. Unlike archive retrievals, they are
- * stored until explicitly deleted-manually or through a lifecycle policy.</p>
- * </li> <li> <p>You can issue more than one select request on the same Amazon S3
- * object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate
- * requests.</p> </li> <li> <p> Amazon S3 accepts a select request even if the
- * object has already been restored. A select request doesn’t return error response
- * <code>409</code>.</p> </li> </ul> <p> <b>Restoring objects</b> </p> <p>Objects
- * that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and
- * S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are
- * not accessible in real time. For objects in Archive Access or Deep Archive
- * Access tiers you must first initiate a restore request, and then wait until the
- * object is moved into the Frequent Access tier. For objects in S3 Glacier or S3
- * Glacier Deep Archive storage classes you must first initiate a restore request,
- * and then wait until a temporary copy of the object is available. To access an
- * archived object, you must restore the object for the duration (number of days)
- * that you specify.</p> <p>To restore a specific object version, you can provide a
- * version ID. If you don't provide a version ID, Amazon S3 restores the current
- * version.</p> <p>When restoring an archived object (or using a select request),
- * you can specify one of the following data access tier options in the
- * <code>Tier</code> element of the request body: </p> <ul> <li> <p> <b>
- * <code>Expedited</code> </b> - Expedited retrievals allow you to quickly access
- * your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering
- * Archive tier when occasional urgent requests for a subset of archives are
- * required. For all but the largest archived objects (250 MB+), data accessed
- * using Expedited retrievals is typically made available within 1–5 minutes.
- * Provisioned capacity ensures that retrieval capacity for Expedited retrievals is
- * available when you need it. Expedited retrievals and provisioned capacity are
- * not available for objects stored in the S3 Glacier Deep Archive storage class or
- * S3 Intelligent-Tiering Deep Archive tier.</p> </li> <li> <p> <b>
- * <code>Standard</code> </b> - Standard retrievals allow you to access any of your
- * archived objects within several hours. This is the default option for retrieval
- * requests that do not specify the retrieval option. Standard retrievals typically
- * finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3
- * Intelligent-Tiering Archive tier. They typically finish within 12 hours for
- * objects stored in the S3 Glacier Deep Archive storage class or S3
- * Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects
- * stored in S3 Intelligent-Tiering.</p> </li> <li> <p> <b> <code>Bulk</code> </b>
- * - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling
- * you to retrieve large amounts, even petabytes, of data inexpensively. Bulk
- * retrievals typically finish within 5–12 hours for objects stored in the S3
- * Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically
- * finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage
- * class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for
- * objects stored in S3 Intelligent-Tiering.</p> </li> </ul> <p>For more
- * information about archive retrieval options and provisioned capacity for
- * <code>Expedited</code> data access, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html">Restoring
- * Archived Objects</a> in the <i>Amazon S3 User Guide</i>. </p> <p>You can use
- * Amazon S3 restore speed upgrade to change the restore speed to a faster speed
- * while it is in progress. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html">
- * Upgrading the speed of an in-progress restore</a> in the <i>Amazon S3 User
- * Guide</i>. </p> <p>To get the status of object restoration, you can send a
- * <code>HEAD</code> request. Operations return the <code>x-amz-restore</code>
- * header, which provides information about the restoration status, in the
- * response. You can use Amazon S3 event notifications to notify you when a restore
- * is initiated or completed. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
- * Amazon S3 Event Notifications</a> in the <i>Amazon S3 User Guide</i>.</p>
- * <p>After restoring an archived object, you can update the restoration period by
- * reissuing the request with a new period. Amazon S3 updates the restoration
- * period relative to the current time and charges only for the request-there are
- * no data transfer charges. You cannot update the restoration period when Amazon
- * S3 is actively processing your current restore request for the object.</p> <p>If
- * your bucket has a lifecycle configuration with a rule that includes an
- * expiration action, the object expiration overrides the life span that you
- * specify in a restore request. For example, if you restore an object copy for 10
- * days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the
- * object in 3 days. For more information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html">Object
- * Lifecycle Management</a> in <i>Amazon S3 User Guide</i>.</p> <p>
- * <b>Responses</b> </p> <p>A successful action returns either the <code>200
- * OK</code> or <code>202 Accepted</code> status code. </p> <ul> <li> <p>If the
- * object is not previously restored, then Amazon S3 returns <code>202
- * Accepted</code> in the response. </p> </li> <li> <p>If the object is previously
- * restored, Amazon S3 returns <code>200 OK</code> in the response. </p> </li>
- * </ul> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p>
- * <i>Code: RestoreAlreadyInProgress</i> </p> </li> <li> <p> <i>Cause: Object
- * restore is already in progress. (This error does not apply to SELECT type
- * requests.)</i> </p> </li> <li> <p> <i>HTTP Status Code: 409 Conflict</i> </p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix: Client</i> </p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code: GlacierExpeditedRetrievalNotAvailable</i> </p> </li> <li>
- * <p> <i>Cause: expedited retrievals are currently not available. Try again later.
- * (Returned if there is insufficient capacity to process the Expedited request.
- * This error applies only to Expedited retrievals and not to S3 Standard or Bulk
- * retrievals.)</i> </p> </li> <li> <p> <i>HTTP Status Code: 503</i> </p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix: N/A</i> </p> </li> </ul> </li> </ul> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html">GetBucketNotificationConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">SQL
- * Reference for Amazon S3 Select and S3 Glacier Select </a> in the <i>Amazon S3
- * User Guide</i> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for RestoreObject that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::RestoreObjectOutcomeCallable RestoreObjectCallable(const Model::RestoreObjectRequest& request) const;
+ template<typename RestoreObjectRequestT = Model::RestoreObjectRequest>
+ Model::RestoreObjectOutcomeCallable RestoreObjectCallable(const RestoreObjectRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::RestoreObject, request);
+ }
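A minimal usage sketch for the Callable wrapper above, assuming an already-initialized SDK and an existing S3Client; the bucket, key, and the 7-day Standard-tier restore are illustrative placeholders, not values taken from this diff.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/RestoreObjectRequest.h>
#include <aws/s3/model/RestoreRequest.h>
#include <aws/s3/model/GlacierJobParameters.h>
#include <aws/s3/model/Tier.h>

// Hypothetical helper: initiate a restore of an archived object and block on the future.
bool RestoreArchivedObject(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GlacierJobParameters jobParams;
    jobParams.SetTier(Aws::S3::Model::Tier::Standard);   // Expedited / Standard / Bulk

    Aws::S3::Model::RestoreRequest restoreSpec;
    restoreSpec.SetDays(7);                               // keep the temporary copy for 7 days
    restoreSpec.SetGlacierJobParameters(jobParams);

    Aws::S3::Model::RestoreObjectRequest request;
    request.SetBucket("example-bucket");                  // illustrative bucket/key
    request.SetKey("archived/object.csv");
    request.SetRestoreRequest(restoreSpec);

    // The Callable wrapper returns a std::future so other work can proceed in parallel.
    auto future = client.RestoreObjectCallable(request);
    auto outcome = future.get();                          // blocks until the request completes
    return outcome.IsSuccess();                           // 202 Accepted on a new restore, 200 OK if already restored
}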
/**
- * <p>Restores an archived copy of an object back into Amazon S3</p> <p>This action
- * is not supported by Amazon S3 on Outposts.</p> <p>This action performs the
- * following types of requests: </p> <ul> <li> <p> <code>select</code> - Perform a
- * select query on an archived object</p> </li> <li> <p> <code>restore an
- * archive</code> - Restore an archived object</p> </li> </ul> <p>To use this
- * operation, you must have permissions to perform the
- * <code>s3:RestoreObject</code> action. The bucket owner has this permission by
- * default and can grant this permission to others. For more information about
- * permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-with-s3-actions.html#using-with-s3-actions-related-to-bucket-subresources">Permissions
- * Related to Bucket Subresource Operations</a> and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-access-control.html">Managing
- * Access Permissions to Your Amazon S3 Resources</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p> <b>Querying Archives with Select Requests</b> </p> <p>You use
- * a select type of request to perform SQL queries on archived objects. The
- * archived objects that are being queried by the select request must be formatted
- * as uncompressed comma-separated values (CSV) files. You can run queries and
- * custom analytics on your archived data without having to restore your data to a
- * hotter Amazon S3 tier. For an overview about select requests, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html">Querying
- * Archived Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When making a
- * select request, do the following:</p> <ul> <li> <p>Define an output location for
- * the select query's output. This must be an Amazon S3 bucket in the same AWS
- * Region as the bucket that contains the archive object that is being queried. The
- * AWS account that initiates the job must have permissions to write to the S3
- * bucket. You can specify the storage class and encryption for the output objects
- * stored in the bucket. For more information about output, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/querying-glacier-archives.html">Querying
- * Archived Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more
- * information about the <code>S3</code> structure in the request body, see the
- * following:</p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutObject.html">PutObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/S3_ACLs_UsingACLs.html">Managing
- * Access with ACLs</a> in the <i>Amazon S3 User Guide</i> </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
- * Data Using Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i> </p>
- * </li> </ul> </li> <li> <p>Define the SQL expression for the <code>SELECT</code>
- * type of restoration for your query in the request body's
- * <code>SelectParameters</code> structure. You can use expressions like the
- * following examples.</p> <ul> <li> <p>The following expression returns all
- * records from the specified object.</p> <p> <code>SELECT * FROM Object</code>
- * </p> </li> <li> <p>Assuming that you are not using any headers for data stored
- * in the object, you can specify columns with positional headers.</p> <p>
- * <code>SELECT s._1, s._2 FROM Object s WHERE s._3 &gt; 100</code> </p> </li> <li>
- * <p>If you have headers and you set the <code>fileHeaderInfo</code> in the
- * <code>CSV</code> structure in the request body to <code>USE</code>, you can
- * specify headers in the query. (If you set the <code>fileHeaderInfo</code> field
- * to <code>IGNORE</code>, the first row is skipped for the query.) You cannot mix
- * ordinal positions with header column names. </p> <p> <code>SELECT s.Id,
- * s.FirstName, s.SSN FROM S3Object s</code> </p> </li> </ul> </li> </ul> <p>For
- * more information about using SQL with S3 Glacier Select restore, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">SQL
- * Reference for Amazon S3 Select and S3 Glacier Select</a> in the <i>Amazon S3
- * User Guide</i>. </p> <p>When making a select request, you can also do the
- * following:</p> <ul> <li> <p>To expedite your queries, specify the
- * <code>Expedited</code> tier. For more information about tiers, see "Restoring
- * Archives," later in this topic.</p> </li> <li> <p>Specify details about the data
- * serialization format of both the input object that is being queried and the
- * serialization of the CSV-encoded query results.</p> </li> </ul> <p>The following
- * are additional important facts about the select feature:</p> <ul> <li> <p>The
- * output results are new Amazon S3 objects. Unlike archive retrievals, they are
- * stored until explicitly deleted-manually or through a lifecycle policy.</p>
- * </li> <li> <p>You can issue more than one select request on the same Amazon S3
- * object. Amazon S3 doesn't deduplicate requests, so avoid issuing duplicate
- * requests.</p> </li> <li> <p> Amazon S3 accepts a select request even if the
- * object has already been restored. A select request doesn’t return error response
- * <code>409</code>.</p> </li> </ul> <p> <b>Restoring objects</b> </p> <p>Objects
- * that you archive to the S3 Glacier or S3 Glacier Deep Archive storage class, and
- * S3 Intelligent-Tiering Archive or S3 Intelligent-Tiering Deep Archive tiers are
- * not accessible in real time. For objects in Archive Access or Deep Archive
- * Access tiers you must first initiate a restore request, and then wait until the
- * object is moved into the Frequent Access tier. For objects in S3 Glacier or S3
- * Glacier Deep Archive storage classes you must first initiate a restore request,
- * and then wait until a temporary copy of the object is available. To access an
- * archived object, you must restore the object for the duration (number of days)
- * that you specify.</p> <p>To restore a specific object version, you can provide a
- * version ID. If you don't provide a version ID, Amazon S3 restores the current
- * version.</p> <p>When restoring an archived object (or using a select request),
- * you can specify one of the following data access tier options in the
- * <code>Tier</code> element of the request body: </p> <ul> <li> <p> <b>
- * <code>Expedited</code> </b> - Expedited retrievals allow you to quickly access
- * your data stored in the S3 Glacier storage class or S3 Intelligent-Tiering
- * Archive tier when occasional urgent requests for a subset of archives are
- * required. For all but the largest archived objects (250 MB+), data accessed
- * using Expedited retrievals is typically made available within 1–5 minutes.
- * Provisioned capacity ensures that retrieval capacity for Expedited retrievals is
- * available when you need it. Expedited retrievals and provisioned capacity are
- * not available for objects stored in the S3 Glacier Deep Archive storage class or
- * S3 Intelligent-Tiering Deep Archive tier.</p> </li> <li> <p> <b>
- * <code>Standard</code> </b> - Standard retrievals allow you to access any of your
- * archived objects within several hours. This is the default option for retrieval
- * requests that do not specify the retrieval option. Standard retrievals typically
- * finish within 3–5 hours for objects stored in the S3 Glacier storage class or S3
- * Intelligent-Tiering Archive tier. They typically finish within 12 hours for
- * objects stored in the S3 Glacier Deep Archive storage class or S3
- * Intelligent-Tiering Deep Archive tier. Standard retrievals are free for objects
- * stored in S3 Intelligent-Tiering.</p> </li> <li> <p> <b> <code>Bulk</code> </b>
- * - Bulk retrievals are the lowest-cost retrieval option in S3 Glacier, enabling
- * you to retrieve large amounts, even petabytes, of data inexpensively. Bulk
- * retrievals typically finish within 5–12 hours for objects stored in the S3
- * Glacier storage class or S3 Intelligent-Tiering Archive tier. They typically
- * finish within 48 hours for objects stored in the S3 Glacier Deep Archive storage
- * class or S3 Intelligent-Tiering Deep Archive tier. Bulk retrievals are free for
- * objects stored in S3 Intelligent-Tiering.</p> </li> </ul> <p>For more
- * information about archive retrieval options and provisioned capacity for
- * <code>Expedited</code> data access, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html">Restoring
- * Archived Objects</a> in the <i>Amazon S3 User Guide</i>. </p> <p>You can use
- * Amazon S3 restore speed upgrade to change the restore speed to a faster speed
- * while it is in progress. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/restoring-objects.html#restoring-objects-upgrade-tier.title.html">
- * Upgrading the speed of an in-progress restore</a> in the <i>Amazon S3 User
- * Guide</i>. </p> <p>To get the status of object restoration, you can send a
- * <code>HEAD</code> request. Operations return the <code>x-amz-restore</code>
- * header, which provides information about the restoration status, in the
- * response. You can use Amazon S3 event notifications to notify you when a restore
- * is initiated or completed. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Configuring
- * Amazon S3 Event Notifications</a> in the <i>Amazon S3 User Guide</i>.</p>
- * <p>After restoring an archived object, you can update the restoration period by
- * reissuing the request with a new period. Amazon S3 updates the restoration
- * period relative to the current time and charges only for the request-there are
- * no data transfer charges. You cannot update the restoration period when Amazon
- * S3 is actively processing your current restore request for the object.</p> <p>If
- * your bucket has a lifecycle configuration with a rule that includes an
- * expiration action, the object expiration overrides the life span that you
- * specify in a restore request. For example, if you restore an object copy for 10
- * days, but the object is scheduled to expire in 3 days, Amazon S3 deletes the
- * object in 3 days. For more information about lifecycle configuration, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/object-lifecycle-mgmt.html">Object
- * Lifecycle Management</a> in <i>Amazon S3 User Guide</i>.</p> <p>
- * <b>Responses</b> </p> <p>A successful action returns either the <code>200
- * OK</code> or <code>202 Accepted</code> status code. </p> <ul> <li> <p>If the
- * object is not previously restored, then Amazon S3 returns <code>202
- * Accepted</code> in the response. </p> </li> <li> <p>If the object is previously
- * restored, Amazon S3 returns <code>200 OK</code> in the response. </p> </li>
- * </ul> <p class="title"> <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p>
- * <i>Code: RestoreAlreadyInProgress</i> </p> </li> <li> <p> <i>Cause: Object
- * restore is already in progress. (This error does not apply to SELECT type
- * requests.)</i> </p> </li> <li> <p> <i>HTTP Status Code: 409 Conflict</i> </p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix: Client</i> </p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code: GlacierExpeditedRetrievalNotAvailable</i> </p> </li> <li>
- * <p> <i>Cause: expedited retrievals are currently not available. Try again later.
- * (Returned if there is insufficient capacity to process the Expedited request.
- * This error applies only to Expedited retrievals and not to S3 Standard or Bulk
- * retrievals.)</i> </p> </li> <li> <p> <i>HTTP Status Code: 503</i> </p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix: N/A</i> </p> </li> </ul> </li> </ul> <p
- * class="title"> <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketNotificationConfiguration.html">GetBucketNotificationConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">SQL
- * Reference for Amazon S3 Select and S3 Glacier Select </a> in the <i>Amazon S3
- * User Guide</i> </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreObject">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for RestoreObject that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void RestoreObjectAsync(const Model::RestoreObjectRequest& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename RestoreObjectRequestT = Model::RestoreObjectRequest>
+ void RestoreObjectAsync(const RestoreObjectRequestT& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::RestoreObject, request, handler, context);
+ }
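A corresponding sketch for the Async wrapper, again with illustrative names; the lambda follows the RestoreObjectResponseReceivedHandler callback shape used throughout this header.

#include <aws/core/client/AsyncCallerContext.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/RestoreObjectRequest.h>

// Hypothetical fire-and-forget restore; the callback runs on the client's executor thread.
void RestoreArchivedObjectAsync(const Aws::S3::S3Client& client,
                                const Aws::S3::Model::RestoreObjectRequest& request)
{
    client.RestoreObjectAsync(request,
        [](const Aws::S3::S3Client* /*client*/,
           const Aws::S3::Model::RestoreObjectRequest& /*req*/,
           const Aws::S3::Model::RestoreObjectOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/)
        {
            if (!outcome.IsSuccess()) {
                // e.g. RestoreAlreadyInProgress maps to HTTP 409 Conflict
            }
        });
}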
/**
* <p>This action filters the contents of an Amazon S3 object based on a simple
@@ -10151,8 +5036,10 @@ namespace Aws
* <p>This action is not supported by Amazon S3 on Outposts.</p> <p>For more
* information about Amazon S3 Select, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html">Selecting
- * Content from Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more
- * information about using SQL with Amazon S3 Select, see <a
+ * Content from Objects</a> and <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/s3-glacier-select-sql-reference-select.html">SELECT
+ * Command</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more information
+ * about using SQL with Amazon S3 Select, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">
* SQL Reference for Amazon S3 Select and S3 Glacier Select</a> in the <i>Amazon S3
* User Guide</i>.</p> <p/> <p> <b>Permissions</b> </p> <p>You must have
@@ -10178,10 +5065,10 @@ namespace Aws
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
* Encryption (Using Customer-Provided Encryption Keys)</a> in the <i>Amazon S3
* User Guide</i>.</p> <p>For objects that are encrypted with Amazon S3 managed
- * encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key
- * Management Service (SSE-KMS), server-side encryption is handled transparently,
- * so you don't need to specify anything. For more information about server-side
- * encryption, including SSE-S3 and SSE-KMS, see <a
+ * encryption keys (SSE-S3) and Amazon Web Services KMS keys (SSE-KMS), server-side
+ * encryption is handled transparently, so you don't need to specify anything. For
+ * more information about server-side encryption, including SSE-S3 and SSE-KMS, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
* Data Using Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
* </li> </ul> <p> <b>Working with the Response Body</b> </p> <p>Given the response
@@ -10189,7 +5076,7 @@ namespace Aws
* and includes a <code>Transfer-Encoding</code> header with <code>chunked</code>
* as its value in the response. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html">Appendix:
- * SelectObjectContent Response</a> .</p> <p/> <p> <b>GetObject Support</b> </p>
+ * SelectObjectContent Response</a>.</p> <p/> <p> <b>GetObject Support</b> </p>
* <p>The <code>SelectObjectContent</code> action does not support the following
* <code>GetObject</code> functionality. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>.</p>
@@ -10220,164 +5107,22 @@ namespace Aws
virtual Model::SelectObjectContentOutcome SelectObjectContent(Model::SelectObjectContentRequest& request) const;
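The doc comment above compresses a lot of setup; the following is a hedged sketch of a synchronous SelectObjectContent call on a CSV object, assuming the event-stream handler types shipped with this SDK version and using illustrative bucket/key names.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/SelectObjectContentRequest.h>
#include <aws/s3/model/SelectObjectContentHandler.h>
#include <aws/s3/model/RecordsEvent.h>
#include <aws/s3/model/CSVInput.h>
#include <aws/s3/model/CSVOutput.h>
#include <aws/s3/model/InputSerialization.h>
#include <aws/s3/model/OutputSerialization.h>
#include <aws/s3/model/ExpressionType.h>

// Hypothetical example: stream the records that match a SQL expression out of a CSV object.
bool SelectFromCsvObject(Aws::S3::S3Client& client)
{
    Aws::S3::Model::SelectObjectContentRequest request;
    request.SetBucket("example-bucket");                  // illustrative bucket/key
    request.SetKey("data/input.csv");
    request.SetExpressionType(Aws::S3::Model::ExpressionType::SQL);
    request.SetExpression("SELECT s._1, s._2 FROM S3Object s WHERE s._3 > 100");

    Aws::S3::Model::InputSerialization input;
    input.SetCSV(Aws::S3::Model::CSVInput());             // positional headers; no FileHeaderInfo set
    request.SetInputSerialization(input);

    Aws::S3::Model::OutputSerialization output;
    output.SetCSV(Aws::S3::Model::CSVOutput());
    request.SetOutputSerialization(output);

    Aws::S3::Model::SelectObjectContentHandler handler;
    handler.SetRecordsEventCallback([](const Aws::S3::Model::RecordsEvent& event) {
        const auto& payload = event.GetPayload();         // raw CSV bytes of matching records
        Aws::String chunk(payload.begin(), payload.end());
        // consume chunk...
    });
    request.SetEventStreamHandler(handler);

    // Note the non-const request: the chunked response is delivered through the handler it carries.
    return client.SelectObjectContent(request).IsSuccess();
}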
/**
- * <p>This action filters the contents of an Amazon S3 object based on a simple
- * structured query language (SQL) statement. In the request, along with the SQL
- * expression, you must also specify a data serialization format (JSON, CSV, or
- * Apache Parquet) of the object. Amazon S3 uses this format to parse object data
- * into records, and returns only records that match the specified SQL expression.
- * You must also specify the data serialization format for the response.</p>
- * <p>This action is not supported by Amazon S3 on Outposts.</p> <p>For more
- * information about Amazon S3 Select, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html">Selecting
- * Content from Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more
- * information about using SQL with Amazon S3 Select, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">
- * SQL Reference for Amazon S3 Select and S3 Glacier Select</a> in the <i>Amazon S3
- * User Guide</i>.</p> <p/> <p> <b>Permissions</b> </p> <p>You must have
- * <code>s3:GetObject</code> permission for this operation. Amazon S3 Select does
- * not support anonymous access. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a> in the <i>Amazon S3 User Guide</i>.</p> <p/> <p>
- * <i>Object Data Formats</i> </p> <p>You can use Amazon S3 Select to query objects
- * that have the following format properties:</p> <ul> <li> <p> <i>CSV, JSON, and
- * Parquet</i> - Objects must be in CSV, JSON, or Parquet format.</p> </li> <li>
- * <p> <i>UTF-8</i> - UTF-8 is the only encoding type Amazon S3 Select
- * supports.</p> </li> <li> <p> <i>GZIP or BZIP2</i> - CSV and JSON files can be
- * compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats
- * that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports
- * columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not
- * support whole-object compression for Parquet objects.</p> </li> <li> <p>
- * <i>Server-side encryption</i> - Amazon S3 Select supports querying objects that
- * are protected with server-side encryption.</p> <p>For objects that are encrypted
- * with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must
- * use the headers that are documented in the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>.
- * For more information about SSE-C, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys)</a> in the <i>Amazon S3
- * User Guide</i>.</p> <p>For objects that are encrypted with Amazon S3 managed
- * encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key
- * Management Service (SSE-KMS), server-side encryption is handled transparently,
- * so you don't need to specify anything. For more information about server-side
- * encryption, including SSE-S3 and SSE-KMS, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
- * Data Using Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
- * </li> </ul> <p> <b>Working with the Response Body</b> </p> <p>Given the response
- * size is unknown, Amazon S3 Select streams the response as a series of messages
- * and includes a <code>Transfer-Encoding</code> header with <code>chunked</code>
- * as its value in the response. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html">Appendix:
- * SelectObjectContent Response</a> .</p> <p/> <p> <b>GetObject Support</b> </p>
- * <p>The <code>SelectObjectContent</code> action does not support the following
- * <code>GetObject</code> functionality. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>.</p>
- * <ul> <li> <p> <code>Range</code>: Although you can specify a scan range for an
- * Amazon S3 Select request (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange">SelectObjectContentRequest
- * - ScanRange</a> in the request parameters), you cannot specify the range of
- * bytes of an object to return. </p> </li> <li> <p>GLACIER, DEEP_ARCHIVE and
- * REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER,
- * DEEP_ARCHIVE, or <code>REDUCED_REDUNDANCY</code> storage classes. For more
- * information, about storage classes see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro">Storage
- * Classes</a> in the <i>Amazon S3 User Guide</i>.</p> </li> </ul> <p/> <p>
- * <b>Special Errors</b> </p> <p>For a list of special errors for this operation,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList">List
- * of SELECT Object Content Error Codes</a> </p> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html">GetBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for SelectObjectContent that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::SelectObjectContentOutcomeCallable SelectObjectContentCallable(Model::SelectObjectContentRequest& request) const;
+ template<typename SelectObjectContentRequestT = Model::SelectObjectContentRequest>
+ Model::SelectObjectContentOutcomeCallable SelectObjectContentCallable(SelectObjectContentRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::SelectObjectContent, request);
+ }
/**
- * <p>This action filters the contents of an Amazon S3 object based on a simple
- * structured query language (SQL) statement. In the request, along with the SQL
- * expression, you must also specify a data serialization format (JSON, CSV, or
- * Apache Parquet) of the object. Amazon S3 uses this format to parse object data
- * into records, and returns only records that match the specified SQL expression.
- * You must also specify the data serialization format for the response.</p>
- * <p>This action is not supported by Amazon S3 on Outposts.</p> <p>For more
- * information about Amazon S3 Select, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/selecting-content-from-objects.html">Selecting
- * Content from Objects</a> in the <i>Amazon S3 User Guide</i>.</p> <p>For more
- * information about using SQL with Amazon S3 Select, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/s3-glacier-select-sql-reference.html">
- * SQL Reference for Amazon S3 Select and S3 Glacier Select</a> in the <i>Amazon S3
- * User Guide</i>.</p> <p/> <p> <b>Permissions</b> </p> <p>You must have
- * <code>s3:GetObject</code> permission for this operation. Amazon S3 Select does
- * not support anonymous access. For more information about permissions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/using-with-s3-actions.html">Specifying
- * Permissions in a Policy</a> in the <i>Amazon S3 User Guide</i>.</p> <p/> <p>
- * <i>Object Data Formats</i> </p> <p>You can use Amazon S3 Select to query objects
- * that have the following format properties:</p> <ul> <li> <p> <i>CSV, JSON, and
- * Parquet</i> - Objects must be in CSV, JSON, or Parquet format.</p> </li> <li>
- * <p> <i>UTF-8</i> - UTF-8 is the only encoding type Amazon S3 Select
- * supports.</p> </li> <li> <p> <i>GZIP or BZIP2</i> - CSV and JSON files can be
- * compressed using GZIP or BZIP2. GZIP and BZIP2 are the only compression formats
- * that Amazon S3 Select supports for CSV and JSON files. Amazon S3 Select supports
- * columnar compression for Parquet using GZIP or Snappy. Amazon S3 Select does not
- * support whole-object compression for Parquet objects.</p> </li> <li> <p>
- * <i>Server-side encryption</i> - Amazon S3 Select supports querying objects that
- * are protected with server-side encryption.</p> <p>For objects that are encrypted
- * with customer-provided encryption keys (SSE-C), you must use HTTPS, and you must
- * use the headers that are documented in the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>.
- * For more information about SSE-C, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys)</a> in the <i>Amazon S3
- * User Guide</i>.</p> <p>For objects that are encrypted with Amazon S3 managed
- * encryption keys (SSE-S3) and customer master keys (CMKs) stored in AWS Key
- * Management Service (SSE-KMS), server-side encryption is handled transparently,
- * so you don't need to specify anything. For more information about server-side
- * encryption, including SSE-S3 and SSE-KMS, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html">Protecting
- * Data Using Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
- * </li> </ul> <p> <b>Working with the Response Body</b> </p> <p>Given the response
- * size is unknown, Amazon S3 Select streams the response as a series of messages
- * and includes a <code>Transfer-Encoding</code> header with <code>chunked</code>
- * as its value in the response. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTSelectObjectAppendix.html">Appendix:
- * SelectObjectContent Response</a> .</p> <p/> <p> <b>GetObject Support</b> </p>
- * <p>The <code>SelectObjectContent</code> action does not support the following
- * <code>GetObject</code> functionality. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>.</p>
- * <ul> <li> <p> <code>Range</code>: Although you can specify a scan range for an
- * Amazon S3 Select request (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_SelectObjectContent.html#AmazonS3-SelectObjectContent-request-ScanRange">SelectObjectContentRequest
- * - ScanRange</a> in the request parameters), you cannot specify the range of
- * bytes of an object to return. </p> </li> <li> <p>GLACIER, DEEP_ARCHIVE and
- * REDUCED_REDUNDANCY storage classes: You cannot specify the GLACIER,
- * DEEP_ARCHIVE, or <code>REDUCED_REDUNDANCY</code> storage classes. For more
- * information, about storage classes see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingMetadata.html#storage-class-intro">Storage
- * Classes</a> in the <i>Amazon S3 User Guide</i>.</p> </li> </ul> <p/> <p>
- * <b>Special Errors</b> </p> <p>For a list of special errors for this operation,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/ErrorResponses.html#SelectObjectContentErrorCodeList">List
- * of SELECT Object Content Error Codes</a> </p> <p class="title"> <b>Related
- * Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetBucketLifecycleConfiguration.html">GetBucketLifecycleConfiguration</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContent">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for SelectObjectContent that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void SelectObjectContentAsync(Model::SelectObjectContentRequest& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename SelectObjectContentRequestT = Model::SelectObjectContentRequest>
+ void SelectObjectContentAsync(SelectObjectContentRequestT& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::SelectObjectContent, request, handler, context);
+ }
/**
* <p>Uploads a part in a multipart upload.</p> <p>In this operation, you
@@ -10392,23 +5137,25 @@ namespace Aws
* part request.</p> <p>Part numbers can be any number from 1 to 10,000, inclusive.
* A part number uniquely identifies a part and also defines its position within
* the object being created. If you upload a new part using the same part number
- * that was used with a previous part, the previously uploaded part is overwritten.
- * Each part must be at least 5 MB in size, except the last part. There is no size
- * limit on the last part of your multipart upload.</p> <p>To ensure that data is
- * not corrupted when traversing the network, specify the <code>Content-MD5</code>
- * header in the upload part request. Amazon S3 checks the part data against the
- * provided MD5 value. If they do not match, Amazon S3 returns an error. </p> <p>If
- * the upload request is signed with Signature Version 4, then AWS S3 uses the
- * <code>x-amz-content-sha256</code> header as a checksum instead of
- * <code>Content-MD5</code>. For more information see <a
+ * that was used with a previous part, the previously uploaded part is
+ * overwritten.</p> <p>For information about maximum and minimum part sizes and
+ * other multipart upload specifications, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html">Multipart
+ * upload limits</a> in the <i>Amazon S3 User Guide</i>.</p> <p>To ensure that data
+ * is not corrupted when traversing the network, specify the
+ * <code>Content-MD5</code> header in the upload part request. Amazon S3 checks the
+ * part data against the provided MD5 value. If they do not match, Amazon S3
+ * returns an error. </p> <p>If the upload request is signed with Signature Version
+ * 4, then Amazon Web Services S3 uses the <code>x-amz-content-sha256</code> header
+ * as a checksum instead of <code>Content-MD5</code>. For more information see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html">Authenticating
- * Requests: Using the Authorization Header (AWS Signature Version 4)</a>. </p> <p>
- * <b>Note:</b> After you initiate multipart upload and upload one or more parts,
- * you must either complete or abort multipart upload in order to stop getting
- * charged for storage of the uploaded parts. Only after you either complete or
- * abort multipart upload, Amazon S3 frees up the parts storage and stops charging
- * you for the parts storage.</p> <p>For more information on multipart uploads, go
- * to <a
+ * Requests: Using the Authorization Header (Amazon Web Services Signature Version
+ * 4)</a>. </p> <p> <b>Note:</b> After you initiate multipart upload and upload one
+ * or more parts, you must either complete or abort multipart upload in order to
+ * stop getting charged for storage of the uploaded parts. Only after you either
+ * complete or abort multipart upload, Amazon S3 frees up the parts storage and
+ * stops charging you for the parts storage.</p> <p>For more information on
+ * multipart uploads, go to <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html">Multipart
* Upload Overview</a> in the <i>Amazon S3 User Guide </i>.</p> <p>For information
* on the permissions required to use the multipart upload API, go to <a
@@ -10417,9 +5164,9 @@ namespace Aws
* optionally request server-side encryption where Amazon S3 encrypts your data as
* it writes it to disks in its data centers and decrypts it for you when you
* access it. You have the option of providing your own encryption key, or you can
- * use the AWS managed encryption keys. If you choose to provide your own
- * encryption key, the request headers you provide in the request must match the
- * headers you used in the request to initiate the upload by using <a
+ * use the Amazon Web Services managed encryption keys. If you choose to provide
+ * your own encryption key, the request headers you provide in the request must
+ * match the headers you used in the request to initiate the upload by using <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>.
* For more information, go to <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">Using
@@ -10459,177 +5206,33 @@ namespace Aws
virtual Model::UploadPartOutcome UploadPart(const Model::UploadPartRequest& request) const;
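A brief sketch of uploading a single part, assuming a multipart upload has already been initiated with CreateMultipartUpload; the bucket, key, and upload ID are placeholders.

#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/UploadPartRequest.h>

// Hypothetical example: upload one part and return its ETag (needed later by CompleteMultipartUpload).
Aws::String UploadOnePart(const Aws::S3::S3Client& client,
                          const Aws::String& uploadId,
                          int partNumber,
                          const Aws::String& data)
{
    Aws::S3::Model::UploadPartRequest request;
    request.SetBucket("example-bucket");       // illustrative
    request.SetKey("big/object.bin");
    request.SetUploadId(uploadId);             // returned by CreateMultipartUpload
    request.SetPartNumber(partNumber);         // 1..10000; reusing a number overwrites that part

    auto body = Aws::MakeShared<Aws::StringStream>("UploadPartBody", data);
    request.SetBody(body);

    auto outcome = client.UploadPart(request);
    return outcome.IsSuccess() ? outcome.GetResult().GetETag() : Aws::String();
}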
/**
- * <p>Uploads a part in a multipart upload.</p> <p>In this operation, you
- * provide part data in your request. However, you have an option to specify your
- * existing Amazon S3 object as a data source for the part you are uploading. To
- * upload a part from an existing object, you use the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html">UploadPartCopy</a>
- * operation. </p> <p>You must initiate a multipart upload (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>)
- * before you can upload any part. In response to your initiate request, Amazon S3
- * returns an upload ID, a unique identifier, that you must include in your upload
- * part request.</p> <p>Part numbers can be any number from 1 to 10,000, inclusive.
- * A part number uniquely identifies a part and also defines its position within
- * the object being created. If you upload a new part using the same part number
- * that was used with a previous part, the previously uploaded part is overwritten.
- * Each part must be at least 5 MB in size, except the last part. There is no size
- * limit on the last part of your multipart upload.</p> <p>To ensure that data is
- * not corrupted when traversing the network, specify the <code>Content-MD5</code>
- * header in the upload part request. Amazon S3 checks the part data against the
- * provided MD5 value. If they do not match, Amazon S3 returns an error. </p> <p>If
- * the upload request is signed with Signature Version 4, then AWS S3 uses the
- * <code>x-amz-content-sha256</code> header as a checksum instead of
- * <code>Content-MD5</code>. For more information see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html">Authenticating
- * Requests: Using the Authorization Header (AWS Signature Version 4)</a>. </p> <p>
- * <b>Note:</b> After you initiate multipart upload and upload one or more parts,
- * you must either complete or abort multipart upload in order to stop getting
- * charged for storage of the uploaded parts. Only after you either complete or
- * abort multipart upload, Amazon S3 frees up the parts storage and stops charging
- * you for the parts storage.</p> <p>For more information on multipart uploads, go
- * to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html">Multipart
- * Upload Overview</a> in the <i>Amazon S3 User Guide </i>.</p> <p>For information
- * on the permissions required to use the multipart upload API, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a> in the <i>Amazon S3 User Guide</i>.</p> <p>You can
- * optionally request server-side encryption where Amazon S3 encrypts your data as
- * it writes it to disks in its data centers and decrypts it for you when you
- * access it. You have the option of providing your own encryption key, or you can
- * use the AWS managed encryption keys. If you choose to provide your own
- * encryption key, the request headers you provide in the request must match the
- * headers you used in the request to initiate the upload by using <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>.
- * For more information, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">Using
- * Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
- * <p>Server-side encryption is supported by the S3 Multipart Upload actions.
- * Unless you are using a customer-provided encryption key, you don't need to
- * specify the encryption parameters in each UploadPart request. Instead, you only
- * need to specify the server-side encryption parameters in the initial Initiate
- * Multipart request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>.</p>
- * <p>If you requested server-side encryption using a customer-provided encryption
- * key in your initiate multipart upload request, you must provide identical
- * encryption information in each part upload using the following headers.</p> <ul>
- * <li> <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p
- * class="title"> <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p> <i>Code:
- * NoSuchUpload</i> </p> </li> <li> <p> <i>Cause: The specified multipart upload
- * does not exist. The upload ID might be invalid, or the multipart upload might
- * have been aborted or completed.</i> </p> </li> <li> <p> <i> HTTP Status Code:
- * 404 Not Found </i> </p> </li> <li> <p> <i>SOAP Fault Code Prefix: Client</i>
- * </p> </li> </ul> </li> </ul> <p class="title"> <b>Related Resources</b> </p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart">AWS API
- * Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for UploadPart that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::UploadPartOutcomeCallable UploadPartCallable(const Model::UploadPartRequest& request) const;
+ template<typename UploadPartRequestT = Model::UploadPartRequest>
+ Model::UploadPartOutcomeCallable UploadPartCallable(const UploadPartRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::UploadPart, request);
+ }
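Because the Callable wrapper returns a future, several parts can be submitted concurrently and their ETags collected afterwards; a hedged sketch, reusing the hypothetical request setup from the UploadPart example above.

#include <future>
#include <vector>

#include <aws/s3/S3Client.h>
#include <aws/s3/model/UploadPartRequest.h>

// Hypothetical example: submit several already-built part requests concurrently.
std::vector<Aws::String> UploadPartsInParallel(const Aws::S3::S3Client& client,
                                               const std::vector<Aws::S3::Model::UploadPartRequest>& requests)
{
    std::vector<Aws::S3::Model::UploadPartOutcomeCallable> futures;
    futures.reserve(requests.size());
    for (const auto& request : requests) {
        futures.push_back(client.UploadPartCallable(request));  // queued on the client's executor
    }

    std::vector<Aws::String> etags;
    for (auto& future : futures) {
        auto outcome = future.get();                             // wait for each part to finish
        etags.push_back(outcome.IsSuccess() ? outcome.GetResult().GetETag() : Aws::String());
    }
    return etags;
}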
/**
- * <p>Uploads a part in a multipart upload.</p> <p>In this operation, you
- * provide part data in your request. However, you have an option to specify your
- * existing Amazon S3 object as a data source for the part you are uploading. To
- * upload a part from an existing object, you use the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPartCopy.html">UploadPartCopy</a>
- * operation. </p> <p>You must initiate a multipart upload (see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>)
- * before you can upload any part. In response to your initiate request, Amazon S3
- * returns an upload ID, a unique identifier, that you must include in your upload
- * part request.</p> <p>Part numbers can be any number from 1 to 10,000, inclusive.
- * A part number uniquely identifies a part and also defines its position within
- * the object being created. If you upload a new part using the same part number
- * that was used with a previous part, the previously uploaded part is overwritten.
- * Each part must be at least 5 MB in size, except the last part. There is no size
- * limit on the last part of your multipart upload.</p> <p>To ensure that data is
- * not corrupted when traversing the network, specify the <code>Content-MD5</code>
- * header in the upload part request. Amazon S3 checks the part data against the
- * provided MD5 value. If they do not match, Amazon S3 returns an error. </p> <p>If
- * the upload request is signed with Signature Version 4, then AWS S3 uses the
- * <code>x-amz-content-sha256</code> header as a checksum instead of
- * <code>Content-MD5</code>. For more information see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-auth-using-authorization-header.html">Authenticating
- * Requests: Using the Authorization Header (AWS Signature Version 4)</a>. </p> <p>
- * <b>Note:</b> After you initiate multipart upload and upload one or more parts,
- * you must either complete or abort multipart upload in order to stop getting
- * charged for storage of the uploaded parts. Only after you either complete or
- * abort multipart upload, Amazon S3 frees up the parts storage and stops charging
- * you for the parts storage.</p> <p>For more information on multipart uploads, go
- * to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuoverview.html">Multipart
- * Upload Overview</a> in the <i>Amazon S3 User Guide </i>.</p> <p>For information
- * on the permissions required to use the multipart upload API, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a> in the <i>Amazon S3 User Guide</i>.</p> <p>You can
- * optionally request server-side encryption where Amazon S3 encrypts your data as
- * it writes it to disks in its data centers and decrypts it for you when you
- * access it. You have the option of providing your own encryption key, or you can
- * use the AWS managed encryption keys. If you choose to provide your own
- * encryption key, the request headers you provide in the request must match the
- * headers you used in the request to initiate the upload by using <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>.
- * For more information, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingServerSideEncryption.html">Using
- * Server-Side Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
- * <p>Server-side encryption is supported by the S3 Multipart Upload actions.
- * Unless you are using a customer-provided encryption key, you don't need to
- * specify the encryption parameters in each UploadPart request. Instead, you only
- * need to specify the server-side encryption parameters in the initial Initiate
- * Multipart request. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>.</p>
- * <p>If you requested server-side encryption using a customer-provided encryption
- * key in your initiate multipart upload request, you must provide identical
- * encryption information in each part upload using the following headers.</p> <ul>
- * <li> <p>x-amz-server-side-encryption-customer-algorithm</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key</p> </li> <li>
- * <p>x-amz-server-side-encryption-customer-key-MD5</p> </li> </ul> <p
- * class="title"> <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p> <i>Code:
- * NoSuchUpload</i> </p> </li> <li> <p> <i>Cause: The specified multipart upload
- * does not exist. The upload ID might be invalid, or the multipart upload might
- * have been aborted or completed.</i> </p> </li> <li> <p> <i> HTTP Status Code:
- * 404 Not Found </i> </p> </li> <li> <p> <i>SOAP Fault Code Prefix: Client</i>
- * </p> </li> </ul> </li> </ul> <p class="title"> <b>Related Resources</b> </p>
- * <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPart">AWS API
- * Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for UploadPart that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void UploadPartAsync(const Model::UploadPartRequest& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename UploadPartRequestT = Model::UploadPartRequest>
+ void UploadPartAsync(const UploadPartRequestT& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::UploadPart, request, handler, context);
+ }
/**
* <p>Uploads a part by copying data from an existing object as data source. You
* specify the data source by adding the request header
* <code>x-amz-copy-source</code> in your request and a byte range by adding the
- * request header <code>x-amz-copy-source-range</code> in your request. </p> <p>The
- * minimum allowable part size for a multipart upload is 5 MB. For more information
- * about multipart upload limits, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html">Quick
- * Facts</a> in the <i>Amazon S3 User Guide</i>. </p> <p>Instead of using an
- * existing object as part data, you might use the <a
+ * request header <code>x-amz-copy-source-range</code> in your request. </p> <p>For
+ * information about maximum and minimum part sizes and other multipart upload
+ * specifications, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html">Multipart
+ * upload limits</a> in the <i>Amazon S3 User Guide</i>. </p> <p>Instead of
+ * using an existing object as part data, you might use the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
* action and provide data in your request.</p> <p>You must initiate a
* multipart upload before you can upload any part. In response to your initiate
@@ -10643,12 +5246,12 @@ namespace Aws
* API, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
* Upload and Permissions</a> in the <i>Amazon S3 User Guide</i>.</p> </li> <li>
- * <p>For information about copying objects using a single atomic action vs. the
+ * <p>For information about copying objects using a single atomic action vs. a
* multipart upload, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html">Operations
* on Objects</a> in the <i>Amazon S3 User Guide</i>.</p> </li> <li> <p>For
* information about using server-side encryption with customer-provided encryption
- * keys with the UploadPartCopy operation, see <a
+ * keys with the <code>UploadPartCopy</code> operation, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
* and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>.</p>
@@ -10709,322 +5312,100 @@ namespace Aws
virtual Model::UploadPartCopyOutcome UploadPartCopy(const Model::UploadPartCopyRequest& request) const;
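Editorial note, not part of the SDK diff: a hedged sketch of a synchronous UploadPartCopy call to make the header mapping described above concrete. The bucket names, upload ID, and 5 MiB range are placeholders; CopySource and CopySourceRange are the request members the SDK serializes into the x-amz-copy-source and x-amz-copy-source-range headers.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/UploadPartCopyRequest.h>

    Aws::S3::Model::UploadPartCopyOutcome CopyFirstPart(const Aws::S3::S3Client& s3,
                                                        const Aws::String& uploadId)
    {
        Aws::S3::Model::UploadPartCopyRequest req;
        req.SetBucket("dest-bucket");                    // destination bucket of the multipart upload (placeholder)
        req.SetKey("dest-object");                       // destination key (placeholder)
        req.SetUploadId(uploadId);                       // returned by CreateMultipartUpload
        req.SetPartNumber(1);
        req.SetCopySource("src-bucket/src-object");      // sent as the x-amz-copy-source header
        req.SetCopySourceRange("bytes=0-5242879");       // sent as x-amz-copy-source-range (first 5 MiB)
        return s3.UploadPartCopy(req);
    }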
/**
- * <p>Uploads a part by copying data from an existing object as data source. You
- * specify the data source by adding the request header
- * <code>x-amz-copy-source</code> in your request and a byte range by adding the
- * request header <code>x-amz-copy-source-range</code> in your request. </p> <p>The
- * minimum allowable part size for a multipart upload is 5 MB. For more information
- * about multipart upload limits, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html">Quick
- * Facts</a> in the <i>Amazon S3 User Guide</i>. </p> <p>Instead of using an
- * existing object as part data, you might use the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * action and provide data in your request.</p> <p>You must initiate a
- * multipart upload before you can upload any part. In response to your initiate
- * request. Amazon S3 returns a unique identifier, the upload ID, that you must
- * include in your upload part request.</p> <p>For more information about using the
- * <code>UploadPartCopy</code> operation, see the following:</p> <ul> <li> <p>For
- * conceptual information about multipart uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a> in the <i>Amazon S3 User Guide</i>.</p> </li>
- * <li> <p>For information about permissions required to use the multipart upload
- * API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a> in the <i>Amazon S3 User Guide</i>.</p> </li> <li>
- * <p>For information about copying objects using a single atomic action vs. the
- * multipart upload, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html">Operations
- * on Objects</a> in the <i>Amazon S3 User Guide</i>.</p> </li> <li> <p>For
- * information about using server-side encryption with customer-provided encryption
- * keys with the UploadPartCopy operation, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
- * and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>.</p>
- * </li> </ul> <p>Note the following additional considerations about the request
- * headers <code>x-amz-copy-source-if-match</code>,
- * <code>x-amz-copy-source-if-none-match</code>,
- * <code>x-amz-copy-source-if-unmodified-since</code>, and
- * <code>x-amz-copy-source-if-modified-since</code>:</p> <p> </p> <ul> <li> <p>
- * <b>Consideration 1</b> - If both of the <code>x-amz-copy-source-if-match</code>
- * and <code>x-amz-copy-source-if-unmodified-since</code> headers are present in
- * the request as follows:</p> <p> <code>x-amz-copy-source-if-match</code>
- * condition evaluates to <code>true</code>, and;</p> <p>
- * <code>x-amz-copy-source-if-unmodified-since</code> condition evaluates to
- * <code>false</code>;</p> <p>Amazon S3 returns <code>200 OK</code> and copies the
- * data. </p> </li> <li> <p> <b>Consideration 2</b> - If both of the
- * <code>x-amz-copy-source-if-none-match</code> and
- * <code>x-amz-copy-source-if-modified-since</code> headers are present in the
- * request as follows:</p> <p> <code>x-amz-copy-source-if-none-match</code>
- * condition evaluates to <code>false</code>, and;</p> <p>
- * <code>x-amz-copy-source-if-modified-since</code> condition evaluates to
- * <code>true</code>;</p> <p>Amazon S3 returns <code>412 Precondition Failed</code>
- * response code. </p> </li> </ul> <p> <b>Versioning</b> </p> <p>If your bucket has
- * versioning enabled, you could have multiple versions of the same object. By
- * default, <code>x-amz-copy-source</code> identifies the current version of the
- * object to copy. If the current version is a delete marker and you don't specify
- * a versionId in the <code>x-amz-copy-source</code>, Amazon S3 returns a 404
- * error, because the object does not exist. If you specify versionId in the
- * <code>x-amz-copy-source</code> and the versionId is a delete marker, Amazon S3
- * returns an HTTP 400 error, because you are not allowed to specify a delete
- * marker as a version for the <code>x-amz-copy-source</code>. </p> <p>You can
- * optionally specify a specific version of the source object to copy by adding the
- * <code>versionId</code> subresource as shown in the following example:</p> <p>
- * <code>x-amz-copy-source: /bucket/object?versionId=version id</code> </p> <p
- * class="title"> <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p> <i>Code:
- * NoSuchUpload</i> </p> </li> <li> <p> <i>Cause: The specified multipart upload
- * does not exist. The upload ID might be invalid, or the multipart upload might
- * have been aborted or completed.</i> </p> </li> <li> <p> <i>HTTP Status Code: 404
- * Not Found</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:
- * InvalidRequest</i> </p> </li> <li> <p> <i>Cause: The specified copy source is
- * not supported as a byte-range copy source.</i> </p> </li> <li> <p> <i>HTTP
- * Status Code: 400 Bad Request</i> </p> </li> </ul> </li> </ul> <p class="title">
- * <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for UploadPartCopy that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::UploadPartCopyOutcomeCallable UploadPartCopyCallable(const Model::UploadPartCopyRequest& request) const;
+ template<typename UploadPartCopyRequestT = Model::UploadPartCopyRequest>
+ Model::UploadPartCopyOutcomeCallable UploadPartCopyCallable(const UploadPartCopyRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::UploadPartCopy, request);
+ }
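Editorial note, not part of the SDK diff: the Callable wrappers return a std::future over the operation's outcome, so the caller can overlap the copy with other work and block only when the result is needed. A small sketch, assuming the same client s3 and a populated Model::UploadPartCopyRequest named copyReq as in the sketch above:

    auto pending = s3.UploadPartCopyCallable(copyReq);
    // ... issue other requests or do unrelated work here ...
    Aws::S3::Model::UploadPartCopyOutcome outcome = pending.get();   // blocks until the copy finishes
    if (!outcome.IsSuccess())
    {
        // outcome.GetError().GetMessage() describes what went wrong
    }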
/**
- * <p>Uploads a part by copying data from an existing object as data source. You
- * specify the data source by adding the request header
- * <code>x-amz-copy-source</code> in your request and a byte range by adding the
- * request header <code>x-amz-copy-source-range</code> in your request. </p> <p>The
- * minimum allowable part size for a multipart upload is 5 MB. For more information
- * about multipart upload limits, go to <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/qfacts.html">Quick
- * Facts</a> in the <i>Amazon S3 User Guide</i>. </p> <p>Instead of using an
- * existing object as part data, you might use the <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * action and provide data in your request.</p> <p>You must initiate a
- * multipart upload before you can upload any part. In response to your initiate
- * request. Amazon S3 returns a unique identifier, the upload ID, that you must
- * include in your upload part request.</p> <p>For more information about using the
- * <code>UploadPartCopy</code> operation, see the following:</p> <ul> <li> <p>For
- * conceptual information about multipart uploads, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html">Uploading
- * Objects Using Multipart Upload</a> in the <i>Amazon S3 User Guide</i>.</p> </li>
- * <li> <p>For information about permissions required to use the multipart upload
- * API, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/mpuAndPermissions.html">Multipart
- * Upload and Permissions</a> in the <i>Amazon S3 User Guide</i>.</p> </li> <li>
- * <p>For information about copying objects using a single atomic action vs. the
- * multipart upload, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ObjectOperations.html">Operations
- * on Objects</a> in the <i>Amazon S3 User Guide</i>.</p> </li> <li> <p>For
- * information about using server-side encryption with customer-provided encryption
- * keys with the UploadPartCopy operation, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CopyObject.html">CopyObject</a>
- * and <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>.</p>
- * </li> </ul> <p>Note the following additional considerations about the request
- * headers <code>x-amz-copy-source-if-match</code>,
- * <code>x-amz-copy-source-if-none-match</code>,
- * <code>x-amz-copy-source-if-unmodified-since</code>, and
- * <code>x-amz-copy-source-if-modified-since</code>:</p> <p> </p> <ul> <li> <p>
- * <b>Consideration 1</b> - If both of the <code>x-amz-copy-source-if-match</code>
- * and <code>x-amz-copy-source-if-unmodified-since</code> headers are present in
- * the request as follows:</p> <p> <code>x-amz-copy-source-if-match</code>
- * condition evaluates to <code>true</code>, and;</p> <p>
- * <code>x-amz-copy-source-if-unmodified-since</code> condition evaluates to
- * <code>false</code>;</p> <p>Amazon S3 returns <code>200 OK</code> and copies the
- * data. </p> </li> <li> <p> <b>Consideration 2</b> - If both of the
- * <code>x-amz-copy-source-if-none-match</code> and
- * <code>x-amz-copy-source-if-modified-since</code> headers are present in the
- * request as follows:</p> <p> <code>x-amz-copy-source-if-none-match</code>
- * condition evaluates to <code>false</code>, and;</p> <p>
- * <code>x-amz-copy-source-if-modified-since</code> condition evaluates to
- * <code>true</code>;</p> <p>Amazon S3 returns <code>412 Precondition Failed</code>
- * response code. </p> </li> </ul> <p> <b>Versioning</b> </p> <p>If your bucket has
- * versioning enabled, you could have multiple versions of the same object. By
- * default, <code>x-amz-copy-source</code> identifies the current version of the
- * object to copy. If the current version is a delete marker and you don't specify
- * a versionId in the <code>x-amz-copy-source</code>, Amazon S3 returns a 404
- * error, because the object does not exist. If you specify versionId in the
- * <code>x-amz-copy-source</code> and the versionId is a delete marker, Amazon S3
- * returns an HTTP 400 error, because you are not allowed to specify a delete
- * marker as a version for the <code>x-amz-copy-source</code>. </p> <p>You can
- * optionally specify a specific version of the source object to copy by adding the
- * <code>versionId</code> subresource as shown in the following example:</p> <p>
- * <code>x-amz-copy-source: /bucket/object?versionId=version id</code> </p> <p
- * class="title"> <b>Special Errors</b> </p> <ul> <li> <ul> <li> <p> <i>Code:
- * NoSuchUpload</i> </p> </li> <li> <p> <i>Cause: The specified multipart upload
- * does not exist. The upload ID might be invalid, or the multipart upload might
- * have been aborted or completed.</i> </p> </li> <li> <p> <i>HTTP Status Code: 404
- * Not Found</i> </p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:
- * InvalidRequest</i> </p> </li> <li> <p> <i>Cause: The specified copy source is
- * not supported as a byte-range copy source.</i> </p> </li> <li> <p> <i>HTTP
- * Status Code: 400 Bad Request</i> </p> </li> </ul> </li> </ul> <p class="title">
- * <b>Related Resources</b> </p> <ul> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CreateMultipartUpload.html">CreateMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_UploadPart.html">UploadPart</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html">CompleteMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_AbortMultipartUpload.html">AbortMultipartUpload</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListParts.html">ListParts</a>
- * </p> </li> <li> <p> <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_ListMultipartUploads.html">ListMultipartUploads</a>
- * </p> </li> </ul><p><h3>See Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/UploadPartCopy">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for UploadPartCopy that queues the request into a thread executor and triggers the associated callback when the operation has finished.
*/
- virtual void UploadPartCopyAsync(const Model::UploadPartCopyRequest& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename UploadPartCopyRequestT = Model::UploadPartCopyRequest>
+ void UploadPartCopyAsync(const UploadPartCopyRequestT& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::UploadPartCopy, request, handler, context);
+ }
/**
* <p>Passes transformed objects to a <code>GetObject</code> operation when using
- * Object Lambda Access Points. For information about Object Lambda Access Points,
+ * Object Lambda access points. For information about Object Lambda access points,
* see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html">Transforming
- * objects with Object Lambda Access Points</a> in the <i>Amazon S3 User
+ * objects with Object Lambda access points</a> in the <i>Amazon S3 User
* Guide</i>.</p> <p>This operation supports metadata that can be returned by <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>,
* in addition to <code>RequestRoute</code>, <code>RequestToken</code>,
* <code>StatusCode</code>, <code>ErrorCode</code>, and <code>ErrorMessage</code>.
* The <code>GetObject</code> response metadata is supported so that the
- * <code>WriteGetObjectResponse</code> caller, typically an AWS Lambda function,
- * can provide the same metadata when it internally invokes <code>GetObject</code>.
+ * <code>WriteGetObjectResponse</code> caller, typically a Lambda function, can
+ * provide the same metadata when it internally invokes <code>GetObject</code>.
* When <code>WriteGetObjectResponse</code> is called by a customer-owned Lambda
* function, the metadata returned to the end user <code>GetObject</code> call
- * might differ from what Amazon S3 would normally return.</p> <p>AWS provides some
- * prebuilt Lambda functions that you can use with S3 Object Lambda to detect and
- * redact personally identifiable information (PII) and decompress S3 objects.
- * These Lambda functions are available in the AWS Serverless Application
- * Repository, and can be selected through the AWS Management Console when you
- * create your Object Lambda Access Point.</p> <p>Example 1: PII Access Control -
- * This Lambda function uses Amazon Comprehend, a natural language processing (NLP)
- * service using machine learning to find insights and relationships in text. It
- * automatically detects personally identifiable information (PII) such as names,
- * addresses, dates, credit card numbers, and social security numbers from
- * documents in your Amazon S3 bucket. </p> <p>Example 2: PII Redaction - This
- * Lambda function uses Amazon Comprehend, a natural language processing (NLP)
- * service using machine learning to find insights and relationships in text. It
- * automatically redacts personally identifiable information (PII) such as names,
- * addresses, dates, credit card numbers, and social security numbers from
- * documents in your Amazon S3 bucket. </p> <p>Example 3: Decompression - The
- * Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects
- * stored in S3 in one of six compressed file formats including bzip2, gzip,
- * snappy, zlib, zstandard and ZIP. </p> <p>For information on how to view and use
- * these functions, see <a
+ * might differ from what Amazon S3 would normally return.</p> <p>You can include
+ * any number of metadata headers. When including a metadata header, it should be
+ * prefaced with <code>x-amz-meta</code>. For example,
+ * <code>x-amz-meta-my-custom-header: MyCustomValue</code>. The primary use case
+ * for this is to forward <code>GetObject</code> metadata.</p> <p>Amazon Web
+ * Services provides some prebuilt Lambda functions that you can use with S3 Object
+ * Lambda to detect and redact personally identifiable information (PII) and
+ * decompress S3 objects. These Lambda functions are available in the Amazon Web
+ * Services Serverless Application Repository, and can be selected through the
+ * Amazon Web Services Management Console when you create your Object Lambda access
+ * point.</p> <p>Example 1: PII Access Control - This Lambda function uses Amazon
+ * Comprehend, a natural language processing (NLP) service using machine learning
+ * to find insights and relationships in text. It automatically detects personally
+ * identifiable information (PII) such as names, addresses, dates, credit card
+ * numbers, and social security numbers from documents in your Amazon S3 bucket.
+ * </p> <p>Example 2: PII Redaction - This Lambda function uses Amazon Comprehend,
+ * a natural language processing (NLP) service using machine learning to find
+ * insights and relationships in text. It automatically redacts personally
+ * identifiable information (PII) such as names, addresses, dates, credit card
+ * numbers, and social security numbers from documents in your Amazon S3 bucket.
+ * </p> <p>Example 3: Decompression - The Lambda function
+ * S3ObjectLambdaDecompression is equipped to decompress objects stored in S3 in
+ * one of six compressed file formats including bzip2, gzip, snappy, zlib,
+ * zstandard and ZIP. </p> <p>For information on how to view and use these
+ * functions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html">Using
- * AWS built Lambda functions</a> in the <i>Amazon S3 User Guide</i>.</p><p><h3>See
- * Also:</h3> <a
+ * Amazon Web Services built Lambda functions</a> in the <i>Amazon S3 User
+ * Guide</i>.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse">AWS
* API Reference</a></p>
*/
virtual Model::WriteGetObjectResponseOutcome WriteGetObjectResponse(const Model::WriteGetObjectResponseRequest& request) const;
/**
- * <p>Passes transformed objects to a <code>GetObject</code> operation when using
- * Object Lambda Access Points. For information about Object Lambda Access Points,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html">Transforming
- * objects with Object Lambda Access Points</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p>This operation supports metadata that can be returned by <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>,
- * in addition to <code>RequestRoute</code>, <code>RequestToken</code>,
- * <code>StatusCode</code>, <code>ErrorCode</code>, and <code>ErrorMessage</code>.
- * The <code>GetObject</code> response metadata is supported so that the
- * <code>WriteGetObjectResponse</code> caller, typically an AWS Lambda function,
- * can provide the same metadata when it internally invokes <code>GetObject</code>.
- * When <code>WriteGetObjectResponse</code> is called by a customer-owned Lambda
- * function, the metadata returned to the end user <code>GetObject</code> call
- * might differ from what Amazon S3 would normally return.</p> <p>AWS provides some
- * prebuilt Lambda functions that you can use with S3 Object Lambda to detect and
- * redact personally identifiable information (PII) and decompress S3 objects.
- * These Lambda functions are available in the AWS Serverless Application
- * Repository, and can be selected through the AWS Management Console when you
- * create your Object Lambda Access Point.</p> <p>Example 1: PII Access Control -
- * This Lambda function uses Amazon Comprehend, a natural language processing (NLP)
- * service using machine learning to find insights and relationships in text. It
- * automatically detects personally identifiable information (PII) such as names,
- * addresses, dates, credit card numbers, and social security numbers from
- * documents in your Amazon S3 bucket. </p> <p>Example 2: PII Redaction - This
- * Lambda function uses Amazon Comprehend, a natural language processing (NLP)
- * service using machine learning to find insights and relationships in text. It
- * automatically redacts personally identifiable information (PII) such as names,
- * addresses, dates, credit card numbers, and social security numbers from
- * documents in your Amazon S3 bucket. </p> <p>Example 3: Decompression - The
- * Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects
- * stored in S3 in one of six compressed file formats including bzip2, gzip,
- * snappy, zlib, zstandard and ZIP. </p> <p>For information on how to view and use
- * these functions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html">Using
- * AWS built Lambda functions</a> in the <i>Amazon S3 User Guide</i>.</p><p><h3>See
- * Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse">AWS
- * API Reference</a></p>
- *
- * returns a future to the operation so that it can be executed in parallel to other requests.
+ * A Callable wrapper for WriteGetObjectResponse that returns a future to the operation so that it can be executed in parallel to other requests.
*/
- virtual Model::WriteGetObjectResponseOutcomeCallable WriteGetObjectResponseCallable(const Model::WriteGetObjectResponseRequest& request) const;
+ template<typename WriteGetObjectResponseRequestT = Model::WriteGetObjectResponseRequest>
+ Model::WriteGetObjectResponseOutcomeCallable WriteGetObjectResponseCallable(const WriteGetObjectResponseRequestT& request) const
+ {
+ return SubmitCallable(&S3Client::WriteGetObjectResponse, request);
+ }
/**
- * <p>Passes transformed objects to a <code>GetObject</code> operation when using
- * Object Lambda Access Points. For information about Object Lambda Access Points,
- * see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/transforming-objects.html">Transforming
- * objects with Object Lambda Access Points</a> in the <i>Amazon S3 User
- * Guide</i>.</p> <p>This operation supports metadata that can be returned by <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_GetObject.html">GetObject</a>,
- * in addition to <code>RequestRoute</code>, <code>RequestToken</code>,
- * <code>StatusCode</code>, <code>ErrorCode</code>, and <code>ErrorMessage</code>.
- * The <code>GetObject</code> response metadata is supported so that the
- * <code>WriteGetObjectResponse</code> caller, typically an AWS Lambda function,
- * can provide the same metadata when it internally invokes <code>GetObject</code>.
- * When <code>WriteGetObjectResponse</code> is called by a customer-owned Lambda
- * function, the metadata returned to the end user <code>GetObject</code> call
- * might differ from what Amazon S3 would normally return.</p> <p>AWS provides some
- * prebuilt Lambda functions that you can use with S3 Object Lambda to detect and
- * redact personally identifiable information (PII) and decompress S3 objects.
- * These Lambda functions are available in the AWS Serverless Application
- * Repository, and can be selected through the AWS Management Console when you
- * create your Object Lambda Access Point.</p> <p>Example 1: PII Access Control -
- * This Lambda function uses Amazon Comprehend, a natural language processing (NLP)
- * service using machine learning to find insights and relationships in text. It
- * automatically detects personally identifiable information (PII) such as names,
- * addresses, dates, credit card numbers, and social security numbers from
- * documents in your Amazon S3 bucket. </p> <p>Example 2: PII Redaction - This
- * Lambda function uses Amazon Comprehend, a natural language processing (NLP)
- * service using machine learning to find insights and relationships in text. It
- * automatically redacts personally identifiable information (PII) such as names,
- * addresses, dates, credit card numbers, and social security numbers from
- * documents in your Amazon S3 bucket. </p> <p>Example 3: Decompression - The
- * Lambda function S3ObjectLambdaDecompression, is equipped to decompress objects
- * stored in S3 in one of six compressed file formats including bzip2, gzip,
- * snappy, zlib, zstandard and ZIP. </p> <p>For information on how to view and use
- * these functions, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/olap-examples.html">Using
- * AWS built Lambda functions</a> in the <i>Amazon S3 User Guide</i>.</p><p><h3>See
- * Also:</h3> <a
- * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WriteGetObjectResponse">AWS
- * API Reference</a></p>
- *
- * Queues the request into a thread executor and triggers associated callback when operation has finished.
+ * An Async wrapper for WriteGetObjectResponse that queues the request into a thread executor and triggers associated callback when operation has finished.
*/
- virtual void WriteGetObjectResponseAsync(const Model::WriteGetObjectResponseRequest& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const;
+ template<typename WriteGetObjectResponseRequestT = Model::WriteGetObjectResponseRequest>
+ void WriteGetObjectResponseAsync(const WriteGetObjectResponseRequestT& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context = nullptr) const
+ {
+ return SubmitAsync(&S3Client::WriteGetObjectResponse, request, handler, context);
+ }
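Editorial note, not part of the SDK diff: WriteGetObjectResponse is normally issued from inside an S3 Object Lambda function, so a hedged sketch of that call may help. requestRoute, requestToken, and transformedBody are placeholders standing in for the routing values the function receives in its event and for the stream it produced.

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/WriteGetObjectResponseRequest.h>

    void ReturnTransformedObject(const Aws::S3::S3Client& s3,
                                 const Aws::String& requestRoute,     // output route from the event's getObjectContext
                                 const Aws::String& requestToken,     // output token from the same context
                                 const std::shared_ptr<Aws::IOStream>& transformedBody)
    {
        Aws::S3::Model::WriteGetObjectResponseRequest req;
        req.SetRequestRoute(requestRoute);
        req.SetRequestToken(requestToken);
        req.SetStatusCode(200);
        req.SetBody(transformedBody);    // the bytes the end user's GetObject call will receive
        auto outcome = s3.WriteGetObjectResponse(req);
        // outcome.IsSuccess() indicates whether Amazon S3 accepted the response
    }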
- Aws::String GeneratePresignedUrl(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrl(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
- Aws::String GeneratePresignedUrl(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrl(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ const Http::HeaderValueCollection& customizedHeaders,
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
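Editorial note, not part of the SDK diff: a quick illustration of the reworked signatures above (the expiration is now a uint64_t). The bucket and key are placeholders; omitting the last argument keeps the MAX_EXPIRATION_SECONDS default.

    Aws::String url = s3.GeneratePresignedUrl("my-bucket", "my-object",
                                              Aws::Http::HttpMethod::HTTP_GET,
                                              3600);   // URL stays valid for one hour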
/**
* Server Side Encryption Headers and Algorithm
@@ -11037,155 +5418,77 @@ namespace Aws
 * Generate presigned URL with Server Side Encryption (SSE) and with S3 managed keys.
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256)
*/
- Aws::String GeneratePresignedUrlWithSSES3(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrlWithSSES3(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
/**
 * Generate presigned URL with Server Side Encryption (SSE) and with S3 managed keys.
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256)
* Header: "x-amz-server-side-encryption" will be added internally, don't customize it.
*/
- Aws::String GeneratePresignedUrlWithSSES3(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrlWithSSES3(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ Http::HeaderValueCollection customizedHeaders,
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
/**
* Generate presigned URL with Server Side Encryption(SSE) and with KMS master key id.
 * If kmsMasterKeyId is empty, we will end up using the default one generated by KMS for you. You can find it via the AWS IAM console; it's the one aliased as "aws/s3".
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: aws:kms)
*/
- Aws::String GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Aws::String& kmsMasterKeyId = "", long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ const Aws::String& kmsMasterKeyId = "",
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
/**
* Generate presigned URL with Server Side Encryption(SSE) and with KMS master key id.
 * If kmsMasterKeyId is empty, we will end up using the default one generated by KMS for you. You can find it via the AWS IAM console; it's the one aliased as "aws/s3".
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: aws:kms)
* Headers: "x-amz-server-side-encryption" and "x-amz-server-side-encryption-aws-kms-key-id" will be added internally, don't customize them.
*/
- Aws::String GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, const Aws::String& kmsMasterKeyId = "", long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ Http::HeaderValueCollection customizedHeaders,
+ const Aws::String& kmsMasterKeyId = "",
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
/**
 * Generate presigned URL with Server Side Encryption (SSE) and with a customer-supplied key.
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256)
*/
- Aws::String GeneratePresignedUrlWithSSEC(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Aws::String& base64EncodedAES256Key, long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrlWithSSEC(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ const Aws::String& base64EncodedAES256Key,
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
/**
 * Generate presigned URL with Server Side Encryption (SSE) and with a customer-supplied key.
* https://docs.aws.amazon.com/AmazonS3/latest/dev/serv-side-encryption.html (algo: AES256)
* Headers: "x-amz-server-side-encryption-customer-algorithm","x-amz-server-side-encryption-customer-key" and "x-amz-server-side-encryption-customer-key-MD5" will be added internally, don't customize them.
*/
- Aws::String GeneratePresignedUrlWithSSEC(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, const Aws::String& base64EncodedAES256Key, long long expirationInSeconds = MAX_EXPIRATION_SECONDS);
+ Aws::String GeneratePresignedUrlWithSSEC(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ Http::HeaderValueCollection customizedHeaders,
+ const Aws::String& base64EncodedAES256Key,
+ uint64_t expirationInSeconds = MAX_EXPIRATION_SECONDS);
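Editorial note, not part of the SDK diff: the SSE helpers follow the same shape. A hedged sketch for the KMS variant, where an empty kmsMasterKeyId falls back to the default key aliased "aws/s3" as the comment above notes; bucket and key remain placeholders.

    Aws::String putUrl = s3.GeneratePresignedUrlWithSSEKMS(
        "my-bucket", "my-object",
        Aws::Http::HttpMethod::HTTP_PUT,
        "",      // empty kmsMasterKeyId -> default "aws/s3" key
        900);    // valid for 15 minutes
    // Note: the eventual PUT must include the SSE headers that were signed into the URL.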
virtual bool MultipartUploadSupported() const;
void OverrideEndpoint(const Aws::String& endpoint);
+ std::shared_ptr<S3EndpointProviderBase>& accessEndpointProvider();
private:
- void init(const Client::ClientConfiguration& clientConfiguration);
- void LoadS3SpecificConfig(const Aws::String& profile);
- ComputeEndpointOutcome ComputeEndpointString(const Aws::String& bucket) const;
- ComputeEndpointOutcome ComputeEndpointString() const;
- ComputeEndpointOutcome ComputeEndpointStringWithServiceName(const Aws::String& serviceNameOverride = "") const;
-
- void AbortMultipartUploadAsyncHelper(const Model::AbortMultipartUploadRequest& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void CompleteMultipartUploadAsyncHelper(const Model::CompleteMultipartUploadRequest& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void CopyObjectAsyncHelper(const Model::CopyObjectRequest& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void CreateBucketAsyncHelper(const Model::CreateBucketRequest& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void CreateMultipartUploadAsyncHelper(const Model::CreateMultipartUploadRequest& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketAsyncHelper(const Model::DeleteBucketRequest& request, const DeleteBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketAnalyticsConfigurationAsyncHelper(const Model::DeleteBucketAnalyticsConfigurationRequest& request, const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketCorsAsyncHelper(const Model::DeleteBucketCorsRequest& request, const DeleteBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketEncryptionAsyncHelper(const Model::DeleteBucketEncryptionRequest& request, const DeleteBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketIntelligentTieringConfigurationAsyncHelper(const Model::DeleteBucketIntelligentTieringConfigurationRequest& request, const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketInventoryConfigurationAsyncHelper(const Model::DeleteBucketInventoryConfigurationRequest& request, const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketLifecycleAsyncHelper(const Model::DeleteBucketLifecycleRequest& request, const DeleteBucketLifecycleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketMetricsConfigurationAsyncHelper(const Model::DeleteBucketMetricsConfigurationRequest& request, const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketOwnershipControlsAsyncHelper(const Model::DeleteBucketOwnershipControlsRequest& request, const DeleteBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketPolicyAsyncHelper(const Model::DeleteBucketPolicyRequest& request, const DeleteBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketReplicationAsyncHelper(const Model::DeleteBucketReplicationRequest& request, const DeleteBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketTaggingAsyncHelper(const Model::DeleteBucketTaggingRequest& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteBucketWebsiteAsyncHelper(const Model::DeleteBucketWebsiteRequest& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteObjectAsyncHelper(const Model::DeleteObjectRequest& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteObjectTaggingAsyncHelper(const Model::DeleteObjectTaggingRequest& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeleteObjectsAsyncHelper(const Model::DeleteObjectsRequest& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void DeletePublicAccessBlockAsyncHelper(const Model::DeletePublicAccessBlockRequest& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketAccelerateConfigurationAsyncHelper(const Model::GetBucketAccelerateConfigurationRequest& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketAclAsyncHelper(const Model::GetBucketAclRequest& request, const GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketAnalyticsConfigurationAsyncHelper(const Model::GetBucketAnalyticsConfigurationRequest& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketCorsAsyncHelper(const Model::GetBucketCorsRequest& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketEncryptionAsyncHelper(const Model::GetBucketEncryptionRequest& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketIntelligentTieringConfigurationAsyncHelper(const Model::GetBucketIntelligentTieringConfigurationRequest& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketInventoryConfigurationAsyncHelper(const Model::GetBucketInventoryConfigurationRequest& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketLifecycleConfigurationAsyncHelper(const Model::GetBucketLifecycleConfigurationRequest& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketLocationAsyncHelper(const Model::GetBucketLocationRequest& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketLoggingAsyncHelper(const Model::GetBucketLoggingRequest& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketMetricsConfigurationAsyncHelper(const Model::GetBucketMetricsConfigurationRequest& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketNotificationConfigurationAsyncHelper(const Model::GetBucketNotificationConfigurationRequest& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketOwnershipControlsAsyncHelper(const Model::GetBucketOwnershipControlsRequest& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketPolicyAsyncHelper(const Model::GetBucketPolicyRequest& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketPolicyStatusAsyncHelper(const Model::GetBucketPolicyStatusRequest& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketReplicationAsyncHelper(const Model::GetBucketReplicationRequest& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketRequestPaymentAsyncHelper(const Model::GetBucketRequestPaymentRequest& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketTaggingAsyncHelper(const Model::GetBucketTaggingRequest& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketVersioningAsyncHelper(const Model::GetBucketVersioningRequest& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetBucketWebsiteAsyncHelper(const Model::GetBucketWebsiteRequest& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectAsyncHelper(const Model::GetObjectRequest& request, const GetObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectAclAsyncHelper(const Model::GetObjectAclRequest& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectLegalHoldAsyncHelper(const Model::GetObjectLegalHoldRequest& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectLockConfigurationAsyncHelper(const Model::GetObjectLockConfigurationRequest& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectRetentionAsyncHelper(const Model::GetObjectRetentionRequest& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectTaggingAsyncHelper(const Model::GetObjectTaggingRequest& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetObjectTorrentAsyncHelper(const Model::GetObjectTorrentRequest& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void GetPublicAccessBlockAsyncHelper(const Model::GetPublicAccessBlockRequest& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void HeadBucketAsyncHelper(const Model::HeadBucketRequest& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void HeadObjectAsyncHelper(const Model::HeadObjectRequest& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListBucketAnalyticsConfigurationsAsyncHelper(const Model::ListBucketAnalyticsConfigurationsRequest& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListBucketIntelligentTieringConfigurationsAsyncHelper(const Model::ListBucketIntelligentTieringConfigurationsRequest& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListBucketInventoryConfigurationsAsyncHelper(const Model::ListBucketInventoryConfigurationsRequest& request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListBucketMetricsConfigurationsAsyncHelper(const Model::ListBucketMetricsConfigurationsRequest& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListBucketsAsyncHelper(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListMultipartUploadsAsyncHelper(const Model::ListMultipartUploadsRequest& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListObjectVersionsAsyncHelper(const Model::ListObjectVersionsRequest& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListObjectsAsyncHelper(const Model::ListObjectsRequest& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListObjectsV2AsyncHelper(const Model::ListObjectsV2Request& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void ListPartsAsyncHelper(const Model::ListPartsRequest& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketAccelerateConfigurationAsyncHelper(const Model::PutBucketAccelerateConfigurationRequest& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketAclAsyncHelper(const Model::PutBucketAclRequest& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketAnalyticsConfigurationAsyncHelper(const Model::PutBucketAnalyticsConfigurationRequest& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketCorsAsyncHelper(const Model::PutBucketCorsRequest& request, const PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketEncryptionAsyncHelper(const Model::PutBucketEncryptionRequest& request, const PutBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketIntelligentTieringConfigurationAsyncHelper(const Model::PutBucketIntelligentTieringConfigurationRequest& request, const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketInventoryConfigurationAsyncHelper(const Model::PutBucketInventoryConfigurationRequest& request, const PutBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketLifecycleConfigurationAsyncHelper(const Model::PutBucketLifecycleConfigurationRequest& request, const PutBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketLoggingAsyncHelper(const Model::PutBucketLoggingRequest& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketMetricsConfigurationAsyncHelper(const Model::PutBucketMetricsConfigurationRequest& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketNotificationConfigurationAsyncHelper(const Model::PutBucketNotificationConfigurationRequest& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketOwnershipControlsAsyncHelper(const Model::PutBucketOwnershipControlsRequest& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketPolicyAsyncHelper(const Model::PutBucketPolicyRequest& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketReplicationAsyncHelper(const Model::PutBucketReplicationRequest& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketRequestPaymentAsyncHelper(const Model::PutBucketRequestPaymentRequest& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketTaggingAsyncHelper(const Model::PutBucketTaggingRequest& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketVersioningAsyncHelper(const Model::PutBucketVersioningRequest& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutBucketWebsiteAsyncHelper(const Model::PutBucketWebsiteRequest& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutObjectAsyncHelper(const Model::PutObjectRequest& request, const PutObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutObjectAclAsyncHelper(const Model::PutObjectAclRequest& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutObjectLegalHoldAsyncHelper(const Model::PutObjectLegalHoldRequest& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutObjectLockConfigurationAsyncHelper(const Model::PutObjectLockConfigurationRequest& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutObjectRetentionAsyncHelper(const Model::PutObjectRetentionRequest& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutObjectTaggingAsyncHelper(const Model::PutObjectTaggingRequest& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void PutPublicAccessBlockAsyncHelper(const Model::PutPublicAccessBlockRequest& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void RestoreObjectAsyncHelper(const Model::RestoreObjectRequest& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void SelectObjectContentAsyncHelper(Model::SelectObjectContentRequest& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void UploadPartAsyncHelper(const Model::UploadPartRequest& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void UploadPartCopyAsyncHelper(const Model::UploadPartCopyRequest& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
- void WriteGetObjectResponseAsyncHelper(const Model::WriteGetObjectResponseRequest& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const;
-
- Aws::String m_baseUri;
- Aws::String m_scheme;
- bool m_enableHostPrefixInjection;
- Aws::String m_configScheme;
+ friend class Aws::Client::ClientWithAsyncTemplateMethods<S3Client>;
+ void init(const S3ClientConfiguration& clientConfiguration);
+ S3ClientConfiguration m_clientConfiguration;
std::shared_ptr<Utils::Threading::Executor> m_executor;
- bool m_useVirtualAddressing;
- bool m_useDualStack;
- bool m_useArnRegion;
- bool m_useCustomEndpoint;
- Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION m_USEast1RegionalEndpointOption;
+ std::shared_ptr<S3EndpointProviderBase> m_endpointProvider;
};
} // namespace S3
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ClientConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ClientConfiguration.h
new file mode 100644
index 0000000000..771bd85a39
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ClientConfiguration.h
@@ -0,0 +1,63 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/client/GenericClientConfiguration.h>
+#include <aws/core/auth/signer/AWSAuthV4Signer.h>
+
+
+namespace Aws
+{
+ namespace S3
+ {
+ enum class US_EAST_1_REGIONAL_ENDPOINT_OPTION
+ {
+ NOT_SET,
+      LEGACY,   // stands for using the global endpoint for us-east-1
+      REGIONAL  // stands for using the regional endpoint for us-east-1
+ };
+
+ struct AWS_S3_API S3ClientConfiguration : public Aws::Client::GenericClientConfiguration</*EndpointDiscoverySupported*/true>
+ {
+ using BaseClientConfigClass = Aws::Client::GenericClientConfiguration</*EndpointDiscoverySupported*/true>;
+
+ S3ClientConfiguration();
+
+ /**
+       * Create a configuration based on settings in the AWS configuration file for the given profile name.
+ * The configuration file location can be set via the environment variable AWS_CONFIG_FILE
+ * @param profileName the aws profile name.
+ * @param shouldDisableIMDS whether or not to disable IMDS calls.
+ */
+ S3ClientConfiguration(const char* profileName, bool shouldDisableIMDS = false);
+
+ /**
+       * Create a configuration with predefined smart defaults
+       * @param useSmartDefaults required to differentiate constructors
+       * @param defaultMode default mode to use
+ * @param shouldDisableIMDS whether or not to disable IMDS calls.
+ */
+ S3ClientConfiguration(bool useSmartDefaults, const char* defaultMode = "legacy", bool shouldDisableIMDS = false);
+
+ /**
+       * Converting constructors for compatibility with legacy code
+ */
+ S3ClientConfiguration(const Client::ClientConfiguration& config,
+ Client::AWSAuthV4Signer::PayloadSigningPolicy iPayloadSigningPolicy = Client::AWSAuthV4Signer::PayloadSigningPolicy::Never,
+ bool iUseVirtualAddressing = true,
+ US_EAST_1_REGIONAL_ENDPOINT_OPTION iUseUSEast1RegionalEndPointOption = US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET);
+
+ bool useVirtualAddressing = true;
+ US_EAST_1_REGIONAL_ENDPOINT_OPTION useUSEast1RegionalEndPointOption = US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET;
+ bool disableMultiRegionAccessPoints = false;
+ bool useArnRegion = false;
+ Client::AWSAuthV4Signer::PayloadSigningPolicy payloadSigningPolicy = Client::AWSAuthV4Signer::PayloadSigningPolicy::RequestDependent;
+ private:
+ void LoadS3SpecificConfig(const Aws::String& profileName);
+ };
+ }
+}
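
The struct above folds the old loose constructor flags (virtual addressing, the us-east-1 endpoint option, the payload signing policy) into one configuration object. Below is a minimal usage sketch, assuming the updated S3Client accepts an S3ClientConfiguration directly; only the m_clientConfiguration member is visible in this diff, so the exact constructor overload is an assumption.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/S3ClientConfiguration.h>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3ClientConfiguration config;   // replaces ClientConfiguration plus extra flag arguments
            config.region = "us-east-1";             // inherited from the generic client configuration
            config.useVirtualAddressing = true;      // formerly a separate S3Client constructor argument
            config.useUSEast1RegionalEndPointOption =
                Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::REGIONAL;
            config.payloadSigningPolicy =
                Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never;

            Aws::S3::S3Client client(config);        // assumed overload, see note above
            (void)client;
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
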
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Endpoint.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Endpoint.h
deleted file mode 100644
index 64c2fc2c44..0000000000
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Endpoint.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#pragma once
-#include <aws/s3/S3_EXPORTS.h>
-#include <aws/core/Region.h>
-#include <aws/core/utils/memory/stl/AWSString.h>
-#include <aws/s3/S3ARN.h>
-
-namespace Aws
-{
-
-namespace S3
-{
-namespace S3Endpoint
-{
- /**
- * Compute endpoint based on region.
- * @param regionName The AWS region used in the endpoint
- * @param useDualStack Using dual-stack endpoint if true
- * @param USEast1UseRegionalEndpoint Using global endpoint for us-east-1 if the value is LEGACY, or using regional endpoint if it's REGIONAL
- */
- AWS_S3_API Aws::String ForRegion(const Aws::String& regionName, bool useDualStack = false, bool USEast1UseRegionalEndpoint = false, const Aws::String& serviceName = "");
-
- /**
- * Compute endpoint based on Access Point ARN.
- * @param arn The S3 Access Point ARN
- * @param regionNameOverride Override region name in ARN if it's not empty
- * @param useDualStack Using dual-stack endpoint if true
- * @param endpointOverride Override endpoint if it's not empty
- */
- AWS_S3_API Aws::String ForAccessPointArn(const S3ARN& arn, const Aws::String& regionNameOverride = "", bool useDualStack = false, const Aws::String& endpointOverride = "");
-
- /**
- * Compute endpoint based on Outposts ARN.
- * @param arn The S3 Outposts ARN
- * @param regionNameOverride Override region name in ARN if it's not empty
- * @param useDualStack Using dual-stack endpoint if true
- * @param endpointOverride Override endpoint if it's not empty
- */
- AWS_S3_API Aws::String ForOutpostsArn(const S3ARN& arn, const Aws::String& regionNameOverride = "", bool useDualStack = false, const Aws::String& endpointOverride = "");
-
- /**
- * Compute endpoint based on Object Lambda Access Point ARN.
- * @param arn The S3 Object Lambda Access Point ARN
- * @param regionNameOverride Override region name in ARN if it's not empty
- * @param useDualStack Using dual-stack endpoint if true
- * @param endpointOverride Override endpoint if it's not empty
- */
- AWS_S3_API Aws::String ForObjectLambdaAccessPointArn(const S3ARN& arn, const Aws::String& regionNameOverride = "", bool useDualStack = false, const Aws::String& endpointOverride = "");
-} // namespace S3Endpoint
-} // namespace S3
-} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointProvider.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointProvider.h
new file mode 100644
index 0000000000..d62a5c0582
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointProvider.h
@@ -0,0 +1,114 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/s3/S3ClientConfiguration.h>
+#include <aws/core/endpoint/DefaultEndpointProvider.h>
+#include <aws/core/endpoint/EndpointParameter.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+
+#include <aws/s3/S3EndpointRules.h>
+
+
+namespace Aws
+{
+namespace S3
+{
+namespace Endpoint
+{
+using S3ClientConfiguration = Aws::S3::S3ClientConfiguration;
+using EndpointParameters = Aws::Endpoint::EndpointParameters;
+using Aws::Endpoint::EndpointProviderBase;
+using Aws::Endpoint::DefaultEndpointProvider;
+
+class AWS_S3_API S3ClientContextParameters : public Aws::Endpoint::ClientContextParameters
+{
+public:
+ virtual ~S3ClientContextParameters(){};
+
+ /**
+ * Forces this client to use path-style addressing for buckets.
+ */
+ void SetForcePathStyle(bool value);
+ const ClientContextParameters::EndpointParameter& GetForcePathStyle() const;
+
+ /**
+ * Disables this client's usage of Multi-Region Access Points.
+ */
+ void SetDisableMultiRegionAccessPoints(bool value);
+ const ClientContextParameters::EndpointParameter& GetDisableMultiRegionAccessPoints() const;
+
+ /**
+ * Enables this client to use an ARN's region when constructing an endpoint instead of the client's configured region.
+ */
+ void SetUseArnRegion(bool value);
+ const ClientContextParameters::EndpointParameter& GetUseArnRegion() const;
+
+ /**
+ * Enables this client to use S3 Transfer Acceleration endpoints.
+ */
+ void SetAccelerate(bool value);
+ const ClientContextParameters::EndpointParameter& GetAccelerate() const;
+};
+
+class AWS_S3_API S3BuiltInParameters : public Aws::Endpoint::BuiltInParameters
+{
+public:
+ virtual ~S3BuiltInParameters(){};
+ using Aws::Endpoint::BuiltInParameters::SetFromClientConfiguration;
+ virtual void SetFromClientConfiguration(const S3ClientConfiguration& config);
+};
+
+/**
+ * The type for the S3 Client Endpoint Provider.
+ * Inherit from this Base class / "Interface" should you want to provide a custom endpoint provider.
+ * The SDK must use a service-specific type for each service per the specification.
+ */
+using S3EndpointProviderBase =
+ EndpointProviderBase<S3ClientConfiguration, S3BuiltInParameters, S3ClientContextParameters>;
+
+using S3DefaultEpProviderBase =
+ DefaultEndpointProvider<S3ClientConfiguration, S3BuiltInParameters, S3ClientContextParameters>;
+
+} // namespace Endpoint
+} // namespace S3
+
+namespace Endpoint
+{
+/**
+ * Export endpoint provider symbols from DLL
+ */
+template class AWS_S3_API
+ Aws::Endpoint::EndpointProviderBase<S3::Endpoint::S3ClientConfiguration, S3::Endpoint::S3BuiltInParameters, S3::Endpoint::S3ClientContextParameters>;
+
+template class AWS_S3_API
+ Aws::Endpoint::DefaultEndpointProvider<S3::Endpoint::S3ClientConfiguration, S3::Endpoint::S3BuiltInParameters, S3::Endpoint::S3ClientContextParameters>;
+} // namespace Endpoint
+
+namespace S3
+{
+namespace Endpoint
+{
+/**
+ * Default endpoint provider used for this service
+ */
+class AWS_S3_API S3EndpointProvider : public S3DefaultEpProviderBase
+{
+public:
+ using S3ResolveEndpointOutcome = Aws::Endpoint::ResolveEndpointOutcome;
+
+ S3EndpointProvider()
+ : S3DefaultEpProviderBase(Aws::S3::S3EndpointRules::GetRulesBlob(), Aws::S3::S3EndpointRules::RulesBlobSize)
+ {}
+
+ ~S3EndpointProvider()
+ {
+ }
+};
+} // namespace Endpoint
+} // namespace S3
+} // namespace Aws
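
This endpoint-provider seam replaces the deleted S3Endpoint free functions. A small wiring sketch follows, assuming S3Client also gained an overload taking a std::shared_ptr<S3EndpointProviderBase> (only the m_endpointProvider member is visible in the client diff above), for example to share one provider instance across several clients.

    #include <aws/core/utils/memory/stl/AWSAllocator.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/S3EndpointProvider.h>

    void MakeClients(const Aws::S3::S3ClientConfiguration& config)
    {
        // Default rules-based provider; a custom resolver would derive from S3EndpointProviderBase instead.
        auto endpointProvider =
            Aws::MakeShared<Aws::S3::Endpoint::S3EndpointProvider>("s3-endpoint-provider");

        // Constructor overload taking (configuration, endpoint provider) is assumed here.
        Aws::S3::S3Client clientA(config, endpointProvider);
        Aws::S3::S3Client clientB(config, endpointProvider);
        (void)clientA;
        (void)clientB;
    }
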
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointRules.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointRules.h
new file mode 100644
index 0000000000..6185d3ed78
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3EndpointRules.h
@@ -0,0 +1,23 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <cstddef>
+#include <aws/s3/S3_EXPORTS.h>
+
+namespace Aws
+{
+namespace S3
+{
+class S3EndpointRules
+{
+public:
+ static const size_t RulesBlobStrLen;
+ static const size_t RulesBlobSize;
+
+ static const char* GetRulesBlob();
+};
+} // namespace S3
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Request.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Request.h
index 3747292c2f..b4f966919e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Request.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3Request.h
@@ -5,6 +5,7 @@
#pragma once
#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/endpoint/AWSEndpoint.h>
#include <aws/core/AmazonSerializableWebServiceRequest.h>
#include <aws/core/utils/UnreferencedParam.h>
#include <aws/core/http/HttpRequest.h>
@@ -17,6 +18,9 @@ namespace S3
class AWS_S3_API S3Request : public Aws::AmazonSerializableWebServiceRequest
{
public:
+ using EndpointParameter = Aws::Endpoint::EndpointParameter;
+ using EndpointParameters = Aws::Endpoint::EndpointParameters;
+
virtual ~S3Request () {}
void AddParametersToRequest(Aws::Http::HttpRequest& httpRequest) const { AWS_UNREFERENCED_PARAM(httpRequest); }
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ServiceClientModel.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ServiceClientModel.h
new file mode 100644
index 0000000000..73df2297e1
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/S3ServiceClientModel.h
@@ -0,0 +1,503 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+
+/* Generic header includes */
+#include <aws/s3/S3Errors.h>
+#include <aws/s3/S3ClientConfiguration.h>
+#include <aws/core/client/AWSError.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/client/AsyncCallerContext.h>
+#include <aws/core/http/HttpTypes.h>
+#include <aws/s3/S3EndpointProvider.h>
+#include <future>
+#include <functional>
+/* End of generic header includes */
+
+/* Service model headers required in S3Client header */
+#include <aws/s3/model/AbortMultipartUploadResult.h>
+#include <aws/s3/model/CompleteMultipartUploadResult.h>
+#include <aws/s3/model/CopyObjectResult.h>
+#include <aws/s3/model/CreateBucketResult.h>
+#include <aws/s3/model/CreateMultipartUploadResult.h>
+#include <aws/s3/model/DeleteObjectResult.h>
+#include <aws/s3/model/DeleteObjectTaggingResult.h>
+#include <aws/s3/model/DeleteObjectsResult.h>
+#include <aws/s3/model/GetBucketAccelerateConfigurationResult.h>
+#include <aws/s3/model/GetBucketAclResult.h>
+#include <aws/s3/model/GetBucketAnalyticsConfigurationResult.h>
+#include <aws/s3/model/GetBucketCorsResult.h>
+#include <aws/s3/model/GetBucketEncryptionResult.h>
+#include <aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h>
+#include <aws/s3/model/GetBucketInventoryConfigurationResult.h>
+#include <aws/s3/model/GetBucketLifecycleConfigurationResult.h>
+#include <aws/s3/model/GetBucketLocationResult.h>
+#include <aws/s3/model/GetBucketLoggingResult.h>
+#include <aws/s3/model/GetBucketMetricsConfigurationResult.h>
+#include <aws/s3/model/GetBucketNotificationConfigurationResult.h>
+#include <aws/s3/model/GetBucketOwnershipControlsResult.h>
+#include <aws/s3/model/GetBucketPolicyResult.h>
+#include <aws/s3/model/GetBucketPolicyStatusResult.h>
+#include <aws/s3/model/GetBucketReplicationResult.h>
+#include <aws/s3/model/GetBucketRequestPaymentResult.h>
+#include <aws/s3/model/GetBucketTaggingResult.h>
+#include <aws/s3/model/GetBucketVersioningResult.h>
+#include <aws/s3/model/GetBucketWebsiteResult.h>
+#include <aws/s3/model/GetObjectResult.h>
+#include <aws/s3/model/GetObjectAclResult.h>
+#include <aws/s3/model/GetObjectAttributesResult.h>
+#include <aws/s3/model/GetObjectLegalHoldResult.h>
+#include <aws/s3/model/GetObjectLockConfigurationResult.h>
+#include <aws/s3/model/GetObjectRetentionResult.h>
+#include <aws/s3/model/GetObjectTaggingResult.h>
+#include <aws/s3/model/GetObjectTorrentResult.h>
+#include <aws/s3/model/GetPublicAccessBlockResult.h>
+#include <aws/s3/model/HeadObjectResult.h>
+#include <aws/s3/model/ListBucketAnalyticsConfigurationsResult.h>
+#include <aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h>
+#include <aws/s3/model/ListBucketInventoryConfigurationsResult.h>
+#include <aws/s3/model/ListBucketMetricsConfigurationsResult.h>
+#include <aws/s3/model/ListBucketsResult.h>
+#include <aws/s3/model/ListMultipartUploadsResult.h>
+#include <aws/s3/model/ListObjectVersionsResult.h>
+#include <aws/s3/model/ListObjectsResult.h>
+#include <aws/s3/model/ListObjectsV2Result.h>
+#include <aws/s3/model/ListPartsResult.h>
+#include <aws/s3/model/PutObjectResult.h>
+#include <aws/s3/model/PutObjectAclResult.h>
+#include <aws/s3/model/PutObjectLegalHoldResult.h>
+#include <aws/s3/model/PutObjectLockConfigurationResult.h>
+#include <aws/s3/model/PutObjectRetentionResult.h>
+#include <aws/s3/model/PutObjectTaggingResult.h>
+#include <aws/s3/model/RestoreObjectResult.h>
+#include <aws/s3/model/UploadPartResult.h>
+#include <aws/s3/model/UploadPartCopyResult.h>
+#include <aws/core/NoResult.h>
+/* End of service model headers required in S3Client header */
+
+namespace Aws
+{
+ namespace Http
+ {
+ class HttpClient;
+ class HttpClientFactory;
+ } // namespace Http
+
+ namespace Utils
+ {
+ template< typename R, typename E> class Outcome;
+
+ namespace Threading
+ {
+ class Executor;
+ } // namespace Threading
+ } // namespace Utils
+
+ namespace Auth
+ {
+ class AWSCredentials;
+ class AWSCredentialsProvider;
+ } // namespace Auth
+
+ namespace Client
+ {
+ class RetryStrategy;
+ } // namespace Client
+
+ namespace S3
+ {
+ using S3EndpointProviderBase = Aws::S3::Endpoint::S3EndpointProviderBase;
+ using S3EndpointProvider = Aws::S3::Endpoint::S3EndpointProvider;
+
+ namespace Model
+ {
+ /* Service model forward declarations required in S3Client header */
+ class AbortMultipartUploadRequest;
+ class CompleteMultipartUploadRequest;
+ class CopyObjectRequest;
+ class CreateBucketRequest;
+ class CreateMultipartUploadRequest;
+ class DeleteBucketRequest;
+ class DeleteBucketAnalyticsConfigurationRequest;
+ class DeleteBucketCorsRequest;
+ class DeleteBucketEncryptionRequest;
+ class DeleteBucketIntelligentTieringConfigurationRequest;
+ class DeleteBucketInventoryConfigurationRequest;
+ class DeleteBucketLifecycleRequest;
+ class DeleteBucketMetricsConfigurationRequest;
+ class DeleteBucketOwnershipControlsRequest;
+ class DeleteBucketPolicyRequest;
+ class DeleteBucketReplicationRequest;
+ class DeleteBucketTaggingRequest;
+ class DeleteBucketWebsiteRequest;
+ class DeleteObjectRequest;
+ class DeleteObjectTaggingRequest;
+ class DeleteObjectsRequest;
+ class DeletePublicAccessBlockRequest;
+ class GetBucketAccelerateConfigurationRequest;
+ class GetBucketAclRequest;
+ class GetBucketAnalyticsConfigurationRequest;
+ class GetBucketCorsRequest;
+ class GetBucketEncryptionRequest;
+ class GetBucketIntelligentTieringConfigurationRequest;
+ class GetBucketInventoryConfigurationRequest;
+ class GetBucketLifecycleConfigurationRequest;
+ class GetBucketLocationRequest;
+ class GetBucketLoggingRequest;
+ class GetBucketMetricsConfigurationRequest;
+ class GetBucketNotificationConfigurationRequest;
+ class GetBucketOwnershipControlsRequest;
+ class GetBucketPolicyRequest;
+ class GetBucketPolicyStatusRequest;
+ class GetBucketReplicationRequest;
+ class GetBucketRequestPaymentRequest;
+ class GetBucketTaggingRequest;
+ class GetBucketVersioningRequest;
+ class GetBucketWebsiteRequest;
+ class GetObjectRequest;
+ class GetObjectAclRequest;
+ class GetObjectAttributesRequest;
+ class GetObjectLegalHoldRequest;
+ class GetObjectLockConfigurationRequest;
+ class GetObjectRetentionRequest;
+ class GetObjectTaggingRequest;
+ class GetObjectTorrentRequest;
+ class GetPublicAccessBlockRequest;
+ class HeadBucketRequest;
+ class HeadObjectRequest;
+ class ListBucketAnalyticsConfigurationsRequest;
+ class ListBucketIntelligentTieringConfigurationsRequest;
+ class ListBucketInventoryConfigurationsRequest;
+ class ListBucketMetricsConfigurationsRequest;
+ class ListMultipartUploadsRequest;
+ class ListObjectVersionsRequest;
+ class ListObjectsRequest;
+ class ListObjectsV2Request;
+ class ListPartsRequest;
+ class PutBucketAccelerateConfigurationRequest;
+ class PutBucketAclRequest;
+ class PutBucketAnalyticsConfigurationRequest;
+ class PutBucketCorsRequest;
+ class PutBucketEncryptionRequest;
+ class PutBucketIntelligentTieringConfigurationRequest;
+ class PutBucketInventoryConfigurationRequest;
+ class PutBucketLifecycleConfigurationRequest;
+ class PutBucketLoggingRequest;
+ class PutBucketMetricsConfigurationRequest;
+ class PutBucketNotificationConfigurationRequest;
+ class PutBucketOwnershipControlsRequest;
+ class PutBucketPolicyRequest;
+ class PutBucketReplicationRequest;
+ class PutBucketRequestPaymentRequest;
+ class PutBucketTaggingRequest;
+ class PutBucketVersioningRequest;
+ class PutBucketWebsiteRequest;
+ class PutObjectRequest;
+ class PutObjectAclRequest;
+ class PutObjectLegalHoldRequest;
+ class PutObjectLockConfigurationRequest;
+ class PutObjectRetentionRequest;
+ class PutObjectTaggingRequest;
+ class PutPublicAccessBlockRequest;
+ class RestoreObjectRequest;
+ class SelectObjectContentRequest;
+ class UploadPartRequest;
+ class UploadPartCopyRequest;
+ class WriteGetObjectResponseRequest;
+ /* End of service model forward declarations required in S3Client header */
+
+ /* Service model Outcome class definitions */
+ typedef Aws::Utils::Outcome<AbortMultipartUploadResult, S3Error> AbortMultipartUploadOutcome;
+ typedef Aws::Utils::Outcome<CompleteMultipartUploadResult, S3Error> CompleteMultipartUploadOutcome;
+ typedef Aws::Utils::Outcome<CopyObjectResult, S3Error> CopyObjectOutcome;
+ typedef Aws::Utils::Outcome<CreateBucketResult, S3Error> CreateBucketOutcome;
+ typedef Aws::Utils::Outcome<CreateMultipartUploadResult, S3Error> CreateMultipartUploadOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketAnalyticsConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketCorsOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketEncryptionOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketIntelligentTieringConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketInventoryConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketLifecycleOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketMetricsConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketOwnershipControlsOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketPolicyOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketReplicationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketTaggingOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeleteBucketWebsiteOutcome;
+ typedef Aws::Utils::Outcome<DeleteObjectResult, S3Error> DeleteObjectOutcome;
+ typedef Aws::Utils::Outcome<DeleteObjectTaggingResult, S3Error> DeleteObjectTaggingOutcome;
+ typedef Aws::Utils::Outcome<DeleteObjectsResult, S3Error> DeleteObjectsOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> DeletePublicAccessBlockOutcome;
+ typedef Aws::Utils::Outcome<GetBucketAccelerateConfigurationResult, S3Error> GetBucketAccelerateConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketAclResult, S3Error> GetBucketAclOutcome;
+ typedef Aws::Utils::Outcome<GetBucketAnalyticsConfigurationResult, S3Error> GetBucketAnalyticsConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketCorsResult, S3Error> GetBucketCorsOutcome;
+ typedef Aws::Utils::Outcome<GetBucketEncryptionResult, S3Error> GetBucketEncryptionOutcome;
+ typedef Aws::Utils::Outcome<GetBucketIntelligentTieringConfigurationResult, S3Error> GetBucketIntelligentTieringConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketInventoryConfigurationResult, S3Error> GetBucketInventoryConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketLifecycleConfigurationResult, S3Error> GetBucketLifecycleConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketLocationResult, S3Error> GetBucketLocationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketLoggingResult, S3Error> GetBucketLoggingOutcome;
+ typedef Aws::Utils::Outcome<GetBucketMetricsConfigurationResult, S3Error> GetBucketMetricsConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketNotificationConfigurationResult, S3Error> GetBucketNotificationConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketOwnershipControlsResult, S3Error> GetBucketOwnershipControlsOutcome;
+ typedef Aws::Utils::Outcome<GetBucketPolicyResult, S3Error> GetBucketPolicyOutcome;
+ typedef Aws::Utils::Outcome<GetBucketPolicyStatusResult, S3Error> GetBucketPolicyStatusOutcome;
+ typedef Aws::Utils::Outcome<GetBucketReplicationResult, S3Error> GetBucketReplicationOutcome;
+ typedef Aws::Utils::Outcome<GetBucketRequestPaymentResult, S3Error> GetBucketRequestPaymentOutcome;
+ typedef Aws::Utils::Outcome<GetBucketTaggingResult, S3Error> GetBucketTaggingOutcome;
+ typedef Aws::Utils::Outcome<GetBucketVersioningResult, S3Error> GetBucketVersioningOutcome;
+ typedef Aws::Utils::Outcome<GetBucketWebsiteResult, S3Error> GetBucketWebsiteOutcome;
+ typedef Aws::Utils::Outcome<GetObjectResult, S3Error> GetObjectOutcome;
+ typedef Aws::Utils::Outcome<GetObjectAclResult, S3Error> GetObjectAclOutcome;
+ typedef Aws::Utils::Outcome<GetObjectAttributesResult, S3Error> GetObjectAttributesOutcome;
+ typedef Aws::Utils::Outcome<GetObjectLegalHoldResult, S3Error> GetObjectLegalHoldOutcome;
+ typedef Aws::Utils::Outcome<GetObjectLockConfigurationResult, S3Error> GetObjectLockConfigurationOutcome;
+ typedef Aws::Utils::Outcome<GetObjectRetentionResult, S3Error> GetObjectRetentionOutcome;
+ typedef Aws::Utils::Outcome<GetObjectTaggingResult, S3Error> GetObjectTaggingOutcome;
+ typedef Aws::Utils::Outcome<GetObjectTorrentResult, S3Error> GetObjectTorrentOutcome;
+ typedef Aws::Utils::Outcome<GetPublicAccessBlockResult, S3Error> GetPublicAccessBlockOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> HeadBucketOutcome;
+ typedef Aws::Utils::Outcome<HeadObjectResult, S3Error> HeadObjectOutcome;
+ typedef Aws::Utils::Outcome<ListBucketAnalyticsConfigurationsResult, S3Error> ListBucketAnalyticsConfigurationsOutcome;
+ typedef Aws::Utils::Outcome<ListBucketIntelligentTieringConfigurationsResult, S3Error> ListBucketIntelligentTieringConfigurationsOutcome;
+ typedef Aws::Utils::Outcome<ListBucketInventoryConfigurationsResult, S3Error> ListBucketInventoryConfigurationsOutcome;
+ typedef Aws::Utils::Outcome<ListBucketMetricsConfigurationsResult, S3Error> ListBucketMetricsConfigurationsOutcome;
+ typedef Aws::Utils::Outcome<ListBucketsResult, S3Error> ListBucketsOutcome;
+ typedef Aws::Utils::Outcome<ListMultipartUploadsResult, S3Error> ListMultipartUploadsOutcome;
+ typedef Aws::Utils::Outcome<ListObjectVersionsResult, S3Error> ListObjectVersionsOutcome;
+ typedef Aws::Utils::Outcome<ListObjectsResult, S3Error> ListObjectsOutcome;
+ typedef Aws::Utils::Outcome<ListObjectsV2Result, S3Error> ListObjectsV2Outcome;
+ typedef Aws::Utils::Outcome<ListPartsResult, S3Error> ListPartsOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketAccelerateConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketAclOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketAnalyticsConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketCorsOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketEncryptionOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketIntelligentTieringConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketInventoryConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketLifecycleConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketLoggingOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketMetricsConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketNotificationConfigurationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketOwnershipControlsOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketPolicyOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketReplicationOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketRequestPaymentOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketTaggingOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketVersioningOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutBucketWebsiteOutcome;
+ typedef Aws::Utils::Outcome<PutObjectResult, S3Error> PutObjectOutcome;
+ typedef Aws::Utils::Outcome<PutObjectAclResult, S3Error> PutObjectAclOutcome;
+ typedef Aws::Utils::Outcome<PutObjectLegalHoldResult, S3Error> PutObjectLegalHoldOutcome;
+ typedef Aws::Utils::Outcome<PutObjectLockConfigurationResult, S3Error> PutObjectLockConfigurationOutcome;
+ typedef Aws::Utils::Outcome<PutObjectRetentionResult, S3Error> PutObjectRetentionOutcome;
+ typedef Aws::Utils::Outcome<PutObjectTaggingResult, S3Error> PutObjectTaggingOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> PutPublicAccessBlockOutcome;
+ typedef Aws::Utils::Outcome<RestoreObjectResult, S3Error> RestoreObjectOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> SelectObjectContentOutcome;
+ typedef Aws::Utils::Outcome<UploadPartResult, S3Error> UploadPartOutcome;
+ typedef Aws::Utils::Outcome<UploadPartCopyResult, S3Error> UploadPartCopyOutcome;
+ typedef Aws::Utils::Outcome<Aws::NoResult, S3Error> WriteGetObjectResponseOutcome;
+ /* End of service model Outcome class definitions */
+
+ /* Service model Outcome callable definitions */
+ typedef std::future<AbortMultipartUploadOutcome> AbortMultipartUploadOutcomeCallable;
+ typedef std::future<CompleteMultipartUploadOutcome> CompleteMultipartUploadOutcomeCallable;
+ typedef std::future<CopyObjectOutcome> CopyObjectOutcomeCallable;
+ typedef std::future<CreateBucketOutcome> CreateBucketOutcomeCallable;
+ typedef std::future<CreateMultipartUploadOutcome> CreateMultipartUploadOutcomeCallable;
+ typedef std::future<DeleteBucketOutcome> DeleteBucketOutcomeCallable;
+ typedef std::future<DeleteBucketAnalyticsConfigurationOutcome> DeleteBucketAnalyticsConfigurationOutcomeCallable;
+ typedef std::future<DeleteBucketCorsOutcome> DeleteBucketCorsOutcomeCallable;
+ typedef std::future<DeleteBucketEncryptionOutcome> DeleteBucketEncryptionOutcomeCallable;
+ typedef std::future<DeleteBucketIntelligentTieringConfigurationOutcome> DeleteBucketIntelligentTieringConfigurationOutcomeCallable;
+ typedef std::future<DeleteBucketInventoryConfigurationOutcome> DeleteBucketInventoryConfigurationOutcomeCallable;
+ typedef std::future<DeleteBucketLifecycleOutcome> DeleteBucketLifecycleOutcomeCallable;
+ typedef std::future<DeleteBucketMetricsConfigurationOutcome> DeleteBucketMetricsConfigurationOutcomeCallable;
+ typedef std::future<DeleteBucketOwnershipControlsOutcome> DeleteBucketOwnershipControlsOutcomeCallable;
+ typedef std::future<DeleteBucketPolicyOutcome> DeleteBucketPolicyOutcomeCallable;
+ typedef std::future<DeleteBucketReplicationOutcome> DeleteBucketReplicationOutcomeCallable;
+ typedef std::future<DeleteBucketTaggingOutcome> DeleteBucketTaggingOutcomeCallable;
+ typedef std::future<DeleteBucketWebsiteOutcome> DeleteBucketWebsiteOutcomeCallable;
+ typedef std::future<DeleteObjectOutcome> DeleteObjectOutcomeCallable;
+ typedef std::future<DeleteObjectTaggingOutcome> DeleteObjectTaggingOutcomeCallable;
+ typedef std::future<DeleteObjectsOutcome> DeleteObjectsOutcomeCallable;
+ typedef std::future<DeletePublicAccessBlockOutcome> DeletePublicAccessBlockOutcomeCallable;
+ typedef std::future<GetBucketAccelerateConfigurationOutcome> GetBucketAccelerateConfigurationOutcomeCallable;
+ typedef std::future<GetBucketAclOutcome> GetBucketAclOutcomeCallable;
+ typedef std::future<GetBucketAnalyticsConfigurationOutcome> GetBucketAnalyticsConfigurationOutcomeCallable;
+ typedef std::future<GetBucketCorsOutcome> GetBucketCorsOutcomeCallable;
+ typedef std::future<GetBucketEncryptionOutcome> GetBucketEncryptionOutcomeCallable;
+ typedef std::future<GetBucketIntelligentTieringConfigurationOutcome> GetBucketIntelligentTieringConfigurationOutcomeCallable;
+ typedef std::future<GetBucketInventoryConfigurationOutcome> GetBucketInventoryConfigurationOutcomeCallable;
+ typedef std::future<GetBucketLifecycleConfigurationOutcome> GetBucketLifecycleConfigurationOutcomeCallable;
+ typedef std::future<GetBucketLocationOutcome> GetBucketLocationOutcomeCallable;
+ typedef std::future<GetBucketLoggingOutcome> GetBucketLoggingOutcomeCallable;
+ typedef std::future<GetBucketMetricsConfigurationOutcome> GetBucketMetricsConfigurationOutcomeCallable;
+ typedef std::future<GetBucketNotificationConfigurationOutcome> GetBucketNotificationConfigurationOutcomeCallable;
+ typedef std::future<GetBucketOwnershipControlsOutcome> GetBucketOwnershipControlsOutcomeCallable;
+ typedef std::future<GetBucketPolicyOutcome> GetBucketPolicyOutcomeCallable;
+ typedef std::future<GetBucketPolicyStatusOutcome> GetBucketPolicyStatusOutcomeCallable;
+ typedef std::future<GetBucketReplicationOutcome> GetBucketReplicationOutcomeCallable;
+ typedef std::future<GetBucketRequestPaymentOutcome> GetBucketRequestPaymentOutcomeCallable;
+ typedef std::future<GetBucketTaggingOutcome> GetBucketTaggingOutcomeCallable;
+ typedef std::future<GetBucketVersioningOutcome> GetBucketVersioningOutcomeCallable;
+ typedef std::future<GetBucketWebsiteOutcome> GetBucketWebsiteOutcomeCallable;
+ typedef std::future<GetObjectOutcome> GetObjectOutcomeCallable;
+ typedef std::future<GetObjectAclOutcome> GetObjectAclOutcomeCallable;
+ typedef std::future<GetObjectAttributesOutcome> GetObjectAttributesOutcomeCallable;
+ typedef std::future<GetObjectLegalHoldOutcome> GetObjectLegalHoldOutcomeCallable;
+ typedef std::future<GetObjectLockConfigurationOutcome> GetObjectLockConfigurationOutcomeCallable;
+ typedef std::future<GetObjectRetentionOutcome> GetObjectRetentionOutcomeCallable;
+ typedef std::future<GetObjectTaggingOutcome> GetObjectTaggingOutcomeCallable;
+ typedef std::future<GetObjectTorrentOutcome> GetObjectTorrentOutcomeCallable;
+ typedef std::future<GetPublicAccessBlockOutcome> GetPublicAccessBlockOutcomeCallable;
+ typedef std::future<HeadBucketOutcome> HeadBucketOutcomeCallable;
+ typedef std::future<HeadObjectOutcome> HeadObjectOutcomeCallable;
+ typedef std::future<ListBucketAnalyticsConfigurationsOutcome> ListBucketAnalyticsConfigurationsOutcomeCallable;
+ typedef std::future<ListBucketIntelligentTieringConfigurationsOutcome> ListBucketIntelligentTieringConfigurationsOutcomeCallable;
+ typedef std::future<ListBucketInventoryConfigurationsOutcome> ListBucketInventoryConfigurationsOutcomeCallable;
+ typedef std::future<ListBucketMetricsConfigurationsOutcome> ListBucketMetricsConfigurationsOutcomeCallable;
+ typedef std::future<ListBucketsOutcome> ListBucketsOutcomeCallable;
+ typedef std::future<ListMultipartUploadsOutcome> ListMultipartUploadsOutcomeCallable;
+ typedef std::future<ListObjectVersionsOutcome> ListObjectVersionsOutcomeCallable;
+ typedef std::future<ListObjectsOutcome> ListObjectsOutcomeCallable;
+ typedef std::future<ListObjectsV2Outcome> ListObjectsV2OutcomeCallable;
+ typedef std::future<ListPartsOutcome> ListPartsOutcomeCallable;
+ typedef std::future<PutBucketAccelerateConfigurationOutcome> PutBucketAccelerateConfigurationOutcomeCallable;
+ typedef std::future<PutBucketAclOutcome> PutBucketAclOutcomeCallable;
+ typedef std::future<PutBucketAnalyticsConfigurationOutcome> PutBucketAnalyticsConfigurationOutcomeCallable;
+ typedef std::future<PutBucketCorsOutcome> PutBucketCorsOutcomeCallable;
+ typedef std::future<PutBucketEncryptionOutcome> PutBucketEncryptionOutcomeCallable;
+ typedef std::future<PutBucketIntelligentTieringConfigurationOutcome> PutBucketIntelligentTieringConfigurationOutcomeCallable;
+ typedef std::future<PutBucketInventoryConfigurationOutcome> PutBucketInventoryConfigurationOutcomeCallable;
+ typedef std::future<PutBucketLifecycleConfigurationOutcome> PutBucketLifecycleConfigurationOutcomeCallable;
+ typedef std::future<PutBucketLoggingOutcome> PutBucketLoggingOutcomeCallable;
+ typedef std::future<PutBucketMetricsConfigurationOutcome> PutBucketMetricsConfigurationOutcomeCallable;
+ typedef std::future<PutBucketNotificationConfigurationOutcome> PutBucketNotificationConfigurationOutcomeCallable;
+ typedef std::future<PutBucketOwnershipControlsOutcome> PutBucketOwnershipControlsOutcomeCallable;
+ typedef std::future<PutBucketPolicyOutcome> PutBucketPolicyOutcomeCallable;
+ typedef std::future<PutBucketReplicationOutcome> PutBucketReplicationOutcomeCallable;
+ typedef std::future<PutBucketRequestPaymentOutcome> PutBucketRequestPaymentOutcomeCallable;
+ typedef std::future<PutBucketTaggingOutcome> PutBucketTaggingOutcomeCallable;
+ typedef std::future<PutBucketVersioningOutcome> PutBucketVersioningOutcomeCallable;
+ typedef std::future<PutBucketWebsiteOutcome> PutBucketWebsiteOutcomeCallable;
+ typedef std::future<PutObjectOutcome> PutObjectOutcomeCallable;
+ typedef std::future<PutObjectAclOutcome> PutObjectAclOutcomeCallable;
+ typedef std::future<PutObjectLegalHoldOutcome> PutObjectLegalHoldOutcomeCallable;
+ typedef std::future<PutObjectLockConfigurationOutcome> PutObjectLockConfigurationOutcomeCallable;
+ typedef std::future<PutObjectRetentionOutcome> PutObjectRetentionOutcomeCallable;
+ typedef std::future<PutObjectTaggingOutcome> PutObjectTaggingOutcomeCallable;
+ typedef std::future<PutPublicAccessBlockOutcome> PutPublicAccessBlockOutcomeCallable;
+ typedef std::future<RestoreObjectOutcome> RestoreObjectOutcomeCallable;
+ typedef std::future<SelectObjectContentOutcome> SelectObjectContentOutcomeCallable;
+ typedef std::future<UploadPartOutcome> UploadPartOutcomeCallable;
+ typedef std::future<UploadPartCopyOutcome> UploadPartCopyOutcomeCallable;
+ typedef std::future<WriteGetObjectResponseOutcome> WriteGetObjectResponseOutcomeCallable;
+ /* End of service model Outcome callable definitions */
+ } // namespace Model
+
+ class S3Client;
+
+ /* Service model async handlers definitions */
+ typedef std::function<void(const S3Client*, const Model::AbortMultipartUploadRequest&, const Model::AbortMultipartUploadOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > AbortMultipartUploadResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::CompleteMultipartUploadRequest&, const Model::CompleteMultipartUploadOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CompleteMultipartUploadResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::CopyObjectRequest&, const Model::CopyObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CopyObjectResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::CreateBucketRequest&, const Model::CreateBucketOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateBucketResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::CreateMultipartUploadRequest&, const Model::CreateMultipartUploadOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > CreateMultipartUploadResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketRequest&, const Model::DeleteBucketOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketAnalyticsConfigurationRequest&, const Model::DeleteBucketAnalyticsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketAnalyticsConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketCorsRequest&, const Model::DeleteBucketCorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketCorsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketEncryptionRequest&, const Model::DeleteBucketEncryptionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketEncryptionResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketIntelligentTieringConfigurationRequest&, const Model::DeleteBucketIntelligentTieringConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketInventoryConfigurationRequest&, const Model::DeleteBucketInventoryConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketInventoryConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketLifecycleRequest&, const Model::DeleteBucketLifecycleOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketLifecycleResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketMetricsConfigurationRequest&, const Model::DeleteBucketMetricsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketMetricsConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketOwnershipControlsRequest&, const Model::DeleteBucketOwnershipControlsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketOwnershipControlsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketPolicyRequest&, const Model::DeleteBucketPolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketPolicyResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketReplicationRequest&, const Model::DeleteBucketReplicationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketReplicationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketTaggingRequest&, const Model::DeleteBucketTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketTaggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteBucketWebsiteRequest&, const Model::DeleteBucketWebsiteOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteBucketWebsiteResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteObjectRequest&, const Model::DeleteObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteObjectResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteObjectTaggingRequest&, const Model::DeleteObjectTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteObjectTaggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeleteObjectsRequest&, const Model::DeleteObjectsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeleteObjectsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::DeletePublicAccessBlockRequest&, const Model::DeletePublicAccessBlockOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > DeletePublicAccessBlockResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketAccelerateConfigurationRequest&, const Model::GetBucketAccelerateConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketAccelerateConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketAclRequest&, const Model::GetBucketAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketAclResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketAnalyticsConfigurationRequest&, const Model::GetBucketAnalyticsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketAnalyticsConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketCorsRequest&, const Model::GetBucketCorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketCorsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketEncryptionRequest&, const Model::GetBucketEncryptionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketEncryptionResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketIntelligentTieringConfigurationRequest&, const Model::GetBucketIntelligentTieringConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketIntelligentTieringConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketInventoryConfigurationRequest&, const Model::GetBucketInventoryConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketInventoryConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketLifecycleConfigurationRequest&, const Model::GetBucketLifecycleConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketLifecycleConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketLocationRequest&, const Model::GetBucketLocationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketLocationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketLoggingRequest&, const Model::GetBucketLoggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketLoggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketMetricsConfigurationRequest&, const Model::GetBucketMetricsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketMetricsConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketNotificationConfigurationRequest&, const Model::GetBucketNotificationConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketNotificationConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketOwnershipControlsRequest&, const Model::GetBucketOwnershipControlsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketOwnershipControlsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketPolicyRequest&, Model::GetBucketPolicyOutcome, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketPolicyResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketPolicyStatusRequest&, const Model::GetBucketPolicyStatusOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketPolicyStatusResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketReplicationRequest&, const Model::GetBucketReplicationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketReplicationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketRequestPaymentRequest&, const Model::GetBucketRequestPaymentOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketRequestPaymentResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketTaggingRequest&, const Model::GetBucketTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketTaggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketVersioningRequest&, const Model::GetBucketVersioningOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketVersioningResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetBucketWebsiteRequest&, const Model::GetBucketWebsiteOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetBucketWebsiteResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectRequest&, Model::GetObjectOutcome, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectAclRequest&, const Model::GetObjectAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectAclResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectAttributesRequest&, const Model::GetObjectAttributesOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectAttributesResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectLegalHoldRequest&, const Model::GetObjectLegalHoldOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectLegalHoldResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectLockConfigurationRequest&, const Model::GetObjectLockConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectLockConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectRetentionRequest&, const Model::GetObjectRetentionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectRetentionResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectTaggingRequest&, const Model::GetObjectTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectTaggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetObjectTorrentRequest&, Model::GetObjectTorrentOutcome, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetObjectTorrentResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::GetPublicAccessBlockRequest&, const Model::GetPublicAccessBlockOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > GetPublicAccessBlockResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::HeadBucketRequest&, const Model::HeadBucketOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > HeadBucketResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::HeadObjectRequest&, const Model::HeadObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > HeadObjectResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListBucketAnalyticsConfigurationsRequest&, const Model::ListBucketAnalyticsConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketAnalyticsConfigurationsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListBucketIntelligentTieringConfigurationsRequest&, const Model::ListBucketIntelligentTieringConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketIntelligentTieringConfigurationsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListBucketInventoryConfigurationsRequest&, const Model::ListBucketInventoryConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketInventoryConfigurationsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListBucketMetricsConfigurationsRequest&, const Model::ListBucketMetricsConfigurationsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketMetricsConfigurationsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListBucketsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListBucketsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListMultipartUploadsRequest&, const Model::ListMultipartUploadsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListMultipartUploadsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListObjectVersionsRequest&, const Model::ListObjectVersionsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListObjectVersionsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListObjectsRequest&, const Model::ListObjectsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListObjectsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListObjectsV2Request&, const Model::ListObjectsV2Outcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListObjectsV2ResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::ListPartsRequest&, const Model::ListPartsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > ListPartsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketAccelerateConfigurationRequest&, const Model::PutBucketAccelerateConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketAccelerateConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketAclRequest&, const Model::PutBucketAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketAclResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketAnalyticsConfigurationRequest&, const Model::PutBucketAnalyticsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketAnalyticsConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketCorsRequest&, const Model::PutBucketCorsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketCorsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketEncryptionRequest&, const Model::PutBucketEncryptionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketEncryptionResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketIntelligentTieringConfigurationRequest&, const Model::PutBucketIntelligentTieringConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketIntelligentTieringConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketInventoryConfigurationRequest&, const Model::PutBucketInventoryConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketInventoryConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketLifecycleConfigurationRequest&, const Model::PutBucketLifecycleConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketLifecycleConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketLoggingRequest&, const Model::PutBucketLoggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketLoggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketMetricsConfigurationRequest&, const Model::PutBucketMetricsConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketMetricsConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketNotificationConfigurationRequest&, const Model::PutBucketNotificationConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketNotificationConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketOwnershipControlsRequest&, const Model::PutBucketOwnershipControlsOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketOwnershipControlsResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketPolicyRequest&, const Model::PutBucketPolicyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketPolicyResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketReplicationRequest&, const Model::PutBucketReplicationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketReplicationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketRequestPaymentRequest&, const Model::PutBucketRequestPaymentOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketRequestPaymentResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketTaggingRequest&, const Model::PutBucketTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketTaggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketVersioningRequest&, const Model::PutBucketVersioningOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketVersioningResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutBucketWebsiteRequest&, const Model::PutBucketWebsiteOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutBucketWebsiteResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutObjectRequest&, const Model::PutObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutObjectAclRequest&, const Model::PutObjectAclOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectAclResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutObjectLegalHoldRequest&, const Model::PutObjectLegalHoldOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectLegalHoldResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutObjectLockConfigurationRequest&, const Model::PutObjectLockConfigurationOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectLockConfigurationResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutObjectRetentionRequest&, const Model::PutObjectRetentionOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectRetentionResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutObjectTaggingRequest&, const Model::PutObjectTaggingOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutObjectTaggingResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::PutPublicAccessBlockRequest&, const Model::PutPublicAccessBlockOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > PutPublicAccessBlockResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::RestoreObjectRequest&, const Model::RestoreObjectOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > RestoreObjectResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::SelectObjectContentRequest&, const Model::SelectObjectContentOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > SelectObjectContentResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::UploadPartRequest&, const Model::UploadPartOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UploadPartResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::UploadPartCopyRequest&, const Model::UploadPartCopyOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > UploadPartCopyResponseReceivedHandler;
+ typedef std::function<void(const S3Client*, const Model::WriteGetObjectResponseRequest&, const Model::WriteGetObjectResponseOutcome&, const std::shared_ptr<const Aws::Client::AsyncCallerContext>&) > WriteGetObjectResponseResponseReceivedHandler;
+ /* End of service model async handlers definitions */
+ } // namespace S3
+} // namespace Aws
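
The handler typedefs above are the completion callbacks taken by the corresponding *Async methods on S3Client. A minimal sketch of wiring one up, assuming the standard PutObjectAsync signature; the bucket and key names are placeholders, not taken from this patch:

#include <memory>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>

void PutObjectAsyncExample(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::PutObjectRequest request;
    request.SetBucket("example-bucket");   // placeholder
    request.SetKey("example-key");         // placeholder
    // A real upload would attach a body stream via request.SetBody(...); omitted here.

    auto handler = [](const Aws::S3::S3Client* /*client*/,
                      const Aws::S3::Model::PutObjectRequest& /*req*/,
                      const Aws::S3::Model::PutObjectOutcome& outcome,
                      const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/)
    {
        // Invoked on an SDK executor thread once the request completes.
        if (!outcome.IsSuccess())
        {
            // Inspect outcome.GetError() here.
        }
    };

    client.PutObjectAsync(request, handler);
}

The lambda's parameter list mirrors PutObjectResponseReceivedHandler as defined above.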
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortIncompleteMultipartUpload.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortIncompleteMultipartUpload.h
index fb84bdcf68..518b2277a9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortIncompleteMultipartUpload.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortIncompleteMultipartUpload.h
@@ -30,14 +30,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AbortIncompleteMultipartUpload">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AbortIncompleteMultipartUpload
+ class AbortIncompleteMultipartUpload
{
public:
- AbortIncompleteMultipartUpload();
- AbortIncompleteMultipartUpload(const Aws::Utils::Xml::XmlNode& xmlNode);
- AbortIncompleteMultipartUpload& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AbortIncompleteMultipartUpload();
+ AWS_S3_API AbortIncompleteMultipartUpload(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AbortIncompleteMultipartUpload& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -67,7 +67,7 @@ namespace Model
private:
int m_daysAfterInitiation;
- bool m_daysAfterInitiationHasBeenSet;
+ bool m_daysAfterInitiationHasBeenSet = false;
};
} // namespace Model
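
Two mechanical changes repeat throughout the rest of this patch: the AWS_S3_API export macro moves from the class declaration onto the individual out-of-line methods (presumably so the header-only inline accessors no longer enter the exported symbol surface), and the *HasBeenSet flags gain C++11 in-class default member initializers. A standalone sketch of the initializer pattern, using made-up types rather than SDK ones:

// Old style: the flag is well-defined only if every constructor remembers to set it.
class OldStyle
{
public:
    OldStyle() : m_daysAfterInitiationHasBeenSet(false) {}
private:
    int m_daysAfterInitiation{0};
    bool m_daysAfterInitiationHasBeenSet;
};

// New style: the default lives next to the member, so any constructor that does not
// mention the flag still leaves it in a known state (false).
class NewStyle
{
public:
    NewStyle() = default;
private:
    int m_daysAfterInitiation{0};
    bool m_daysAfterInitiationHasBeenSet = false;
};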
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadRequest.h
index 92e6d27ca6..5f7cccecd7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API AbortMultipartUploadRequest : public S3Request
+ class AbortMultipartUploadRequest : public S3Request
{
public:
- AbortMultipartUploadRequest();
+ AWS_S3_API AbortMultipartUploadRequest();
      // Service request name is the Operation name which will send this request out,
      // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,31 +35,35 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "AbortMultipartUpload"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name to which the upload was taking place. </p> <p>When using this
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -68,19 +72,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -89,19 +93,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -110,19 +114,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -131,19 +135,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -152,19 +156,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline AbortMultipartUploadRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -173,19 +177,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline AbortMultipartUploadRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -194,19 +198,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline AbortMultipartUploadRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -314,57 +318,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline AbortMultipartUploadRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline AbortMultipartUploadRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline AbortMultipartUploadRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -411,22 +415,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_uploadId;
- bool m_uploadIdHasBeenSet;
+ bool m_uploadIdHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
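
The updated comments above spell out that the Bucket field accepts either a plain bucket name or an access point / S3 on Outposts ARN. A minimal sketch of issuing the request; the bucket, key, upload ID and account ID are illustrative placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>

void AbortUploadExample(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::AbortMultipartUploadRequest request;
    // Either a bucket name or, as documented above, an access point ARN of the form
    // arn:aws:s3:<region>:<account-id>:accesspoint/<access-point-name>.
    request.SetBucket("example-bucket");
    request.SetKey("large-object.bin");
    request.SetUploadId("example-upload-id");
    // Optional: the request fails with 403 Forbidden if another account owns the bucket.
    request.SetExpectedBucketOwner("123456789012");

    auto outcome = client.AbortMultipartUpload(request);
    if (!outcome.IsSuccess())
    {
        // outcome.GetError() carries the HTTP status code and message.
    }
}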
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadResult.h
index 003df773b5..9867ef140a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AbortMultipartUploadResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API AbortMultipartUploadResult
+ class AbortMultipartUploadResult
{
public:
- AbortMultipartUploadResult();
- AbortMultipartUploadResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- AbortMultipartUploadResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API AbortMultipartUploadResult();
+ AWS_S3_API AbortMultipartUploadResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API AbortMultipartUploadResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccelerateConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccelerateConfiguration.h
index 111ac83475..7bc73a1ca1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccelerateConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccelerateConfiguration.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccelerateConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AccelerateConfiguration
+ class AccelerateConfiguration
{
public:
- AccelerateConfiguration();
- AccelerateConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- AccelerateConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AccelerateConfiguration();
+ AWS_S3_API AccelerateConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AccelerateConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -74,7 +74,7 @@ namespace Model
private:
BucketAccelerateStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlPolicy.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlPolicy.h
index 1effa4bf55..1e8bfe7c8e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlPolicy.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlPolicy.h
@@ -30,14 +30,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlPolicy">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AccessControlPolicy
+ class AccessControlPolicy
{
public:
- AccessControlPolicy();
- AccessControlPolicy(const Aws::Utils::Xml::XmlNode& xmlNode);
- AccessControlPolicy& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AccessControlPolicy();
+ AWS_S3_API AccessControlPolicy(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AccessControlPolicy& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -114,10 +114,10 @@ namespace Model
private:
Aws::Vector<Grant> m_grants;
- bool m_grantsHasBeenSet;
+ bool m_grantsHasBeenSet = false;
Owner m_owner;
- bool m_ownerHasBeenSet;
+ bool m_ownerHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlTranslation.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlTranslation.h
index e8f4845c2e..201a0a7ebd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlTranslation.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AccessControlTranslation.h
@@ -28,68 +28,62 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AccessControlTranslation">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AccessControlTranslation
+ class AccessControlTranslation
{
public:
- AccessControlTranslation();
- AccessControlTranslation(const Aws::Utils::Xml::XmlNode& xmlNode);
- AccessControlTranslation& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AccessControlTranslation();
+ AWS_S3_API AccessControlTranslation(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AccessControlTranslation& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p>Specifies the replica ownership. For default and valid values, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * bucket replication</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * bucket replication</a> in the <i>Amazon S3 API Reference</i>.</p>
*/
inline const OwnerOverride& GetOwner() const{ return m_owner; }
/**
* <p>Specifies the replica ownership. For default and valid values, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * bucket replication</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * bucket replication</a> in the <i>Amazon S3 API Reference</i>.</p>
*/
inline bool OwnerHasBeenSet() const { return m_ownerHasBeenSet; }
/**
* <p>Specifies the replica ownership. For default and valid values, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * bucket replication</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * bucket replication</a> in the <i>Amazon S3 API Reference</i>.</p>
*/
inline void SetOwner(const OwnerOverride& value) { m_ownerHasBeenSet = true; m_owner = value; }
/**
* <p>Specifies the replica ownership. For default and valid values, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * bucket replication</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * bucket replication</a> in the <i>Amazon S3 API Reference</i>.</p>
*/
inline void SetOwner(OwnerOverride&& value) { m_ownerHasBeenSet = true; m_owner = std::move(value); }
/**
* <p>Specifies the replica ownership. For default and valid values, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * bucket replication</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * bucket replication</a> in the <i>Amazon S3 API Reference</i>.</p>
*/
inline AccessControlTranslation& WithOwner(const OwnerOverride& value) { SetOwner(value); return *this;}
/**
* <p>Specifies the replica ownership. For default and valid values, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * bucket replication</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * bucket replication</a> in the <i>Amazon S3 API Reference</i>.</p>
*/
inline AccessControlTranslation& WithOwner(OwnerOverride&& value) { SetOwner(std::move(value)); return *this;}
private:
OwnerOverride m_owner;
- bool m_ownerHasBeenSet;
+ bool m_ownerHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsAndOperator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsAndOperator.h
index 760b911104..4859717c2c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsAndOperator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsAndOperator.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsAndOperator">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AnalyticsAndOperator
+ class AnalyticsAndOperator
{
public:
- AnalyticsAndOperator();
- AnalyticsAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
- AnalyticsAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsAndOperator();
+ AWS_S3_API AnalyticsAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -134,10 +134,10 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::Vector<Tag> m_tags;
- bool m_tagsHasBeenSet;
+ bool m_tagsHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsConfiguration.h
index b3b70834bd..f735dc7644 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsConfiguration.h
@@ -25,19 +25,19 @@ namespace Model
{
/**
- * <p> Specifies the configuration and any analyses for the analytics filter of an
+ * <p>Specifies the configuration and any analyses for the analytics filter of an
* Amazon S3 bucket.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AnalyticsConfiguration
+ class AnalyticsConfiguration
{
public:
- AnalyticsConfiguration();
- AnalyticsConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- AnalyticsConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsConfiguration();
+ AWS_S3_API AnalyticsConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -163,13 +163,13 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
AnalyticsFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
StorageClassAnalysis m_storageClassAnalysis;
- bool m_storageClassAnalysisHasBeenSet;
+ bool m_storageClassAnalysisHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsExportDestination.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsExportDestination.h
index 9b7dfb72f0..33652244e4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsExportDestination.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsExportDestination.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsExportDestination">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AnalyticsExportDestination
+ class AnalyticsExportDestination
{
public:
- AnalyticsExportDestination();
- AnalyticsExportDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
- AnalyticsExportDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsExportDestination();
+ AWS_S3_API AnalyticsExportDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsExportDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -70,7 +70,7 @@ namespace Model
private:
AnalyticsS3BucketDestination m_s3BucketDestination;
- bool m_s3BucketDestinationHasBeenSet;
+ bool m_s3BucketDestinationHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsFilter.h
index 1567043a4a..c655675838 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsFilter.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AnalyticsFilter
+ class AnalyticsFilter
{
public:
- AnalyticsFilter();
- AnalyticsFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- AnalyticsFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsFilter();
+ AWS_S3_API AnalyticsFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -153,13 +153,13 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Tag m_tag;
- bool m_tagHasBeenSet;
+ bool m_tagHasBeenSet = false;
AnalyticsAndOperator m_and;
- bool m_andHasBeenSet;
+ bool m_andHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsS3BucketDestination.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsS3BucketDestination.h
index 69c40eb86d..2175b74df6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsS3BucketDestination.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/AnalyticsS3BucketDestination.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/AnalyticsS3BucketDestination">AWS
* API Reference</a></p>
*/
- class AWS_S3_API AnalyticsS3BucketDestination
+ class AnalyticsS3BucketDestination
{
public:
- AnalyticsS3BucketDestination();
- AnalyticsS3BucketDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
- AnalyticsS3BucketDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsS3BucketDestination();
+ AWS_S3_API AnalyticsS3BucketDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API AnalyticsS3BucketDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -227,16 +227,16 @@ namespace Model
private:
AnalyticsS3ExportFileFormat m_format;
- bool m_formatHasBeenSet;
+ bool m_formatHasBeenSet = false;
Aws::String m_bucketAccountId;
- bool m_bucketAccountIdHasBeenSet;
+ bool m_bucketAccountIdHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Bucket.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Bucket.h
index ffe7104842..ad5c7d6d25 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Bucket.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Bucket.h
@@ -25,19 +25,19 @@ namespace Model
/**
* <p> In terms of implementation, a Bucket is a resource. An Amazon S3 bucket name
- * is globally unique, and the namespace is shared by all AWS accounts.
- * </p><p><h3>See Also:</h3> <a
+ * is globally unique, and the namespace is shared by all Amazon Web Services
+ * accounts. </p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Bucket">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Bucket
+ class Bucket
{
public:
- Bucket();
- Bucket(const Aws::Utils::Xml::XmlNode& xmlNode);
- Bucket& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Bucket();
+ AWS_S3_API Bucket(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Bucket& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -120,10 +120,10 @@ namespace Model
private:
Aws::String m_name;
- bool m_nameHasBeenSet;
+ bool m_nameHasBeenSet = false;
Aws::Utils::DateTime m_creationDate;
- bool m_creationDateHasBeenSet;
+ bool m_creationDateHasBeenSet = false;
};
} // namespace Model
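
Bucket is the element type surfaced by ListBuckets; a short sketch of reading the two members declared above (Name and CreationDate), with error handling trimmed:

#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListBucketsResult.h>

void CollectBucketNames(const Aws::S3::S3Client& client, Aws::Vector<Aws::String>& namesOut)
{
    auto outcome = client.ListBuckets();
    if (outcome.IsSuccess())
    {
        for (const auto& bucket : outcome.GetResult().GetBuckets())
        {
            // GetName()/GetCreationDate() correspond to m_name and m_creationDate above.
            namesOut.push_back(bucket.GetName());
        }
    }
}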
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLifecycleConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLifecycleConfiguration.h
index ee35f3fd9a..745fe58263 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLifecycleConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLifecycleConfiguration.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLifecycleConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API BucketLifecycleConfiguration
+ class BucketLifecycleConfiguration
{
public:
- BucketLifecycleConfiguration();
- BucketLifecycleConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- BucketLifecycleConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API BucketLifecycleConfiguration();
+ AWS_S3_API BucketLifecycleConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API BucketLifecycleConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -85,7 +85,7 @@ namespace Model
private:
Aws::Vector<LifecycleRule> m_rules;
- bool m_rulesHasBeenSet;
+ bool m_rulesHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLocationConstraint.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLocationConstraint.h
index 86490ce1fa..b4f177fba6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLocationConstraint.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLocationConstraint.h
@@ -24,6 +24,7 @@ namespace Model
ap_south_1,
ap_southeast_1,
ap_southeast_2,
+ ap_southeast_3,
ca_central_1,
cn_north_1,
cn_northwest_1,
@@ -41,6 +42,7 @@ namespace Model
us_gov_west_1,
us_west_1,
us_west_2,
+ us_iso_west_1,
us_east_1
};
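
This hunk adds ap_southeast_3 (Jakarta) and us_iso_west_1 to the location-constraint enum. A hedged sketch of where the enum is typically consumed, via the standard CreateBucketConfiguration setter; the bucket name is a placeholder:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateBucketConfiguration.h>
#include <aws/s3/model/CreateBucketRequest.h>

void CreateBucketInJakarta(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::CreateBucketConfiguration config;
    // One of the enumerators introduced by this hunk.
    config.SetLocationConstraint(Aws::S3::Model::BucketLocationConstraint::ap_southeast_3);

    Aws::S3::Model::CreateBucketRequest request;
    request.SetBucket("example-bucket-in-ap-southeast-3");
    request.SetCreateBucketConfiguration(config);

    auto outcome = client.CreateBucket(request);
    if (!outcome.IsSuccess())
    {
        // Inspect outcome.GetError(); the client must also target the matching Region.
    }
}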
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLoggingStatus.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLoggingStatus.h
index 36273be848..1848d8f9b9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLoggingStatus.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/BucketLoggingStatus.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/BucketLoggingStatus">AWS
* API Reference</a></p>
*/
- class AWS_S3_API BucketLoggingStatus
+ class BucketLoggingStatus
{
public:
- BucketLoggingStatus();
- BucketLoggingStatus(const Aws::Utils::Xml::XmlNode& xmlNode);
- BucketLoggingStatus& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API BucketLoggingStatus();
+ AWS_S3_API BucketLoggingStatus(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API BucketLoggingStatus& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -58,7 +58,7 @@ namespace Model
private:
LoggingEnabled m_loggingEnabled;
- bool m_loggingEnabledHasBeenSet;
+ bool m_loggingEnabledHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSConfiguration.h
index 5280876ab3..9fe46f05ad 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSConfiguration.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CORSConfiguration
+ class CORSConfiguration
{
public:
- CORSConfiguration();
- CORSConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- CORSConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CORSConfiguration();
+ AWS_S3_API CORSConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CORSConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -93,7 +93,7 @@ namespace Model
private:
Aws::Vector<CORSRule> m_cORSRules;
- bool m_cORSRulesHasBeenSet;
+ bool m_cORSRulesHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSRule.h
index fd1808aa0a..1fd4da5df1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CORSRule.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CORSRule">AWS API
* Reference</a></p>
*/
- class AWS_S3_API CORSRule
+ class CORSRule
{
public:
- CORSRule();
- CORSRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- CORSRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CORSRule();
+ AWS_S3_API CORSRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CORSRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -371,22 +371,22 @@ namespace Model
private:
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
Aws::Vector<Aws::String> m_allowedHeaders;
- bool m_allowedHeadersHasBeenSet;
+ bool m_allowedHeadersHasBeenSet = false;
Aws::Vector<Aws::String> m_allowedMethods;
- bool m_allowedMethodsHasBeenSet;
+ bool m_allowedMethodsHasBeenSet = false;
Aws::Vector<Aws::String> m_allowedOrigins;
- bool m_allowedOriginsHasBeenSet;
+ bool m_allowedOriginsHasBeenSet = false;
Aws::Vector<Aws::String> m_exposeHeaders;
- bool m_exposeHeadersHasBeenSet;
+ bool m_exposeHeadersHasBeenSet = false;
int m_maxAgeSeconds;
- bool m_maxAgeSecondsHasBeenSet;
+ bool m_maxAgeSecondsHasBeenSet = false;
};
} // namespace Model
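
CORSRule is consumed through CORSConfiguration and PutBucketCors. A minimal sketch using the standard generated setters and adders (nothing here is specific to this update; the bucket and origin are placeholders):

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CORSRule.h>
#include <aws/s3/model/CORSConfiguration.h>
#include <aws/s3/model/PutBucketCorsRequest.h>

void AllowGetFromExampleOrigin(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::CORSRule rule;
    rule.AddAllowedMethods("GET");
    rule.AddAllowedOrigins("https://example.com");
    rule.SetMaxAgeSeconds(3000);

    Aws::S3::Model::CORSConfiguration config;
    config.AddCORSRules(rule);

    Aws::S3::Model::PutBucketCorsRequest request;
    request.SetBucket("example-bucket");
    request.SetCORSConfiguration(config);

    auto outcome = client.PutBucketCors(request);
    if (!outcome.IsSuccess())
    {
        // Inspect outcome.GetError() here.
    }
}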
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVInput.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVInput.h
index ca40bb8ef9..3b591448db 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVInput.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVInput.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVInput">AWS API
* Reference</a></p>
*/
- class AWS_S3_API CSVInput
+ class CSVInput
{
public:
- CSVInput();
- CSVInput(const Aws::Utils::Xml::XmlNode& xmlNode);
- CSVInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CSVInput();
+ AWS_S3_API CSVInput(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CSVInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -171,57 +171,57 @@ namespace Model
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline const Aws::String& GetQuoteEscapeCharacter() const{ return m_quoteEscapeCharacter; }
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline bool QuoteEscapeCharacterHasBeenSet() const { return m_quoteEscapeCharacterHasBeenSet; }
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline void SetQuoteEscapeCharacter(const Aws::String& value) { m_quoteEscapeCharacterHasBeenSet = true; m_quoteEscapeCharacter = value; }
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline void SetQuoteEscapeCharacter(Aws::String&& value) { m_quoteEscapeCharacterHasBeenSet = true; m_quoteEscapeCharacter = std::move(value); }
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline void SetQuoteEscapeCharacter(const char* value) { m_quoteEscapeCharacterHasBeenSet = true; m_quoteEscapeCharacter.assign(value); }
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline CSVInput& WithQuoteEscapeCharacter(const Aws::String& value) { SetQuoteEscapeCharacter(value); return *this;}
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline CSVInput& WithQuoteEscapeCharacter(Aws::String&& value) { SetQuoteEscapeCharacter(std::move(value)); return *this;}
/**
* <p>A single character used for escaping the quotation mark character inside an
- * already escaped value. For example, the value """ a , b """ is parsed as " a , b
- * ".</p>
+ * already escaped value. For example, the value <code>""" a , b """</code> is
+ * parsed as <code>" a , b "</code>.</p>
*/
inline CSVInput& WithQuoteEscapeCharacter(const char* value) { SetQuoteEscapeCharacter(value); return *this;}
@@ -420,25 +420,25 @@ namespace Model
private:
FileHeaderInfo m_fileHeaderInfo;
- bool m_fileHeaderInfoHasBeenSet;
+ bool m_fileHeaderInfoHasBeenSet = false;
Aws::String m_comments;
- bool m_commentsHasBeenSet;
+ bool m_commentsHasBeenSet = false;
Aws::String m_quoteEscapeCharacter;
- bool m_quoteEscapeCharacterHasBeenSet;
+ bool m_quoteEscapeCharacterHasBeenSet = false;
Aws::String m_recordDelimiter;
- bool m_recordDelimiterHasBeenSet;
+ bool m_recordDelimiterHasBeenSet = false;
Aws::String m_fieldDelimiter;
- bool m_fieldDelimiterHasBeenSet;
+ bool m_fieldDelimiterHasBeenSet = false;
Aws::String m_quoteCharacter;
- bool m_quoteCharacterHasBeenSet;
+ bool m_quoteCharacterHasBeenSet = false;
bool m_allowQuotedRecordDelimiter;
- bool m_allowQuotedRecordDelimiterHasBeenSet;
+ bool m_allowQuotedRecordDelimiterHasBeenSet = false;
};
} // namespace Model
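
The QuoteEscapeCharacter wording above is easier to follow next to a concrete serialization setup. A hedged sketch of the CSV input side of a SelectObjectContent request; the delimiter and header settings are illustrative:

#include <aws/s3/model/CSVInput.h>
#include <aws/s3/model/InputSerialization.h>

Aws::S3::Model::InputSerialization MakeCsvInputSerialization()
{
    Aws::S3::Model::CSVInput csv;
    csv.SetFileHeaderInfo(Aws::S3::Model::FileHeaderInfo::USE);
    csv.SetFieldDelimiter(",");
    csv.SetQuoteCharacter("\"");
    // With '"' as the escape character, """ a , b """ in the file is read back
    // as " a , b ", exactly as the comment above describes.
    csv.SetQuoteEscapeCharacter("\"");

    Aws::S3::Model::InputSerialization input;
    input.SetCSV(csv);
    return input;
}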
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVOutput.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVOutput.h
index d507716d7f..dc0a67f469 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVOutput.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CSVOutput.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CSVOutput">AWS API
* Reference</a></p>
*/
- class AWS_S3_API CSVOutput
+ class CSVOutput
{
public:
- CSVOutput();
- CSVOutput(const Aws::Utils::Xml::XmlNode& xmlNode);
- CSVOutput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CSVOutput();
+ AWS_S3_API CSVOutput(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CSVOutput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -294,19 +294,19 @@ namespace Model
private:
QuoteFields m_quoteFields;
- bool m_quoteFieldsHasBeenSet;
+ bool m_quoteFieldsHasBeenSet = false;
Aws::String m_quoteEscapeCharacter;
- bool m_quoteEscapeCharacterHasBeenSet;
+ bool m_quoteEscapeCharacterHasBeenSet = false;
Aws::String m_recordDelimiter;
- bool m_recordDelimiterHasBeenSet;
+ bool m_recordDelimiterHasBeenSet = false;
Aws::String m_fieldDelimiter;
- bool m_fieldDelimiterHasBeenSet;
+ bool m_fieldDelimiterHasBeenSet = false;
Aws::String m_quoteCharacter;
- bool m_quoteCharacterHasBeenSet;
+ bool m_quoteCharacterHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Checksum.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Checksum.h
new file mode 100644
index 0000000000..39b3c520c1
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Checksum.h
@@ -0,0 +1,381 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <utility>
+
+namespace Aws
+{
+namespace Utils
+{
+namespace Xml
+{
+ class XmlNode;
+} // namespace Xml
+} // namespace Utils
+namespace S3
+{
+namespace Model
+{
+
+ /**
+ * <p>Contains all the possible checksum or digest values for an
+ * object.</p><p><h3>See Also:</h3> <a
+ * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Checksum">AWS API
+ * Reference</a></p>
+ */
+ class Checksum
+ {
+ public:
+ AWS_S3_API Checksum();
+ AWS_S3_API Checksum(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Checksum& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Checksum& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+ private:
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
+ };
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
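A minimal sketch of consuming the new Checksum model, using only the accessors declared in the header above; obtaining a populated Checksum (for example from a GetObjectAttributes result) is assumed and not shown.

    // Print whichever base64-encoded digests are present on a Checksum instance.
    #include <aws/s3/model/Checksum.h>
    #include <iostream>

    void PrintChecksums(const Aws::S3::Model::Checksum& cs)
    {
        if (cs.ChecksumCRC32HasBeenSet())  std::cout << "CRC32:  " << cs.GetChecksumCRC32()  << "\n";
        if (cs.ChecksumCRC32CHasBeenSet()) std::cout << "CRC32C: " << cs.GetChecksumCRC32C() << "\n";
        if (cs.ChecksumSHA1HasBeenSet())   std::cout << "SHA1:   " << cs.GetChecksumSHA1()   << "\n";
        if (cs.ChecksumSHA256HasBeenSet()) std::cout << "SHA256: " << cs.GetChecksumSHA256() << "\n";
    }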
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumAlgorithm.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumAlgorithm.h
new file mode 100644
index 0000000000..cdd0307538
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumAlgorithm.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+ enum class ChecksumAlgorithm
+ {
+ NOT_SET,
+ CRC32,
+ CRC32C,
+ SHA1,
+ SHA256
+ };
+
+namespace ChecksumAlgorithmMapper
+{
+AWS_S3_API ChecksumAlgorithm GetChecksumAlgorithmForName(const Aws::String& name);
+
+AWS_S3_API Aws::String GetNameForChecksumAlgorithm(ChecksumAlgorithm value);
+} // namespace ChecksumAlgorithmMapper
+} // namespace Model
+} // namespace S3
+} // namespace Aws
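A short sketch of the ChecksumAlgorithmMapper declared above, round-tripping an algorithm name; unknown names map to ChecksumAlgorithm::NOT_SET.

    #include <aws/s3/model/ChecksumAlgorithm.h>
    #include <iostream>

    int main()
    {
        using namespace Aws::S3::Model;
        // "SHA256" matches the enum spelling above; anything else yields NOT_SET.
        const ChecksumAlgorithm algo = ChecksumAlgorithmMapper::GetChecksumAlgorithmForName("SHA256");
        if (algo != ChecksumAlgorithm::NOT_SET)
        {
            std::cout << ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(algo) << std::endl;
        }
        return 0;
    }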
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumMode.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumMode.h
new file mode 100644
index 0000000000..2efd0d38aa
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ChecksumMode.h
@@ -0,0 +1,30 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+ enum class ChecksumMode
+ {
+ NOT_SET,
+ ENABLED
+ };
+
+namespace ChecksumModeMapper
+{
+AWS_S3_API ChecksumMode GetChecksumModeForName(const Aws::String& name);
+
+AWS_S3_API Aws::String GetNameForChecksumMode(ChecksumMode value);
+} // namespace ChecksumModeMapper
+} // namespace Model
+} // namespace S3
+} // namespace Aws
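A minimal sketch using the ChecksumModeMapper declared above; ENABLED is the only active value, so any other string maps to NOT_SET.

    #include <aws/s3/model/ChecksumMode.h>

    // Returns true when a header/config string requests checksum validation.
    bool ChecksumRequested(const Aws::String& value)
    {
        using namespace Aws::S3::Model;
        return ChecksumModeMapper::GetChecksumModeForName(value) == ChecksumMode::ENABLED;
    }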
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CloudFunctionConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CloudFunctionConfiguration.h
index d85743d470..bc592a051a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CloudFunctionConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CloudFunctionConfiguration.h
@@ -25,19 +25,19 @@ namespace Model
{
/**
- * <p>Container for specifying the AWS Lambda notification
- * configuration.</p><p><h3>See Also:</h3> <a
+ * <p>Container for specifying the Lambda notification configuration.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CloudFunctionConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CloudFunctionConfiguration
+ class CloudFunctionConfiguration
{
public:
- CloudFunctionConfiguration();
- CloudFunctionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- CloudFunctionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CloudFunctionConfiguration();
+ AWS_S3_API CloudFunctionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CloudFunctionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -198,16 +198,16 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::Vector<Event> m_events;
- bool m_eventsHasBeenSet;
+ bool m_eventsHasBeenSet = false;
Aws::String m_cloudFunction;
- bool m_cloudFunctionHasBeenSet;
+ bool m_cloudFunctionHasBeenSet = false;
Aws::String m_invocationRole;
- bool m_invocationRoleHasBeenSet;
+ bool m_invocationRoleHasBeenSet = false;
};
} // namespace Model
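A sketch of serializing a CloudFunctionConfiguration through the AddToNode declared above; the XmlDocument helpers (CreateWithRootNode, GetRootElement, ConvertToString) are assumed from the core XML utilities, and the root element name is illustrative.

    #include <aws/s3/model/CloudFunctionConfiguration.h>
    #include <aws/core/utils/xml/XmlSerializer.h>

    Aws::String SerializeConfig(const Aws::S3::Model::CloudFunctionConfiguration& cfg)
    {
        // AddToNode appends the configuration's child elements under the given parent node.
        Aws::Utils::Xml::XmlDocument doc =
            Aws::Utils::Xml::XmlDocument::CreateWithRootNode("NotificationConfiguration");
        Aws::Utils::Xml::XmlNode root = doc.GetRootElement();
        cfg.AddToNode(root);
        return doc.ConvertToString();
    }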
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CommonPrefix.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CommonPrefix.h
index 42fec00298..6f876cde8e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CommonPrefix.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CommonPrefix.h
@@ -31,14 +31,14 @@ namespace Model
* <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CommonPrefix">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CommonPrefix
+ class CommonPrefix
{
public:
- CommonPrefix();
- CommonPrefix(const Aws::Utils::Xml::XmlNode& xmlNode);
- CommonPrefix& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CommonPrefix();
+ AWS_S3_API CommonPrefix(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CommonPrefix& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -84,7 +84,7 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
};
} // namespace Model
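CommonPrefix is typically consumed when listing with a delimiter; a small sketch, assuming ListObjectsV2Result::GetCommonPrefixes() and CommonPrefix::GetPrefix() from parts of the SDK not shown in this diff.

    #include <aws/s3/model/ListObjectsV2Result.h>
    #include <aws/s3/model/CommonPrefix.h>
    #include <iostream>

    void PrintPrefixes(const Aws::S3::Model::ListObjectsV2Result& result)
    {
        // Each CommonPrefix rolls up the keys that share a prefix up to the delimiter.
        for (const Aws::S3::Model::CommonPrefix& p : result.GetCommonPrefixes())
        {
            std::cout << p.GetPrefix() << "\n";
        }
    }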
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadRequest.h
index 98e2ec2730..8972e55bfd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadRequest.h
@@ -25,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API CompleteMultipartUploadRequest : public S3Request
+ class CompleteMultipartUploadRequest : public S3Request
{
public:
- CompleteMultipartUploadRequest();
+ AWS_S3_API CompleteMultipartUploadRequest();
        // The service request name is the operation name that will send this request out;
        // each operation should have a unique request name so that we can get the operation's name from the request.
@@ -36,50 +36,183 @@ namespace Model
        // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "CompleteMultipartUpload"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API bool HasEmbeddedError(IOStream &body, const Http::HeaderValueCollection &header) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>Name of the bucket to which the multipart upload was initiated.</p>
+ * <p>Name of the bucket to which the multipart upload was initiated.</p> <p>When
+ * using this action with an access point, you must direct requests to the access
+ * point hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
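A sketch of the access-point addressing described above: the access point ARN is passed where the bucket name would normally go. The ARN and key below are placeholders, and WithKey() is assumed from the part of this header not shown in the diff.

    #include <aws/s3/model/CompleteMultipartUploadRequest.h>

    Aws::S3::Model::CompleteMultipartUploadRequest MakeRequest(const Aws::String& uploadId)
    {
        Aws::S3::Model::CompleteMultipartUploadRequest req;
        req.WithBucket("arn:aws:s3:us-east-1:123456789012:accesspoint/my-access-point") // placeholder ARN
           .WithKey("logs/app.log")                                                     // placeholder key
           .WithUploadId(uploadId);
        return req;
    }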
@@ -197,6 +330,322 @@ namespace Model
inline CompleteMultipartUploadRequest& WithUploadId(const char* value) { SetUploadId(value); return *this;}
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
inline const RequestPayer& GetRequestPayer() const{ return m_requestPayer; }
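The checksum setters above accept one base64-encoded digest per algorithm; a small sketch that picks the matching setter for a precomputed digest. Computing the digest itself is out of scope here.

    #include <aws/s3/model/CompleteMultipartUploadRequest.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>

    void AttachChecksum(Aws::S3::Model::CompleteMultipartUploadRequest& req,
                        Aws::S3::Model::ChecksumAlgorithm algo,
                        const Aws::String& base64Digest)
    {
        using Aws::S3::Model::ChecksumAlgorithm;
        switch (algo)
        {
        case ChecksumAlgorithm::CRC32:  req.SetChecksumCRC32(base64Digest);  break;
        case ChecksumAlgorithm::CRC32C: req.SetChecksumCRC32C(base64Digest); break;
        case ChecksumAlgorithm::SHA1:   req.SetChecksumSHA1(base64Digest);   break;
        case ChecksumAlgorithm::SHA256: req.SetChecksumSHA256(base64Digest); break;
        default: break; // NOT_SET: send no checksum header
        }
    }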
@@ -218,61 +667,280 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline CompleteMultipartUploadRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline CompleteMultipartUploadRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline CompleteMultipartUploadRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
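Since the reworded comment clarifies that an owner mismatch now surfaces as HTTP 403 Forbidden, a small hedged sketch of guarding the call with the expected account ID; the function name and the idea of passing the account ID in are illustrative, not prescribed by this patch.

// If accountId does not own the target bucket, S3 rejects the request with HTTP 403 Forbidden.
void GuardExpectedOwner(Aws::S3::Model::CompleteMultipartUploadRequest& request,
                        const Aws::String& accountId)
{
    request.SetExpectedBucketOwner(accountId);
}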
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetSSECustomerAlgorithm() const{ return m_sSECustomerAlgorithm; }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool SSECustomerAlgorithmHasBeenSet() const { return m_sSECustomerAlgorithmHasBeenSet; }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerAlgorithm(const Aws::String& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = value; }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerAlgorithm(Aws::String&& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = std::move(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerAlgorithm(const char* value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm.assign(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerAlgorithm(const Aws::String& value) { SetSSECustomerAlgorithm(value); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerAlgorithm(Aws::String&& value) { SetSSECustomerAlgorithm(std::move(value)); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerAlgorithm(const char* value) { SetSSECustomerAlgorithm(value); return *this;}
+
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetSSECustomerKey() const{ return m_sSECustomerKey; }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool SSECustomerKeyHasBeenSet() const { return m_sSECustomerKeyHasBeenSet; }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKey(const Aws::String& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = value; }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKey(Aws::String&& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = std::move(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKey(const char* value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey.assign(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerKey(const Aws::String& value) { SetSSECustomerKey(value); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerKey(Aws::String&& value) { SetSSECustomerKey(std::move(value)); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerKey(const char* value) { SetSSECustomerKey(value); return *this;}
+
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetSSECustomerKeyMD5() const{ return m_sSECustomerKeyMD5; }
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool SSECustomerKeyMD5HasBeenSet() const { return m_sSECustomerKeyMD5HasBeenSet; }
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKeyMD5(const Aws::String& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = value; }
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKeyMD5(Aws::String&& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = std::move(value); }
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKeyMD5(const char* value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5.assign(value); }
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerKeyMD5(const Aws::String& value) { SetSSECustomerKeyMD5(value); return *this;}
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerKeyMD5(Aws::String&& value) { SetSSECustomerKeyMD5(std::move(value)); return *this;}
+
+ /**
+   * <p>The MD5 digest of the server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadRequest& WithSSECustomerKeyMD5(const char* value) { SetSSECustomerKeyMD5(value); return *this;}
+
+
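The three SSE-C fields added above travel together when the object was uploaded with SSE-C and a checksum algorithm. A sketch under the assumption that rawKey holds the 256-bit customer key used at upload time; the HashingUtils calls come from the SDK's core utilities, but treat the exact encoding steps and the helper name as illustrative rather than a requirement of this patch.

#include <aws/core/utils/Array.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>

using Aws::Utils::HashingUtils;

void AttachSseC(Aws::S3::Model::CompleteMultipartUploadRequest& request,
                const Aws::Utils::ByteBuffer& rawKey)  // 32-byte AES-256 key used for the upload
{
    // S3 expects the key itself Base64-encoded, plus the Base64-encoded MD5 digest
    // of the raw key as an integrity check on the key material.
    const Aws::String keyBytes(reinterpret_cast<const char*>(rawKey.GetUnderlyingData()),
                               rawKey.GetLength());
    request.WithSSECustomerAlgorithm("AES256")
           .WithSSECustomerKey(HashingUtils::Base64Encode(rawKey))
           .WithSSECustomerKeyMD5(HashingUtils::Base64Encode(HashingUtils::CalculateMD5(keyBytes)));
}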
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -315,25 +983,46 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
CompletedMultipartUpload m_multipartUpload;
- bool m_multipartUploadHasBeenSet;
+ bool m_multipartUploadHasBeenSet = false;
Aws::String m_uploadId;
- bool m_uploadIdHasBeenSet;
+ bool m_uploadIdHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ Aws::String m_sSECustomerAlgorithm;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
+
+ Aws::String m_sSECustomerKey;
+ bool m_sSECustomerKeyHasBeenSet = false;
+
+ Aws::String m_sSECustomerKeyMD5;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
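To show how the widened request surface is exercised end to end, here is a hedged sketch of assembling and sending the completion call. PartRecord, CompleteUpload, and the assumption that per-part checksums were recorded during UploadPart (and that CompletedPart carries the matching checksum setter in this SDK version) are illustrative choices, not part of this patch.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CompletedMultipartUpload.h>
#include <aws/s3/model/CompletedPart.h>
#include <iostream>
#include <vector>

struct PartRecord { int number; Aws::String eTag; Aws::String sha256; }; // collected while uploading parts

bool CompleteUpload(const Aws::S3::S3Client& client,
                    const Aws::String& bucket, const Aws::String& key,
                    const Aws::String& uploadId, const std::vector<PartRecord>& parts)
{
    Aws::S3::Model::CompletedMultipartUpload assembled;
    for (const auto& p : parts) {
        assembled.AddParts(Aws::S3::Model::CompletedPart()
                               .WithPartNumber(p.number)
                               .WithETag(p.eTag)
                               .WithChecksumSHA256(p.sha256)); // per-part checksum returned by UploadPart
    }

    Aws::S3::Model::CompleteMultipartUploadRequest request;
    request.WithBucket(bucket).WithKey(key).WithUploadId(uploadId)
           .WithMultipartUpload(assembled);

    auto outcome = client.CompleteMultipartUpload(request);
    if (!outcome.IsSuccess()) {
        // A mismatched ExpectedBucketOwner, among other causes, shows up here as an error.
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return false;
    }
    return true;
}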
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadResult.h
index d5cf10ac2b..7c131a19fd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompleteMultipartUploadResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API CompleteMultipartUploadResult
+ class CompleteMultipartUploadResult
{
public:
- CompleteMultipartUploadResult();
- CompleteMultipartUploadResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- CompleteMultipartUploadResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CompleteMultipartUploadResult();
+ AWS_S3_API CompleteMultipartUploadResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CompleteMultipartUploadResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -71,149 +71,156 @@ namespace Model
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucket = value; }
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucket = std::move(value); }
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucket.assign(value); }
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadResult& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadResult& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>The name of the bucket that contains the newly created object.</p> <p>When
- * using this action with an access point, you must direct requests to the access
- * point hostname. The access point hostname takes the form
+ * <p>The name of the bucket that contains the newly created object. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadResult& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -256,43 +263,50 @@ namespace Model
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline const Aws::String& GetExpiration() const{ return m_expiration; }
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const Aws::String& value) { m_expiration = value; }
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(Aws::String&& value) { m_expiration = std::move(value); }
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const char* value) { m_expiration.assign(value); }
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline CompleteMultipartUploadResult& WithExpiration(const Aws::String& value) { SetExpiration(value); return *this;}
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline CompleteMultipartUploadResult& WithExpiration(Aws::String&& value) { SetExpiration(std::move(value)); return *this;}
/**
* <p>If the object expiration is configured, this will contain the expiration date
- * (expiry-date) and rule ID (rule-id). The value of rule-id is URL encoded.</p>
+ * (<code>expiry-date</code>) and rule ID (<code>rule-id</code>). The value of
+ * <code>rule-id</code> is URL-encoded.</p>
*/
inline CompleteMultipartUploadResult& WithExpiration(const char* value) { SetExpiration(value); return *this;}
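The updated comment calls out that the expiration header bundles both values and that rule-id is URL-encoded. A rough sketch of recovering the rule ID, assuming the value follows the documented layout expiry-date="...", rule-id="..." (an assumption taken from the S3 documentation, not from this patch):

#include <aws/core/utils/StringUtils.h>
#include <aws/core/utils/memory/stl/AWSString.h>

// Extract and URL-decode the rule-id portion of a value such as:
//   expiry-date="Fri, 23 Dec 2030 00:00:00 GMT", rule-id="lifecycle%2Drule"
Aws::String ExtractRuleId(const Aws::String& expiration)
{
    static const char marker[] = "rule-id=\"";
    const auto start = expiration.find(marker);
    if (start == Aws::String::npos) return {};
    const auto begin = start + sizeof(marker) - 1;
    const auto end = expiration.find('"', begin);
    if (end == Aws::String::npos) return {};
    return Aws::Utils::StringUtils::URLDecode(expiration.substr(begin, end - begin).c_str());
}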
@@ -303,7 +317,10 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetETag() const{ return m_eTag; }
@@ -313,7 +330,10 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetETag(const Aws::String& value) { m_eTag = value; }
@@ -323,7 +343,10 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetETag(Aws::String&& value) { m_eTag = std::move(value); }
@@ -333,7 +356,10 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetETag(const char* value) { m_eTag.assign(value); }
@@ -343,7 +369,10 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadResult& WithETag(const Aws::String& value) { SetETag(value); return *this;}
@@ -353,7 +382,10 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadResult& WithETag(Aws::String&& value) { SetETag(std::move(value)); return *this;}
@@ -363,48 +395,335 @@ namespace Model
* opaque string. The entity tag may or may not be an MD5 digest of the object
* data. If the entity tag is not an MD5 digest of the object data, it will contain
* one or more nonhexadecimal characters and/or will consist of less than 32 or
- * more than 32 hexadecimal digits.</p>
+ * more than 32 hexadecimal digits. For more information about how the entity tag
+ * is calculated, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CompleteMultipartUploadResult& WithETag(const char* value) { SetETag(value); return *this;}
/**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompleteMultipartUploadResult& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
+ /**
* <p>If you specified server-side encryption either with an Amazon S3-managed
- * encryption key or an AWS KMS customer master key (CMK) in your initiate
- * multipart upload request, the response includes this header. It confirms the
- * encryption algorithm that Amazon S3 used to encrypt the object.</p>
+ * encryption key or an Amazon Web Services KMS key in your initiate multipart
+ * upload request, the response includes this header. It confirms the encryption
+ * algorithm that Amazon S3 used to encrypt the object.</p>
*/
inline const ServerSideEncryption& GetServerSideEncryption() const{ return m_serverSideEncryption; }
/**
* <p>If you specified server-side encryption either with an Amazon S3-managed
- * encryption key or an AWS KMS customer master key (CMK) in your initiate
- * multipart upload request, the response includes this header. It confirms the
- * encryption algorithm that Amazon S3 used to encrypt the object.</p>
+ * encryption key or an Amazon Web Services KMS key in your initiate multipart
+ * upload request, the response includes this header. It confirms the encryption
+ * algorithm that Amazon S3 used to encrypt the object.</p>
*/
inline void SetServerSideEncryption(const ServerSideEncryption& value) { m_serverSideEncryption = value; }
/**
* <p>If you specified server-side encryption either with an Amazon S3-managed
- * encryption key or an AWS KMS customer master key (CMK) in your initiate
- * multipart upload request, the response includes this header. It confirms the
- * encryption algorithm that Amazon S3 used to encrypt the object.</p>
+ * encryption key or an Amazon Web Services KMS key in your initiate multipart
+ * upload request, the response includes this header. It confirms the encryption
+ * algorithm that Amazon S3 used to encrypt the object.</p>
*/
inline void SetServerSideEncryption(ServerSideEncryption&& value) { m_serverSideEncryption = std::move(value); }
/**
* <p>If you specified server-side encryption either with an Amazon S3-managed
- * encryption key or an AWS KMS customer master key (CMK) in your initiate
- * multipart upload request, the response includes this header. It confirms the
- * encryption algorithm that Amazon S3 used to encrypt the object.</p>
+ * encryption key or an Amazon Web Services KMS key in your initiate multipart
+ * upload request, the response includes this header. It confirms the encryption
+ * algorithm that Amazon S3 used to encrypt the object.</p>
*/
inline CompleteMultipartUploadResult& WithServerSideEncryption(const ServerSideEncryption& value) { SetServerSideEncryption(value); return *this;}
/**
* <p>If you specified server-side encryption either with an Amazon S3-managed
- * encryption key or an AWS KMS customer master key (CMK) in your initiate
- * multipart upload request, the response includes this header. It confirms the
- * encryption algorithm that Amazon S3 used to encrypt the object.</p>
+ * encryption key or an Amazon Web Services KMS key in your initiate multipart
+ * upload request, the response includes this header. It confirms the encryption
+ * algorithm that Amazon S3 used to encrypt the object.</p>
*/
inline CompleteMultipartUploadResult& WithServerSideEncryption(ServerSideEncryption&& value) { SetServerSideEncryption(std::move(value)); return *this;}
@@ -453,70 +772,70 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CompleteMultipartUploadResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CompleteMultipartUploadResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CompleteMultipartUploadResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline CompleteMultipartUploadResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
@@ -548,6 +867,14 @@ namespace Model
Aws::String m_eTag;
+ Aws::String m_checksumCRC32;
+
+ Aws::String m_checksumCRC32C;
+
+ Aws::String m_checksumSHA1;
+
+ Aws::String m_checksumSHA256;
+
ServerSideEncryption m_serverSideEncryption;
Aws::String m_versionId;
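
A minimal sketch of how a caller might consume the fields discussed above after completing a multipart upload. This is not part of the upstream patch; the bucket, key, upload id and part list are placeholders, and only accessors declared in this header plus the pre-existing CompleteMultipartUpload client call are used.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadResult.h>
#include <iostream>

// Sketch only: inspect the checksum and encryption headers echoed back by
// CompleteMultipartUpload. Names such as "my-bucket" are placeholders.
void InspectCompletion(const Aws::S3::S3Client& s3,
                       const Aws::String& uploadId,
                       const Aws::S3::Model::CompletedMultipartUpload& parts)
{
    Aws::S3::Model::CompleteMultipartUploadRequest req;
    req.WithBucket("my-bucket")
       .WithKey("my-key")
       .WithUploadId(uploadId)
       .WithMultipartUpload(parts);

    auto outcome = s3.CompleteMultipartUpload(req);
    if (!outcome.IsSuccess()) {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }

    const auto& result = outcome.GetResult();

    // For multipart objects the returned checksum is computed over the per-part
    // checksums, so it is not directly comparable with a digest of the whole payload.
    if (!result.GetChecksumSHA256().empty()) {
        std::cout << "x-amz-checksum-sha256: " << result.GetChecksumSHA256() << std::endl;
    }

    // The SSE headers are echoed back when encryption was requested at the time
    // the multipart upload was initiated.
    if (result.GetServerSideEncryption() == Aws::S3::Model::ServerSideEncryption::aws_kms) {
        std::cout << "KMS key: " << result.GetSSEKMSKeyId()
                  << ", bucket key enabled: " << std::boolalpha
                  << result.GetBucketKeyEnabled() << std::endl;
    }
}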
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedMultipartUpload.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedMultipartUpload.h
index 0970651366..a89f7743ee 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedMultipartUpload.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedMultipartUpload.h
@@ -29,60 +29,76 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedMultipartUpload">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CompletedMultipartUpload
+ class CompletedMultipartUpload
{
public:
- CompletedMultipartUpload();
- CompletedMultipartUpload(const Aws::Utils::Xml::XmlNode& xmlNode);
- CompletedMultipartUpload& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CompletedMultipartUpload();
+ AWS_S3_API CompletedMultipartUpload(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CompletedMultipartUpload& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline const Aws::Vector<CompletedPart>& GetParts() const{ return m_parts; }
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline bool PartsHasBeenSet() const { return m_partsHasBeenSet; }
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline void SetParts(const Aws::Vector<CompletedPart>& value) { m_partsHasBeenSet = true; m_parts = value; }
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline void SetParts(Aws::Vector<CompletedPart>&& value) { m_partsHasBeenSet = true; m_parts = std::move(value); }
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline CompletedMultipartUpload& WithParts(const Aws::Vector<CompletedPart>& value) { SetParts(value); return *this;}
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline CompletedMultipartUpload& WithParts(Aws::Vector<CompletedPart>&& value) { SetParts(std::move(value)); return *this;}
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline CompletedMultipartUpload& AddParts(const CompletedPart& value) { m_partsHasBeenSet = true; m_parts.push_back(value); return *this; }
/**
- * <p>Array of CompletedPart data types.</p>
+ * <p>Array of CompletedPart data types.</p> <p>If you do not supply a valid
+ * <code>Part</code> with your request, the service sends back an HTTP 400
+ * response.</p>
*/
inline CompletedMultipartUpload& AddParts(CompletedPart&& value) { m_partsHasBeenSet = true; m_parts.push_back(std::move(value)); return *this; }
private:
Aws::Vector<CompletedPart> m_parts;
- bool m_partsHasBeenSet;
+ bool m_partsHasBeenSet = false;
};
} // namespace Model
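
A rough sketch, not taken from the upstream change, of assembling the Part list this class carries; supplying an empty or mismatched list is what triggers the HTTP 400 mentioned in the comments above. The ETags are assumed to come from earlier UploadPart responses.

#include <aws/s3/model/CompletedMultipartUpload.h>
#include <aws/s3/model/CompletedPart.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
#include <utility>

// Sketch: collect (part number, ETag) pairs from previous UploadPart calls into
// the CompletedMultipartUpload passed to CompleteMultipartUpload.
Aws::S3::Model::CompletedMultipartUpload
BuildCompletedUpload(const Aws::Vector<std::pair<int, Aws::String>>& partETags)
{
    Aws::S3::Model::CompletedMultipartUpload upload;
    for (const auto& part : partETags) {
        upload.AddParts(Aws::S3::Model::CompletedPart()
                            .WithPartNumber(part.first)   // 1..10,000, must match UploadPart
                            .WithETag(part.second));
    }
    return upload;
}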
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedPart.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedPart.h
index 5e1507d762..80e5f91a77 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedPart.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CompletedPart.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CompletedPart">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CompletedPart
+ class CompletedPart
{
public:
- CompletedPart();
- CompletedPart(const Aws::Utils::Xml::XmlNode& xmlNode);
- CompletedPart& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CompletedPart();
+ AWS_S3_API CompletedPart(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CompletedPart& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -79,6 +79,330 @@ namespace Model
/**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CompletedPart& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
+ /**
* <p>Part number that identifies the part. This is a positive integer between 1
* and 10,000.</p>
*/
@@ -105,10 +429,22 @@ namespace Model
private:
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
int m_partNumber;
- bool m_partNumberHasBeenSet;
+ bool m_partNumberHasBeenSet = false;
};
} // namespace Model
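
A small sketch of producing the base64-encoded SHA-256 these new accessors expect, assuming the HashingUtils helpers from aws-cpp-sdk-core. In a real upload the same value would normally also be supplied on the corresponding UploadPart call; partBody here is just an in-memory stand-in for the part payload.

#include <aws/core/utils/HashingUtils.h>
#include <aws/s3/model/CompletedPart.h>

// Sketch: record the part's base64-encoded SHA-256 on the CompletedPart entry.
Aws::S3::Model::CompletedPart
MakeCheckedPart(int partNumber, const Aws::String& eTag, const Aws::String& partBody)
{
    using Aws::Utils::HashingUtils;

    const Aws::String sha256b64 =
        HashingUtils::Base64Encode(HashingUtils::CalculateSHA256(partBody));

    return Aws::S3::Model::CompletedPart()
        .WithPartNumber(partNumber)
        .WithETag(eTag)
        .WithChecksumSHA256(sha256b64);  // accessor added in this revision
}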
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Condition.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Condition.h
index 3dd8f8afe6..6e0f9683a5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Condition.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Condition.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Condition">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Condition
+ class Condition
{
public:
- Condition();
- Condition(const Aws::Utils::Xml::XmlNode& xmlNode);
- Condition& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Condition();
+ AWS_S3_API Condition(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Condition& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -245,10 +245,10 @@ namespace Model
private:
Aws::String m_httpErrorCodeReturnedEquals;
- bool m_httpErrorCodeReturnedEqualsHasBeenSet;
+ bool m_httpErrorCodeReturnedEqualsHasBeenSet = false;
Aws::String m_keyPrefixEquals;
- bool m_keyPrefixEqualsHasBeenSet;
+ bool m_keyPrefixEqualsHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectRequest.h
index ccb52d1e7c..19ce9bc0b7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectRequest.h
@@ -8,6 +8,7 @@
#include <aws/s3/S3Request.h>
#include <aws/s3/model/ObjectCannedACL.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/DateTime.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/s3/model/MetadataDirective.h>
@@ -32,10 +33,10 @@ namespace Model
/**
*/
- class AWS_S3_API CopyObjectRequest : public S3Request
+ class CopyObjectRequest : public S3Request
{
public:
- CopyObjectRequest();
+ AWS_S3_API CopyObjectRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -43,12 +44,17 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "CopyObject"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API bool HasEmbeddedError(IOStream &body, const Http::HeaderValueCollection &header) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The canned ACL to apply to the object.</p> <p>This action is not supported by
@@ -92,19 +98,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -113,19 +119,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -134,19 +140,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -155,19 +161,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -176,19 +182,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -197,19 +203,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CopyObjectRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -218,19 +224,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CopyObjectRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -239,19 +245,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CopyObjectRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
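
As a brief, non-authoritative illustration of the access-point usage described above, the ARN, key and copy source below reuse the placeholder values from the documentation and are not real resources.

#include <aws/s3/model/CopyObjectRequest.h>

// Sketch: an access point ARN can be supplied in place of the bucket name.
Aws::S3::Model::CopyObjectRequest MakeAccessPointCopy()
{
    return Aws::S3::Model::CopyObjectRequest()
        .WithBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point")
        .WithKey("reports/copy-of-january.pdf")
        // "<source-bucket>/<source-key>", URL-encoded.
        .WithCopySource("awsexamplebucket/reports/january.pdf");
}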
@@ -298,6 +304,55 @@ namespace Model
/**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
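
A tiny hedged sketch of the checksum option added just above; the CRC32C enumerator is assumed from the ChecksumAlgorithm model header included by this request.

#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/CopyObjectRequest.h>

// Sketch: ask Amazon S3 to compute a CRC32C checksum for the copied object.
void RequestChecksumOnCopy(Aws::S3::Model::CopyObjectRequest& req)
{
    req.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32C);
}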
+ /**
* <p>Specifies presentational information for the object.</p>
*/
inline const Aws::String& GetContentDisposition() const{ return m_contentDisposition; }
@@ -486,10 +541,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -497,14 +552,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
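The copy-source string described in these comments comes in three shapes. A small sketch with placeholder names, account IDs, and ARNs; only the first variant is left active:

    #include <aws/s3/model/CopyObjectRequest.h>

    // All names, account IDs and ARNs are illustrative placeholders; the value
    // must be URL-encoded if the key contains characters that need escaping.
    void SetCopySourceVariants(Aws::S3::Model::CopyObjectRequest& request)
    {
        // 1. Plain bucket/key form.
        request.SetCopySource("awsexamplebucket/reports/january.pdf");

        // 2. Access point ARN form (source and destination must share a Region):
        // request.SetCopySource("arn:aws:s3:us-west-2:123456789012:accesspoint/"
        //                       "my-access-point/object/reports/january.pdf");

        // 3. A specific version of the source object:
        // request.SetCopySource("awsexamplebucket/reports/january.pdf?versionId=<version-id>");
    }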
@@ -522,10 +578,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -533,14 +589,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -558,10 +615,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -569,14 +626,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -594,10 +652,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -605,14 +663,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -630,10 +689,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -641,14 +700,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -666,10 +726,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -677,14 +737,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -702,10 +763,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -713,14 +774,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -738,10 +800,10 @@ namespace Model
* specify the name of the source bucket and the key of the source object,
* separated by a slash (/). For example, to copy the object
* <code>reports/january.pdf</code> from the bucket <code>awsexamplebucket</code>,
- * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * use <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -749,14 +811,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -1370,7 +1433,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const StorageClass& GetStorageClass() const{ return m_storageClass; }
@@ -1381,7 +1444,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool StorageClassHasBeenSet() const { return m_storageClassHasBeenSet; }
@@ -1392,7 +1455,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetStorageClass(const StorageClass& value) { m_storageClassHasBeenSet = true; m_storageClass = value; }
@@ -1403,7 +1466,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetStorageClass(StorageClass&& value) { m_storageClassHasBeenSet = true; m_storageClass = std::move(value); }
@@ -1414,7 +1477,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CopyObjectRequest& WithStorageClass(const StorageClass& value) { SetStorageClass(value); return *this;}
@@ -1425,7 +1488,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CopyObjectRequest& WithStorageClass(StorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
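Choosing a non-default destination storage class is a single enum assignment; a sketch using one of the generated StorageClass values:

    #include <aws/s3/model/CopyObjectRequest.h>
    #include <aws/s3/model/StorageClass.h>

    // STANDARD_IA is an arbitrary example; per the comment above, an S3 on
    // Outposts destination only uses StorageClass::OUTPOSTS.
    void CopyToInfrequentAccess(Aws::S3::Model::CopyObjectRequest& request)
    {
        request.SetStorageClass(Aws::S3::Model::StorageClass::STANDARD_IA);
    }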
@@ -1667,147 +1730,155 @@ namespace Model
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline bool SSEKMSKeyIdHasBeenSet() const { return m_sSEKMSKeyIdHasBeenSet; }
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = value; }
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = std::move(value); }
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId.assign(value); }
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline CopyObjectRequest& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline CopyObjectRequest& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>Specifies the AWS KMS key ID to use for object encryption. All GET and PUT
- * requests for an object protected by AWS KMS will fail if not made via SSL or
- * using SigV4. For information about configuring using any of the officially
- * supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the Amazon Web Services KMS key ID to use for object encryption.
+ * All GET and PUT requests for an object protected by Amazon Web Services KMS will
+ * fail if not made via SSL or using SigV4. For information about configuring using
+ * any of the officially supported Amazon Web Services SDKs and Amazon Web Services
+ * CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline CopyObjectRequest& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
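Because GET and PUT requests on KMS-protected objects must go over SSL with SigV4, pairing the key ID with SSE-KMS on the destination looks roughly like this (the key ARN is a placeholder; HTTPS and SigV4 are the SDK's defaults):

    #include <aws/s3/model/CopyObjectRequest.h>
    #include <aws/s3/model/ServerSideEncryption.h>

    // Placeholder KMS key ARN; the default client configuration already uses
    // HTTPS and SigV4, which the doc comment above requires.
    void CopyWithKmsEncryption(Aws::S3::Model::CopyObjectRequest& request)
    {
        request.SetServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms);
        request.SetSSEKMSKeyId(
            "arn:aws:kms:us-west-2:123456789012:key/11111111-2222-3333-4444-555555555555");
    }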
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline const Aws::String& GetSSEKMSEncryptionContext() const{ return m_sSEKMSEncryptionContext; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline bool SSEKMSEncryptionContextHasBeenSet() const { return m_sSEKMSEncryptionContextHasBeenSet; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const Aws::String& value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext = value; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(Aws::String&& value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext = std::move(value); }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const char* value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext.assign(value); }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline CopyObjectRequest& WithSSEKMSEncryptionContext(const Aws::String& value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline CopyObjectRequest& WithSSEKMSEncryptionContext(Aws::String&& value) { SetSSEKMSEncryptionContext(std::move(value)); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline CopyObjectRequest& WithSSEKMSEncryptionContext(const char* value) { SetSSEKMSEncryptionContext(value); return *this;}
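The encryption-context header carries base64-encoded JSON; one way to build it from a plain JSON string with the aws-core helpers (the key/value content is an arbitrary example):

    #include <aws/core/utils/Array.h>
    #include <aws/core/utils/HashingUtils.h>
    #include <aws/s3/model/CopyObjectRequest.h>

    // Base64-encode a small JSON document of key-value pairs and attach it as
    // the encryption context. The JSON content itself is an arbitrary example.
    void SetEncryptionContext(Aws::S3::Model::CopyObjectRequest& request)
    {
        const Aws::String contextJson = "{\"department\":\"finance\"}";
        Aws::Utils::ByteBuffer raw(
            reinterpret_cast<const unsigned char*>(contextJson.c_str()),
            contextJson.size());
        request.SetSSEKMSEncryptionContext(Aws::Utils::HashingUtils::Base64Encode(raw));
    }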
@@ -2157,146 +2228,146 @@ namespace Model
/**
- * <p>Specifies whether you want to apply a Legal Hold to the copied object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the copied object.</p>
*/
inline const ObjectLockLegalHoldStatus& GetObjectLockLegalHoldStatus() const{ return m_objectLockLegalHoldStatus; }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the copied object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the copied object.</p>
*/
inline bool ObjectLockLegalHoldStatusHasBeenSet() const { return m_objectLockLegalHoldStatusHasBeenSet; }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the copied object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the copied object.</p>
*/
inline void SetObjectLockLegalHoldStatus(const ObjectLockLegalHoldStatus& value) { m_objectLockLegalHoldStatusHasBeenSet = true; m_objectLockLegalHoldStatus = value; }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the copied object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the copied object.</p>
*/
inline void SetObjectLockLegalHoldStatus(ObjectLockLegalHoldStatus&& value) { m_objectLockLegalHoldStatusHasBeenSet = true; m_objectLockLegalHoldStatus = std::move(value); }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the copied object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the copied object.</p>
*/
inline CopyObjectRequest& WithObjectLockLegalHoldStatus(const ObjectLockLegalHoldStatus& value) { SetObjectLockLegalHoldStatus(value); return *this;}
/**
- * <p>Specifies whether you want to apply a Legal Hold to the copied object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the copied object.</p>
*/
inline CopyObjectRequest& WithObjectLockLegalHoldStatus(ObjectLockLegalHoldStatus&& value) { SetObjectLockLegalHoldStatus(std::move(value)); return *this;}
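Applying a legal hold to the copy is a one-line enum assignment:

    #include <aws/s3/model/CopyObjectRequest.h>
    #include <aws/s3/model/ObjectLockLegalHoldStatus.h>

    // Place the copied object under legal hold (ON/OFF are the generated values).
    void HoldCopiedObject(Aws::S3::Model::CopyObjectRequest& request)
    {
        request.SetObjectLockLegalHoldStatus(Aws::S3::Model::ObjectLockLegalHoldStatus::ON);
    }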
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline CopyObjectRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline CopyObjectRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline CopyObjectRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedSourceBucketOwner() const{ return m_expectedSourceBucketOwner; }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline bool ExpectedSourceBucketOwnerHasBeenSet() const { return m_expectedSourceBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedSourceBucketOwner(const Aws::String& value) { m_expectedSourceBucketOwnerHasBeenSet = true; m_expectedSourceBucketOwner = value; }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedSourceBucketOwner(Aws::String&& value) { m_expectedSourceBucketOwnerHasBeenSet = true; m_expectedSourceBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedSourceBucketOwner(const char* value) { m_expectedSourceBucketOwnerHasBeenSet = true; m_expectedSourceBucketOwner.assign(value); }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline CopyObjectRequest& WithExpectedSourceBucketOwner(const Aws::String& value) { SetExpectedSourceBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline CopyObjectRequest& WithExpectedSourceBucketOwner(Aws::String&& value) { SetExpectedSourceBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline CopyObjectRequest& WithExpectedSourceBucketOwner(const char* value) { SetExpectedSourceBucketOwner(value); return *this;}
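Pinning both expected owners guards a copy that crosses accounts; if either check fails, the request is rejected with 403 Forbidden. A sketch with placeholder account IDs:

    #include <aws/s3/model/CopyObjectRequest.h>

    // Placeholder account IDs; a mismatch on either side fails the request
    // with HTTP 403 Forbidden (access denied).
    void PinBucketOwners(Aws::S3::Model::CopyObjectRequest& request)
    {
        request.SetExpectedBucketOwner("111122223333");        // destination bucket owner
        request.SetExpectedSourceBucketOwner("444455556666");  // source bucket owner
    }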
@@ -2343,127 +2414,130 @@ namespace Model
private:
ObjectCannedACL m_aCL;
- bool m_aCLHasBeenSet;
+ bool m_aCLHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_cacheControl;
- bool m_cacheControlHasBeenSet;
+ bool m_cacheControlHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_contentDisposition;
- bool m_contentDispositionHasBeenSet;
+ bool m_contentDispositionHasBeenSet = false;
Aws::String m_contentEncoding;
- bool m_contentEncodingHasBeenSet;
+ bool m_contentEncodingHasBeenSet = false;
Aws::String m_contentLanguage;
- bool m_contentLanguageHasBeenSet;
+ bool m_contentLanguageHasBeenSet = false;
Aws::String m_contentType;
- bool m_contentTypeHasBeenSet;
+ bool m_contentTypeHasBeenSet = false;
Aws::String m_copySource;
- bool m_copySourceHasBeenSet;
+ bool m_copySourceHasBeenSet = false;
Aws::String m_copySourceIfMatch;
- bool m_copySourceIfMatchHasBeenSet;
+ bool m_copySourceIfMatchHasBeenSet = false;
Aws::Utils::DateTime m_copySourceIfModifiedSince;
- bool m_copySourceIfModifiedSinceHasBeenSet;
+ bool m_copySourceIfModifiedSinceHasBeenSet = false;
Aws::String m_copySourceIfNoneMatch;
- bool m_copySourceIfNoneMatchHasBeenSet;
+ bool m_copySourceIfNoneMatchHasBeenSet = false;
Aws::Utils::DateTime m_copySourceIfUnmodifiedSince;
- bool m_copySourceIfUnmodifiedSinceHasBeenSet;
+ bool m_copySourceIfUnmodifiedSinceHasBeenSet = false;
Aws::Utils::DateTime m_expires;
- bool m_expiresHasBeenSet;
+ bool m_expiresHasBeenSet = false;
Aws::String m_grantFullControl;
- bool m_grantFullControlHasBeenSet;
+ bool m_grantFullControlHasBeenSet = false;
Aws::String m_grantRead;
- bool m_grantReadHasBeenSet;
+ bool m_grantReadHasBeenSet = false;
Aws::String m_grantReadACP;
- bool m_grantReadACPHasBeenSet;
+ bool m_grantReadACPHasBeenSet = false;
Aws::String m_grantWriteACP;
- bool m_grantWriteACPHasBeenSet;
+ bool m_grantWriteACPHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_metadata;
- bool m_metadataHasBeenSet;
+ bool m_metadataHasBeenSet = false;
MetadataDirective m_metadataDirective;
- bool m_metadataDirectiveHasBeenSet;
+ bool m_metadataDirectiveHasBeenSet = false;
TaggingDirective m_taggingDirective;
- bool m_taggingDirectiveHasBeenSet;
+ bool m_taggingDirectiveHasBeenSet = false;
ServerSideEncryption m_serverSideEncryption;
- bool m_serverSideEncryptionHasBeenSet;
+ bool m_serverSideEncryptionHasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
Aws::String m_websiteRedirectLocation;
- bool m_websiteRedirectLocationHasBeenSet;
+ bool m_websiteRedirectLocationHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::String m_sSEKMSKeyId;
- bool m_sSEKMSKeyIdHasBeenSet;
+ bool m_sSEKMSKeyIdHasBeenSet = false;
Aws::String m_sSEKMSEncryptionContext;
- bool m_sSEKMSEncryptionContextHasBeenSet;
+ bool m_sSEKMSEncryptionContextHasBeenSet = false;
bool m_bucketKeyEnabled;
- bool m_bucketKeyEnabledHasBeenSet;
+ bool m_bucketKeyEnabledHasBeenSet = false;
Aws::String m_copySourceSSECustomerAlgorithm;
- bool m_copySourceSSECustomerAlgorithmHasBeenSet;
+ bool m_copySourceSSECustomerAlgorithmHasBeenSet = false;
Aws::String m_copySourceSSECustomerKey;
- bool m_copySourceSSECustomerKeyHasBeenSet;
+ bool m_copySourceSSECustomerKeyHasBeenSet = false;
Aws::String m_copySourceSSECustomerKeyMD5;
- bool m_copySourceSSECustomerKeyMD5HasBeenSet;
+ bool m_copySourceSSECustomerKeyMD5HasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_tagging;
- bool m_taggingHasBeenSet;
+ bool m_taggingHasBeenSet = false;
ObjectLockMode m_objectLockMode;
- bool m_objectLockModeHasBeenSet;
+ bool m_objectLockModeHasBeenSet = false;
Aws::Utils::DateTime m_objectLockRetainUntilDate;
- bool m_objectLockRetainUntilDateHasBeenSet;
+ bool m_objectLockRetainUntilDateHasBeenSet = false;
ObjectLockLegalHoldStatus m_objectLockLegalHoldStatus;
- bool m_objectLockLegalHoldStatusHasBeenSet;
+ bool m_objectLockLegalHoldStatusHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::String m_expectedSourceBucketOwner;
- bool m_expectedSourceBucketOwnerHasBeenSet;
+ bool m_expectedSourceBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
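The private-member hunk above replaces constructor-initialized HasBeenSet flags with in-class `= false` defaults. A simplified sketch of the idiom those flags support (not the actual generated code):

    #include <aws/core/utils/memory/stl/AWSString.h>

    // Simplified sketch: the flag defaults to false through the in-class
    // initializer, each setter flips it, and only fields the caller actually
    // set are serialized into the request.
    class ExampleRequest
    {
    public:
        void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
        bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }

    private:
        Aws::String m_bucket;
        bool m_bucketHasBeenSet = false;  // no constructor init-list entry needed
    };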
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResult.h
index 386f654c30..3beca77c36 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResult.h
@@ -27,12 +27,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API CopyObjectResult
+ class CopyObjectResult
{
public:
- CopyObjectResult();
- CopyObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- CopyObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CopyObjectResult();
+ AWS_S3_API CopyObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CopyObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
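The export macro now annotates the out-of-line members instead of the whole class, keeping the inline accessors header-only. For context, AWS_S3_API expands roughly like this on Windows shared-library builds (a simplified sketch; the exact guard names in S3_EXPORTS.h may differ):

    #if defined(_WIN32) && defined(USE_IMPORT_EXPORT)
        #ifdef AWS_S3_EXPORTS
            #define AWS_S3_API __declspec(dllexport)   // building the S3 DLL
        #else
            #define AWS_S3_API __declspec(dllimport)   // consuming the S3 DLL
        #endif
    #else
        #define AWS_S3_API                             // static or non-Windows builds
    #endif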
/**
@@ -282,120 +282,120 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CopyObjectResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CopyObjectResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CopyObjectResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline const Aws::String& GetSSEKMSEncryptionContext() const{ return m_sSEKMSEncryptionContext; }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const Aws::String& value) { m_sSEKMSEncryptionContext = value; }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(Aws::String&& value) { m_sSEKMSEncryptionContext = std::move(value); }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const char* value) { m_sSEKMSEncryptionContext.assign(value); }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline CopyObjectResult& WithSSEKMSEncryptionContext(const Aws::String& value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline CopyObjectResult& WithSSEKMSEncryptionContext(Aws::String&& value) { SetSSEKMSEncryptionContext(std::move(value)); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline CopyObjectResult& WithSSEKMSEncryptionContext(const char* value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
* <p>Indicates whether the copied object uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the copied object uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the copied object uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline CopyObjectResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
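For reference, a minimal usage sketch (not part of the patch) of how the CopyObjectResult accessors touched above are typically read back after a CopyObject call with SSE-KMS. Bucket names, object keys, and the KMS key ARN below are placeholders.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CopyObjectRequest.h>
#include <aws/s3/model/ServerSideEncryption.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default credential/region provider chain

        Aws::S3::Model::CopyObjectRequest request;
        request.SetCopySource("source-bucket/source-key");  // "<bucket>/<key>" of the object to copy
        request.SetBucket("destination-bucket");
        request.SetKey("destination-key");
        request.SetServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms);
        request.SetSSEKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/placeholder");
        request.SetBucketKeyEnabled(true);  // ask S3 to use an S3 Bucket Key for SSE-KMS

        auto outcome = client.CopyObject(request);
        if (outcome.IsSuccess())
        {
            // The getters documented above echo back what S3 actually applied.
            const auto& result = outcome.GetResult();
            std::cout << "KMS key: " << result.GetSSEKMSKeyId() << "\n"
                      << "Bucket Key enabled: " << std::boolalpha
                      << result.GetBucketKeyEnabled() << std::endl;
        }
        else
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}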
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResultDetails.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResultDetails.h
index 69187574c3..ffd8efb64c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResultDetails.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyObjectResultDetails.h
@@ -28,69 +28,61 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyObjectResult">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CopyObjectResultDetails
+ class CopyObjectResultDetails
{
public:
- CopyObjectResultDetails();
- CopyObjectResultDetails(const Aws::Utils::Xml::XmlNode& xmlNode);
- CopyObjectResultDetails& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CopyObjectResultDetails();
+ AWS_S3_API CopyObjectResultDetails(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CopyObjectResultDetails& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline const Aws::String& GetETag() const{ return m_eTag; }
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline bool ETagHasBeenSet() const { return m_eTagHasBeenSet; }
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline void SetETag(const Aws::String& value) { m_eTagHasBeenSet = true; m_eTag = value; }
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline void SetETag(Aws::String&& value) { m_eTagHasBeenSet = true; m_eTag = std::move(value); }
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline void SetETag(const char* value) { m_eTagHasBeenSet = true; m_eTag.assign(value); }
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline CopyObjectResultDetails& WithETag(const Aws::String& value) { SetETag(value); return *this;}
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline CopyObjectResultDetails& WithETag(Aws::String&& value) { SetETag(std::move(value)); return *this;}
/**
* <p>Returns the ETag of the new object. The ETag reflects only changes to the
- * contents of an object, not its metadata. The source and destination ETag is
- * identical for a successfully copied non-multipart object.</p>
+ * contents of an object, not its metadata.</p>
*/
inline CopyObjectResultDetails& WithETag(const char* value) { SetETag(value); return *this;}
@@ -125,13 +117,349 @@ namespace Model
*/
inline CopyObjectResultDetails& WithLastModified(Aws::Utils::DateTime&& value) { SetLastModified(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyObjectResultDetails& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
private:
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
};
} // namespace Model
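For reference, a minimal sketch (not part of the patch) of reading the checksum fields added to CopyObjectResultDetails above. It assumes, as in this SDK generation, that the CopyObject operation result exposes the parsed XML body via GetCopyObjectResultDetails(); bucket and key names are placeholders, and the checksum accessors are only populated when the source object was uploaded with a checksum.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CopyObjectRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        Aws::S3::Model::CopyObjectRequest request;
        request.SetCopySource("source-bucket/source-key");
        request.SetBucket("destination-bucket");
        request.SetKey("destination-key");

        auto outcome = client.CopyObject(request);
        if (outcome.IsSuccess())
        {
            // CopyObjectResultDetails carries the XML body of the response:
            // ETag, LastModified, and (with this update) the checksum fields.
            const auto& details = outcome.GetResult().GetCopyObjectResultDetails();
            std::cout << "ETag: " << details.GetETag() << "\n";
            if (details.ChecksumSHA256HasBeenSet())
            {
                std::cout << "SHA-256: " << details.GetChecksumSHA256() << std::endl;
            }
        }
        else
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}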
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyPartResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyPartResult.h
index d744384ae2..ae26dbcc0c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyPartResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CopyPartResult.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CopyPartResult">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CopyPartResult
+ class CopyPartResult
{
public:
- CopyPartResult();
- CopyPartResult(const Aws::Utils::Xml::XmlNode& xmlNode);
- CopyPartResult& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CopyPartResult();
+ AWS_S3_API CopyPartResult(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CopyPartResult& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -109,13 +109,349 @@ namespace Model
*/
inline CopyPartResult& WithLastModified(Aws::Utils::DateTime&& value) { SetLastModified(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CopyPartResult& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
private:
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
};
} // namespace Model
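For reference, a minimal sketch (not part of the patch) of the UploadPartCopy path that consumes CopyPartResult and its new checksum accessors. The bucket, key, upload ID, and byte range below are placeholders for an already started multipart upload.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/UploadPartCopyRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        // Copy the first 5 MiB of an existing object as part 1 of a
        // previously created multipart upload.
        Aws::S3::Model::UploadPartCopyRequest request;
        request.SetBucket("destination-bucket");
        request.SetKey("destination-key");
        request.SetUploadId("example-upload-id");
        request.SetPartNumber(1);
        request.SetCopySource("source-bucket/source-key");
        request.SetCopySourceRange("bytes=0-5242879");

        auto outcome = client.UploadPartCopy(request);
        if (outcome.IsSuccess())
        {
            // CopyPartResult is the XML body of the response; the checksum
            // accessors added above are only populated when the copied part
            // carries a checksum.
            const auto& part = outcome.GetResult().GetCopyPartResult();
            std::cout << "ETag: " << part.GetETag() << "\n";
            if (part.ChecksumCRC32HasBeenSet())
            {
                std::cout << "CRC32: " << part.GetChecksumCRC32() << std::endl;
            }
        }
        else
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}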
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketConfiguration.h
index b43d6ea94f..79e82bada5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketConfiguration.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/CreateBucketConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API CreateBucketConfiguration
+ class CreateBucketConfiguration
{
public:
- CreateBucketConfiguration();
- CreateBucketConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- CreateBucketConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CreateBucketConfiguration();
+ AWS_S3_API CreateBucketConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API CreateBucketConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -82,7 +82,7 @@ namespace Model
private:
BucketLocationConstraint m_locationConstraint;
- bool m_locationConstraintHasBeenSet;
+ bool m_locationConstraintHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketRequest.h
index ab18fd2bb7..1193b557b9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketRequest.h
@@ -9,6 +9,7 @@
#include <aws/s3/model/BucketCannedACL.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/CreateBucketConfiguration.h>
+#include <aws/s3/model/ObjectOwnership.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API CreateBucketRequest : public S3Request
+ class CreateBucketRequest : public S3Request
{
public:
- CreateBucketRequest();
+ AWS_S3_API CreateBucketRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,12 +37,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "CreateBucket"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The canned ACL to apply to the bucket.</p>
@@ -278,42 +283,58 @@ namespace Model
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline const Aws::String& GetGrantWrite() const{ return m_grantWrite; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline bool GrantWriteHasBeenSet() const { return m_grantWriteHasBeenSet; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(const Aws::String& value) { m_grantWriteHasBeenSet = true; m_grantWrite = value; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(Aws::String&& value) { m_grantWriteHasBeenSet = true; m_grantWrite = std::move(value); }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(const char* value) { m_grantWriteHasBeenSet = true; m_grantWrite.assign(value); }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline CreateBucketRequest& WithGrantWrite(const Aws::String& value) { SetGrantWrite(value); return *this;}
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline CreateBucketRequest& WithGrantWrite(Aws::String&& value) { SetGrantWrite(std::move(value)); return *this;}
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline CreateBucketRequest& WithGrantWrite(const char* value) { SetGrantWrite(value); return *this;}
@@ -385,6 +406,25 @@ namespace Model
+ inline const ObjectOwnership& GetObjectOwnership() const{ return m_objectOwnership; }
+
+
+ inline bool ObjectOwnershipHasBeenSet() const { return m_objectOwnershipHasBeenSet; }
+
+
+ inline void SetObjectOwnership(const ObjectOwnership& value) { m_objectOwnershipHasBeenSet = true; m_objectOwnership = value; }
+
+
+ inline void SetObjectOwnership(ObjectOwnership&& value) { m_objectOwnershipHasBeenSet = true; m_objectOwnership = std::move(value); }
+
+
+ inline CreateBucketRequest& WithObjectOwnership(const ObjectOwnership& value) { SetObjectOwnership(value); return *this;}
+
+
+ inline CreateBucketRequest& WithObjectOwnership(ObjectOwnership&& value) { SetObjectOwnership(std::move(value)); return *this;}
+
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -426,34 +466,37 @@ namespace Model
private:
BucketCannedACL m_aCL;
- bool m_aCLHasBeenSet;
+ bool m_aCLHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
CreateBucketConfiguration m_createBucketConfiguration;
- bool m_createBucketConfigurationHasBeenSet;
+ bool m_createBucketConfigurationHasBeenSet = false;
Aws::String m_grantFullControl;
- bool m_grantFullControlHasBeenSet;
+ bool m_grantFullControlHasBeenSet = false;
Aws::String m_grantRead;
- bool m_grantReadHasBeenSet;
+ bool m_grantReadHasBeenSet = false;
Aws::String m_grantReadACP;
- bool m_grantReadACPHasBeenSet;
+ bool m_grantReadACPHasBeenSet = false;
Aws::String m_grantWrite;
- bool m_grantWriteHasBeenSet;
+ bool m_grantWriteHasBeenSet = false;
Aws::String m_grantWriteACP;
- bool m_grantWriteACPHasBeenSet;
+ bool m_grantWriteACPHasBeenSet = false;
bool m_objectLockEnabledForBucket;
- bool m_objectLockEnabledForBucketHasBeenSet;
+ bool m_objectLockEnabledForBucketHasBeenSet = false;
+
+ ObjectOwnership m_objectOwnership;
+ bool m_objectOwnershipHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
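For reference, a minimal sketch (not part of the patch) of a CreateBucket call that combines CreateBucketConfiguration with the ObjectOwnership setter introduced above; per the CreateBucketResult change just below, the returned Location is a forward slash followed by the bucket name. The bucket name and region are placeholders.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateBucketRequest.h>
#include <aws/s3/model/CreateBucketConfiguration.h>
#include <aws/s3/model/BucketLocationConstraint.h>
#include <aws/s3/model/ObjectOwnership.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        // A location constraint is required for any Region other than us-east-1.
        Aws::S3::Model::CreateBucketConfiguration configuration;
        configuration.SetLocationConstraint(Aws::S3::Model::BucketLocationConstraint::eu_west_1);

        Aws::S3::Model::CreateBucketRequest request;
        request.SetBucket("example-bucket-name");  // placeholder; must be globally unique
        request.SetCreateBucketConfiguration(configuration);
        // New in this update: object ownership can be set at creation time,
        // e.g. BucketOwnerEnforced to disable ACLs for the bucket.
        request.SetObjectOwnership(Aws::S3::Model::ObjectOwnership::BucketOwnerEnforced);

        auto outcome = client.CreateBucket(request);
        if (outcome.IsSuccess())
        {
            // Location is "/<bucket-name>" per the updated documentation.
            std::cout << "Created: " << outcome.GetResult().GetLocation() << std::endl;
        }
        else
        {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}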
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketResult.h
index fb29599179..e5ec901a87 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateBucketResult.h
@@ -24,60 +24,46 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API CreateBucketResult
+ class CreateBucketResult
{
public:
- CreateBucketResult();
- CreateBucketResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- CreateBucketResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CreateBucketResult();
+ AWS_S3_API CreateBucketResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CreateBucketResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline const Aws::String& GetLocation() const{ return m_location; }
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline void SetLocation(const Aws::String& value) { m_location = value; }
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline void SetLocation(Aws::String&& value) { m_location = std::move(value); }
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline void SetLocation(const char* value) { m_location.assign(value); }
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline CreateBucketResult& WithLocation(const Aws::String& value) { SetLocation(value); return *this;}
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline CreateBucketResult& WithLocation(Aws::String&& value) { SetLocation(std::move(value)); return *this;}
/**
- * <p>Specifies the Region where the bucket will be created. If you are creating a
- * bucket on the US East (N. Virginia) Region (us-east-1), you do not need to
- * specify the location.</p>
+ * <p>A forward slash followed by the name of the bucket.</p>
*/
inline CreateBucketResult& WithLocation(const char* value) { SetLocation(value); return *this;}
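As an editorial illustration of the doc change above (not part of the vendored patch): CreateBucketResult::GetLocation() is now documented as returning a forward slash followed by the bucket name rather than a Region, so reading it looks like this, reusing the hypothetical client and request from the previous sketch:

static void PrintCreateBucketLocation(const Aws::S3::S3Client& client,
                                      const Aws::S3::Model::CreateBucketRequest& request)
{
    const auto outcome = client.CreateBucket(request);
    if (outcome.IsSuccess()) {
        // Per the updated doc comment, e.g. "/example-bucket".
        const Aws::String& location = outcome.GetResult().GetLocation();
        (void)location;  // use as needed
    }
}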
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadRequest.h
index aafe4dc650..7d4e60b1c9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadRequest.h
@@ -15,6 +15,7 @@
#include <aws/s3/model/RequestPayer.h>
#include <aws/s3/model/ObjectLockMode.h>
#include <aws/s3/model/ObjectLockLegalHoldStatus.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <utility>
namespace Aws
@@ -30,10 +31,10 @@ namespace Model
/**
*/
- class AWS_S3_API CreateMultipartUploadRequest : public S3Request
+ class CreateMultipartUploadRequest : public S3Request
{
public:
- CreateMultipartUploadRequest();
+ AWS_S3_API CreateMultipartUploadRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -41,12 +42,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "CreateMultipartUpload"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The canned ACL to apply to the object.</p> <p>This action is not supported by
@@ -90,19 +95,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -111,19 +116,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -132,19 +137,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -153,19 +158,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -174,19 +179,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -195,19 +200,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -216,19 +221,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -237,19 +242,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -853,7 +858,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const StorageClass& GetStorageClass() const{ return m_storageClass; }
@@ -864,7 +869,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool StorageClassHasBeenSet() const { return m_storageClassHasBeenSet; }
@@ -875,7 +880,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetStorageClass(const StorageClass& value) { m_storageClassHasBeenSet = true; m_storageClass = value; }
@@ -886,7 +891,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetStorageClass(StorageClass&& value) { m_storageClassHasBeenSet = true; m_storageClass = std::move(value); }
@@ -897,7 +902,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithStorageClass(const StorageClass& value) { SetStorageClass(value); return *this;}
@@ -908,7 +913,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithStorageClass(StorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
@@ -1150,147 +1155,155 @@ namespace Model
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline bool SSEKMSKeyIdHasBeenSet() const { return m_sSEKMSKeyIdHasBeenSet; }
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = value; }
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = std::move(value); }
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId.assign(value); }
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>Specifies the ID of the symmetric customer managed AWS KMS CMK to use for
- * object encryption. All GET and PUT requests for an object protected by AWS KMS
- * will fail if not made via SSL or using SigV4. For information about configuring
- * using any of the officially supported AWS SDKs and AWS CLI, see <a
+ * <p>Specifies the ID of the symmetric customer managed key to use for object
+ * encryption. All GET and PUT requests for an object protected by Amazon Web
+ * Services KMS will fail if not made via SSL or using SigV4. For information about
+ * configuring using any of the officially supported Amazon Web Services SDKs and
+ * Amazon Web Services CLI, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingAWSSDK.html#specify-signature-version">Specifying
- * the Signature Version in Request Authentication</a> in the <i>Amazon S3
- * Developer Guide</i>.</p>
+ * the Signature Version in Request Authentication</a> in the <i>Amazon S3 User
+ * Guide</i>.</p>
*/
inline CreateMultipartUploadRequest& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline const Aws::String& GetSSEKMSEncryptionContext() const{ return m_sSEKMSEncryptionContext; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline bool SSEKMSEncryptionContextHasBeenSet() const { return m_sSEKMSEncryptionContextHasBeenSet; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const Aws::String& value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext = value; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(Aws::String&& value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext = std::move(value); }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const char* value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext.assign(value); }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline CreateMultipartUploadRequest& WithSSEKMSEncryptionContext(const Aws::String& value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline CreateMultipartUploadRequest& WithSSEKMSEncryptionContext(Aws::String&& value) { SetSSEKMSEncryptionContext(std::move(value)); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline CreateMultipartUploadRequest& WithSSEKMSEncryptionContext(const char* value) { SetSSEKMSEncryptionContext(value); return *this;}
@@ -1469,93 +1482,142 @@ namespace Model
/**
- * <p>Specifies whether you want to apply a Legal Hold to the uploaded object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the uploaded object.</p>
*/
inline const ObjectLockLegalHoldStatus& GetObjectLockLegalHoldStatus() const{ return m_objectLockLegalHoldStatus; }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the uploaded object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the uploaded object.</p>
*/
inline bool ObjectLockLegalHoldStatusHasBeenSet() const { return m_objectLockLegalHoldStatusHasBeenSet; }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the uploaded object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the uploaded object.</p>
*/
inline void SetObjectLockLegalHoldStatus(const ObjectLockLegalHoldStatus& value) { m_objectLockLegalHoldStatusHasBeenSet = true; m_objectLockLegalHoldStatus = value; }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the uploaded object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the uploaded object.</p>
*/
inline void SetObjectLockLegalHoldStatus(ObjectLockLegalHoldStatus&& value) { m_objectLockLegalHoldStatusHasBeenSet = true; m_objectLockLegalHoldStatus = std::move(value); }
/**
- * <p>Specifies whether you want to apply a Legal Hold to the uploaded object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the uploaded object.</p>
*/
inline CreateMultipartUploadRequest& WithObjectLockLegalHoldStatus(const ObjectLockLegalHoldStatus& value) { SetObjectLockLegalHoldStatus(value); return *this;}
/**
- * <p>Specifies whether you want to apply a Legal Hold to the uploaded object.</p>
+ * <p>Specifies whether you want to apply a legal hold to the uploaded object.</p>
*/
inline CreateMultipartUploadRequest& WithObjectLockLegalHoldStatus(ObjectLockLegalHoldStatus&& value) { SetObjectLockLegalHoldStatus(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline CreateMultipartUploadRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline CreateMultipartUploadRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline CreateMultipartUploadRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CreateMultipartUploadRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm you want Amazon S3 to use to create the checksum for
+ * the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline CreateMultipartUploadRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -1598,94 +1660,97 @@ namespace Model
private:
ObjectCannedACL m_aCL;
- bool m_aCLHasBeenSet;
+ bool m_aCLHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_cacheControl;
- bool m_cacheControlHasBeenSet;
+ bool m_cacheControlHasBeenSet = false;
Aws::String m_contentDisposition;
- bool m_contentDispositionHasBeenSet;
+ bool m_contentDispositionHasBeenSet = false;
Aws::String m_contentEncoding;
- bool m_contentEncodingHasBeenSet;
+ bool m_contentEncodingHasBeenSet = false;
Aws::String m_contentLanguage;
- bool m_contentLanguageHasBeenSet;
+ bool m_contentLanguageHasBeenSet = false;
Aws::String m_contentType;
- bool m_contentTypeHasBeenSet;
+ bool m_contentTypeHasBeenSet = false;
Aws::Utils::DateTime m_expires;
- bool m_expiresHasBeenSet;
+ bool m_expiresHasBeenSet = false;
Aws::String m_grantFullControl;
- bool m_grantFullControlHasBeenSet;
+ bool m_grantFullControlHasBeenSet = false;
Aws::String m_grantRead;
- bool m_grantReadHasBeenSet;
+ bool m_grantReadHasBeenSet = false;
Aws::String m_grantReadACP;
- bool m_grantReadACPHasBeenSet;
+ bool m_grantReadACPHasBeenSet = false;
Aws::String m_grantWriteACP;
- bool m_grantWriteACPHasBeenSet;
+ bool m_grantWriteACPHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_metadata;
- bool m_metadataHasBeenSet;
+ bool m_metadataHasBeenSet = false;
ServerSideEncryption m_serverSideEncryption;
- bool m_serverSideEncryptionHasBeenSet;
+ bool m_serverSideEncryptionHasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
Aws::String m_websiteRedirectLocation;
- bool m_websiteRedirectLocationHasBeenSet;
+ bool m_websiteRedirectLocationHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::String m_sSEKMSKeyId;
- bool m_sSEKMSKeyIdHasBeenSet;
+ bool m_sSEKMSKeyIdHasBeenSet = false;
Aws::String m_sSEKMSEncryptionContext;
- bool m_sSEKMSEncryptionContextHasBeenSet;
+ bool m_sSEKMSEncryptionContextHasBeenSet = false;
bool m_bucketKeyEnabled;
- bool m_bucketKeyEnabledHasBeenSet;
+ bool m_bucketKeyEnabledHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_tagging;
- bool m_taggingHasBeenSet;
+ bool m_taggingHasBeenSet = false;
ObjectLockMode m_objectLockMode;
- bool m_objectLockModeHasBeenSet;
+ bool m_objectLockModeHasBeenSet = false;
Aws::Utils::DateTime m_objectLockRetainUntilDate;
- bool m_objectLockRetainUntilDateHasBeenSet;
+ bool m_objectLockRetainUntilDateHasBeenSet = false;
ObjectLockLegalHoldStatus m_objectLockLegalHoldStatus;
- bool m_objectLockLegalHoldStatusHasBeenSet;
+ bool m_objectLockLegalHoldStatusHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
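The hunks above add a ChecksumAlgorithm member to CreateMultipartUploadRequest alongside the existing SSE-KMS and ExpectedBucketOwner fields. A hedged usage sketch (editorial, not part of the vendored patch; the bucket, key, KMS alias and account ID are placeholders):

#include <aws/s3/S3Client.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/ServerSideEncryption.h>

static Aws::String StartMultipartUpload(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::CreateMultipartUploadRequest request;
    request.SetBucket("example-bucket");             // placeholder bucket
    request.SetKey("example-object");                // placeholder key
    // New in this update: the checksum algorithm S3 should use for the object
    // (see the doc comment added above).
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
    request.SetServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms);
    request.SetSSEKMSKeyId("alias/example-key");     // placeholder KMS key alias
    request.SetExpectedBucketOwner("111122223333");  // placeholder account ID

    const auto outcome = client.CreateMultipartUpload(request);
    return outcome.IsSuccess() ? outcome.GetResult().GetUploadId() : Aws::String();
}

Subsequent UploadPart requests would typically carry the matching per-part checksum, per the "Checking object integrity" guide linked in the doc comments.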
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadResult.h
index 49f002f3b7..ccbb87c3d7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/CreateMultipartUploadResult.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/ServerSideEncryption.h>
#include <aws/s3/model/RequestCharged.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <utility>
namespace Aws
@@ -27,12 +28,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API CreateMultipartUploadResult
+ class CreateMultipartUploadResult
{
public:
- CreateMultipartUploadResult();
- CreateMultipartUploadResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- CreateMultipartUploadResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CreateMultipartUploadResult();
+ AWS_S3_API CreateMultipartUploadResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API CreateMultipartUploadResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -157,149 +158,156 @@ namespace Model
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucket = value; }
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucket = std::move(value); }
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucket.assign(value); }
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadResult& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadResult& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>The name of the bucket to which the multipart upload was initiated. </p>
- * <p>When using this action with an access point, you must direct requests to the
- * access point hostname. The access point hostname takes the form
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p> <p>When using
+ * this action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline CreateMultipartUploadResult& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -508,120 +516,120 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CreateMultipartUploadResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CreateMultipartUploadResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline CreateMultipartUploadResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline const Aws::String& GetSSEKMSEncryptionContext() const{ return m_sSEKMSEncryptionContext; }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const Aws::String& value) { m_sSEKMSEncryptionContext = value; }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(Aws::String&& value) { m_sSEKMSEncryptionContext = std::move(value); }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const char* value) { m_sSEKMSEncryptionContext.assign(value); }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline CreateMultipartUploadResult& WithSSEKMSEncryptionContext(const Aws::String& value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline CreateMultipartUploadResult& WithSSEKMSEncryptionContext(Aws::String&& value) { SetSSEKMSEncryptionContext(std::move(value)); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline CreateMultipartUploadResult& WithSSEKMSEncryptionContext(const char* value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline CreateMultipartUploadResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
@@ -641,6 +649,32 @@ namespace Model
inline CreateMultipartUploadResult& WithRequestCharged(RequestCharged&& value) { SetRequestCharged(std::move(value)); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithm = value; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline CreateMultipartUploadResult& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline CreateMultipartUploadResult& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
private:
Aws::Utils::DateTime m_abortDate;
@@ -666,6 +700,8 @@ namespace Model
bool m_bucketKeyEnabled;
RequestCharged m_requestCharged;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
};
} // namespace Model
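
The hunks above extend CreateMultipartUploadResult with a ChecksumAlgorithm accessor and reword the SSE-KMS fields. A minimal usage sketch follows, assuming an already-configured Aws::S3::S3Client and the matching request-side setters from this same SDK release; the bucket, key, KMS key ARN and encryption context are placeholders, not values taken from this change:

#include <aws/core/utils/HashingUtils.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>
#include <aws/s3/model/ServerSideEncryption.h>
#include <iostream>

void StartChecksummedUpload(Aws::S3::S3Client& client)
{
    Aws::S3::Model::CreateMultipartUploadRequest request;
    request.WithBucket("example-bucket").WithKey("example-object");

    // Request SSE-KMS; the encryption-context header carries base64-encoded JSON.
    request.SetServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms);
    request.SetSSEKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/placeholder");
    const Aws::String contextJson = "{\"project\":\"example\"}";
    request.SetSSEKMSEncryptionContext(Aws::Utils::HashingUtils::Base64Encode(
        Aws::Utils::ByteBuffer(reinterpret_cast<const unsigned char*>(contextJson.c_str()),
                               contextJson.size())));

    // Ask S3 to checksum each uploaded part with SHA-256.
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);

    auto outcome = client.CreateMultipartUpload(request);
    if (outcome.IsSuccess())
    {
        const auto& result = outcome.GetResult();
        std::cout << "UploadId: " << result.GetUploadId() << "\n";
        // New in this revision: the result echoes the checksum algorithm.
        if (result.GetChecksumAlgorithm() == Aws::S3::Model::ChecksumAlgorithm::SHA256)
        {
            std::cout << "Parts will be checksummed with SHA-256\n";
        }
        std::cout << "KMS key used: " << result.GetSSEKMSKeyId() << "\n";
    }
}
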
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DefaultRetention.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DefaultRetention.h
index 582f90c903..5d33792991 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DefaultRetention.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DefaultRetention.h
@@ -33,14 +33,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DefaultRetention">AWS
* API Reference</a></p>
*/
- class AWS_S3_API DefaultRetention
+ class DefaultRetention
{
public:
- DefaultRetention();
- DefaultRetention(const Aws::Utils::Xml::XmlNode& xmlNode);
- DefaultRetention& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DefaultRetention();
+ AWS_S3_API DefaultRetention(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DefaultRetention& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -138,13 +138,13 @@ namespace Model
private:
ObjectLockRetentionMode m_mode;
- bool m_modeHasBeenSet;
+ bool m_modeHasBeenSet = false;
int m_days;
- bool m_daysHasBeenSet;
+ bool m_daysHasBeenSet = false;
int m_years;
- bool m_yearsHasBeenSet;
+ bool m_yearsHasBeenSet = false;
};
} // namespace Model
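
DefaultRetention.h shows the two mechanical changes that repeat through the rest of this diff: the AWS_S3_API export macro moves from the class declaration onto each member defined in the library, and every m_*HasBeenSet flag gains an in-class "= false" initializer instead of relying on the constructor. A condensed sketch of the resulting shape, using a hypothetical model class rather than code from this change:

#include <aws/s3/S3_EXPORTS.h>                 // defines AWS_S3_API
#include <aws/core/utils/xml/XmlSerializer.h>  // Aws::Utils::Xml::XmlNode

namespace Example
{
  // Previously: class AWS_S3_API ExampleRetention { ... };
  class ExampleRetention
  {
  public:
    // The export macro now marks each externally-defined member, while the
    // inline accessors (not shown) carry no export attribute at all.
    AWS_S3_API ExampleRetention();
    AWS_S3_API ExampleRetention(const Aws::Utils::Xml::XmlNode& xmlNode);
    AWS_S3_API ExampleRetention& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
    AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;

  private:
    int m_days;
    // Previously: bool m_daysHasBeenSet;  (zeroed only in the constructor)
    bool m_daysHasBeenSet = false;
  };
} // namespace Example
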
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Delete.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Delete.h
index c40ac3062a..5ac4e7efaa 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Delete.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Delete.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Delete">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Delete
+ class Delete
{
public:
- Delete();
- Delete(const Aws::Utils::Xml::XmlNode& xmlNode);
- Delete& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Delete();
+ AWS_S3_API Delete(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Delete& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -106,10 +106,10 @@ namespace Model
private:
Aws::Vector<ObjectIdentifier> m_objects;
- bool m_objectsHasBeenSet;
+ bool m_objectsHasBeenSet = false;
bool m_quiet;
- bool m_quietHasBeenSet;
+ bool m_quietHasBeenSet = false;
};
} // namespace Model
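
Delete.h receives the same export-macro and flag-initializer treatment. For context, a hedged sketch of how this container is typically fed to DeleteObjects; DeleteObjectsRequest is not part of this excerpt, and the bucket and keys are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/Delete.h>
#include <aws/s3/model/DeleteObjectsRequest.h>
#include <aws/s3/model/ObjectIdentifier.h>

bool DeleteTwoObjects(Aws::S3::S3Client& client)
{
    Aws::S3::Model::Delete del;
    del.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("logs/2023-12-01.gz"));
    del.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("logs/2023-12-02.gz"));
    del.SetQuiet(true);  // report only failed deletions in the response

    Aws::S3::Model::DeleteObjectsRequest request;
    request.WithBucket("example-bucket").SetDelete(del);

    return client.DeleteObjects(request).IsSuccess();
}
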
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h
index cfefbb8802..ff996ffa36 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketAnalyticsConfigurationRequest : public S3Request
+ class DeleteBucketAnalyticsConfigurationRequest : public S3Request
{
public:
- DeleteBucketAnalyticsConfigurationRequest();
+ AWS_S3_API DeleteBucketAnalyticsConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketAnalyticsConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket from which an analytics configuration is deleted.</p>
@@ -125,57 +129,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -222,16 +226,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
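
The DeleteBucket*Request hunks here and below also reword the ExpectedBucketOwner documentation: an owner mismatch is now described as an HTTP 403 Forbidden status rather than a "403 (Access Denied)" error. A hedged sketch of supplying the expected owner and detecting that failure; the account ID, bucket and configuration ID are placeholders:

#include <aws/core/http/HttpResponse.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteBucketAnalyticsConfigurationRequest.h>
#include <iostream>

void DeleteAnalyticsConfiguration(Aws::S3::S3Client& client)
{
    Aws::S3::Model::DeleteBucketAnalyticsConfigurationRequest request;
    request.WithBucket("example-bucket")
           .WithId("analytics-config-1")
           .WithExpectedBucketOwner("111122223333");  // account expected to own the bucket

    auto outcome = client.DeleteBucketAnalyticsConfiguration(request);
    if (!outcome.IsSuccess()
        && outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN)
    {
        // Owner mismatch (or missing permissions): S3 answered 403 Forbidden.
        std::cout << outcome.GetError().GetMessage() << "\n";
    }
}

The same ExpectedBucketOwner pattern applies unchanged to the other DeleteBucket* requests in the hunks that follow.
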
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketCorsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketCorsRequest.h
index e1199271c0..2f9fcc3582 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketCorsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketCorsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketCorsRequest : public S3Request
+ class DeleteBucketCorsRequest : public S3Request
{
public:
- DeleteBucketCorsRequest();
+ AWS_S3_API DeleteBucketCorsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketCors"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>Specifies the bucket whose <code>cors</code> configuration is being
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketCorsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketCorsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketCorsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketEncryptionRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketEncryptionRequest.h
index 2ca332dccb..a52f6b3543 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketEncryptionRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketEncryptionRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketEncryptionRequest : public S3Request
+ class DeleteBucketEncryptionRequest : public S3Request
{
public:
- DeleteBucketEncryptionRequest();
+ AWS_S3_API DeleteBucketEncryptionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketEncryption"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the server-side encryption configuration to
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketEncryptionRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketEncryptionRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketEncryptionRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h
index 7beaad6ec9..a39e8cbdbb 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketIntelligentTieringConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketIntelligentTieringConfigurationRequest : public S3Request
+ class DeleteBucketIntelligentTieringConfigurationRequest : public S3Request
{
public:
- DeleteBucketIntelligentTieringConfigurationRequest();
+ AWS_S3_API DeleteBucketIntelligentTieringConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,10 +34,14 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketIntelligentTieringConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose configuration you want to modify or
@@ -171,13 +175,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketInventoryConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketInventoryConfigurationRequest.h
index 3cc56c6504..57a9477f97 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketInventoryConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketInventoryConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketInventoryConfigurationRequest : public S3Request
+ class DeleteBucketInventoryConfigurationRequest : public S3Request
{
public:
- DeleteBucketInventoryConfigurationRequest();
+ AWS_S3_API DeleteBucketInventoryConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketInventoryConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the inventory configuration to delete.</p>
@@ -125,57 +129,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketInventoryConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketInventoryConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketInventoryConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -222,16 +226,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketLifecycleRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketLifecycleRequest.h
index c69d339d6b..a330226759 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketLifecycleRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketLifecycleRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketLifecycleRequest : public S3Request
+ class DeleteBucketLifecycleRequest : public S3Request
{
public:
- DeleteBucketLifecycleRequest();
+ AWS_S3_API DeleteBucketLifecycleRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketLifecycle"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name of the lifecycle to delete.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketLifecycleRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketLifecycleRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketLifecycleRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketMetricsConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketMetricsConfigurationRequest.h
index ce8a0b333f..2203861b80 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketMetricsConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketMetricsConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketMetricsConfigurationRequest : public S3Request
+ class DeleteBucketMetricsConfigurationRequest : public S3Request
{
public:
- DeleteBucketMetricsConfigurationRequest();
+ AWS_S3_API DeleteBucketMetricsConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketMetricsConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the metrics configuration to delete.</p>
@@ -125,57 +129,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketMetricsConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketMetricsConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketMetricsConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -222,16 +226,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketOwnershipControlsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketOwnershipControlsRequest.h
index aab2947698..3aec542306 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketOwnershipControlsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketOwnershipControlsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketOwnershipControlsRequest : public S3Request
+ class DeleteBucketOwnershipControlsRequest : public S3Request
{
public:
- DeleteBucketOwnershipControlsRequest();
+ AWS_S3_API DeleteBucketOwnershipControlsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketOwnershipControls"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The Amazon S3 bucket whose <code>OwnershipControls</code> you want to delete.
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketOwnershipControlsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketOwnershipControlsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketOwnershipControlsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketPolicyRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketPolicyRequest.h
index cde2a99893..804809a685 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketPolicyRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketPolicyRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketPolicyRequest : public S3Request
+ class DeleteBucketPolicyRequest : public S3Request
{
public:
- DeleteBucketPolicyRequest();
+ AWS_S3_API DeleteBucketPolicyRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketPolicy"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketPolicyRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketPolicyRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketPolicyRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketReplicationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketReplicationRequest.h
index 37c1fbe834..86aa475b92 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketReplicationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketReplicationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketReplicationRequest : public S3Request
+ class DeleteBucketReplicationRequest : public S3Request
{
public:
- DeleteBucketReplicationRequest();
+ AWS_S3_API DeleteBucketReplicationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketReplication"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p> The bucket name. </p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketReplicationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketReplicationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketReplicationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketRequest.h
index e7daf51d74..1c7a39ed1e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketRequest : public S3Request
+ class DeleteBucketRequest : public S3Request
{
public:
- DeleteBucketRequest();
+ AWS_S3_API DeleteBucketRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucket"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>Specifies the bucket being deleted.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
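
Another change repeated across these headers is the switch from uninitialized bool m_...HasBeenSet members to in-class default initializers (= false). The hypothetical struct below (not SDK code) sketches why that idiom is attractive: the flag has a well-defined value no matter which constructor runs.

#include <string>

// Hypothetical illustration (not SDK code) of the "HasBeenSet" flag idiom
// with an in-class default initializer: the flag is guaranteed to start
// out false for every constructor, including defaulted ones.
struct TrackedField {
    std::string value;
    bool valueHasBeenSet = false;   // never reads as indeterminate

    void Set(const std::string& v) {
        valueHasBeenSet = true;     // record that a caller supplied the field
        value = v;
    }
};
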
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketTaggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketTaggingRequest.h
index 54355c04fc..b8fe28bc01 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketTaggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketTaggingRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketTaggingRequest : public S3Request
+ class DeleteBucketTaggingRequest : public S3Request
{
public:
- DeleteBucketTaggingRequest();
+ AWS_S3_API DeleteBucketTaggingRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketTagging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket that has the tag set to be removed.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketTaggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketTaggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketTaggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketWebsiteRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketWebsiteRequest.h
index c7ab0d9e40..1a51e2e024 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketWebsiteRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteBucketWebsiteRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteBucketWebsiteRequest : public S3Request
+ class DeleteBucketWebsiteRequest : public S3Request
{
public:
- DeleteBucketWebsiteRequest();
+ AWS_S3_API DeleteBucketWebsiteRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteBucketWebsite"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name for which you want to remove the website configuration. </p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketWebsiteRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketWebsiteRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteBucketWebsiteRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
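
The WithExpectedBucketOwner overloads above follow the SDK's fluent-setter pattern: a const-reference overload that copies, an rvalue-reference overload that moves, and both return *this so calls chain. A hypothetical, stripped-down version of the pattern (class and member names invented for illustration):

#include <string>
#include <utility>

// Hypothetical, stripped-down version of the fluent setter pattern used in
// these request headers. The copy overload takes a const reference; the
// move overload steals from the temporary; both return *this so calls chain.
class MiniRequest {
public:
    MiniRequest& WithExpectedBucketOwner(const std::string& value) {
        m_expectedBucketOwner = value;
        return *this;
    }
    MiniRequest& WithExpectedBucketOwner(std::string&& value) {
        m_expectedBucketOwner = std::move(value);
        return *this;
    }
private:
    std::string m_expectedBucketOwner;
};

// Usage: a temporary binds to the rvalue overload, a named string to the other.
// MiniRequest r; r.WithExpectedBucketOwner(std::string("111122223333"));
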
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerEntry.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerEntry.h
index 60ee780db6..0e7a1a8746 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerEntry.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerEntry.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerEntry">AWS
* API Reference</a></p>
*/
- class AWS_S3_API DeleteMarkerEntry
+ class DeleteMarkerEntry
{
public:
- DeleteMarkerEntry();
- DeleteMarkerEntry(const Aws::Utils::Xml::XmlNode& xmlNode);
- DeleteMarkerEntry& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DeleteMarkerEntry();
+ AWS_S3_API DeleteMarkerEntry(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DeleteMarkerEntry& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -210,19 +210,19 @@ namespace Model
private:
Owner m_owner;
- bool m_ownerHasBeenSet;
+ bool m_ownerHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
bool m_isLatest;
- bool m_isLatestHasBeenSet;
+ bool m_isLatestHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerReplication.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerReplication.h
index fdaecb8346..a207b04eda 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerReplication.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteMarkerReplication.h
@@ -42,14 +42,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeleteMarkerReplication">AWS
* API Reference</a></p>
*/
- class AWS_S3_API DeleteMarkerReplication
+ class DeleteMarkerReplication
{
public:
- DeleteMarkerReplication();
- DeleteMarkerReplication(const Aws::Utils::Xml::XmlNode& xmlNode);
- DeleteMarkerReplication& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DeleteMarkerReplication();
+ AWS_S3_API DeleteMarkerReplication(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DeleteMarkerReplication& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -91,7 +91,7 @@ namespace Model
private:
DeleteMarkerReplicationStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
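
The other recurring change in these hunks is moving the AWS_S3_API export macro from the class declaration onto individual out-of-line members, alongside the new GetEndpointContextParams override used for endpoint resolution. The snippet below is only a hedged sketch of how such an export macro is typically defined; the SDK's actual definition lives in its *_EXPORTS.h headers and may differ in detail.

// Hedged sketch of a typical export-macro definition for a Windows DLL
// build (illustrative only, not the SDK's exact code).
#if defined(_MSC_VER) && defined(USE_IMPORT_EXPORT)
  #ifdef AWS_S3_EXPORTS
    #define AWS_S3_API __declspec(dllexport)  /* building the S3 DLL  */
  #else
    #define AWS_S3_API __declspec(dllimport)  /* consuming the S3 DLL */
  #endif
#else
  #define AWS_S3_API                          /* static / non-MSVC build */
#endif

// Applying the macro to individual out-of-line members, rather than to the
// whole class, keeps inline and implicitly generated members out of the
// DLL interface; only the annotated functions are exported or imported.
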
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectRequest.h
index c340d287a8..4ca9bd0457 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteObjectRequest : public S3Request
+ class DeleteObjectRequest : public S3Request
{
public:
- DeleteObjectRequest();
+ AWS_S3_API DeleteObjectRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -35,31 +35,35 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteObject"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name of the bucket containing the object. </p> <p>When using this
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -68,19 +72,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -89,19 +93,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -110,19 +114,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -131,19 +135,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -152,19 +156,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -173,19 +177,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -194,19 +198,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -379,82 +383,86 @@ namespace Model
/**
* <p>Indicates whether S3 Object Lock should bypass Governance-mode restrictions
- * to process this operation.</p>
+ * to process this operation. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline bool GetBypassGovernanceRetention() const{ return m_bypassGovernanceRetention; }
/**
* <p>Indicates whether S3 Object Lock should bypass Governance-mode restrictions
- * to process this operation.</p>
+ * to process this operation. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline bool BypassGovernanceRetentionHasBeenSet() const { return m_bypassGovernanceRetentionHasBeenSet; }
/**
* <p>Indicates whether S3 Object Lock should bypass Governance-mode restrictions
- * to process this operation.</p>
+ * to process this operation. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline void SetBypassGovernanceRetention(bool value) { m_bypassGovernanceRetentionHasBeenSet = true; m_bypassGovernanceRetention = value; }
/**
* <p>Indicates whether S3 Object Lock should bypass Governance-mode restrictions
- * to process this operation.</p>
+ * to process this operation. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline DeleteObjectRequest& WithBypassGovernanceRetention(bool value) { SetBypassGovernanceRetention(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -501,28 +509,28 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_mFA;
- bool m_mFAHasBeenSet;
+ bool m_mFAHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
bool m_bypassGovernanceRetention;
- bool m_bypassGovernanceRetentionHasBeenSet;
+ bool m_bypassGovernanceRetentionHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
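A minimal sketch of how the DeleteObjectRequest surface documented above might be exercised through the high-level client; the bucket name, key, and account ID are placeholders, and error handling is reduced to a single check:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteObjectRequest.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;  // default credential chain and region

            Aws::S3::Model::DeleteObjectRequest request;
            request.SetBucket("example-bucket");             // or an access point / Outposts ARN
            request.SetKey("path/to/object.txt");
            request.SetExpectedBucketOwner("111122223333");  // owner mismatch -> 403 Forbidden
            request.SetBypassGovernanceRetention(true);      // needs s3:BypassGovernanceRetention

            auto outcome = client.DeleteObject(request);
            if (!outcome.IsSuccess()) {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }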
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectResult.h
index 15a886bf11..7b945ccea4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectResult.h
@@ -25,12 +25,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API DeleteObjectResult
+ class DeleteObjectResult
{
public:
- DeleteObjectResult();
- DeleteObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- DeleteObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API DeleteObjectResult();
+ AWS_S3_API DeleteObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API DeleteObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
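The change to DeleteObjectResult above follows a pattern applied throughout this update: the AWS_S3_API export macro moves from the class declaration to the individual out-of-line members, so that only those members are exported when the SDK is built as a shared library and the inline accessors defined in the header are left unexported. Schematically, with a hypothetical result class:

    #include <aws/s3/S3_EXPORTS.h>
    #include <aws/core/utils/memory/stl/AWSString.h>

    class SomeResult                        // was: class AWS_S3_API SomeResult
    {
    public:
        AWS_S3_API SomeResult();            // export macro now sits on each out-of-line member
        inline const Aws::String& GetId() const { return m_id; }  // inline accessor, not exported
    private:
        Aws::String m_id;
    };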
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingRequest.h
index ad6286e1af..1f5181a071 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteObjectTaggingRequest : public S3Request
+ class DeleteObjectTaggingRequest : public S3Request
{
public:
- DeleteObjectTaggingRequest();
+ AWS_S3_API DeleteObjectTaggingRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,31 +34,35 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteObjectTagging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the objects from which to remove the tags. </p>
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -67,19 +71,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -88,19 +92,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -109,19 +113,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -130,19 +134,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -151,19 +155,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectTaggingRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -172,19 +176,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectTaggingRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -193,19 +197,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectTaggingRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -302,57 +306,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectTaggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectTaggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectTaggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -399,19 +403,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
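As with DeleteObject, the DeleteObjectTaggingRequest documented above can be driven through the high-level client. A minimal sketch, with placeholder bucket, key, and version ID:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteObjectTaggingRequest.h>
    #include <iostream>

    void RemoveTagSet(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::DeleteObjectTaggingRequest request;
        request.SetBucket("example-bucket");        // or an access point / Outposts ARN
        request.SetKey("reports/2023/summary.csv");
        request.SetVersionId("VERSION_ID_PLACEHOLDER");

        auto outcome = client.DeleteObjectTagging(request);
        if (outcome.IsSuccess()) {
            // The result echoes the version ID whose tag set was removed.
            std::cout << outcome.GetResult().GetVersionId() << std::endl;
        } else {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }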
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingResult.h
index 103267f221..ac47534a93 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectTaggingResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API DeleteObjectTaggingResult
+ class DeleteObjectTaggingResult
{
public:
- DeleteObjectTaggingResult();
- DeleteObjectTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- DeleteObjectTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API DeleteObjectTaggingResult();
+ AWS_S3_API DeleteObjectTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API DeleteObjectTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsRequest.h
index fcbe9218b6..e7c7c792fd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/Delete.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
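The new ChecksumAlgorithm include supports the checksum member this update adds to DeleteObjectsRequest further down in this file. A minimal sketch of how it could be used; the keys and the choice of SHA-256 are illustrative:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteObjectsRequest.h>
    #include <aws/s3/model/Delete.h>
    #include <aws/s3/model/ObjectIdentifier.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>
    #include <iostream>

    void DeleteBatch(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::Delete toDelete;
        toDelete.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("logs/2023-11-30.gz"));
        toDelete.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("logs/2023-12-01.gz"));

        Aws::S3::Model::DeleteObjectsRequest request;
        request.SetBucket("example-bucket");
        request.SetDelete(toDelete);
        // Have the SDK add a SHA-256 checksum for the request payload instead of Content-MD5.
        request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);

        auto outcome = client.DeleteObjects(request);
        if (outcome.IsSuccess()) {
            // Per-key failures, if any, are reported in GetErrors() on the result.
            std::cout << "Deleted " << outcome.GetResult().GetDeleted().size() << " objects\n";
        }
    }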
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeleteObjectsRequest : public S3Request
+ class DeleteObjectsRequest : public S3Request
{
public:
- DeleteObjectsRequest();
+ AWS_S3_API DeleteObjectsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,33 +37,37 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "DeleteObjects"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the objects to delete. </p> <p>When using this
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -71,19 +76,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -92,19 +97,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -113,19 +118,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -134,19 +139,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -155,19 +160,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectsRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -176,19 +181,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectsRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -197,19 +202,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline DeleteObjectsRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -331,90 +336,187 @@ namespace Model
/**
* <p>Specifies whether you want to delete this object even if it has a
- * Governance-type Object Lock in place. You must have sufficient permissions to
- * perform this operation.</p>
+ * Governance-type Object Lock in place. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline bool GetBypassGovernanceRetention() const{ return m_bypassGovernanceRetention; }
/**
* <p>Specifies whether you want to delete this object even if it has a
- * Governance-type Object Lock in place. You must have sufficient permissions to
- * perform this operation.</p>
+ * Governance-type Object Lock in place. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline bool BypassGovernanceRetentionHasBeenSet() const { return m_bypassGovernanceRetentionHasBeenSet; }
/**
* <p>Specifies whether you want to delete this object even if it has a
- * Governance-type Object Lock in place. You must have sufficient permissions to
- * perform this operation.</p>
+ * Governance-type Object Lock in place. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline void SetBypassGovernanceRetention(bool value) { m_bypassGovernanceRetentionHasBeenSet = true; m_bypassGovernanceRetention = value; }
/**
* <p>Specifies whether you want to delete this object even if it has a
- * Governance-type Object Lock in place. You must have sufficient permissions to
- * perform this operation.</p>
+ * Governance-type Object Lock in place. To use this header, you must have the
+ * <code>s3:BypassGovernanceRetention</code> permission.</p>
*/
inline DeleteObjectsRequest& WithBypassGovernanceRetention(bool value) { SetBypassGovernanceRetention(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeleteObjectsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline DeleteObjectsRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline DeleteObjectsRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -457,25 +559,28 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Delete m_delete;
- bool m_deleteHasBeenSet;
+ bool m_deleteHasBeenSet = false;
Aws::String m_mFA;
- bool m_mFAHasBeenSet;
+ bool m_mFAHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
bool m_bypassGovernanceRetention;
- bool m_bypassGovernanceRetentionHasBeenSet;
+ bool m_bypassGovernanceRetentionHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
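The DeleteObjectsRequest hunk above adds WithChecksumAlgorithm() alongside the existing ExpectedBucketOwner accessors. A minimal usage sketch, assuming an already-configured Aws::S3::S3Client, the usual generated accessors on Delete and ObjectIdentifier (AddObjects, WithKey — they are not part of this hunk), and placeholder bucket/account values:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteObjectsRequest.h>
    #include <aws/s3/model/Delete.h>
    #include <aws/s3/model/ObjectIdentifier.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>

    // Sketch: batch-delete two keys, pin the expected bucket owner, and ask the
    // SDK to compute a SHA256 checksum over the request payload.
    void DeleteTwoObjects(const Aws::S3::S3Client& client)
    {
        using namespace Aws::S3::Model;

        Delete toDelete;
        toDelete.AddObjects(ObjectIdentifier().WithKey("photos/a.jpg"));
        toDelete.AddObjects(ObjectIdentifier().WithKey("photos/b.jpg"));

        DeleteObjectsRequest request;
        request.WithBucket("example-bucket")            // placeholder bucket name
               .WithDelete(toDelete)
               .WithExpectedBucketOwner("111122223333") // 403 Forbidden if the owner differs
               .WithChecksumAlgorithm(ChecksumAlgorithm::SHA256);

        auto outcome = client.DeleteObjects(request);
        if (!outcome.IsSuccess()) {
            // outcome.GetError() carries the S3 error code and message
        }
    }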
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsResult.h
index 5533d2b3e5..b06f22acb1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeleteObjectsResult.h
@@ -27,12 +27,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API DeleteObjectsResult
+ class DeleteObjectsResult
{
public:
- DeleteObjectsResult();
- DeleteObjectsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- DeleteObjectsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API DeleteObjectsResult();
+ AWS_S3_API DeleteObjectsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API DeleteObjectsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletePublicAccessBlockRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletePublicAccessBlockRequest.h
index d53ad97548..754bb1e12b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletePublicAccessBlockRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletePublicAccessBlockRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API DeletePublicAccessBlockRequest : public S3Request
+ class DeletePublicAccessBlockRequest : public S3Request
{
public:
- DeletePublicAccessBlockRequest();
+ AWS_S3_API DeletePublicAccessBlockRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "DeletePublicAccessBlock"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect the parameters (both configurable and statically hard-coded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The Amazon S3 bucket whose <code>PublicAccessBlock</code> configuration you
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeletePublicAccessBlockRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeletePublicAccessBlockRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline DeletePublicAccessBlockRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
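The same ExpectedBucketOwner guard applies to DeletePublicAccessBlockRequest. A short sketch under the same assumptions (configured client, placeholder bucket name and account ID):

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeletePublicAccessBlockRequest.h>

    // Sketch: drop the bucket's PublicAccessBlock configuration, but only if the
    // bucket is owned by the expected account; otherwise S3 answers 403 Forbidden.
    bool RemovePublicAccessBlock(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::DeletePublicAccessBlockRequest request;
        request.WithBucket("example-bucket")             // placeholder
               .WithExpectedBucketOwner("111122223333"); // placeholder account ID
        return client.DeletePublicAccessBlock(request).IsSuccess();
    }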
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletedObject.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletedObject.h
index 72f1b97e85..a2d30acfa0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletedObject.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/DeletedObject.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/DeletedObject">AWS
* API Reference</a></p>
*/
- class AWS_S3_API DeletedObject
+ class DeletedObject
{
public:
- DeletedObject();
- DeletedObject(const Aws::Utils::Xml::XmlNode& xmlNode);
- DeletedObject& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DeletedObject();
+ AWS_S3_API DeletedObject(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API DeletedObject& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -207,16 +207,16 @@ namespace Model
private:
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
bool m_deleteMarker;
- bool m_deleteMarkerHasBeenSet;
+ bool m_deleteMarkerHasBeenSet = false;
Aws::String m_deleteMarkerVersionId;
- bool m_deleteMarkerVersionIdHasBeenSet;
+ bool m_deleteMarkerVersionIdHasBeenSet = false;
};
} // namespace Model
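DeletedObject is the element type returned by DeleteObjectsResult. A hedged sketch of consuming such a result; GetDeleted() and GetErrors() follow the standard generated accessor pattern and are not shown in the truncated DeleteObjectsResult hunk above:

    #include <aws/s3/model/DeleteObjectsResult.h>
    #include <aws/s3/model/DeletedObject.h>
    #include <aws/s3/model/Error.h>
    #include <iostream>

    // Sketch: report which keys were deleted (and whether a delete marker was
    // created) and which keys failed.
    void ReportDeleteOutcome(const Aws::S3::Model::DeleteObjectsResult& result)
    {
        for (const auto& obj : result.GetDeleted()) {
            std::cout << "deleted " << obj.GetKey();
            if (obj.GetDeleteMarker()) {
                std::cout << " (delete marker " << obj.GetDeleteMarkerVersionId() << ")";
            }
            std::cout << "\n";
        }
        for (const auto& err : result.GetErrors()) {
            std::cout << "failed " << err.GetKey() << ": " << err.GetCode() << "\n";
        }
    }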
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Destination.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Destination.h
index eabf57c69a..251c04b772 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Destination.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Destination.h
@@ -34,14 +34,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Destination">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Destination
+ class Destination
{
public:
- Destination();
- Destination(const Aws::Utils::Xml::XmlNode& xmlNode);
- Destination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Destination();
+ AWS_S3_API Destination(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Destination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -95,97 +95,97 @@ namespace Model
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline const Aws::String& GetAccount() const{ return m_account; }
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline bool AccountHasBeenSet() const { return m_accountHasBeenSet; }
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline void SetAccount(const Aws::String& value) { m_accountHasBeenSet = true; m_account = value; }
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline void SetAccount(Aws::String&& value) { m_accountHasBeenSet = true; m_account = std::move(value); }
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline void SetAccount(const char* value) { m_accountHasBeenSet = true; m_account.assign(value); }
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline Destination& WithAccount(const Aws::String& value) { SetAccount(value); return *this;}
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline Destination& WithAccount(Aws::String&& value) { SetAccount(std::move(value)); return *this;}
/**
* <p>Destination bucket owner account ID. In a cross-account scenario, if you
- * direct Amazon S3 to change replica ownership to the AWS account that owns the
- * destination bucket by specifying the <code>AccessControlTranslation</code>
- * property, this is the account ID of the destination bucket owner. For more
- * information, see <a
+ * direct Amazon S3 to change replica ownership to the Amazon Web Services account
+ * that owns the destination bucket by specifying the
+ * <code>AccessControlTranslation</code> property, this is the account ID of the
+ * destination bucket owner. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-change-owner.html">Replication
- * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon Simple
- * Storage Service Developer Guide</i>.</p>
+ * Additional Configuration: Changing the Replica Owner</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline Destination& WithAccount(const char* value) { SetAccount(value); return *this;}
@@ -196,8 +196,7 @@ namespace Model
* object to create the object replica. </p> <p>For valid values, see the
* <code>StorageClass</code> element of the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * Bucket replication</a> action in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * Bucket replication</a> action in the <i>Amazon S3 API Reference</i>.</p>
*/
inline const StorageClass& GetStorageClass() const{ return m_storageClass; }
@@ -207,8 +206,7 @@ namespace Model
* object to create the object replica. </p> <p>For valid values, see the
* <code>StorageClass</code> element of the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * Bucket replication</a> action in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * Bucket replication</a> action in the <i>Amazon S3 API Reference</i>.</p>
*/
inline bool StorageClassHasBeenSet() const { return m_storageClassHasBeenSet; }
@@ -218,8 +216,7 @@ namespace Model
* object to create the object replica. </p> <p>For valid values, see the
* <code>StorageClass</code> element of the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * Bucket replication</a> action in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * Bucket replication</a> action in the <i>Amazon S3 API Reference</i>.</p>
*/
inline void SetStorageClass(const StorageClass& value) { m_storageClassHasBeenSet = true; m_storageClass = value; }
@@ -229,8 +226,7 @@ namespace Model
* object to create the object replica. </p> <p>For valid values, see the
* <code>StorageClass</code> element of the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * Bucket replication</a> action in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * Bucket replication</a> action in the <i>Amazon S3 API Reference</i>.</p>
*/
inline void SetStorageClass(StorageClass&& value) { m_storageClassHasBeenSet = true; m_storageClass = std::move(value); }
@@ -240,8 +236,7 @@ namespace Model
* object to create the object replica. </p> <p>For valid values, see the
* <code>StorageClass</code> element of the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * Bucket replication</a> action in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * Bucket replication</a> action in the <i>Amazon S3 API Reference</i>.</p>
*/
inline Destination& WithStorageClass(const StorageClass& value) { SetStorageClass(value); return *this;}
@@ -251,8 +246,7 @@ namespace Model
* object to create the object replica. </p> <p>For valid values, see the
* <code>StorageClass</code> element of the <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTreplication.html">PUT
- * Bucket replication</a> action in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p>
+ * Bucket replication</a> action in the <i>Amazon S3 API Reference</i>.</p>
*/
inline Destination& WithStorageClass(StorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
@@ -260,54 +254,54 @@ namespace Model
/**
* <p>Specify this only in a cross-account scenario (where source and destination
* bucket owners are not the same), and you want to change replica ownership to the
- * AWS account that owns the destination bucket. If this is not specified in the
- * replication configuration, the replicas are owned by same AWS account that owns
- * the source object.</p>
+ * Amazon Web Services account that owns the destination bucket. If this is not
+ * specified in the replication configuration, the replicas are owned by the same
+ * Amazon Web Services account that owns the source object.</p>
*/
inline const AccessControlTranslation& GetAccessControlTranslation() const{ return m_accessControlTranslation; }
/**
* <p>Specify this only in a cross-account scenario (where source and destination
* bucket owners are not the same), and you want to change replica ownership to the
- * AWS account that owns the destination bucket. If this is not specified in the
- * replication configuration, the replicas are owned by same AWS account that owns
- * the source object.</p>
+ * Amazon Web Services account that owns the destination bucket. If this is not
+ * specified in the replication configuration, the replicas are owned by the same
+ * Amazon Web Services account that owns the source object.</p>
*/
inline bool AccessControlTranslationHasBeenSet() const { return m_accessControlTranslationHasBeenSet; }
/**
* <p>Specify this only in a cross-account scenario (where source and destination
* bucket owners are not the same), and you want to change replica ownership to the
- * AWS account that owns the destination bucket. If this is not specified in the
- * replication configuration, the replicas are owned by same AWS account that owns
- * the source object.</p>
+ * Amazon Web Services account that owns the destination bucket. If this is not
+ * specified in the replication configuration, the replicas are owned by the same
+ * Amazon Web Services account that owns the source object.</p>
*/
inline void SetAccessControlTranslation(const AccessControlTranslation& value) { m_accessControlTranslationHasBeenSet = true; m_accessControlTranslation = value; }
/**
* <p>Specify this only in a cross-account scenario (where source and destination
* bucket owners are not the same), and you want to change replica ownership to the
- * AWS account that owns the destination bucket. If this is not specified in the
- * replication configuration, the replicas are owned by same AWS account that owns
- * the source object.</p>
+ * Amazon Web Services account that owns the destination bucket. If this is not
+ * specified in the replication configuration, the replicas are owned by the same
+ * Amazon Web Services account that owns the source object.</p>
*/
inline void SetAccessControlTranslation(AccessControlTranslation&& value) { m_accessControlTranslationHasBeenSet = true; m_accessControlTranslation = std::move(value); }
/**
* <p>Specify this only in a cross-account scenario (where source and destination
* bucket owners are not the same), and you want to change replica ownership to the
- * AWS account that owns the destination bucket. If this is not specified in the
- * replication configuration, the replicas are owned by same AWS account that owns
- * the source object.</p>
+ * Amazon Web Services account that owns the destination bucket. If this is not
+ * specified in the replication configuration, the replicas are owned by the same
+ * Amazon Web Services account that owns the source object.</p>
*/
inline Destination& WithAccessControlTranslation(const AccessControlTranslation& value) { SetAccessControlTranslation(value); return *this;}
/**
* <p>Specify this only in a cross-account scenario (where source and destination
* bucket owners are not the same), and you want to change replica ownership to the
- * AWS account that owns the destination bucket. If this is not specified in the
- * replication configuration, the replicas are owned by same AWS account that owns
- * the source object.</p>
+ * Amazon Web Services account that owns the destination bucket. If this is not
+ * specified in the replication configuration, the replicas are owned by the same
+ * Amazon Web Services account that owns the source object.</p>
*/
inline Destination& WithAccessControlTranslation(AccessControlTranslation&& value) { SetAccessControlTranslation(std::move(value)); return *this;}
@@ -443,25 +437,25 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_account;
- bool m_accountHasBeenSet;
+ bool m_accountHasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
AccessControlTranslation m_accessControlTranslation;
- bool m_accessControlTranslationHasBeenSet;
+ bool m_accessControlTranslationHasBeenSet = false;
EncryptionConfiguration m_encryptionConfiguration;
- bool m_encryptionConfigurationHasBeenSet;
+ bool m_encryptionConfigurationHasBeenSet = false;
ReplicationTime m_replicationTime;
- bool m_replicationTimeHasBeenSet;
+ bool m_replicationTimeHasBeenSet = false;
Metrics m_metrics;
- bool m_metricsHasBeenSet;
+ bool m_metricsHasBeenSet = false;
};
} // namespace Model
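The Destination comments above cover the cross-account owner-override case. A sketch of building such a Destination; OwnerOverride::Destination, StorageClass::STANDARD_IA and WithBucket are assumed from the generated model (only WithAccount, WithStorageClass and WithAccessControlTranslation appear in this hunk), and the ARN/account ID are placeholders:

    #include <aws/s3/model/Destination.h>
    #include <aws/s3/model/AccessControlTranslation.h>
    #include <aws/s3/model/OwnerOverride.h>
    #include <aws/s3/model/StorageClass.h>

    // Sketch: replicate into a bucket owned by another account and hand replica
    // ownership to that account, per the AccessControlTranslation notes above.
    Aws::S3::Model::Destination MakeCrossAccountDestination()
    {
        using namespace Aws::S3::Model;
        return Destination()
            .WithBucket("arn:aws:s3:::destination-bucket") // placeholder bucket ARN
            .WithAccount("444455556666")                   // destination bucket owner (placeholder)
            .WithStorageClass(StorageClass::STANDARD_IA)
            .WithAccessControlTranslation(
                AccessControlTranslation().WithOwner(OwnerOverride::Destination));
    }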
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Encryption.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Encryption.h
index 87937565bb..729beeeca0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Encryption.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Encryption.h
@@ -28,14 +28,14 @@ namespace Model
* <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Encryption">AWS
* API Reference</a></p>
*/
- class AWS_S3_API Encryption
+ class Encryption
{
public:
- Encryption();
- Encryption(const Aws::Utils::Xml::XmlNode& xmlNode);
- Encryption& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Encryption();
+ AWS_S3_API Encryption(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Encryption& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -77,89 +77,81 @@ namespace Model
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline const Aws::String& GetKMSKeyId() const{ return m_kMSKeyId; }
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline bool KMSKeyIdHasBeenSet() const { return m_kMSKeyIdHasBeenSet; }
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetKMSKeyId(const Aws::String& value) { m_kMSKeyIdHasBeenSet = true; m_kMSKeyId = value; }
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetKMSKeyId(Aws::String&& value) { m_kMSKeyIdHasBeenSet = true; m_kMSKeyId = std::move(value); }
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetKMSKeyId(const char* value) { m_kMSKeyIdHasBeenSet = true; m_kMSKeyId.assign(value); }
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline Encryption& WithKMSKeyId(const Aws::String& value) { SetKMSKeyId(value); return *this;}
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline Encryption& WithKMSKeyId(Aws::String&& value) { SetKMSKeyId(std::move(value)); return *this;}
/**
* <p>If the encryption type is <code>aws:kms</code>, this optional value specifies
- * the ID of the symmetric customer managed AWS KMS CMK to use for encryption of
- * job results. Amazon S3 only supports symmetric CMKs. For more information, see
- * <a
+ * the ID of the symmetric customer managed key to use for encryption of job
+ * results. Amazon S3 only supports symmetric keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline Encryption& WithKMSKeyId(const char* value) { SetKMSKeyId(value); return *this;}
@@ -215,13 +207,13 @@ namespace Model
private:
ServerSideEncryption m_encryptionType;
- bool m_encryptionTypeHasBeenSet;
+ bool m_encryptionTypeHasBeenSet = false;
Aws::String m_kMSKeyId;
- bool m_kMSKeyIdHasBeenSet;
+ bool m_kMSKeyIdHasBeenSet = false;
Aws::String m_kMSContext;
- bool m_kMSContextHasBeenSet;
+ bool m_kMSContextHasBeenSet = false;
};
} // namespace Model
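A short sketch of the aws:kms case described in the Encryption comments above; WithEncryptionType and the ServerSideEncryption::aws_kms enum value are assumed from the generated model (only the KMSKeyId accessors appear in this hunk), and the key ARN is a placeholder:

    #include <aws/s3/model/Encryption.h>
    #include <aws/s3/model/ServerSideEncryption.h>

    // Sketch: encrypt job results with aws:kms using a specific symmetric,
    // customer managed key, as the KMSKeyId documentation above describes.
    Aws::S3::Model::Encryption MakeKmsEncryption()
    {
        using namespace Aws::S3::Model;
        return Encryption()
            .WithEncryptionType(ServerSideEncryption::aws_kms)
            .WithKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"); // placeholder ARN
    }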
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EncryptionConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EncryptionConfiguration.h
index 14d86c3767..285dda7937 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EncryptionConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EncryptionConfiguration.h
@@ -28,108 +28,116 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EncryptionConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API EncryptionConfiguration
+ class EncryptionConfiguration
{
public:
- EncryptionConfiguration();
- EncryptionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- EncryptionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API EncryptionConfiguration();
+ AWS_S3_API EncryptionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API EncryptionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline const Aws::String& GetReplicaKmsKeyID() const{ return m_replicaKmsKeyID; }
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline bool ReplicaKmsKeyIDHasBeenSet() const { return m_replicaKmsKeyIDHasBeenSet; }
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetReplicaKmsKeyID(const Aws::String& value) { m_replicaKmsKeyIDHasBeenSet = true; m_replicaKmsKeyID = value; }
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetReplicaKmsKeyID(Aws::String&& value) { m_replicaKmsKeyIDHasBeenSet = true; m_replicaKmsKeyID = std::move(value); }
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetReplicaKmsKeyID(const char* value) { m_replicaKmsKeyIDHasBeenSet = true; m_replicaKmsKeyID.assign(value); }
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline EncryptionConfiguration& WithReplicaKmsKeyID(const Aws::String& value) { SetReplicaKmsKeyID(value); return *this;}
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline EncryptionConfiguration& WithReplicaKmsKeyID(Aws::String&& value) { SetReplicaKmsKeyID(std::move(value)); return *this;}
/**
- * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed customer
- * master key (CMK) stored in AWS Key Management Service (KMS) for the destination
- * bucket. Amazon S3 uses this key to encrypt replica objects. Amazon S3 only
- * supports symmetric customer managed CMKs. For more information, see <a
+ * <p>Specifies the ID (Key ARN or Alias ARN) of the customer managed Amazon Web
+ * Services KMS key stored in Amazon Web Services Key Management Service (KMS) for
+ * the destination bucket. Amazon S3 uses this key to encrypt replica objects.
+ * Amazon S3 only supports symmetric, customer managed KMS keys. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline EncryptionConfiguration& WithReplicaKmsKeyID(const char* value) { SetReplicaKmsKeyID(value); return *this;}
private:
Aws::String m_replicaKmsKeyID;
- bool m_replicaKmsKeyIDHasBeenSet;
+ bool m_replicaKmsKeyIDHasBeenSet = false;
};
} // namespace Model
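EncryptionConfiguration carries only the ReplicaKmsKeyID. A sketch of wiring it into the Destination from the earlier hunk; WithEncryptionConfiguration is assumed from Destination's m_encryptionConfiguration member (the accessor itself is not shown above), and the key ARN is a placeholder:

    #include <aws/s3/model/Destination.h>
    #include <aws/s3/model/EncryptionConfiguration.h>

    // Sketch: ask S3 to encrypt replicas in the destination bucket with a
    // specific symmetric, customer managed KMS key.
    Aws::S3::Model::Destination WithReplicaEncryption(Aws::S3::Model::Destination destination)
    {
        Aws::S3::Model::EncryptionConfiguration config;
        config.WithReplicaKmsKeyID(
            "arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID"); // placeholder key ARN
        return destination.WithEncryptionConfiguration(config);
    }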
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Error.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Error.h
index a6ca21b0e4..50d4938818 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Error.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Error.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Error">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Error
+ class Error
{
public:
- Error();
- Error(const Aws::Utils::Xml::XmlNode& xmlNode);
- Error& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Error();
+ AWS_S3_API Error(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Error& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -127,41 +127,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -202,33 +203,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
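
The hunks above and below only reword the generated S3 error-code reference (e.g. "AWS" becomes "Amazon Web Services", AWS4-HMAC-SHA256 gains a <code> tag); the codes and HTTP statuses themselves are unchanged. As a minimal illustrative sketch of how those documented codes surface through the aws-sdk-cpp client API being updated here (the bucket name and the choice of HeadBucket are assumptions for illustration, not taken from this patch):

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/S3Errors.h>
#include <aws/s3/model/HeadBucketRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // Default client: credentials and region come from the default provider chain.
        Aws::S3::S3Client client;

        Aws::S3::Model::HeadBucketRequest request;
        request.SetBucket("example-bucket"); // hypothetical bucket name, illustration only

        auto outcome = client.HeadBucket(request);
        if (!outcome.IsSuccess())
        {
            const auto& err = outcome.GetError();
            // GetExceptionName() carries the error code string documented in the
            // comment above (e.g. "NoSuchBucket"); GetResponseCode() carries the
            // corresponding HTTP status from the same table.
            std::cout << "Code: " << err.GetExceptionName()
                      << ", HTTP: " << static_cast<int>(err.GetResponseCode())
                      << ", Message: " << err.GetMessage() << std::endl;

            // The common codes are also exposed as a typed enum for programmatic checks.
            if (err.GetErrorType() == Aws::S3::S3Errors::NO_SUCH_BUCKET)
            {
                std::cout << "Bucket does not exist." << std::endl;
            }
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}

The string code and the S3Errors enum value refer to the same table of errors documented in these comments; string matching on GetExceptionName() covers codes that have no enum counterpart, while GetErrorType() is the cheaper check for the common ones.
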
@@ -247,11 +249,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -259,10 +261,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -285,44 +287,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -399,20 +401,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -439,7 +442,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -487,41 +491,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -562,33 +567,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -607,11 +613,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -619,10 +625,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -645,44 +651,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -759,20 +765,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -799,7 +806,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -847,41 +855,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -922,33 +931,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -967,11 +977,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -979,10 +989,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -1005,44 +1015,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
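(The hunk above updates the doc comment that enumerates the Code / Description / HTTP Status Code fields S3 returns on failure. As a minimal sketch outside the diff, this is roughly how those fields surface through the SDK's outcome types; the bucket name is a placeholder and the client uses the default credential/region configuration.)

// Sketch, not part of this commit: reading the documented error fields from an outcome.
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadBucketRequest.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client s3;  // default credentials and region
        Aws::S3::Model::HeadBucketRequest request;
        request.SetBucket("my-example-bucket");  // placeholder name

        auto outcome = s3.HeadBucket(request);
        if (!outcome.IsSuccess()) {
            const auto& err = outcome.GetError();
            // GetExceptionName() carries the "Code" value, GetMessage() the
            // "Description", and GetResponseCode() the HTTP status documented above.
            std::cout << "Code: " << err.GetExceptionName() << "\n"
                      << "Description: " << err.GetMessage() << "\n"
                      << "HTTP status: " << static_cast<int>(err.GetResponseCode()) << "\n";
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}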
@@ -1119,20 +1129,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
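(The hunk above covers OperationAborted, documented as 409 Conflict with "Try again", and PreconditionFailed as 412. A small sketch of how a caller might act on those status codes follows; ShouldRetryPut is a hypothetical helper, and comparing against the numeric status avoids assuming particular enum spellings.)

// Sketch only: mapping the documented status codes onto a retry decision.
#include <aws/s3/S3Client.h>

bool ShouldRetryPut(const Aws::S3::Model::PutObjectOutcome& outcome)
{
    if (outcome.IsSuccess()) {
        return false;
    }
    const auto& err = outcome.GetError();
    // 409 Conflict (OperationAborted) is documented above as transient; a 412
    // Precondition Failed reflects the caller's own preconditions and is not retried.
    if (static_cast<int>(err.GetResponseCode()) == 409) {
        return true;
    }
    // Otherwise defer to the SDK's own retryability classification.
    return err.ShouldRetry();
}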
@@ -1159,7 +1170,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -1207,41 +1219,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
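(The hunk above documents BucketAlreadyExists and BucketAlreadyOwnedByYou. Those codes are also exposed as typed values in the generated Aws::S3::S3Errors enum, so they can be matched without string comparisons. EnsureBucket is a hypothetical helper and the enum spellings are assumed from the generated S3Errors.h rather than confirmed by this diff.)

// Sketch only: matching the bucket-creation errors listed above via the typed enum.
#include <aws/s3/S3Client.h>
#include <aws/s3/S3Errors.h>
#include <aws/s3/model/CreateBucketRequest.h>

bool EnsureBucket(Aws::S3::S3Client& s3, const Aws::String& name)
{
    Aws::S3::Model::CreateBucketRequest request;
    request.SetBucket(name);

    auto outcome = s3.CreateBucket(request);
    if (outcome.IsSuccess()) {
        return true;
    }
    switch (outcome.GetError().GetErrorType()) {
    case Aws::S3::S3Errors::BUCKET_ALREADY_OWNED_BY_YOU:
        return true;   // documented as 409 in all Regions except North Virginia
    case Aws::S3::S3Errors::BUCKET_ALREADY_EXISTS:
        return false;  // the name is taken by another account
    default:
        return false;
    }
}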
@@ -1282,33 +1295,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -1327,11 +1341,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -1339,10 +1353,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -1365,44 +1379,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -1479,20 +1493,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -1519,7 +1534,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -1567,41 +1583,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -1642,33 +1659,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -1687,11 +1705,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -1699,10 +1717,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -1725,44 +1743,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -1839,20 +1857,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -1879,7 +1898,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -1927,41 +1947,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -2002,33 +2023,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -2047,11 +2069,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -2059,10 +2081,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -2085,44 +2107,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -2199,20 +2221,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -2239,7 +2262,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -2287,41 +2311,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -2362,33 +2387,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -2407,11 +2433,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -2419,10 +2445,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -2445,44 +2471,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -2559,20 +2585,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -2599,7 +2626,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -2647,41 +2675,42 @@ namespace Model
* Access Denied</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> AccountProblem</p> </li> <li> <p> <i>Description:</i>
- * There is a problem with your AWS account that prevents the action from
- * completing successfully. Contact AWS Support for further assistance.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
+ * There is a problem with your Amazon Web Services account that prevents the
+ * action from completing successfully. Contact Amazon Web Services Support for
+ * further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> AllAccessDisabled</p> </li> <li> <p>
+ * <i>Description:</i> All access to this Amazon S3 resource has been disabled.
+ * Contact Amazon Web Services Support for further assistance.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * AmbiguousGrantByEmailAddress</p> </li> <li> <p> <i>Description:</i> The email
+ * address you provided is associated with more than one account.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
* Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * AllAccessDisabled</p> </li> <li> <p> <i>Description:</i> All access to this
- * Amazon S3 resource has been disabled. Contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
+ * AuthorizationHeaderMalformed</p> </li> <li> <p> <i>Description:</i> The
+ * authorization header you provided is invalid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP Status Code:</i> N/A</p>
+ * </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BadDigest</p> </li> <li> <p>
+ * <i>Description:</i> The Content-MD5 you specified did not match what we
+ * received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> AmbiguousGrantByEmailAddress</p> </li> <li> <p>
- * <i>Description:</i> The email address you provided is associated with more than
- * one account.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> AuthorizationHeaderMalformed</p> </li> <li> <p>
- * <i>Description:</i> The authorization header you provided is invalid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * BadDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you specified
- * did not match what we received.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li>
- * <p> <i>Description:</i> The requested bucket name is not available. The bucket
- * namespace is shared by all users of the system. Please select a different name
- * and try again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> BucketAlreadyOwnedByYou</p> </li> <li> <p>
- * <i>Description:</i> The bucket you tried to create already exists, and you own
- * it. Amazon S3 returns this error in all AWS Regions except in the North Virginia
- * Region. For legacy compatibility, if you re-create an existing bucket that you
- * already own in the North Virginia Region, Amazon S3 returns 200 OK and resets
- * the bucket access control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409
- * Conflict (in all Regions except the North Virginia Region) </p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket
- * you tried to delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * <li> <p> <i>Code:</i> BucketAlreadyExists</p> </li> <li> <p> <i>Description:</i>
+ * The requested bucket name is not available. The bucket namespace is shared by
+ * all users of the system. Please select a different name and try again.</p> </li>
+ * <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault
+ * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketAlreadyOwnedByYou</p> </li> <li> <p> <i>Description:</i> The bucket you
+ * tried to create already exists, and you own it. Amazon S3 returns this error in
+ * all Amazon Web Services Regions except in the North Virginia Region. For legacy
+ * compatibility, if you re-create an existing bucket that you already own in the
+ * North Virginia Region, Amazon S3 returns 200 OK and resets the bucket access
+ * control lists (ACLs).</p> </li> <li> <p> <i>Code:</i> 409 Conflict (in all
+ * Regions except the North Virginia Region) </p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * BucketNotEmpty</p> </li> <li> <p> <i>Description:</i> The bucket you tried to
+ * delete is not empty.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409
+ * Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> CredentialsNotSupported</p> </li>
* <li> <p> <i>Description:</i> This request does not support credentials.</p>
* </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
@@ -2722,33 +2751,34 @@ namespace Model
* again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 500 Internal Server Error</p>
* </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Server</p> </li> </ul> </li> <li>
* <ul> <li> <p> <i>Code:</i> InvalidAccessKeyId</p> </li> <li> <p>
- * <i>Description:</i> The AWS access key ID you provided does not exist in our
- * records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li> <li> <p>
- * <i>Description:</i> You must specify the Anonymous role.</p> </li> <li> <p>
- * <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
- * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidArgument</p>
- * </li> <li> <p> <i>Description:</i> Invalid Argument</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
- * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The specified bucket is
- * not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li> <p> <i>Description:</i>
- * The request is not valid with the current state of the bucket.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code
+ * <i>Description:</i> The Amazon Web Services access key ID you provided does not
+ * exist in our records.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidAddressingHeader</p> </li>
+ * <li> <p> <i>Description:</i> You must specify the Anonymous role.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> N/A</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidDigest</p> </li> <li> <p> <i>Description:</i> The Content-MD5 you
- * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidEncryptionAlgorithmError</p> </li>
- * <li> <p> <i>Description:</i> The encryption request you specified is not valid.
- * The valid value is AES256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
- * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidLocationConstraint</p> </li> <li>
- * <p> <i>Description:</i> The specified location constraint is not valid. For more
- * information about Regions, see <a
+ * InvalidArgument</p> </li> <li> <p> <i>Description:</i> Invalid Argument</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidBucketName</p> </li> <li> <p> <i>Description:</i> The
+ * specified bucket is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidBucketState</p> </li> <li>
+ * <p> <i>Description:</i> The request is not valid with the current state of the
+ * bucket.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> InvalidDigest</p> </li> <li> <p> <i>Description:</i> The
+ * Content-MD5 you specified is not valid.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * InvalidEncryptionAlgorithmError</p> </li> <li> <p> <i>Description:</i> The
+ * encryption request you specified is not valid. The valid value is AES256.</p>
+ * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
+ * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidLocationConstraint</p> </li> <li> <p> <i>Description:</i>
+ * The specified location constraint is not valid. For more information about
+ * Regions, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/UsingBucket.html#access-bucket-intro">How
* to Select a Region for Your Buckets</a>. </p> </li> <li> <p> <i>HTTP Status
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
@@ -2767,11 +2797,11 @@ namespace Model
* number.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
* <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
* <li> <p> <i>Code:</i> InvalidPayer</p> </li> <li> <p> <i>Description:</i> All
- * access to this object has been disabled. Please contact AWS Support for further
- * assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li> <p>
- * <i>Description:</i> The content of the form does not meet the conditions
+ * access to this object has been disabled. Please contact Amazon Web Services
+ * Support for further assistance.</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
+ * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidPolicyDocument</p> </li> <li>
+ * <p> <i>Description:</i> The content of the form does not meet the conditions
* specified in the policy document.</p> </li> <li> <p> <i>HTTP Status Code:</i>
* 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p>
* </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRange</p> </li> <li>
@@ -2779,10 +2809,10 @@ namespace Model
* <p> <i>HTTP Status Code:</i> 416 Requested Range Not Satisfiable</p> </li> <li>
* <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
* <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> Please
- * use AWS4-HMAC-SHA256.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
- * Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li>
- * <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i> SOAP
- * requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
+ * use <code>AWS4-HMAC-SHA256</code>.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p> <i>Description:</i>
+ * SOAP requests must be made over an HTTPS connection.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* InvalidRequest</p> </li> <li> <p> <i>Description:</i> Amazon S3 Transfer
@@ -2805,44 +2835,44 @@ namespace Model
* Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
* <i>Description:</i> Amazon S3 Transfer Acceleration is not supported on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p> </li> <li> <p>
- * <i>Description:</i> Amazon S3 Transfer Acceleration cannot be enabled on this
- * bucket. Contact AWS Support for more information.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i> N/A</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSecurity</p> </li> <li> <p>
- * <i>Description:</i> The provided security credentials are not valid.</p> </li>
- * <li> <p> <i>HTTP Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidSOAPRequest</p> </li> <li> <p> <i>Description:</i> The SOAP request body
- * is invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
- * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
- * <ul> <li> <p> <i>Code:</i> InvalidStorageClass</p> </li> <li> <p>
- * <i>Description:</i> The storage class you specified is not valid.</p> </li> <li>
- * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault
- * Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidTargetBucketForLogging</p> </li> <li> <p> <i>Description:</i> The target
- * bucket for logging does not exist, is not owned by you, or does not have the
- * appropriate grants for the log-delivery group. </p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
+ * bucket. Contact Amazon Web Services Support for more information.</p> </li> <li>
+ * <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>Code:</i>
+ * N/A</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidRequest</p>
+ * </li> <li> <p> <i>Description:</i> Amazon S3 Transfer Acceleration cannot be
+ * enabled on this bucket. Contact Amazon Web Services Support for more
+ * information.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>Code:</i> N/A</p> </li> </ul> </li> <li> <ul> <li> <p>
+ * <i>Code:</i> InvalidSecurity</p> </li> <li> <p> <i>Description:</i> The provided
+ * security credentials are not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i>
+ * 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidSOAPRequest</p> </li> <li>
+ * <p> <i>Description:</i> The SOAP request body is invalid.</p> </li> <li> <p>
+ * <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * InvalidToken</p> </li> <li> <p> <i>Description:</i> The provided token is
- * malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
- * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> InvalidURI</p> </li> <li> <p>
- * <i>Description:</i> Couldn't parse the specified URI.</p> </li> <li> <p> <i>HTTP
+ * InvalidStorageClass</p> </li> <li> <p> <i>Description:</i> The storage class you
+ * specified is not valid.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
+ * Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
+ * </li> <li> <ul> <li> <p> <i>Code:</i> InvalidTargetBucketForLogging</p> </li>
+ * <li> <p> <i>Description:</i> The target bucket for logging does not exist, is
+ * not owned by you, or does not have the appropriate grants for the log-delivery
+ * group. </p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
+ * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
+ * <li> <p> <i>Code:</i> InvalidToken</p> </li> <li> <p> <i>Description:</i> The
+ * provided token is malformed or otherwise invalid.</p> </li> <li> <p> <i>HTTP
* Status Code:</i> 400 Bad Request</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
- * KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key is too long.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> MalformedACLError</p> </li> <li> <p> <i>Description:</i> The XML
- * you provided was not well-formed or did not validate against our published
- * schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li>
- * <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul>
- * <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li> <p>
- * <i>Description:</i> The body of your POST request is not well-formed
+ * InvalidURI</p> </li> <li> <p> <i>Description:</i> Couldn't parse the specified
+ * URI.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> KeyTooLongError</p> </li> <li> <p> <i>Description:</i> Your key
+ * is too long.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad Request</p>
+ * </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li>
+ * <ul> <li> <p> <i>Code:</i> MalformedACLError</p> </li> <li> <p>
+ * <i>Description:</i> The XML you provided was not well-formed or did not validate
+ * against our published schema.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400
+ * Bad Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
+ * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> MalformedPOSTRequest </p> </li> <li>
+ * <p> <i>Description:</i> The body of your POST request is not well-formed
* multipart/form-data.</p> </li> <li> <p> <i>HTTP Status Code:</i> 400 Bad
* Request</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul>
* </li> <li> <ul> <li> <p> <i>Code:</i> MalformedXML</p> </li> <li> <p>
@@ -2919,20 +2949,21 @@ namespace Model
* </ul> </li> <li> <ul> <li> <p> <i>Code:</i> NotSignedUp</p> </li> <li> <p>
* <i>Description:</i> Your account is not signed up for the Amazon S3 service. You
* must sign up before you can use Amazon S3. You can sign up at the following URL:
- * https://aws.amazon.com/s3</p> </li> <li> <p> <i>HTTP Status Code:</i> 403
- * Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> OperationAborted</p> </li> <li> <p>
- * <i>Description:</i> A conflicting conditional action is currently in progress
- * against this resource. Try again.</p> </li> <li> <p> <i>HTTP Status Code:</i>
- * 409 Conflict</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li>
- * </ul> </li> <li> <ul> <li> <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p>
- * <i>Description:</i> The bucket you are attempting to access must be addressed
- * using the specified endpoint. Send all future requests to this endpoint.</p>
- * </li> <li> <p> <i>HTTP Status Code:</i> 301 Moved Permanently</p> </li> <li> <p>
- * <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p>
- * <i>Code:</i> PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least
- * one of the preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP
- * Status Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
+ * <a href="http://aws.amazon.com/s3">Amazon S3</a> </p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 403 Forbidden</p> </li> <li> <p> <i>SOAP Fault Code Prefix:</i>
+ * Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * OperationAborted</p> </li> <li> <p> <i>Description:</i> A conflicting
+ * conditional action is currently in progress against this resource. Try
+ * again.</p> </li> <li> <p> <i>HTTP Status Code:</i> 409 Conflict</p> </li> <li>
+ * <p> <i>SOAP Fault Code Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li>
+ * <p> <i>Code:</i> PermanentRedirect</p> </li> <li> <p> <i>Description:</i> The
+ * bucket you are attempting to access must be addressed using the specified
+ * endpoint. Send all future requests to this endpoint.</p> </li> <li> <p> <i>HTTP
+ * Status Code:</i> 301 Moved Permanently</p> </li> <li> <p> <i>SOAP Fault Code
+ * Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
+ * PreconditionFailed</p> </li> <li> <p> <i>Description:</i> At least one of the
+ * preconditions you specified did not hold.</p> </li> <li> <p> <i>HTTP Status
+ * Code:</i> 412 Precondition Failed</p> </li> <li> <p> <i>SOAP Fault Code
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* Redirect</p> </li> <li> <p> <i>Description:</i> Temporary redirect.</p> </li>
* <li> <p> <i>HTTP Status Code:</i> 307 Moved Temporarily</p> </li> <li> <p>
@@ -2959,7 +2990,8 @@ namespace Model
* Prefix:</i> Client</p> </li> </ul> </li> <li> <ul> <li> <p> <i>Code:</i>
* SignatureDoesNotMatch</p> </li> <li> <p> <i>Description:</i> The request
* signature we calculated does not match the signature you provided. Check your
- * AWS secret access key and signing method. For more information, see <a
+ * Amazon Web Services secret access key and signing method. For more information,
+ * see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/RESTAuthentication.html">REST
* Authentication</a> and <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/SOAPAuthentication.html">SOAP
@@ -3083,16 +3115,16 @@ namespace Model
private:
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_code;
- bool m_codeHasBeenSet;
+ bool m_codeHasBeenSet = false;
Aws::String m_message;
- bool m_messageHasBeenSet;
+ bool m_messageHasBeenSet = false;
};
} // namespace Model
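
The hunk above documents the per-object error entry (Key, VersionId, Code, Message) whose comment carries the long S3 error-code table; S3 returns these entries, for example, in a DeleteObjects response. As a usage illustration only, not part of the diff, a minimal sketch of reading them with this SDK; the bucket and key names are placeholders:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/DeleteObjectsRequest.h>
#include <aws/s3/model/Delete.h>
#include <aws/s3/model/ObjectIdentifier.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default credential/region resolution
        Aws::S3::Model::DeleteObjectsRequest request;
        request.SetBucket("my-example-bucket");  // placeholder bucket name

        Aws::S3::Model::Delete del;
        del.AddObjects(Aws::S3::Model::ObjectIdentifier().WithKey("missing/key.txt"));
        request.SetDelete(del);

        auto outcome = client.DeleteObjects(request);
        if (outcome.IsSuccess()) {
            // Per-object failures come back as Error entries (Code, Message, Key).
            for (const auto& err : outcome.GetResult().GetErrors()) {
                std::cout << err.GetKey() << ": " << err.GetCode()
                          << " - " << err.GetMessage() << std::endl;
            }
        } else {
            std::cout << "Request failed: " << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
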
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ErrorDocument.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ErrorDocument.h
index 114e8b8865..eb35c3438c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ErrorDocument.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ErrorDocument.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ErrorDocument">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ErrorDocument
+ class ErrorDocument
{
public:
- ErrorDocument();
- ErrorDocument(const Aws::Utils::Xml::XmlNode& xmlNode);
- ErrorDocument& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ErrorDocument();
+ AWS_S3_API ErrorDocument(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ErrorDocument& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -112,7 +112,7 @@ namespace Model
private:
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
};
} // namespace Model
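
Two mechanical changes recur throughout the model headers in this update: the AWS_S3_API export macro moves from the class declaration onto the individual out-of-line methods, and the *HasBeenSet flags gain in-class "= false" initializers. Below is a simplified, illustrative sketch of that pattern on a hypothetical model class; the macro shape shown is the conventional dllexport/dllimport arrangement, not a verbatim copy of aws/s3/S3_EXPORTS.h:

// Illustrative only; names and conditions simplified.
#include <string>

#if defined(_WIN32) && defined(USE_IMPORT_EXPORT)
  #ifdef AWS_S3_EXPORTS
    #define AWS_S3_API __declspec(dllexport)   // building the S3 shared library
  #else
    #define AWS_S3_API __declspec(dllimport)   // consuming the S3 shared library
  #endif
#else
  #define AWS_S3_API                           // static or non-Windows builds
#endif

class ExampleModel                    // export macro no longer applied to the class
{
public:
    AWS_S3_API ExampleModel();                  // applied per out-of-line method instead
    AWS_S3_API void AddToNode() const;

    bool KeyHasBeenSet() const { return m_keyHasBeenSet; }  // inline accessors need no export

private:
    std::string m_key;
    bool m_keyHasBeenSet = false;     // in-class initializer replaces constructor-body init
};
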
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Event.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Event.h
index 036cc47543..b0c9bbcbd9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Event.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Event.h
@@ -32,7 +32,17 @@ namespace Model
s3_Replication_OperationFailedReplication,
s3_Replication_OperationNotTracked,
s3_Replication_OperationMissedThreshold,
- s3_Replication_OperationReplicatedAfterThreshold
+ s3_Replication_OperationReplicatedAfterThreshold,
+ s3_ObjectRestore_Delete,
+ s3_LifecycleTransition,
+ s3_IntelligentTiering,
+ s3_ObjectAcl_Put,
+ s3_LifecycleExpiration,
+ s3_LifecycleExpiration_Delete,
+ s3_LifecycleExpiration_DeleteMarkerCreated,
+ s3_ObjectTagging,
+ s3_ObjectTagging_Put,
+ s3_ObjectTagging_Delete
};
namespace EventMapper
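
The Event enum gains values for object tagging, object ACL, lifecycle, and intelligent-tiering notifications. A minimal sketch, not part of the diff, of requesting one of the new event types in a bucket notification; the bucket name and SNS topic ARN are placeholders:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketNotificationConfigurationRequest.h>
#include <aws/s3/model/NotificationConfiguration.h>
#include <aws/s3/model/TopicConfiguration.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        // Publish the newly added s3_ObjectTagging_Put event to an SNS topic.
        Aws::S3::Model::TopicConfiguration topicCfg;
        topicCfg.SetTopicArn("arn:aws:sns:us-east-1:111122223333:example-topic");  // placeholder ARN
        topicCfg.AddEvents(Aws::S3::Model::Event::s3_ObjectTagging_Put);

        Aws::S3::Model::NotificationConfiguration notification;
        notification.AddTopicConfigurations(topicCfg);

        Aws::S3::Model::PutBucketNotificationConfigurationRequest request;
        request.SetBucket("my-example-bucket");  // placeholder bucket name
        request.SetNotificationConfiguration(notification);

        auto outcome = client.PutBucketNotificationConfiguration(request);
        if (!outcome.IsSuccess()) {
            std::cout << "PutBucketNotificationConfiguration failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
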
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EventBridgeConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EventBridgeConfiguration.h
new file mode 100644
index 0000000000..023d3a50a6
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/EventBridgeConfiguration.h
@@ -0,0 +1,42 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+
+namespace Aws
+{
+namespace Utils
+{
+namespace Xml
+{
+ class XmlNode;
+} // namespace Xml
+} // namespace Utils
+namespace S3
+{
+namespace Model
+{
+
+ /**
+ * <p>A container for specifying the configuration for Amazon
+ * EventBridge.</p><p><h3>See Also:</h3> <a
+ * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/EventBridgeConfiguration">AWS
+ * API Reference</a></p>
+ */
+ class EventBridgeConfiguration
+ {
+ public:
+ AWS_S3_API EventBridgeConfiguration();
+ AWS_S3_API EventBridgeConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API EventBridgeConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+
+ };
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
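
EventBridgeConfiguration is a new, deliberately empty marker type: attaching it to a bucket's notification configuration is what enables delivery of S3 events to Amazon EventBridge. A minimal sketch, assuming NotificationConfiguration exposes the usual generated SetEventBridgeConfiguration setter for this new member (that setter is not shown in this diff):

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketNotificationConfigurationRequest.h>
#include <aws/s3/model/NotificationConfiguration.h>
#include <aws/s3/model/EventBridgeConfiguration.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        Aws::S3::Model::NotificationConfiguration notification;
        // Assumption: the setter follows the generated Set<MemberName> pattern;
        // EventBridgeConfiguration itself carries no fields.
        notification.SetEventBridgeConfiguration(Aws::S3::Model::EventBridgeConfiguration());

        Aws::S3::Model::PutBucketNotificationConfigurationRequest request;
        request.SetBucket("my-example-bucket");  // placeholder bucket name
        request.SetNotificationConfiguration(notification);

        auto outcome = client.PutBucketNotificationConfiguration(request);
        if (!outcome.IsSuccess()) {
            std::cout << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
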
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ExistingObjectReplication.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ExistingObjectReplication.h
index 9484083682..152b8d7629 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ExistingObjectReplication.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ExistingObjectReplication.h
@@ -26,19 +26,19 @@ namespace Model
* <p>Optional configuration to replicate existing source bucket objects. For more
* information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-what-is-isnot-replicated.html#existing-object-replication">Replicating
- * Existing Objects</a> in the <i>Amazon S3 Developer Guide</i>. </p><p><h3>See
+ * Existing Objects</a> in the <i>Amazon S3 User Guide</i>. </p><p><h3>See
* Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ExistingObjectReplication">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ExistingObjectReplication
+ class ExistingObjectReplication
{
public:
- ExistingObjectReplication();
- ExistingObjectReplication(const Aws::Utils::Xml::XmlNode& xmlNode);
- ExistingObjectReplication& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ExistingObjectReplication();
+ AWS_S3_API ExistingObjectReplication(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ExistingObjectReplication& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -74,7 +74,7 @@ namespace Model
private:
ExistingObjectReplicationStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/FilterRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/FilterRule.h
index efbbb66359..e5236cd076 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/FilterRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/FilterRule.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/FilterRule">AWS API
* Reference</a></p>
*/
- class AWS_S3_API FilterRule
+ class FilterRule
{
public:
- FilterRule();
- FilterRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- FilterRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API FilterRule();
+ AWS_S3_API FilterRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API FilterRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -137,10 +137,10 @@ namespace Model
private:
FilterRuleName m_name;
- bool m_nameHasBeenSet;
+ bool m_nameHasBeenSet = false;
Aws::String m_value;
- bool m_valueHasBeenSet;
+ bool m_valueHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationRequest.h
index 24523b85d8..146941b445 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketAccelerateConfigurationRequest : public S3Request
+ class GetBucketAccelerateConfigurationRequest : public S3Request
{
public:
- GetBucketAccelerateConfigurationRequest();
+ AWS_S3_API GetBucketAccelerateConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketAccelerateConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which the accelerate configuration is
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAccelerateConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAccelerateConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAccelerateConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
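
The reworded ExpectedBucketOwner documentation (403 Forbidden rather than "403 (Access Denied)") repeats across the GetBucket* requests in this update. A minimal sketch of supplying the expected owner on this particular request and reading the accelerate status from the corresponding result; the account ID and bucket name are placeholders:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketAccelerateConfigurationRequest.h>
#include <aws/s3/model/BucketAccelerateStatus.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        Aws::S3::Model::GetBucketAccelerateConfigurationRequest request;
        request.WithBucket("my-example-bucket")            // placeholder bucket name
               .WithExpectedBucketOwner("111122223333");   // placeholder account ID
        // If the bucket is owned by a different account, S3 answers 403 Forbidden.

        auto outcome = client.GetBucketAccelerateConfiguration(request);
        if (outcome.IsSuccess()) {
            bool accelerated = outcome.GetResult().GetStatus()
                               == Aws::S3::Model::BucketAccelerateStatus::Enabled;
            std::cout << "Transfer Acceleration enabled: " << std::boolalpha
                      << accelerated << std::endl;
        } else {
            std::cout << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
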
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationResult.h
index 7838cbb34d..d866a39f5a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAccelerateConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketAccelerateConfigurationResult
+ class GetBucketAccelerateConfigurationResult
{
public:
- GetBucketAccelerateConfigurationResult();
- GetBucketAccelerateConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketAccelerateConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketAccelerateConfigurationResult();
+ AWS_S3_API GetBucketAccelerateConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketAccelerateConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclRequest.h
index c4c03d2d4f..7f89407726 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketAclRequest : public S3Request
+ class GetBucketAclRequest : public S3Request
{
public:
- GetBucketAclRequest();
+ AWS_S3_API GetBucketAclRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketAcl"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>Specifies the S3 bucket whose ACL is being requested.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAclRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAclRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAclRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
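
GetBucketAclRequest follows the same request pattern; its result carries the bucket owner and the grant list. A brief sketch, again with a placeholder bucket name, of reading both:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketAclRequest.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;

        Aws::S3::Model::GetBucketAclRequest request;
        request.SetBucket("my-example-bucket");  // placeholder bucket name

        auto outcome = client.GetBucketAcl(request);
        if (outcome.IsSuccess()) {
            const auto& result = outcome.GetResult();
            std::cout << "Owner: " << result.GetOwner().GetDisplayName() << std::endl;
            // Each Grant pairs a grantee with a permission (FULL_CONTROL, READ, ...).
            std::cout << "Grants: " << result.GetGrants().size() << std::endl;
        } else {
            std::cout << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
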
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclResult.h
index dc3723c89c..54b6246913 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAclResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketAclResult
+ class GetBucketAclResult
{
public:
- GetBucketAclResult();
- GetBucketAclResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketAclResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketAclResult();
+ AWS_S3_API GetBucketAclResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketAclResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationRequest.h
index 559bb22660..779aa8a272 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketAnalyticsConfigurationRequest : public S3Request
+ class GetBucketAnalyticsConfigurationRequest : public S3Request
{
public:
- GetBucketAnalyticsConfigurationRequest();
+ AWS_S3_API GetBucketAnalyticsConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketAnalyticsConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket from which an analytics configuration is
@@ -133,57 +137,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -230,16 +234,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationResult.h
index d0847a1458..a6846be102 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketAnalyticsConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketAnalyticsConfigurationResult
+ class GetBucketAnalyticsConfigurationResult
{
public:
- GetBucketAnalyticsConfigurationResult();
- GetBucketAnalyticsConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketAnalyticsConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketAnalyticsConfigurationResult();
+ AWS_S3_API GetBucketAnalyticsConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketAnalyticsConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsRequest.h
index 3e9b8cd66f..05a5bc1e9b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketCorsRequest : public S3Request
+ class GetBucketCorsRequest : public S3Request
{
public:
- GetBucketCorsRequest();
+ AWS_S3_API GetBucketCorsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketCors"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name for which to get the cors configuration.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketCorsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketCorsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketCorsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsResult.h
index 8e0e34069f..2b40f60a0b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketCorsResult.h
@@ -25,12 +25,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketCorsResult
+ class GetBucketCorsResult
{
public:
- GetBucketCorsResult();
- GetBucketCorsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketCorsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketCorsResult();
+ AWS_S3_API GetBucketCorsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketCorsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionRequest.h
index 1bb5bfd2eb..f985c90ce5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketEncryptionRequest : public S3Request
+ class GetBucketEncryptionRequest : public S3Request
{
public:
- GetBucketEncryptionRequest();
+ AWS_S3_API GetBucketEncryptionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketEncryption"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket from which the server-side encryption configuration is
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketEncryptionRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketEncryptionRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketEncryptionRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionResult.h
index deab65dbd4..758bd3af9f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketEncryptionResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketEncryptionResult
+ class GetBucketEncryptionResult
{
public:
- GetBucketEncryptionResult();
- GetBucketEncryptionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketEncryptionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketEncryptionResult();
+ AWS_S3_API GetBucketEncryptionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketEncryptionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationRequest.h
index ddba23557e..951cefa202 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketIntelligentTieringConfigurationRequest : public S3Request
+ class GetBucketIntelligentTieringConfigurationRequest : public S3Request
{
public:
- GetBucketIntelligentTieringConfigurationRequest();
+ AWS_S3_API GetBucketIntelligentTieringConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,10 +34,14 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketIntelligentTieringConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose configuration you want to modify or
@@ -171,13 +175,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h
index a039bb60be..8965f54347 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketIntelligentTieringConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketIntelligentTieringConfigurationResult
+ class GetBucketIntelligentTieringConfigurationResult
{
public:
- GetBucketIntelligentTieringConfigurationResult();
- GetBucketIntelligentTieringConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketIntelligentTieringConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketIntelligentTieringConfigurationResult();
+ AWS_S3_API GetBucketIntelligentTieringConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketIntelligentTieringConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationRequest.h
index 6bbac67627..41648d1c70 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketInventoryConfigurationRequest : public S3Request
+ class GetBucketInventoryConfigurationRequest : public S3Request
{
public:
- GetBucketInventoryConfigurationRequest();
+ AWS_S3_API GetBucketInventoryConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketInventoryConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the inventory configuration to
@@ -133,57 +137,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketInventoryConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketInventoryConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketInventoryConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -230,16 +234,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationResult.h
index 034c56c8ef..21c188282f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketInventoryConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketInventoryConfigurationResult
+ class GetBucketInventoryConfigurationResult
{
public:
- GetBucketInventoryConfigurationResult();
- GetBucketInventoryConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketInventoryConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketInventoryConfigurationResult();
+ AWS_S3_API GetBucketInventoryConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketInventoryConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationRequest.h
index b26b05c742..be9ec90e32 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketLifecycleConfigurationRequest : public S3Request
+ class GetBucketLifecycleConfigurationRequest : public S3Request
{
public:
- GetBucketLifecycleConfigurationRequest();
+ AWS_S3_API GetBucketLifecycleConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketLifecycleConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to get the lifecycle information.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLifecycleConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLifecycleConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLifecycleConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationResult.h
index c04c22a758..ffeab90760 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLifecycleConfigurationResult.h
@@ -25,12 +25,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketLifecycleConfigurationResult
+ class GetBucketLifecycleConfigurationResult
{
public:
- GetBucketLifecycleConfigurationResult();
- GetBucketLifecycleConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketLifecycleConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketLifecycleConfigurationResult();
+ AWS_S3_API GetBucketLifecycleConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketLifecycleConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationRequest.h
index fa4aed4020..74cd31e044 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketLocationRequest : public S3Request
+ class GetBucketLocationRequest : public S3Request
{
public:
- GetBucketLocationRequest();
+ AWS_S3_API GetBucketLocationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketLocation"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to get the location.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLocationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLocationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLocationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
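For reference, a minimal usage sketch of the request shown above, assuming the SDK is built and credentials/region come from the default chain; the bucket name and account ID are placeholders. It exercises the ExpectedBucketOwner guard whose documentation the patch rewords to "403 Forbidden":

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetBucketLocationRequest.h>
    #include <aws/s3/model/BucketLocationConstraint.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;  // default credential chain and region

            Aws::S3::Model::GetBucketLocationRequest request;
            request.SetBucket("my-example-bucket");          // placeholder bucket
            request.SetExpectedBucketOwner("111122223333");  // placeholder account ID

            auto outcome = client.GetBucketLocation(request);
            if (outcome.IsSuccess()) {
                std::cout << Aws::S3::Model::BucketLocationConstraintMapper::
                                 GetNameForBucketLocationConstraint(
                                     outcome.GetResult().GetLocationConstraint())
                          << std::endl;
            } else {
                // A bucket owned by a different account answers with HTTP 403
                // Forbidden, which surfaces here as an error outcome.
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }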
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationResult.h
index 4f777648cc..9c203932ca 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLocationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketLocationResult
+ class GetBucketLocationResult
{
public:
- GetBucketLocationResult();
- GetBucketLocationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketLocationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketLocationResult();
+ AWS_S3_API GetBucketLocationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketLocationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingRequest.h
index 833a4534b1..0193cad7ba 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketLoggingRequest : public S3Request
+ class GetBucketLoggingRequest : public S3Request
{
public:
- GetBucketLoggingRequest();
+ AWS_S3_API GetBucketLoggingRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketLogging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name for which to get the logging information.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLoggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLoggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketLoggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingResult.h
index 1495a54ba4..88a9ea7aac 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketLoggingResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketLoggingResult
+ class GetBucketLoggingResult
{
public:
- GetBucketLoggingResult();
- GetBucketLoggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketLoggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketLoggingResult();
+ AWS_S3_API GetBucketLoggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketLoggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationRequest.h
index dd345e42d2..1ab4cdcabf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketMetricsConfigurationRequest : public S3Request
+ class GetBucketMetricsConfigurationRequest : public S3Request
{
public:
- GetBucketMetricsConfigurationRequest();
+ AWS_S3_API GetBucketMetricsConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketMetricsConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the metrics configuration to retrieve.</p>
@@ -125,57 +129,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketMetricsConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketMetricsConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketMetricsConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -222,16 +226,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationResult.h
index bb60e97dbf..6bc09c0022 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketMetricsConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketMetricsConfigurationResult
+ class GetBucketMetricsConfigurationResult
{
public:
- GetBucketMetricsConfigurationResult();
- GetBucketMetricsConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketMetricsConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketMetricsConfigurationResult();
+ AWS_S3_API GetBucketMetricsConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketMetricsConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationRequest.h
index 2a191d8f4c..ab6d442b5f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketNotificationConfigurationRequest : public S3Request
+ class GetBucketNotificationConfigurationRequest : public S3Request
{
public:
- GetBucketNotificationConfigurationRequest();
+ AWS_S3_API GetBucketNotificationConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketNotificationConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to get the notification configuration.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketNotificationConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketNotificationConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketNotificationConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationResult.h
index a8fe0671ca..49673d0e44 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketNotificationConfigurationResult.h
@@ -6,6 +6,7 @@
#pragma once
#include <aws/s3/S3_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <aws/s3/model/EventBridgeConfiguration.h>
#include <aws/s3/model/TopicConfiguration.h>
#include <aws/s3/model/QueueConfiguration.h>
#include <aws/s3/model/LambdaFunctionConfiguration.h>
@@ -34,12 +35,12 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API GetBucketNotificationConfigurationResult
+ class GetBucketNotificationConfigurationResult
{
public:
- GetBucketNotificationConfigurationResult();
- GetBucketNotificationConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketNotificationConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketNotificationConfigurationResult();
+ AWS_S3_API GetBucketNotificationConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketNotificationConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -129,47 +130,73 @@ namespace Model
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline const Aws::Vector<LambdaFunctionConfiguration>& GetLambdaFunctionConfigurations() const{ return m_lambdaFunctionConfigurations; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline void SetLambdaFunctionConfigurations(const Aws::Vector<LambdaFunctionConfiguration>& value) { m_lambdaFunctionConfigurations = value; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline void SetLambdaFunctionConfigurations(Aws::Vector<LambdaFunctionConfiguration>&& value) { m_lambdaFunctionConfigurations = std::move(value); }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline GetBucketNotificationConfigurationResult& WithLambdaFunctionConfigurations(const Aws::Vector<LambdaFunctionConfiguration>& value) { SetLambdaFunctionConfigurations(value); return *this;}
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline GetBucketNotificationConfigurationResult& WithLambdaFunctionConfigurations(Aws::Vector<LambdaFunctionConfiguration>&& value) { SetLambdaFunctionConfigurations(std::move(value)); return *this;}
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline GetBucketNotificationConfigurationResult& AddLambdaFunctionConfigurations(const LambdaFunctionConfiguration& value) { m_lambdaFunctionConfigurations.push_back(value); return *this; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline GetBucketNotificationConfigurationResult& AddLambdaFunctionConfigurations(LambdaFunctionConfiguration&& value) { m_lambdaFunctionConfigurations.push_back(std::move(value)); return *this; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline const EventBridgeConfiguration& GetEventBridgeConfiguration() const{ return m_eventBridgeConfiguration; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline void SetEventBridgeConfiguration(const EventBridgeConfiguration& value) { m_eventBridgeConfiguration = value; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline void SetEventBridgeConfiguration(EventBridgeConfiguration&& value) { m_eventBridgeConfiguration = std::move(value); }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline GetBucketNotificationConfigurationResult& WithEventBridgeConfiguration(const EventBridgeConfiguration& value) { SetEventBridgeConfiguration(value); return *this;}
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline GetBucketNotificationConfigurationResult& WithEventBridgeConfiguration(EventBridgeConfiguration&& value) { SetEventBridgeConfiguration(std::move(value)); return *this;}
+
private:
Aws::Vector<TopicConfiguration> m_topicConfigurations;
@@ -177,6 +204,8 @@ namespace Model
Aws::Vector<QueueConfiguration> m_queueConfigurations;
Aws::Vector<LambdaFunctionConfiguration> m_lambdaFunctionConfigurations;
+
+ EventBridgeConfiguration m_eventBridgeConfiguration;
};
} // namespace Model
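A short sketch of the accessor added above, assuming an already-configured S3Client and a placeholder bucket name. EventBridgeConfiguration is an empty marker structure in the S3 data model, so the result exposes it without any fields of its own:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetBucketNotificationConfigurationRequest.h>

    void InspectNotifications(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::GetBucketNotificationConfigurationRequest request;
        request.SetBucket("my-example-bucket");  // placeholder bucket

        auto outcome = client.GetBucketNotificationConfiguration(request);
        if (outcome.IsSuccess()) {
            const auto& result = outcome.GetResult();

            // New in this revision: the EventBridge block parsed out of the
            // NotificationConfiguration XML (an empty structure by design).
            const auto& eventBridge = result.GetEventBridgeConfiguration();
            (void)eventBridge;

            // The existing accessors are unchanged.
            for (const auto& lambdaConfig : result.GetLambdaFunctionConfigurations()) {
                (void)lambdaConfig;  // e.g. lambdaConfig.GetLambdaFunctionArn()
            }
        }
    }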
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsRequest.h
index 37f8628125..a9355e3861 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketOwnershipControlsRequest : public S3Request
+ class GetBucketOwnershipControlsRequest : public S3Request
{
public:
- GetBucketOwnershipControlsRequest();
+ AWS_S3_API GetBucketOwnershipControlsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketOwnershipControls"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose <code>OwnershipControls</code> you
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketOwnershipControlsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketOwnershipControlsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketOwnershipControlsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsResult.h
index 9c28367889..b49e907ce8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketOwnershipControlsResult.h
@@ -24,41 +24,46 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketOwnershipControlsResult
+ class GetBucketOwnershipControlsResult
{
public:
- GetBucketOwnershipControlsResult();
- GetBucketOwnershipControlsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketOwnershipControlsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketOwnershipControlsResult();
+ AWS_S3_API GetBucketOwnershipControlsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketOwnershipControlsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * currently in effect for this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) currently in effect for this Amazon S3
+ * bucket.</p>
*/
inline const OwnershipControls& GetOwnershipControls() const{ return m_ownershipControls; }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * currently in effect for this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) currently in effect for this Amazon S3
+ * bucket.</p>
*/
inline void SetOwnershipControls(const OwnershipControls& value) { m_ownershipControls = value; }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * currently in effect for this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) currently in effect for this Amazon S3
+ * bucket.</p>
*/
inline void SetOwnershipControls(OwnershipControls&& value) { m_ownershipControls = std::move(value); }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * currently in effect for this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) currently in effect for this Amazon S3
+ * bucket.</p>
*/
inline GetBucketOwnershipControlsResult& WithOwnershipControls(const OwnershipControls& value) { SetOwnershipControls(value); return *this;}
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * currently in effect for this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) currently in effect for this Amazon S3
+ * bucket.</p>
*/
inline GetBucketOwnershipControlsResult& WithOwnershipControls(OwnershipControls&& value) { SetOwnershipControls(std::move(value)); return *this;}
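A brief sketch of reading the setting the reworded comment above describes, assuming an already-configured S3Client and a placeholder bucket name; BucketOwnerEnforced is the ownership value the new wording lists alongside BucketOwnerPreferred and ObjectWriter:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetBucketOwnershipControlsRequest.h>
    #include <aws/s3/model/GetBucketOwnershipControlsResult.h>
    #include <aws/s3/model/ObjectOwnership.h>

    void ReportOwnership(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::GetBucketOwnershipControlsRequest request;
        request.SetBucket("my-example-bucket");  // placeholder bucket

        auto outcome = client.GetBucketOwnershipControls(request);
        if (outcome.IsSuccess()) {
            for (const auto& rule : outcome.GetResult().GetOwnershipControls().GetRules()) {
                if (rule.GetObjectOwnership() ==
                    Aws::S3::Model::ObjectOwnership::BucketOwnerEnforced) {
                    // ACLs are disabled for this bucket; the bucket owner owns
                    // every object, which is the case the new wording covers.
                }
            }
        }
    }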
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyRequest.h
index 2f3c3e4b67..37ed5815f1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketPolicyRequest : public S3Request
+ class GetBucketPolicyRequest : public S3Request
{
public:
- GetBucketPolicyRequest();
+ AWS_S3_API GetBucketPolicyRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketPolicy"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name for which to get the bucket policy.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketPolicyRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketPolicyRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketPolicyRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
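
Usage of GetBucketPolicyRequest is unchanged by the relocation of the AWS_S3_API export macro from the class to its individual members. A minimal sketch under assumed defaults (default credential chain and region from the environment; the bucket name and account ID below are placeholders): a mismatched ExpectedBucketOwner is expected to surface as the 403 Forbidden (access denied) error described in the comments above.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketPolicyRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;                        // default credential chain and region
        Aws::S3::Model::GetBucketPolicyRequest request;
        request.SetBucket("example-bucket");             // hypothetical bucket name
        request.SetExpectedBucketOwner("111122223333");  // hypothetical account ID

        auto outcome = client.GetBucketPolicy(request);
        if (!outcome.IsSuccess())
        {
            // A mismatched ExpectedBucketOwner is reported here as 403 Forbidden (access denied).
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}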
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyResult.h
index 80857238c6..588b178f68 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyResult.h
@@ -18,28 +18,28 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketPolicyResult
+ class GetBucketPolicyResult
{
public:
- GetBucketPolicyResult();
+ AWS_S3_API GetBucketPolicyResult();
//We have to define these because Microsoft doesn't auto generate them
- GetBucketPolicyResult(GetBucketPolicyResult&&);
- GetBucketPolicyResult& operator=(GetBucketPolicyResult&&);
+ AWS_S3_API GetBucketPolicyResult(GetBucketPolicyResult&&);
+ AWS_S3_API GetBucketPolicyResult& operator=(GetBucketPolicyResult&&);
//we delete these because Microsoft doesn't handle move generation correctly
//and we therefore don't trust them to get it right here either.
GetBucketPolicyResult(const GetBucketPolicyResult&) = delete;
GetBucketPolicyResult& operator=(const GetBucketPolicyResult&) = delete;
- GetBucketPolicyResult(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
- GetBucketPolicyResult& operator=(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
+ AWS_S3_API GetBucketPolicyResult(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
+ AWS_S3_API GetBucketPolicyResult& operator=(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
/**
* <p>The bucket policy as a JSON document.</p>
*/
- inline Aws::IOStream& GetPolicy() { return m_policy.GetUnderlyingStream(); }
+ inline Aws::IOStream& GetPolicy() const { return m_policy.GetUnderlyingStream(); }
/**
* <p>The bucket policy as a JSON document.</p>
@@ -48,7 +48,7 @@ namespace Model
private:
- Aws::Utils::Stream::ResponseStream m_policy;
+ Aws::Utils::Stream::ResponseStream m_policy;
};
} // namespace Model
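
GetPolicy() is now const-qualified, so the policy document can be read through a const reference to the result. A minimal sketch (the helper name is illustrative) that drains the underlying response stream into a string; the stream can only be consumed once, because the result owns the HTTP response body:

#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/model/GetBucketPolicyResult.h>

Aws::String ReadPolicyJson(const Aws::S3::Model::GetBucketPolicyResult& result)
{
    Aws::StringStream buffer;
    buffer << result.GetPolicy().rdbuf();   // drain the JSON policy document
    return buffer.str();
}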
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusRequest.h
index 2afaaf0f9b..b96f3f1949 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketPolicyStatusRequest : public S3Request
+ class GetBucketPolicyStatusRequest : public S3Request
{
public:
- GetBucketPolicyStatusRequest();
+ AWS_S3_API GetBucketPolicyStatusRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketPolicyStatus"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose policy status you want to
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketPolicyStatusRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketPolicyStatusRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketPolicyStatusRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusResult.h
index d1935f3590..01054239d9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketPolicyStatusResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketPolicyStatusResult
+ class GetBucketPolicyStatusResult
{
public:
- GetBucketPolicyStatusResult();
- GetBucketPolicyStatusResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketPolicyStatusResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketPolicyStatusResult();
+ AWS_S3_API GetBucketPolicyStatusResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketPolicyStatusResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationRequest.h
index 71aa9f131b..6ea3c4c739 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketReplicationRequest : public S3Request
+ class GetBucketReplicationRequest : public S3Request
{
public:
- GetBucketReplicationRequest();
+ AWS_S3_API GetBucketReplicationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketReplication"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name for which to get the replication information.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketReplicationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketReplicationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketReplicationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationResult.h
index ff53188ffd..5ae08fcb62 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketReplicationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketReplicationResult
+ class GetBucketReplicationResult
{
public:
- GetBucketReplicationResult();
- GetBucketReplicationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketReplicationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketReplicationResult();
+ AWS_S3_API GetBucketReplicationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketReplicationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentRequest.h
index 4471b13374..bece65a37b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketRequestPaymentRequest : public S3Request
+ class GetBucketRequestPaymentRequest : public S3Request
{
public:
- GetBucketRequestPaymentRequest();
+ AWS_S3_API GetBucketRequestPaymentRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketRequestPayment"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to get the payment request configuration</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketRequestPaymentRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketRequestPaymentRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketRequestPaymentRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentResult.h
index 48b84e3462..71239a74fa 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketRequestPaymentResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketRequestPaymentResult
+ class GetBucketRequestPaymentResult
{
public:
- GetBucketRequestPaymentResult();
- GetBucketRequestPaymentResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketRequestPaymentResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketRequestPaymentResult();
+ AWS_S3_API GetBucketRequestPaymentResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketRequestPaymentResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingRequest.h
index e660344c3f..2b9e3febb1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketTaggingRequest : public S3Request
+ class GetBucketTaggingRequest : public S3Request
{
public:
- GetBucketTaggingRequest();
+ AWS_S3_API GetBucketTaggingRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketTagging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to get the tagging information.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketTaggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketTaggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketTaggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
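
A usage sketch for the request above, assuming the matching GetBucketTaggingResult (next file in this diff) exposes the tag set; bucket name and account ID are placeholders. GetBucketTagging reports NoSuchTagSet when the bucket has no tags, and 403 Forbidden on an owner mismatch.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetBucketTaggingRequest.h>
#include <iostream>

void PrintBucketTags(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::GetBucketTaggingRequest request;
    request.SetBucket("example-bucket");             // hypothetical bucket name
    request.SetExpectedBucketOwner("111122223333");  // hypothetical account ID

    auto outcome = client.GetBucketTagging(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    for (const auto& tag : outcome.GetResult().GetTagSet())
    {
        std::cout << tag.GetKey() << " = " << tag.GetValue() << std::endl;
    }
}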
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingResult.h
index 7555a772b4..2012ef8a58 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketTaggingResult.h
@@ -25,12 +25,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketTaggingResult
+ class GetBucketTaggingResult
{
public:
- GetBucketTaggingResult();
- GetBucketTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketTaggingResult();
+ AWS_S3_API GetBucketTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningRequest.h
index 787c5ff90a..c9499fdb70 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketVersioningRequest : public S3Request
+ class GetBucketVersioningRequest : public S3Request
{
public:
- GetBucketVersioningRequest();
+ AWS_S3_API GetBucketVersioningRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketVersioning"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to get the versioning information.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketVersioningRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketVersioningRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketVersioningRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningResult.h
index 2e4d770751..955dcbc65c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketVersioningResult.h
@@ -25,12 +25,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketVersioningResult
+ class GetBucketVersioningResult
{
public:
- GetBucketVersioningResult();
- GetBucketVersioningResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketVersioningResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketVersioningResult();
+ AWS_S3_API GetBucketVersioningResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketVersioningResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteRequest.h
index 6422d3c876..ea6f63838a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetBucketWebsiteRequest : public S3Request
+ class GetBucketWebsiteRequest : public S3Request
{
public:
- GetBucketWebsiteRequest();
+ AWS_S3_API GetBucketWebsiteRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetBucketWebsite"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name for which to get the website configuration.</p>
@@ -84,57 +88,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketWebsiteRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketWebsiteRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetBucketWebsiteRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -181,13 +185,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteResult.h
index f5cb5577b5..3750567104 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetBucketWebsiteResult.h
@@ -28,12 +28,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetBucketWebsiteResult
+ class GetBucketWebsiteResult
{
public:
- GetBucketWebsiteResult();
- GetBucketWebsiteResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetBucketWebsiteResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketWebsiteResult();
+ AWS_S3_API GetBucketWebsiteResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetBucketWebsiteResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclRequest.h
index 412a4ac4fc..b947c61890 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectAclRequest : public S3Request
+ class GetObjectAclRequest : public S3Request
{
public:
- GetObjectAclRequest();
+ AWS_S3_API GetObjectAclRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObjectAcl"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name that contains the object for which to get the ACL
@@ -48,11 +52,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -62,11 +66,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -76,11 +80,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -90,11 +94,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -104,11 +108,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -118,11 +122,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectAclRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -132,11 +136,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectAclRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -146,11 +150,11 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectAclRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -258,57 +262,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectAclRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectAclRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectAclRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -355,22 +359,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
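Not part of the vendored diff, but for orientation: a minimal sketch of driving the updated GetObjectAclRequest through S3Client::GetObjectAcl. The bucket, key, and owner account ID below are placeholders, and the grant accessors (GetGrants/GetGrantee/GetID) are assumed from the unchanged GetObjectAclResult and Grant models.

```cpp
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectAclRequest.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default client configuration

        // Placeholder bucket/key/owner values; ExpectedBucketOwner makes the call
        // fail with 403 Forbidden when the bucket belongs to a different account.
        Aws::S3::Model::GetObjectAclRequest request;
        request.WithBucket("example-bucket")
               .WithKey("example-object")
               .WithExpectedBucketOwner("111122223333");

        auto outcome = client.GetObjectAcl(request);
        if (outcome.IsSuccess()) {
            for (const auto& grant : outcome.GetResult().GetGrants()) {
                std::cout << "grantee: " << grant.GetGrantee().GetID() << std::endl;
            }
        } else {
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```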
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclResult.h
index 26f7bae035..d023dc0b11 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAclResult.h
@@ -27,12 +27,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectAclResult
+ class GetObjectAclResult
{
public:
- GetObjectAclResult();
- GetObjectAclResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetObjectAclResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectAclResult();
+ AWS_S3_API GetObjectAclResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectAclResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesParts.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesParts.h
new file mode 100644
index 0000000000..a852e16843
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesParts.h
@@ -0,0 +1,238 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <aws/s3/model/ObjectPart.h>
+#include <utility>
+
+namespace Aws
+{
+namespace Utils
+{
+namespace Xml
+{
+ class XmlNode;
+} // namespace Xml
+} // namespace Utils
+namespace S3
+{
+namespace Model
+{
+
+ /**
+ * <p>A collection of parts associated with a multipart upload.</p><p><h3>See
+ * Also:</h3> <a
+ * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GetObjectAttributesParts">AWS
+ * API Reference</a></p>
+ */
+ class GetObjectAttributesParts
+ {
+ public:
+ AWS_S3_API GetObjectAttributesParts();
+ AWS_S3_API GetObjectAttributesParts(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API GetObjectAttributesParts& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+
+
+ /**
+ * <p>The total number of parts.</p>
+ */
+ inline int GetTotalPartsCount() const{ return m_totalPartsCount; }
+
+ /**
+ * <p>The total number of parts.</p>
+ */
+ inline bool TotalPartsCountHasBeenSet() const { return m_totalPartsCountHasBeenSet; }
+
+ /**
+ * <p>The total number of parts.</p>
+ */
+ inline void SetTotalPartsCount(int value) { m_totalPartsCountHasBeenSet = true; m_totalPartsCount = value; }
+
+ /**
+ * <p>The total number of parts.</p>
+ */
+ inline GetObjectAttributesParts& WithTotalPartsCount(int value) { SetTotalPartsCount(value); return *this;}
+
+
+ /**
+ * <p>The marker for the current part.</p>
+ */
+ inline int GetPartNumberMarker() const{ return m_partNumberMarker; }
+
+ /**
+ * <p>The marker for the current part.</p>
+ */
+ inline bool PartNumberMarkerHasBeenSet() const { return m_partNumberMarkerHasBeenSet; }
+
+ /**
+ * <p>The marker for the current part.</p>
+ */
+ inline void SetPartNumberMarker(int value) { m_partNumberMarkerHasBeenSet = true; m_partNumberMarker = value; }
+
+ /**
+ * <p>The marker for the current part.</p>
+ */
+ inline GetObjectAttributesParts& WithPartNumberMarker(int value) { SetPartNumberMarker(value); return *this;}
+
+
+ /**
+ * <p>When a list is truncated, this element specifies the last part in the list,
+ * as well as the value to use for the <code>PartNumberMarker</code> request
+ * parameter in a subsequent request.</p>
+ */
+ inline int GetNextPartNumberMarker() const{ return m_nextPartNumberMarker; }
+
+ /**
+ * <p>When a list is truncated, this element specifies the last part in the list,
+ * as well as the value to use for the <code>PartNumberMarker</code> request
+ * parameter in a subsequent request.</p>
+ */
+ inline bool NextPartNumberMarkerHasBeenSet() const { return m_nextPartNumberMarkerHasBeenSet; }
+
+ /**
+ * <p>When a list is truncated, this element specifies the last part in the list,
+ * as well as the value to use for the <code>PartNumberMarker</code> request
+ * parameter in a subsequent request.</p>
+ */
+ inline void SetNextPartNumberMarker(int value) { m_nextPartNumberMarkerHasBeenSet = true; m_nextPartNumberMarker = value; }
+
+ /**
+ * <p>When a list is truncated, this element specifies the last part in the list,
+ * as well as the value to use for the <code>PartNumberMarker</code> request
+ * parameter in a subsequent request.</p>
+ */
+ inline GetObjectAttributesParts& WithNextPartNumberMarker(int value) { SetNextPartNumberMarker(value); return *this;}
+
+
+ /**
+ * <p>The maximum number of parts allowed in the response.</p>
+ */
+ inline int GetMaxParts() const{ return m_maxParts; }
+
+ /**
+ * <p>The maximum number of parts allowed in the response.</p>
+ */
+ inline bool MaxPartsHasBeenSet() const { return m_maxPartsHasBeenSet; }
+
+ /**
+ * <p>The maximum number of parts allowed in the response.</p>
+ */
+ inline void SetMaxParts(int value) { m_maxPartsHasBeenSet = true; m_maxParts = value; }
+
+ /**
+ * <p>The maximum number of parts allowed in the response.</p>
+ */
+ inline GetObjectAttributesParts& WithMaxParts(int value) { SetMaxParts(value); return *this;}
+
+
+ /**
+ * <p>Indicates whether the returned list of parts is truncated. A value of
+ * <code>true</code> indicates that the list was truncated. A list can be truncated
+ * if the number of parts exceeds the limit returned in the <code>MaxParts</code>
+ * element.</p>
+ */
+ inline bool GetIsTruncated() const{ return m_isTruncated; }
+
+ /**
+ * <p>Indicates whether the returned list of parts is truncated. A value of
+ * <code>true</code> indicates that the list was truncated. A list can be truncated
+ * if the number of parts exceeds the limit returned in the <code>MaxParts</code>
+ * element.</p>
+ */
+ inline bool IsTruncatedHasBeenSet() const { return m_isTruncatedHasBeenSet; }
+
+ /**
+ * <p>Indicates whether the returned list of parts is truncated. A value of
+ * <code>true</code> indicates that the list was truncated. A list can be truncated
+ * if the number of parts exceeds the limit returned in the <code>MaxParts</code>
+ * element.</p>
+ */
+ inline void SetIsTruncated(bool value) { m_isTruncatedHasBeenSet = true; m_isTruncated = value; }
+
+ /**
+ * <p>Indicates whether the returned list of parts is truncated. A value of
+ * <code>true</code> indicates that the list was truncated. A list can be truncated
+ * if the number of parts exceeds the limit returned in the <code>MaxParts</code>
+ * element.</p>
+ */
+ inline GetObjectAttributesParts& WithIsTruncated(bool value) { SetIsTruncated(value); return *this;}
+
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline const Aws::Vector<ObjectPart>& GetParts() const{ return m_parts; }
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline bool PartsHasBeenSet() const { return m_partsHasBeenSet; }
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline void SetParts(const Aws::Vector<ObjectPart>& value) { m_partsHasBeenSet = true; m_parts = value; }
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline void SetParts(Aws::Vector<ObjectPart>&& value) { m_partsHasBeenSet = true; m_parts = std::move(value); }
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline GetObjectAttributesParts& WithParts(const Aws::Vector<ObjectPart>& value) { SetParts(value); return *this;}
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline GetObjectAttributesParts& WithParts(Aws::Vector<ObjectPart>&& value) { SetParts(std::move(value)); return *this;}
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline GetObjectAttributesParts& AddParts(const ObjectPart& value) { m_partsHasBeenSet = true; m_parts.push_back(value); return *this; }
+
+ /**
+ * <p>A container for elements related to a particular part. A response can contain
+ * zero or more <code>Parts</code> elements.</p>
+ */
+ inline GetObjectAttributesParts& AddParts(ObjectPart&& value) { m_partsHasBeenSet = true; m_parts.push_back(std::move(value)); return *this; }
+
+ private:
+
+ int m_totalPartsCount;
+ bool m_totalPartsCountHasBeenSet = false;
+
+ int m_partNumberMarker;
+ bool m_partNumberMarkerHasBeenSet = false;
+
+ int m_nextPartNumberMarker;
+ bool m_nextPartNumberMarkerHasBeenSet = false;
+
+ int m_maxParts;
+ bool m_maxPartsHasBeenSet = false;
+
+ bool m_isTruncated;
+ bool m_isTruncatedHasBeenSet = false;
+
+ Aws::Vector<ObjectPart> m_parts;
+ bool m_partsHasBeenSet = false;
+ };
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
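For orientation, a hedged sketch of consuming the new GetObjectAttributesParts container, which mirrors the ListParts-style pagination fields (MaxParts, PartNumberMarker, NextPartNumberMarker, IsTruncated). The ObjectPart accessors used here (GetPartNumber, GetSize) are assumed from the ObjectPart model this header includes.

```cpp
#include <aws/s3/model/GetObjectAttributesParts.h>
#include <aws/s3/model/ObjectPart.h>
#include <iostream>

// Prints the parts carried in one GetObjectAttributes response page and reports
// whether another page must be requested with PartNumberMarker = NextPartNumberMarker.
void DumpObjectParts(const Aws::S3::Model::GetObjectAttributesParts& parts)
{
    std::cout << "total parts: " << parts.GetTotalPartsCount() << std::endl;
    for (const auto& part : parts.GetParts()) {
        std::cout << "  part " << part.GetPartNumber()
                  << " size " << part.GetSize() << std::endl;
    }
    if (parts.GetIsTruncated()) {
        std::cout << "truncated; continue from marker "
                  << parts.GetNextPartNumberMarker() << std::endl;
    }
}
```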
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesRequest.h
new file mode 100644
index 0000000000..0a55365fe4
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesRequest.h
@@ -0,0 +1,732 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/s3/S3Request.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/RequestPayer.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <aws/core/utils/memory/stl/AWSMap.h>
+#include <aws/s3/model/ObjectAttributes.h>
+#include <utility>
+
+namespace Aws
+{
+namespace Http
+{
+ class URI;
+} //namespace Http
+namespace S3
+{
+namespace Model
+{
+
+ /**
+ */
+ class GetObjectAttributesRequest : public S3Request
+ {
+ public:
+ AWS_S3_API GetObjectAttributesRequest();
+
+ // Service request name is the Operation name which will send this request out,
+    // each operation should have a unique request name, so that we can get the operation's name from this request.
+    // Note: this is not true for responses, multiple operations may have the same response name,
+    // so we cannot get the operation's name from a response.
+ inline virtual const char* GetServiceRequestName() const override { return "GetObjectAttributes"; }
+
+ AWS_S3_API Aws::String SerializePayload() const override;
+
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetBucket() const{ return m_bucket; }
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectAttributesRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectAttributesRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
+
+ /**
+ * <p>The name of the bucket that contains the object.</p> <p>When using this
+ * action with an access point, you must direct requests to the access point
+ * hostname. The access point hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * action with Amazon S3 on Outposts, you must direct requests to the S3 on
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectAttributesRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
+
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline const Aws::String& GetKey() const{ return m_key; }
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline bool KeyHasBeenSet() const { return m_keyHasBeenSet; }
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline void SetKey(const Aws::String& value) { m_keyHasBeenSet = true; m_key = value; }
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline void SetKey(Aws::String&& value) { m_keyHasBeenSet = true; m_key = std::move(value); }
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline void SetKey(const char* value) { m_keyHasBeenSet = true; m_key.assign(value); }
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline GetObjectAttributesRequest& WithKey(const Aws::String& value) { SetKey(value); return *this;}
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline GetObjectAttributesRequest& WithKey(Aws::String&& value) { SetKey(std::move(value)); return *this;}
+
+ /**
+ * <p>The object key.</p>
+ */
+ inline GetObjectAttributesRequest& WithKey(const char* value) { SetKey(value); return *this;}
+
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline const Aws::String& GetVersionId() const{ return m_versionId; }
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline bool VersionIdHasBeenSet() const { return m_versionIdHasBeenSet; }
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline void SetVersionId(const Aws::String& value) { m_versionIdHasBeenSet = true; m_versionId = value; }
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline void SetVersionId(Aws::String&& value) { m_versionIdHasBeenSet = true; m_versionId = std::move(value); }
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline void SetVersionId(const char* value) { m_versionIdHasBeenSet = true; m_versionId.assign(value); }
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline GetObjectAttributesRequest& WithVersionId(const Aws::String& value) { SetVersionId(value); return *this;}
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline GetObjectAttributesRequest& WithVersionId(Aws::String&& value) { SetVersionId(std::move(value)); return *this;}
+
+ /**
+ * <p>The version ID used to reference a specific version of the object.</p>
+ */
+ inline GetObjectAttributesRequest& WithVersionId(const char* value) { SetVersionId(value); return *this;}
+
+
+ /**
+ * <p>Sets the maximum number of parts to return.</p>
+ */
+ inline int GetMaxParts() const{ return m_maxParts; }
+
+ /**
+ * <p>Sets the maximum number of parts to return.</p>
+ */
+ inline bool MaxPartsHasBeenSet() const { return m_maxPartsHasBeenSet; }
+
+ /**
+ * <p>Sets the maximum number of parts to return.</p>
+ */
+ inline void SetMaxParts(int value) { m_maxPartsHasBeenSet = true; m_maxParts = value; }
+
+ /**
+ * <p>Sets the maximum number of parts to return.</p>
+ */
+ inline GetObjectAttributesRequest& WithMaxParts(int value) { SetMaxParts(value); return *this;}
+
+
+ /**
+ * <p>Specifies the part after which listing should begin. Only parts with higher
+ * part numbers will be listed.</p>
+ */
+ inline int GetPartNumberMarker() const{ return m_partNumberMarker; }
+
+ /**
+ * <p>Specifies the part after which listing should begin. Only parts with higher
+ * part numbers will be listed.</p>
+ */
+ inline bool PartNumberMarkerHasBeenSet() const { return m_partNumberMarkerHasBeenSet; }
+
+ /**
+ * <p>Specifies the part after which listing should begin. Only parts with higher
+ * part numbers will be listed.</p>
+ */
+ inline void SetPartNumberMarker(int value) { m_partNumberMarkerHasBeenSet = true; m_partNumberMarker = value; }
+
+ /**
+ * <p>Specifies the part after which listing should begin. Only parts with higher
+ * part numbers will be listed.</p>
+ */
+ inline GetObjectAttributesRequest& WithPartNumberMarker(int value) { SetPartNumberMarker(value); return *this;}
+
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline const Aws::String& GetSSECustomerAlgorithm() const{ return m_sSECustomerAlgorithm; }
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline bool SSECustomerAlgorithmHasBeenSet() const { return m_sSECustomerAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline void SetSSECustomerAlgorithm(const Aws::String& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = value; }
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline void SetSSECustomerAlgorithm(Aws::String&& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = std::move(value); }
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline void SetSSECustomerAlgorithm(const char* value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm.assign(value); }
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerAlgorithm(const Aws::String& value) { SetSSECustomerAlgorithm(value); return *this;}
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerAlgorithm(Aws::String&& value) { SetSSECustomerAlgorithm(std::move(value)); return *this;}
+
+ /**
+ * <p>Specifies the algorithm to use when encrypting the object (for example,
+ * AES256).</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerAlgorithm(const char* value) { SetSSECustomerAlgorithm(value); return *this;}
+
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline const Aws::String& GetSSECustomerKey() const{ return m_sSECustomerKey; }
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline bool SSECustomerKeyHasBeenSet() const { return m_sSECustomerKeyHasBeenSet; }
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline void SetSSECustomerKey(const Aws::String& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = value; }
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline void SetSSECustomerKey(Aws::String&& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = std::move(value); }
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline void SetSSECustomerKey(const char* value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey.assign(value); }
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerKey(const Aws::String& value) { SetSSECustomerKey(value); return *this;}
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerKey(Aws::String&& value) { SetSSECustomerKey(std::move(value)); return *this;}
+
+ /**
+ * <p>Specifies the customer-provided encryption key for Amazon S3 to use in
+ * encrypting data. This value is used to store the object and then it is
+ * discarded; Amazon S3 does not store the encryption key. The key must be
+ * appropriate for use with the algorithm specified in the
+ * <code>x-amz-server-side-encryption-customer-algorithm</code> header.</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerKey(const char* value) { SetSSECustomerKey(value); return *this;}
+
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline const Aws::String& GetSSECustomerKeyMD5() const{ return m_sSECustomerKeyMD5; }
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline bool SSECustomerKeyMD5HasBeenSet() const { return m_sSECustomerKeyMD5HasBeenSet; }
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline void SetSSECustomerKeyMD5(const Aws::String& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = value; }
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline void SetSSECustomerKeyMD5(Aws::String&& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = std::move(value); }
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline void SetSSECustomerKeyMD5(const char* value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5.assign(value); }
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerKeyMD5(const Aws::String& value) { SetSSECustomerKeyMD5(value); return *this;}
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerKeyMD5(Aws::String&& value) { SetSSECustomerKeyMD5(std::move(value)); return *this;}
+
+ /**
+ * <p>Specifies the 128-bit MD5 digest of the encryption key according to RFC 1321.
+ * Amazon S3 uses this header for a message integrity check to ensure that the
+ * encryption key was transmitted without error.</p>
+ */
+ inline GetObjectAttributesRequest& WithSSECustomerKeyMD5(const char* value) { SetSSECustomerKeyMD5(value); return *this;}
+
+
+
+ inline const RequestPayer& GetRequestPayer() const{ return m_requestPayer; }
+
+
+ inline bool RequestPayerHasBeenSet() const { return m_requestPayerHasBeenSet; }
+
+
+ inline void SetRequestPayer(const RequestPayer& value) { m_requestPayerHasBeenSet = true; m_requestPayer = value; }
+
+
+ inline void SetRequestPayer(RequestPayer&& value) { m_requestPayerHasBeenSet = true; m_requestPayer = std::move(value); }
+
+
+ inline GetObjectAttributesRequest& WithRequestPayer(const RequestPayer& value) { SetRequestPayer(value); return *this;}
+
+
+ inline GetObjectAttributesRequest& WithRequestPayer(RequestPayer&& value) { SetRequestPayer(std::move(value)); return *this;}
+
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline GetObjectAttributesRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline GetObjectAttributesRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
+
+ /**
+ * <p>The account ID of the expected bucket owner. If the bucket is owned by a
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
+ */
+ inline GetObjectAttributesRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline const Aws::Vector<ObjectAttributes>& GetObjectAttributes() const{ return m_objectAttributes; }
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline bool ObjectAttributesHasBeenSet() const { return m_objectAttributesHasBeenSet; }
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline void SetObjectAttributes(const Aws::Vector<ObjectAttributes>& value) { m_objectAttributesHasBeenSet = true; m_objectAttributes = value; }
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline void SetObjectAttributes(Aws::Vector<ObjectAttributes>&& value) { m_objectAttributesHasBeenSet = true; m_objectAttributes = std::move(value); }
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline GetObjectAttributesRequest& WithObjectAttributes(const Aws::Vector<ObjectAttributes>& value) { SetObjectAttributes(value); return *this;}
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline GetObjectAttributesRequest& WithObjectAttributes(Aws::Vector<ObjectAttributes>&& value) { SetObjectAttributes(std::move(value)); return *this;}
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline GetObjectAttributesRequest& AddObjectAttributes(const ObjectAttributes& value) { m_objectAttributesHasBeenSet = true; m_objectAttributes.push_back(value); return *this; }
+
+ /**
+ * <p>An XML header that specifies the fields at the root level that you want
+ * returned in the response. Fields that you do not specify are not returned.</p>
+ */
+ inline GetObjectAttributesRequest& AddObjectAttributes(ObjectAttributes&& value) { m_objectAttributesHasBeenSet = true; m_objectAttributes.push_back(std::move(value)); return *this; }
+
+
+
+ inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
+
+
+ inline bool CustomizedAccessLogTagHasBeenSet() const { return m_customizedAccessLogTagHasBeenSet; }
+
+
+ inline void SetCustomizedAccessLogTag(const Aws::Map<Aws::String, Aws::String>& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag = value; }
+
+
+ inline void SetCustomizedAccessLogTag(Aws::Map<Aws::String, Aws::String>&& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag = std::move(value); }
+
+
+ inline GetObjectAttributesRequest& WithCustomizedAccessLogTag(const Aws::Map<Aws::String, Aws::String>& value) { SetCustomizedAccessLogTag(value); return *this;}
+
+
+ inline GetObjectAttributesRequest& WithCustomizedAccessLogTag(Aws::Map<Aws::String, Aws::String>&& value) { SetCustomizedAccessLogTag(std::move(value)); return *this;}
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(const Aws::String& key, const Aws::String& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(key, value); return *this; }
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(Aws::String&& key, const Aws::String& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(std::move(key), value); return *this; }
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(const Aws::String& key, Aws::String&& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(key, std::move(value)); return *this; }
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(Aws::String&& key, Aws::String&& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(std::move(key), std::move(value)); return *this; }
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(const char* key, Aws::String&& value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(key, std::move(value)); return *this; }
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(Aws::String&& key, const char* value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(std::move(key), value); return *this; }
+
+
+ inline GetObjectAttributesRequest& AddCustomizedAccessLogTag(const char* key, const char* value) { m_customizedAccessLogTagHasBeenSet = true; m_customizedAccessLogTag.emplace(key, value); return *this; }
+
+ private:
+
+ Aws::String m_bucket;
+ bool m_bucketHasBeenSet = false;
+
+ Aws::String m_key;
+ bool m_keyHasBeenSet = false;
+
+ Aws::String m_versionId;
+ bool m_versionIdHasBeenSet = false;
+
+ int m_maxParts;
+ bool m_maxPartsHasBeenSet = false;
+
+ int m_partNumberMarker;
+ bool m_partNumberMarkerHasBeenSet = false;
+
+ Aws::String m_sSECustomerAlgorithm;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
+
+ Aws::String m_sSECustomerKey;
+ bool m_sSECustomerKeyHasBeenSet = false;
+
+ Aws::String m_sSECustomerKeyMD5;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
+
+ RequestPayer m_requestPayer;
+ bool m_requestPayerHasBeenSet = false;
+
+ Aws::String m_expectedBucketOwner;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ Aws::Vector<ObjectAttributes> m_objectAttributes;
+ bool m_objectAttributesHasBeenSet = false;
+
+ Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
+ bool m_customizedAccessLogTagHasBeenSet = false;
+ };
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
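A hedged sketch of populating the new request type. Every setter used below appears in the header above; the ObjectAttributes enum values (ETag, ObjectSize, StorageClass, ObjectParts) are assumed from the ObjectAttributes model header it includes, and the bucket/key strings are placeholders.

```cpp
#include <aws/s3/model/GetObjectAttributesRequest.h>
#include <aws/s3/model/ObjectAttributes.h>

// Builds a request that asks S3 for a restricted set of object attributes.
Aws::S3::Model::GetObjectAttributesRequest MakeAttributesRequest()
{
    Aws::S3::Model::GetObjectAttributesRequest request;
    request.SetBucket("example-bucket");   // placeholder bucket
    request.SetKey("example-object");      // placeholder key
    request.SetMaxParts(100);              // cap the size of the parts page
    // Only the attributes listed here are returned in the response.
    request.AddObjectAttributes(Aws::S3::Model::ObjectAttributes::ETag);
    request.AddObjectAttributes(Aws::S3::Model::ObjectAttributes::ObjectSize);
    request.AddObjectAttributes(Aws::S3::Model::ObjectAttributes::StorageClass);
    request.AddObjectAttributes(Aws::S3::Model::ObjectAttributes::ObjectParts);
    return request;
}
```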
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesResult.h
new file mode 100644
index 0000000000..5ccfd2a4a3
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectAttributesResult.h
@@ -0,0 +1,319 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/RequestCharged.h>
+#include <aws/s3/model/Checksum.h>
+#include <aws/s3/model/GetObjectAttributesParts.h>
+#include <aws/s3/model/StorageClass.h>
+#include <utility>
+
+namespace Aws
+{
+template<typename RESULT_TYPE>
+class AmazonWebServiceResult;
+
+namespace Utils
+{
+namespace Xml
+{
+ class XmlDocument;
+} // namespace Xml
+} // namespace Utils
+namespace S3
+{
+namespace Model
+{
+ class GetObjectAttributesResult
+ {
+ public:
+ AWS_S3_API GetObjectAttributesResult();
+ AWS_S3_API GetObjectAttributesResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectAttributesResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+
+
+ /**
+ * <p>Specifies whether the object retrieved was (<code>true</code>) or was not
+ * (<code>false</code>) a delete marker. If <code>false</code>, this response
+ * header does not appear in the response.</p>
+ */
+ inline bool GetDeleteMarker() const{ return m_deleteMarker; }
+
+ /**
+ * <p>Specifies whether the object retrieved was (<code>true</code>) or was not
+ * (<code>false</code>) a delete marker. If <code>false</code>, this response
+ * header does not appear in the response.</p>
+ */
+ inline void SetDeleteMarker(bool value) { m_deleteMarker = value; }
+
+ /**
+ * <p>Specifies whether the object retrieved was (<code>true</code>) or was not
+ * (<code>false</code>) a delete marker. If <code>false</code>, this response
+ * header does not appear in the response.</p>
+ */
+ inline GetObjectAttributesResult& WithDeleteMarker(bool value) { SetDeleteMarker(value); return *this;}
+
+
+ /**
+ * <p>The creation date of the object.</p>
+ */
+ inline const Aws::Utils::DateTime& GetLastModified() const{ return m_lastModified; }
+
+ /**
+ * <p>The creation date of the object.</p>
+ */
+ inline void SetLastModified(const Aws::Utils::DateTime& value) { m_lastModified = value; }
+
+ /**
+ * <p>The creation date of the object.</p>
+ */
+ inline void SetLastModified(Aws::Utils::DateTime&& value) { m_lastModified = std::move(value); }
+
+ /**
+ * <p>The creation date of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithLastModified(const Aws::Utils::DateTime& value) { SetLastModified(value); return *this;}
+
+ /**
+ * <p>The creation date of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithLastModified(Aws::Utils::DateTime&& value) { SetLastModified(std::move(value)); return *this;}
+
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline const Aws::String& GetVersionId() const{ return m_versionId; }
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline void SetVersionId(const Aws::String& value) { m_versionId = value; }
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline void SetVersionId(Aws::String&& value) { m_versionId = std::move(value); }
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline void SetVersionId(const char* value) { m_versionId.assign(value); }
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithVersionId(const Aws::String& value) { SetVersionId(value); return *this;}
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithVersionId(Aws::String&& value) { SetVersionId(std::move(value)); return *this;}
+
+ /**
+ * <p>The version ID of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithVersionId(const char* value) { SetVersionId(value); return *this;}
+
+
+
+ inline const RequestCharged& GetRequestCharged() const{ return m_requestCharged; }
+
+
+ inline void SetRequestCharged(const RequestCharged& value) { m_requestCharged = value; }
+
+
+ inline void SetRequestCharged(RequestCharged&& value) { m_requestCharged = std::move(value); }
+
+
+ inline GetObjectAttributesResult& WithRequestCharged(const RequestCharged& value) { SetRequestCharged(value); return *this;}
+
+
+ inline GetObjectAttributesResult& WithRequestCharged(RequestCharged&& value) { SetRequestCharged(std::move(value)); return *this;}
+
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline const Aws::String& GetETag() const{ return m_eTag; }
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline void SetETag(const Aws::String& value) { m_eTag = value; }
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline void SetETag(Aws::String&& value) { m_eTag = std::move(value); }
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline void SetETag(const char* value) { m_eTag.assign(value); }
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline GetObjectAttributesResult& WithETag(const Aws::String& value) { SetETag(value); return *this;}
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline GetObjectAttributesResult& WithETag(Aws::String&& value) { SetETag(std::move(value)); return *this;}
+
+ /**
+ * <p>An ETag is an opaque identifier assigned by a web server to a specific
+ * version of a resource found at a URL.</p>
+ */
+ inline GetObjectAttributesResult& WithETag(const char* value) { SetETag(value); return *this;}
+
+
+ /**
+ * <p>The checksum or digest of the object.</p>
+ */
+ inline const Checksum& GetChecksum() const{ return m_checksum; }
+
+ /**
+ * <p>The checksum or digest of the object.</p>
+ */
+ inline void SetChecksum(const Checksum& value) { m_checksum = value; }
+
+ /**
+ * <p>The checksum or digest of the object.</p>
+ */
+ inline void SetChecksum(Checksum&& value) { m_checksum = std::move(value); }
+
+ /**
+ * <p>The checksum or digest of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithChecksum(const Checksum& value) { SetChecksum(value); return *this;}
+
+ /**
+ * <p>The checksum or digest of the object.</p>
+ */
+ inline GetObjectAttributesResult& WithChecksum(Checksum&& value) { SetChecksum(std::move(value)); return *this;}
+
+
+ /**
+ * <p>A collection of parts associated with a multipart upload.</p>
+ */
+ inline const GetObjectAttributesParts& GetObjectParts() const{ return m_objectParts; }
+
+ /**
+ * <p>A collection of parts associated with a multipart upload.</p>
+ */
+ inline void SetObjectParts(const GetObjectAttributesParts& value) { m_objectParts = value; }
+
+ /**
+ * <p>A collection of parts associated with a multipart upload.</p>
+ */
+ inline void SetObjectParts(GetObjectAttributesParts&& value) { m_objectParts = std::move(value); }
+
+ /**
+ * <p>A collection of parts associated with a multipart upload.</p>
+ */
+ inline GetObjectAttributesResult& WithObjectParts(const GetObjectAttributesParts& value) { SetObjectParts(value); return *this;}
+
+ /**
+ * <p>A collection of parts associated with a multipart upload.</p>
+ */
+ inline GetObjectAttributesResult& WithObjectParts(GetObjectAttributesParts&& value) { SetObjectParts(std::move(value)); return *this;}
+
+
+ /**
+ * <p>Provides the storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
+ */
+ inline const StorageClass& GetStorageClass() const{ return m_storageClass; }
+
+ /**
+ * <p>Provides the storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
+ */
+ inline void SetStorageClass(const StorageClass& value) { m_storageClass = value; }
+
+ /**
+ * <p>Provides the storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
+ */
+ inline void SetStorageClass(StorageClass&& value) { m_storageClass = std::move(value); }
+
+ /**
+ * <p>Provides the storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
+ */
+ inline GetObjectAttributesResult& WithStorageClass(const StorageClass& value) { SetStorageClass(value); return *this;}
+
+ /**
+ * <p>Provides the storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
+ */
+ inline GetObjectAttributesResult& WithStorageClass(StorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
+
+
+ /**
+ * <p>The size of the object in bytes.</p>
+ */
+ inline long long GetObjectSize() const{ return m_objectSize; }
+
+ /**
+ * <p>The size of the object in bytes.</p>
+ */
+ inline void SetObjectSize(long long value) { m_objectSize = value; }
+
+ /**
+ * <p>The size of the object in bytes.</p>
+ */
+ inline GetObjectAttributesResult& WithObjectSize(long long value) { SetObjectSize(value); return *this;}
+
+ private:
+
+ bool m_deleteMarker;
+
+ Aws::Utils::DateTime m_lastModified;
+
+ Aws::String m_versionId;
+
+ RequestCharged m_requestCharged;
+
+ Aws::String m_eTag;
+
+ Checksum m_checksum;
+
+ GetObjectAttributesParts m_objectParts;
+
+ StorageClass m_storageClass;
+
+ long long m_objectSize;
+ };
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
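The GetObjectAttributesResult accessors added above can be consumed as in the following sketch. It is illustrative only: how the result is obtained (an S3Client::GetObjectAttributes call) and the use of the NOT_SET sentinel on the storage-class enum are assumptions based on the surrounding SDK conventions, not part of this diff.

    #include <aws/s3/model/GetObjectAttributesResult.h>
    #include <iostream>

    // Prints a few of the attributes exposed by the result type declared above.
    void PrintObjectAttributes(const Aws::S3::Model::GetObjectAttributesResult& result)
    {
        // Object size in bytes (GetObjectSize declared above).
        std::cout << "size: " << result.GetObjectSize() << " bytes\n";

        // S3 omits the storage-class header for STANDARD objects, so the enum
        // may be left at its default (assumed NOT_SET) value.
        if (result.GetStorageClass() != Aws::S3::Model::StorageClass::NOT_SET)
        {
            std::cout << "storage class code: "
                      << static_cast<int>(result.GetStorageClass()) << "\n";
        }

        // Part information for multipart uploads (GetObjectParts declared above).
        const Aws::S3::Model::GetObjectAttributesParts& parts = result.GetObjectParts();
        (void)parts;
    }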
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldRequest.h
index e9c4940777..a7d3f6c388 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectLegalHoldRequest : public S3Request
+ class GetObjectLegalHoldRequest : public S3Request
{
public:
- GetObjectLegalHoldRequest();
+ AWS_S3_API GetObjectLegalHoldRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,203 +35,207 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObjectLegalHold"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectLegalHoldRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectLegalHoldRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>The bucket name containing the object whose Legal Hold status you want to
+ * <p>The bucket name containing the object whose legal hold status you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectLegalHoldRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline const Aws::String& GetKey() const{ return m_key; }
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline bool KeyHasBeenSet() const { return m_keyHasBeenSet; }
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline void SetKey(const Aws::String& value) { m_keyHasBeenSet = true; m_key = value; }
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline void SetKey(Aws::String&& value) { m_keyHasBeenSet = true; m_key = std::move(value); }
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline void SetKey(const char* value) { m_keyHasBeenSet = true; m_key.assign(value); }
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline GetObjectLegalHoldRequest& WithKey(const Aws::String& value) { SetKey(value); return *this;}
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline GetObjectLegalHoldRequest& WithKey(Aws::String&& value) { SetKey(std::move(value)); return *this;}
/**
- * <p>The key name for the object whose Legal Hold status you want to retrieve.</p>
+ * <p>The key name for the object whose legal hold status you want to retrieve.</p>
*/
inline GetObjectLegalHoldRequest& WithKey(const char* value) { SetKey(value); return *this;}
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline const Aws::String& GetVersionId() const{ return m_versionId; }
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline bool VersionIdHasBeenSet() const { return m_versionIdHasBeenSet; }
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline void SetVersionId(const Aws::String& value) { m_versionIdHasBeenSet = true; m_versionId = value; }
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline void SetVersionId(Aws::String&& value) { m_versionIdHasBeenSet = true; m_versionId = std::move(value); }
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline void SetVersionId(const char* value) { m_versionIdHasBeenSet = true; m_versionId.assign(value); }
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline GetObjectLegalHoldRequest& WithVersionId(const Aws::String& value) { SetVersionId(value); return *this;}
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline GetObjectLegalHoldRequest& WithVersionId(Aws::String&& value) { SetVersionId(std::move(value)); return *this;}
/**
- * <p>The version ID of the object whose Legal Hold status you want to
+ * <p>The version ID of the object whose legal hold status you want to
* retrieve.</p>
*/
inline GetObjectLegalHoldRequest& WithVersionId(const char* value) { SetVersionId(value); return *this;}
@@ -258,57 +262,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectLegalHoldRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectLegalHoldRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectLegalHoldRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -355,22 +359,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
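Built with the fluent setters shown in this header, a minimal GetObjectLegalHoldRequest looks like the sketch below; the bucket name, key, and account ID are hypothetical placeholders.

    #include <aws/s3/model/GetObjectLegalHoldRequest.h>

    // Assembles a request for the legal hold status of a specific object.
    Aws::S3::Model::GetObjectLegalHoldRequest MakeLegalHoldRequest()
    {
        Aws::S3::Model::GetObjectLegalHoldRequest request;
        request.WithBucket("example-bucket")              // hypothetical bucket
               .WithKey("reports/2023/q3.pdf")            // hypothetical key
               .WithExpectedBucketOwner("111122223333");  // hypothetical account ID
        return request;
    }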
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldResult.h
index c69ac4c2c4..ce26a4251d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLegalHoldResult.h
@@ -24,36 +24,36 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectLegalHoldResult
+ class GetObjectLegalHoldResult
{
public:
- GetObjectLegalHoldResult();
- GetObjectLegalHoldResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetObjectLegalHoldResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectLegalHoldResult();
+ AWS_S3_API GetObjectLegalHoldResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectLegalHoldResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
- * <p>The current Legal Hold status for the specified object.</p>
+ * <p>The current legal hold status for the specified object.</p>
*/
inline const ObjectLockLegalHold& GetLegalHold() const{ return m_legalHold; }
/**
- * <p>The current Legal Hold status for the specified object.</p>
+ * <p>The current legal hold status for the specified object.</p>
*/
inline void SetLegalHold(const ObjectLockLegalHold& value) { m_legalHold = value; }
/**
- * <p>The current Legal Hold status for the specified object.</p>
+ * <p>The current legal hold status for the specified object.</p>
*/
inline void SetLegalHold(ObjectLockLegalHold&& value) { m_legalHold = std::move(value); }
/**
- * <p>The current Legal Hold status for the specified object.</p>
+ * <p>The current legal hold status for the specified object.</p>
*/
inline GetObjectLegalHoldResult& WithLegalHold(const ObjectLockLegalHold& value) { SetLegalHold(value); return *this;}
/**
- * <p>The current Legal Hold status for the specified object.</p>
+ * <p>The current legal hold status for the specified object.</p>
*/
inline GetObjectLegalHoldResult& WithLegalHold(ObjectLockLegalHold&& value) { SetLegalHold(std::move(value)); return *this;}
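A result of this type can be inspected as in the sketch below. Note that ObjectLockLegalHold::GetStatus() and the ObjectLockLegalHoldStatus enum come from other S3 model headers and are assumed here; only GetLegalHold() is declared in this diff.

    #include <aws/s3/model/GetObjectLegalHoldResult.h>
    #include <aws/s3/model/ObjectLockLegalHold.h>

    // Returns true when the object's legal hold status is ON.
    bool IsLegalHoldOn(const Aws::S3::Model::GetObjectLegalHoldResult& result)
    {
        return result.GetLegalHold().GetStatus()
            == Aws::S3::Model::ObjectLockLegalHoldStatus::ON;
    }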
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationRequest.h
index d7ac269545..575ec24d05 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectLockConfigurationRequest : public S3Request
+ class GetObjectLockConfigurationRequest : public S3Request
{
public:
- GetObjectLockConfigurationRequest();
+ AWS_S3_API GetObjectLockConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,23 +34,27 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObjectLockConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket whose Object Lock configuration you want to retrieve.</p> <p>When
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -59,11 +63,11 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -72,11 +76,11 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -85,11 +89,11 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -98,11 +102,11 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -111,11 +115,11 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectLockConfigurationRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -124,11 +128,11 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectLockConfigurationRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -137,68 +141,68 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectLockConfigurationRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectLockConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectLockConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectLockConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -245,13 +249,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
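As with the other request types in this change, GetObjectLockConfigurationRequest is populated through its fluent setters; the sketch below uses hypothetical bucket and account values.

    #include <aws/s3/model/GetObjectLockConfigurationRequest.h>

    // Assembles a request for a bucket's Object Lock configuration.
    Aws::S3::Model::GetObjectLockConfigurationRequest MakeLockConfigurationRequest()
    {
        Aws::S3::Model::GetObjectLockConfigurationRequest request;
        request.WithBucket("example-bucket")              // hypothetical bucket
               .WithExpectedBucketOwner("111122223333");  // hypothetical account ID
        return request;
    }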
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationResult.h
index 8d1a8701ab..32384b6d0a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectLockConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectLockConfigurationResult
+ class GetObjectLockConfigurationResult
{
public:
- GetObjectLockConfigurationResult();
- GetObjectLockConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetObjectLockConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectLockConfigurationResult();
+ AWS_S3_API GetObjectLockConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectLockConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRequest.h
index 82ef5b73f7..71a4b9e616 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumMode.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectRequest : public S3Request
+ class GetObjectRequest : public S3Request
{
public:
- GetObjectRequest();
+ AWS_S3_API GetObjectRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,31 +37,42 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObject"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API bool ShouldValidateResponseChecksum() const override;
+
+ AWS_S3_API Aws::Vector<Aws::String> GetResponseChecksumAlgorithmNames() const override;
+
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the object. </p> <p>When using this action with an
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -69,19 +81,22 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -90,19 +105,22 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -111,19 +129,22 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -132,19 +153,22 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -153,19 +177,22 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -174,19 +201,22 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -195,191 +225,194 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
- * action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using an
+ * Object Lambda access point the hostname takes the form
+ * <i>AccessPointName</i>-<i>AccountId</i>.s3-object-lambda.<i>Region</i>.amazonaws.com.</p>
+ * <p>When using this action with Amazon S3 on Outposts, you must direct requests
+ * to the S3 on Outposts hostname. The S3 on Outposts hostname takes the form
+ * <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
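// Illustrative sketch (not part of the diff): as the comment above explains, the Bucket
// field accepts a plain bucket name or, through the SDKs, an access point / Outposts
// bucket ARN in place of the name. The bucket, key, and ARN below are placeholders.
#include <aws/s3/model/GetObjectRequest.h>

Aws::S3::Model::GetObjectRequest MakeAccessPointGet()
{
    Aws::S3::Model::GetObjectRequest request;
    request.SetBucket("my-bucket");                // plain bucket name (placeholder)
    // Or an access point ARN in place of the bucket name (placeholder ARN):
    // request.SetBucket("arn:aws:s3:us-east-1:111122223333:accesspoint/my-access-point");
    request.SetKey("photos/cat.jpg");              // placeholder key
    return request;
}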
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline const Aws::String& GetIfMatch() const{ return m_ifMatch; }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline bool IfMatchHasBeenSet() const { return m_ifMatchHasBeenSet; }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfMatch(const Aws::String& value) { m_ifMatchHasBeenSet = true; m_ifMatch = value; }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfMatch(Aws::String&& value) { m_ifMatchHasBeenSet = true; m_ifMatch = std::move(value); }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfMatch(const char* value) { m_ifMatchHasBeenSet = true; m_ifMatch.assign(value); }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline GetObjectRequest& WithIfMatch(const Aws::String& value) { SetIfMatch(value); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline GetObjectRequest& WithIfMatch(Aws::String&& value) { SetIfMatch(std::move(value)); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline GetObjectRequest& WithIfMatch(const char* value) { SetIfMatch(value); return *this;}
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline const Aws::Utils::DateTime& GetIfModifiedSince() const{ return m_ifModifiedSince; }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline bool IfModifiedSinceHasBeenSet() const { return m_ifModifiedSinceHasBeenSet; }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfModifiedSince(const Aws::Utils::DateTime& value) { m_ifModifiedSinceHasBeenSet = true; m_ifModifiedSince = value; }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfModifiedSince(Aws::Utils::DateTime&& value) { m_ifModifiedSinceHasBeenSet = true; m_ifModifiedSince = std::move(value); }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline GetObjectRequest& WithIfModifiedSince(const Aws::Utils::DateTime& value) { SetIfModifiedSince(value); return *this;}
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline GetObjectRequest& WithIfModifiedSince(Aws::Utils::DateTime&& value) { SetIfModifiedSince(std::move(value)); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline const Aws::String& GetIfNoneMatch() const{ return m_ifNoneMatch; }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline bool IfNoneMatchHasBeenSet() const { return m_ifNoneMatchHasBeenSet; }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfNoneMatch(const Aws::String& value) { m_ifNoneMatchHasBeenSet = true; m_ifNoneMatch = value; }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfNoneMatch(Aws::String&& value) { m_ifNoneMatchHasBeenSet = true; m_ifNoneMatch = std::move(value); }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfNoneMatch(const char* value) { m_ifNoneMatchHasBeenSet = true; m_ifNoneMatch.assign(value); }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline GetObjectRequest& WithIfNoneMatch(const Aws::String& value) { SetIfNoneMatch(value); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline GetObjectRequest& WithIfNoneMatch(Aws::String&& value) { SetIfNoneMatch(std::move(value)); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline GetObjectRequest& WithIfNoneMatch(const char* value) { SetIfNoneMatch(value); return *this;}
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline const Aws::Utils::DateTime& GetIfUnmodifiedSince() const{ return m_ifUnmodifiedSince; }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline bool IfUnmodifiedSinceHasBeenSet() const { return m_ifUnmodifiedSinceHasBeenSet; }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfUnmodifiedSince(const Aws::Utils::DateTime& value) { m_ifUnmodifiedSinceHasBeenSet = true; m_ifUnmodifiedSince = value; }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfUnmodifiedSince(Aws::Utils::DateTime&& value) { m_ifUnmodifiedSinceHasBeenSet = true; m_ifUnmodifiedSince = std::move(value); }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline GetObjectRequest& WithIfUnmodifiedSince(const Aws::Utils::DateTime& value) { SetIfUnmodifiedSince(value); return *this;}
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline GetObjectRequest& WithIfUnmodifiedSince(Aws::Utils::DateTime&& value) { SetIfUnmodifiedSince(std::move(value)); return *this;}
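// Illustrative sketch (not part of the diff): a conditional GET using the
// If-Modified-Since header described above, treating HTTP 304 as "cached copy is
// current" rather than a hard failure. Bucket and key are placeholders; assumes
// Aws::InitAPI has run and `s3` is a configured client.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/core/http/HttpResponse.h>
#include <aws/core/utils/DateTime.h>
#include <chrono>
#include <iostream>

void FetchIfChanged(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectRequest request;
    request.SetBucket("my-bucket");        // placeholder
    request.SetKey("reports/daily.csv");   // placeholder
    // Only return the object if it changed since the snapshot taken a day ago.
    request.SetIfModifiedSince(
        Aws::Utils::DateTime(std::chrono::system_clock::now() - std::chrono::hours(24)));

    auto outcome = s3.GetObject(request);
    if (outcome.IsSuccess())
    {
        std::cout << "Changed; new size: " << outcome.GetResult().GetContentLength() << "\n";
    }
    else if (outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::NOT_MODIFIED)
    {
        std::cout << "304: local copy is still current\n";
    }
}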
@@ -1004,61 +1037,92 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
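// Illustrative sketch (not part of the diff): ExpectedBucketOwner turns a cross-account
// mix-up into an explicit 403 Forbidden, as documented above. The account ID and names
// are placeholders; assumes a configured client.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/core/http/HttpResponse.h>
#include <iostream>

void GetWithOwnerCheck(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectRequest request;
    request.WithBucket("my-bucket")                  // placeholder
           .WithKey("config/settings.json")          // placeholder
           .SetExpectedBucketOwner("111122223333");  // placeholder account ID

    auto outcome = s3.GetObject(request);
    if (!outcome.IsSuccess() &&
        outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN)
    {
        std::cerr << "403: bucket is not owned by the expected account\n";
    }
}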
+ /**
+ * <p>To retrieve the checksum, this mode must be enabled.</p>
+ */
+ inline const ChecksumMode& GetChecksumMode() const{ return m_checksumMode; }
+
+ /**
+ * <p>To retrieve the checksum, this mode must be enabled.</p>
+ */
+ inline bool ChecksumModeHasBeenSet() const { return m_checksumModeHasBeenSet; }
+
+ /**
+ * <p>To retrieve the checksum, this mode must be enabled.</p>
+ */
+ inline void SetChecksumMode(const ChecksumMode& value) { m_checksumModeHasBeenSet = true; m_checksumMode = value; }
+
+ /**
+ * <p>To retrieve the checksum, this mode must be enabled.</p>
+ */
+ inline void SetChecksumMode(ChecksumMode&& value) { m_checksumModeHasBeenSet = true; m_checksumMode = std::move(value); }
+
+ /**
+ * <p>To retrieve the checksum, this mode must be enabled.</p>
+ */
+ inline GetObjectRequest& WithChecksumMode(const ChecksumMode& value) { SetChecksumMode(value); return *this;}
+
+ /**
+ * <p>To retrieve the checksum, this mode must be enabled.</p>
+ */
+ inline GetObjectRequest& WithChecksumMode(ChecksumMode&& value) { SetChecksumMode(std::move(value)); return *this;}
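// Illustrative sketch (not part of the diff): "this mode must be enabled" means the
// checksum fields on GetObjectResult stay empty unless the request sets
// ChecksumMode::ENABLED. Bucket and key are placeholders; assumes a configured client.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/ChecksumMode.h>

Aws::S3::Model::GetObjectOutcome GetWithChecksums(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectRequest request;
    request.SetBucket("my-bucket");    // placeholder
    request.SetKey("data/blob.bin");   // placeholder
    request.SetChecksumMode(Aws::S3::Model::ChecksumMode::ENABLED);
    return s3.GetObject(request);      // the result can now carry x-amz-checksum-* values
}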
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -1101,67 +1165,70 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_ifMatch;
- bool m_ifMatchHasBeenSet;
+ bool m_ifMatchHasBeenSet = false;
Aws::Utils::DateTime m_ifModifiedSince;
- bool m_ifModifiedSinceHasBeenSet;
+ bool m_ifModifiedSinceHasBeenSet = false;
Aws::String m_ifNoneMatch;
- bool m_ifNoneMatchHasBeenSet;
+ bool m_ifNoneMatchHasBeenSet = false;
Aws::Utils::DateTime m_ifUnmodifiedSince;
- bool m_ifUnmodifiedSinceHasBeenSet;
+ bool m_ifUnmodifiedSinceHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_range;
- bool m_rangeHasBeenSet;
+ bool m_rangeHasBeenSet = false;
Aws::String m_responseCacheControl;
- bool m_responseCacheControlHasBeenSet;
+ bool m_responseCacheControlHasBeenSet = false;
Aws::String m_responseContentDisposition;
- bool m_responseContentDispositionHasBeenSet;
+ bool m_responseContentDispositionHasBeenSet = false;
Aws::String m_responseContentEncoding;
- bool m_responseContentEncodingHasBeenSet;
+ bool m_responseContentEncodingHasBeenSet = false;
Aws::String m_responseContentLanguage;
- bool m_responseContentLanguageHasBeenSet;
+ bool m_responseContentLanguageHasBeenSet = false;
Aws::String m_responseContentType;
- bool m_responseContentTypeHasBeenSet;
+ bool m_responseContentTypeHasBeenSet = false;
Aws::Utils::DateTime m_responseExpires;
- bool m_responseExpiresHasBeenSet;
+ bool m_responseExpiresHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
int m_partNumber;
- bool m_partNumberHasBeenSet;
+ bool m_partNumberHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ ChecksumMode m_checksumMode;
+ bool m_checksumModeHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectResult.h
index cac0384d29..d19738ba59 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectResult.h
@@ -27,28 +27,28 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectResult
+ class GetObjectResult
{
public:
- GetObjectResult();
+ AWS_S3_API GetObjectResult();
//We have to define these because Microsoft doesn't auto generate them
- GetObjectResult(GetObjectResult&&);
- GetObjectResult& operator=(GetObjectResult&&);
+ AWS_S3_API GetObjectResult(GetObjectResult&&);
+ AWS_S3_API GetObjectResult& operator=(GetObjectResult&&);
//we delete these because Microsoft doesn't handle move generation correctly
//and we therefore don't trust them to get it right here either.
GetObjectResult(const GetObjectResult&) = delete;
GetObjectResult& operator=(const GetObjectResult&) = delete;
- GetObjectResult(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
- GetObjectResult& operator=(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
+ AWS_S3_API GetObjectResult(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
+ AWS_S3_API GetObjectResult& operator=(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
/**
* <p>Object data.</p>
*/
- inline Aws::IOStream& GetBody() { return m_body.GetUnderlyingStream(); }
+ inline Aws::IOStream& GetBody() const { return m_body.GetUnderlyingStream(); }
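// Illustrative sketch (not part of the diff): GetBody() exposes the payload as a
// standard Aws::IOStream, so it can be streamed straight into a local file. The file
// name is a placeholder; assumes `result` came from a successful GetObject call.
#include <aws/s3/model/GetObjectResult.h>
#include <fstream>

void SaveBody(Aws::S3::Model::GetObjectResult& result)
{
    std::ofstream localFile("downloaded-object.bin", std::ios::binary);
    localFile << result.GetBody().rdbuf();   // copy the entire response stream to disk
}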
/**
* <p>Object data.</p>
@@ -113,57 +113,57 @@ namespace Model
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline const Aws::String& GetExpiration() const{ return m_expiration; }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const Aws::String& value) { m_expiration = value; }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(Aws::String&& value) { m_expiration = std::move(value); }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const char* value) { m_expiration.assign(value); }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline GetObjectResult& WithExpiration(const Aws::String& value) { SetExpiration(value); return *this;}
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline GetObjectResult& WithExpiration(Aws::String&& value) { SetExpiration(std::move(value)); return *this;}
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline GetObjectResult& WithExpiration(const char* value) { SetExpiration(value); return *this;}
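// Illustrative sketch (not part of the diff): the expiration value is a single string
// of expiry-date / rule-id key-value pairs (rule-id URL-encoded), for example
// expiry-date="Fri, 21 Dec 2029 00:00:00 GMT", rule-id="temp%2Dobjects" (illustrative),
// so callers typically just log it or parse it themselves.
#include <aws/s3/model/GetObjectResult.h>
#include <iostream>

void LogExpiration(const Aws::S3::Model::GetObjectResult& result)
{
    if (!result.GetExpiration().empty())
    {
        std::cout << "x-amz-expiration: " << result.GetExpiration() << "\n";
    }
}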
@@ -254,49 +254,333 @@ namespace Model
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline const Aws::String& GetETag() const{ return m_eTag; }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline void SetETag(const Aws::String& value) { m_eTag = value; }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline void SetETag(Aws::String&& value) { m_eTag = std::move(value); }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline void SetETag(const char* value) { m_eTag.assign(value); }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline GetObjectResult& WithETag(const Aws::String& value) { SetETag(value); return *this;}
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline GetObjectResult& WithETag(Aws::String&& value) { SetETag(std::move(value)); return *this;}
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline GetObjectResult& WithETag(const char* value) { SetETag(value); return *this;}
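// Illustrative sketch (not part of the diff): because the ETag is an opaque version
// identifier, a common pattern is to remember it and send it back as If-None-Match so
// S3 can answer 304 instead of re-sending unchanged data. Bucket and key are
// placeholders; assumes a configured client.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/core/http/HttpResponse.h>

bool HasChanged(Aws::S3::S3Client& s3, const Aws::String& cachedETag)
{
    Aws::S3::Model::GetObjectRequest request;
    request.SetBucket("my-bucket");       // placeholder
    request.SetKey("index.html");         // placeholder
    request.SetIfNoneMatch(cachedETag);   // ETag captured from an earlier GetObjectResult
    auto outcome = s3.GetObject(request);
    if (outcome.IsSuccess())
    {
        return true;  // a newer version exists; outcome.GetResult().GetETag() is the fresh tag
    }
    // 304 means unchanged; any other error is treated as "changed" in this sketch.
    return outcome.GetError().GetResponseCode() != Aws::Http::HttpResponseCode::NOT_MODIFIED;
}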
/**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline GetObjectResult& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
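// Illustrative sketch (not part of the diff): each accessor above returns the
// base64-encoded value of the matching x-amz-checksum-* response header and is empty
// unless the object was uploaded with that checksum and ChecksumMode was enabled on the
// request. Assumes `result` came from a successful GetObject call.
#include <aws/s3/model/GetObjectResult.h>
#include <iostream>

void PrintChecksums(const Aws::S3::Model::GetObjectResult& result)
{
    if (!result.GetChecksumCRC32().empty())  std::cout << "CRC32:  " << result.GetChecksumCRC32()  << "\n";
    if (!result.GetChecksumCRC32C().empty()) std::cout << "CRC32C: " << result.GetChecksumCRC32C() << "\n";
    if (!result.GetChecksumSHA1().empty())   std::cout << "SHA1:   " << result.GetChecksumSHA1()   << "\n";
    if (!result.GetChecksumSHA256().empty()) std::cout << "SHA256: " << result.GetChecksumSHA256() << "\n";
}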
+
+
+ /**
* <p>This is set to the number of metadata entries not returned in
* <code>x-amz-meta</code> headers. This can happen if you create metadata using an
* API like SOAP that supports more flexible metadata than the REST API. For
@@ -859,70 +1143,70 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline GetObjectResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline GetObjectResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline GetObjectResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>Indicates whether the object uses an S3 Bucket Key for server-side encryption
- * with AWS KMS (SSE-KMS).</p>
+ * with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the object uses an S3 Bucket Key for server-side encryption
- * with AWS KMS (SSE-KMS).</p>
+ * with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the object uses an S3 Bucket Key for server-side encryption
- * with AWS KMS (SSE-KMS).</p>
+ * with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline GetObjectResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
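// Illustrative sketch (not part of the diff): for SSE-KMS objects the response reports
// which KMS key protected the object and whether an S3 Bucket Key was used; both are
// read-only facts about the stored object. Assumes a successful GetObjectResult.
#include <aws/s3/model/GetObjectResult.h>
#include <iostream>

void PrintEncryptionInfo(const Aws::S3::Model::GetObjectResult& result)
{
    if (!result.GetSSEKMSKeyId().empty())
    {
        std::cout << "SSE-KMS key ID: " << result.GetSSEKMSKeyId() << "\n";
        std::cout << "Bucket Key enabled: " << (result.GetBucketKeyEnabled() ? "yes" : "no") << "\n";
    }
}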
@@ -1006,17 +1290,23 @@ namespace Model
/**
- * <p>The count of parts this object has.</p>
+ * <p>The count of parts this object has. This value is only returned if you
+ * specify <code>partNumber</code> in your request and the object was uploaded as a
+ * multipart upload.</p>
*/
inline int GetPartsCount() const{ return m_partsCount; }
/**
- * <p>The count of parts this object has.</p>
+ * <p>The count of parts this object has. This value is only returned if you
+ * specify <code>partNumber</code> in your request and the object was uploaded as a
+ * multipart upload.</p>
*/
inline void SetPartsCount(int value) { m_partsCount = value; }
/**
- * <p>The count of parts this object has.</p>
+ * <p>The count of parts this object has. This value is only returned if you
+ * specify <code>partNumber</code> in your request and the object was uploaded as a
+ * multipart upload.</p>
*/
inline GetObjectResult& WithPartsCount(int value) { SetPartsCount(value); return *this;}
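// Illustrative sketch (not part of the diff): PartsCount is populated only when the
// request specifies a partNumber and the object was created by multipart upload, so a
// part-by-part reader can fetch part 1 first and learn how many parts remain. Bucket
// and key are placeholders; assumes a configured client.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <iostream>

void CountParts(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectRequest request;
    request.SetBucket("my-bucket");      // placeholder
    request.SetKey("backups/huge.tar");  // placeholder
    request.SetPartNumber(1);            // ask for the first part only
    auto outcome = s3.GetObject(request);
    if (outcome.IsSuccess())
    {
        std::cout << "Object has " << outcome.GetResult().GetPartsCount() << " part(s)\n";
    }
}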
@@ -1165,7 +1455,7 @@ namespace Model
private:
- Aws::Utils::Stream::ResponseStream m_body;
+ Aws::Utils::Stream::ResponseStream m_body;
bool m_deleteMarker;
@@ -1181,6 +1471,14 @@ namespace Model
Aws::String m_eTag;
+ Aws::String m_checksumCRC32;
+
+ Aws::String m_checksumCRC32C;
+
+ Aws::String m_checksumSHA1;
+
+ Aws::String m_checksumSHA256;
+
int m_missingMeta;
Aws::String m_versionId;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionRequest.h
index 15f8adc388..9687dc8dec 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectRetentionRequest : public S3Request
+ class GetObjectRetentionRequest : public S3Request
{
public:
- GetObjectRetentionRequest();
+ AWS_S3_API GetObjectRetentionRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,23 +35,27 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObjectRetention"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the object whose retention settings you want to
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -60,11 +64,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -73,11 +77,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -86,11 +90,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -99,11 +103,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -112,11 +116,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectRetentionRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -125,11 +129,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectRetentionRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -138,11 +142,11 @@ namespace Model
* retrieve. </p> <p>When using this action with an access point, you must direct
* requests to the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectRetentionRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -266,57 +270,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectRetentionRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectRetentionRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectRetentionRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -363,22 +367,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
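
For context on the request class touched above, a minimal usage sketch in the SDK's own style; the client call name (S3Client::GetObjectRetention) and the result accessor (GetRetention) follow the generated outcome pattern and are assumptions, not something this diff shows:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectRetentionRequest.h>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client s3;

        Aws::S3::Model::GetObjectRetentionRequest req;
        req.SetBucket("my-bucket");                  // or an access point ARN, as the comments above describe
        req.SetKey("locked-object.bin");
        req.SetExpectedBucketOwner("111122223333");  // mismatched owner -> 403 Forbidden (access denied)

        auto outcome = s3.GetObjectRetention(req);
        if (outcome.IsSuccess())
        {
            // GetRetention() is assumed to be the generated accessor for the ObjectLockRetention payload.
            const auto& retention = outcome.GetResult().GetRetention();
            (void)retention;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}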
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionResult.h
index 13c5e6b25d..3e05d76c10 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectRetentionResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectRetentionResult
+ class GetObjectRetentionResult
{
public:
- GetObjectRetentionResult();
- GetObjectRetentionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetObjectRetentionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectRetentionResult();
+ AWS_S3_API GetObjectRetentionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectRetentionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingRequest.h
index b29119c226..068557654f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectTaggingRequest : public S3Request
+ class GetObjectTaggingRequest : public S3Request
{
public:
- GetObjectTaggingRequest();
+ AWS_S3_API GetObjectTaggingRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObjectTagging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the object for which to get the tagging
@@ -48,19 +52,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -70,19 +74,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -92,19 +96,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -114,19 +118,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -136,19 +140,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -158,19 +162,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectTaggingRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -180,19 +184,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectTaggingRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -202,19 +206,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline GetObjectTaggingRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -303,57 +307,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectTaggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectTaggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectTaggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -419,22 +423,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
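
As above, a brief hedged sketch of GetObjectTagging driven through the client, passing an access point ARN in place of the bucket name exactly as the comments describe; GetTagSet(), GetKey() and GetValue() are the usual generated accessor names and are assumed here:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectTaggingRequest.h>
#include <iostream>

// Assumes an initialized Aws::S3::S3Client named "s3", as in the earlier sketch.
void PrintObjectTags(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectTaggingRequest req;
    // Per the comments above, an access point ARN can stand in for the bucket name.
    req.SetBucket("arn:aws:s3:us-east-1:111122223333:accesspoint/my-access-point");
    req.SetKey("photos/cat.jpg");

    auto outcome = s3.GetObjectTagging(req);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    // GetTagSet() is assumed to be the generated accessor for the returned tag list.
    for (const auto& tag : outcome.GetResult().GetTagSet())
    {
        std::cout << tag.GetKey() << "=" << tag.GetValue() << std::endl;
    }
}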
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingResult.h
index c945f85e61..c307b802a5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTaggingResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectTaggingResult
+ class GetObjectTaggingResult
{
public:
- GetObjectTaggingResult();
- GetObjectTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetObjectTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectTaggingResult();
+ AWS_S3_API GetObjectTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetObjectTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentRequest.h
index 08f359e8da..a01a7901a1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetObjectTorrentRequest : public S3Request
+ class GetObjectTorrentRequest : public S3Request
{
public:
- GetObjectTorrentRequest();
+ AWS_S3_API GetObjectTorrentRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetObjectTorrent"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the object for which to get the torrent
@@ -153,57 +157,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectTorrentRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectTorrentRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetObjectTorrentRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -250,19 +254,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentResult.h
index 97c8b3edf9..546c6d1c26 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetObjectTorrentResult.h
@@ -19,28 +19,28 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetObjectTorrentResult
+ class GetObjectTorrentResult
{
public:
- GetObjectTorrentResult();
+ AWS_S3_API GetObjectTorrentResult();
//We have to define these because Microsoft doesn't auto generate them
- GetObjectTorrentResult(GetObjectTorrentResult&&);
- GetObjectTorrentResult& operator=(GetObjectTorrentResult&&);
+ AWS_S3_API GetObjectTorrentResult(GetObjectTorrentResult&&);
+ AWS_S3_API GetObjectTorrentResult& operator=(GetObjectTorrentResult&&);
//we delete these because Microsoft doesn't handle move generation correctly
//and we therefore don't trust them to get it right here either.
GetObjectTorrentResult(const GetObjectTorrentResult&) = delete;
GetObjectTorrentResult& operator=(const GetObjectTorrentResult&) = delete;
- GetObjectTorrentResult(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
- GetObjectTorrentResult& operator=(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
+ AWS_S3_API GetObjectTorrentResult(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
+ AWS_S3_API GetObjectTorrentResult& operator=(Aws::AmazonWebServiceResult<Aws::Utils::Stream::ResponseStream>&& result);
/**
* <p>A Bencoded dictionary as defined by the BitTorrent specification</p>
*/
- inline Aws::IOStream& GetBody() { return m_body.GetUnderlyingStream(); }
+ inline Aws::IOStream& GetBody() const { return m_body.GetUnderlyingStream(); }
/**
* <p>A Bencoded dictionary as defined by the BitTorrent specification</p>
@@ -65,7 +65,7 @@ namespace Model
private:
- Aws::Utils::Stream::ResponseStream m_body;
+ Aws::Utils::Stream::ResponseStream m_body;
RequestCharged m_requestCharged;
};
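
A short sketch of fetching and saving the torrent body; it relies on the members visible above (GetBody() returning Aws::IOStream&, now callable on a const result) plus the assumed S3Client::GetObjectTorrent outcome call:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectTorrentRequest.h>
#include <fstream>

// Assumes an initialized Aws::S3::S3Client named "s3".
bool SaveTorrent(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectTorrentRequest req;
    req.SetBucket("my-bucket");
    req.SetKey("large-object.bin");

    auto outcome = s3.GetObjectTorrent(req);
    if (!outcome.IsSuccess())
    {
        return false;
    }
    // GetBody() exposes the Bencoded dictionary as a stream; copy it straight to disk.
    std::ofstream out("large-object.torrent", std::ios::binary);
    out << outcome.GetResult().GetBody().rdbuf();
    return out.good();
}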
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockRequest.h
index 1648400d7a..bbc11f1539 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API GetPublicAccessBlockRequest : public S3Request
+ class GetPublicAccessBlockRequest : public S3Request
{
public:
- GetPublicAccessBlockRequest();
+ AWS_S3_API GetPublicAccessBlockRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "GetPublicAccessBlock"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose <code>PublicAccessBlock</code>
@@ -92,57 +96,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetPublicAccessBlockRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetPublicAccessBlockRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline GetPublicAccessBlockRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -189,13 +193,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
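
A hedged sketch showing how ExpectedBucketOwner interacts with the 403 Forbidden behaviour described above; GetPublicAccessBlockConfiguration() and Aws::Http::HttpResponseCode::FORBIDDEN are assumed names from the wider SDK, not part of this diff:

#include <aws/core/http/HttpResponse.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetPublicAccessBlockRequest.h>

// Assumes an initialized Aws::S3::S3Client named "s3".
void CheckPublicAccessBlock(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetPublicAccessBlockRequest req;
    req.SetBucket("my-bucket");
    req.SetExpectedBucketOwner("111122223333");

    auto outcome = s3.GetPublicAccessBlock(req);
    if (!outcome.IsSuccess())
    {
        // A mismatched ExpectedBucketOwner surfaces as 403 Forbidden, as the comments above note.
        if (outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN)
        {
            // handle "wrong account" separately from other failures
        }
        return;
    }
    // GetPublicAccessBlockConfiguration() is assumed to be the generated accessor name.
    const auto& cfg = outcome.GetResult().GetPublicAccessBlockConfiguration();
    (void)cfg;
}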
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockResult.h
index 22d40032cb..05907ca4c0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GetPublicAccessBlockResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API GetPublicAccessBlockResult
+ class GetPublicAccessBlockResult
{
public:
- GetPublicAccessBlockResult();
- GetPublicAccessBlockResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- GetPublicAccessBlockResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetPublicAccessBlockResult();
+ AWS_S3_API GetPublicAccessBlockResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API GetPublicAccessBlockResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GlacierJobParameters.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GlacierJobParameters.h
index d6fcfdaee3..daf8e7eccc 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GlacierJobParameters.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/GlacierJobParameters.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/GlacierJobParameters">AWS
* API Reference</a></p>
*/
- class AWS_S3_API GlacierJobParameters
+ class GlacierJobParameters
{
public:
- GlacierJobParameters();
- GlacierJobParameters(const Aws::Utils::Xml::XmlNode& xmlNode);
- GlacierJobParameters& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API GlacierJobParameters();
+ AWS_S3_API GlacierJobParameters(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API GlacierJobParameters& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -70,7 +70,7 @@ namespace Model
private:
Tier m_tier;
- bool m_tierHasBeenSet;
+ bool m_tierHasBeenSet = false;
};
} // namespace Model
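
GlacierJobParameters is normally attached to a restore call; the sketch below assumes the surrounding generated types (RestoreRequest, RestoreObjectRequest, the Tier enum) keep their usual names, none of which appear in this diff:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GlacierJobParameters.h>
#include <aws/s3/model/RestoreObjectRequest.h>
#include <aws/s3/model/RestoreRequest.h>
#include <aws/s3/model/Tier.h>

// Assumes an initialized Aws::S3::S3Client named "s3"; the restore types are assumptions.
void RestoreFromGlacier(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GlacierJobParameters glacier;
    glacier.SetTier(Aws::S3::Model::Tier::Standard);

    Aws::S3::Model::RestoreRequest restore;
    restore.SetDays(2);
    restore.SetGlacierJobParameters(glacier);

    Aws::S3::Model::RestoreObjectRequest req;
    req.SetBucket("my-archive-bucket");
    req.SetKey("cold/data.bin");
    req.SetRestoreRequest(restore);

    s3.RestoreObject(req);
}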
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grant.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grant.h
index 392c95090e..90af4c1f4c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grant.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grant.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grant">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Grant
+ class Grant
{
public:
- Grant();
- Grant(const Aws::Utils::Xml::XmlNode& xmlNode);
- Grant& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Grant();
+ AWS_S3_API Grant(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Grant& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -102,10 +102,10 @@ namespace Model
private:
Grantee m_grantee;
- bool m_granteeHasBeenSet;
+ bool m_granteeHasBeenSet = false;
Permission m_permission;
- bool m_permissionHasBeenSet;
+ bool m_permissionHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grantee.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grantee.h
index a6715e45cd..1e6ad003d9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grantee.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Grantee.h
@@ -28,14 +28,14 @@ namespace Model
* <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Grantee">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Grantee
+ class Grantee
{
public:
- Grantee();
- Grantee(const Aws::Utils::Xml::XmlNode& xmlNode);
- Grantee& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Grantee();
+ AWS_S3_API Grantee(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Grantee& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -81,105 +81,113 @@ namespace Model
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline const Aws::String& GetEmailAddress() const{ return m_emailAddress; }
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline bool EmailAddressHasBeenSet() const { return m_emailAddressHasBeenSet; }
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline void SetEmailAddress(const Aws::String& value) { m_emailAddressHasBeenSet = true; m_emailAddress = value; }
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline void SetEmailAddress(Aws::String&& value) { m_emailAddressHasBeenSet = true; m_emailAddress = std::move(value); }
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline void SetEmailAddress(const char* value) { m_emailAddressHasBeenSet = true; m_emailAddress.assign(value); }
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline Grantee& WithEmailAddress(const Aws::String& value) { SetEmailAddress(value); return *this;}
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline Grantee& WithEmailAddress(Aws::String&& value) { SetEmailAddress(std::move(value)); return *this;}
/**
* <p>Email address of the grantee.</p> <p>Using email addresses to specify
- * a grantee is only supported in the following AWS Regions: </p> <ul> <li> <p>US
- * East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p> </li> <li> <p>
- * US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p> </li> <li>
- * <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p> </li> <li>
- * <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p> </li> </ul>
- * <p>For a list of all the Amazon S3 supported Regions and endpoints, see <a
+ * a grantee is only supported in the following Amazon Web Services Regions: </p>
+ * <ul> <li> <p>US East (N. Virginia)</p> </li> <li> <p>US West (N. California)</p>
+ * </li> <li> <p> US West (Oregon)</p> </li> <li> <p> Asia Pacific (Singapore)</p>
+ * </li> <li> <p>Asia Pacific (Sydney)</p> </li> <li> <p>Asia Pacific (Tokyo)</p>
+ * </li> <li> <p>Europe (Ireland)</p> </li> <li> <p>South America (São Paulo)</p>
+ * </li> </ul> <p>For a list of all the Amazon S3 supported Regions and endpoints,
+ * see <a
* href="https://docs.aws.amazon.com/general/latest/gr/rande.html#s3_region">Regions
- * and Endpoints</a> in the AWS General Reference.</p>
+ * and Endpoints</a> in the Amazon Web Services General Reference.</p>
*/
inline Grantee& WithEmailAddress(const char* value) { SetEmailAddress(value); return *this;}
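The doc comments above note that identifying a grantee by email address is only honoured in a short list of Regions. A minimal sketch of how this class might be wired into a grant, assuming a bucket in one of those Regions; the helper name MakeEmailGrant, the email address, and the READ permission are illustrative choices, not values taken from this patch:

#include <aws/s3/model/Grantee.h>
#include <aws/s3/model/Grant.h>
#include <aws/s3/model/Permission.h>
#include <aws/s3/model/Type.h>

// Build a Grant whose grantee is identified by email address.
// Type::AmazonCustomerByEmail is accepted by S3 only in the Regions
// listed in the comment above; elsewhere the request is rejected.
Aws::S3::Model::Grant MakeEmailGrant()
{
    Aws::S3::Model::Grantee grantee;
    grantee.SetType(Aws::S3::Model::Type::AmazonCustomerByEmail);
    grantee.SetEmailAddress("user@example.com");   // placeholder address

    Aws::S3::Model::Grant grant;
    grant.SetGrantee(grantee);
    grant.SetPermission(Aws::S3::Model::Permission::READ);
    return grant;
}

The resulting Grant would typically be attached to an AccessControlPolicy before being sent with an ACL request.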
@@ -299,19 +307,19 @@ namespace Model
private:
Aws::String m_displayName;
- bool m_displayNameHasBeenSet;
+ bool m_displayNameHasBeenSet = false;
Aws::String m_emailAddress;
- bool m_emailAddressHasBeenSet;
+ bool m_emailAddressHasBeenSet = false;
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
Type m_type;
- bool m_typeHasBeenSet;
+ bool m_typeHasBeenSet = false;
Aws::String m_uRI;
- bool m_uRIHasBeenSet;
+ bool m_uRIHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadBucketRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadBucketRequest.h
index 40078434f3..7c9c4240c9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadBucketRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadBucketRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API HeadBucketRequest : public S3Request
+ class HeadBucketRequest : public S3Request
{
public:
- HeadBucketRequest();
+ AWS_S3_API HeadBucketRequest();
    // Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,31 +34,35 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "HeadBucket"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p> <p>When using this action with an access point, you must
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -67,19 +71,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -88,19 +92,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -109,19 +113,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -130,19 +134,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -151,19 +155,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline HeadBucketRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -172,19 +176,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline HeadBucketRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -193,76 +197,76 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline HeadBucketRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
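As the comments above explain, the value passed to SetBucket/WithBucket can be a plain bucket name, an access point ARN, or an S3 on Outposts ARN. A minimal sketch, assuming an already-constructed S3Client; the function name BucketExists and the example names in the comment are illustrative placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadBucketRequest.h>

// HEAD a bucket either directly or through an access point: the access
// point ARN simply takes the place of the bucket name.
bool BucketExists(const Aws::S3::S3Client& client, const Aws::String& bucketOrArn)
{
    Aws::S3::Model::HeadBucketRequest request;
    // e.g. "my-bucket" or
    // "arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point" (placeholders)
    request.SetBucket(bucketOrArn);
    return client.HeadBucket(request).IsSuccess();
}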
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline HeadBucketRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline HeadBucketRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline HeadBucketRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
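A brief sketch of the ownership guard described above, assuming the 403 surfaces through the outcome's error response code; the function name, bucket, and account ID are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadBucketRequest.h>
#include <aws/core/http/HttpResponse.h>

// S3 answers 403 Forbidden (access denied) when the bucket is owned by an
// account other than the one given in ExpectedBucketOwner.
bool BucketOwnedBy(const Aws::S3::S3Client& client,
                   const Aws::String& bucket,
                   const Aws::String& accountId)
{
    Aws::S3::Model::HeadBucketRequest request;
    request.SetBucket(bucket);
    request.SetExpectedBucketOwner(accountId);    // e.g. "111122223333" (placeholder)

    auto outcome = client.HeadBucket(request);
    if (!outcome.IsSuccess() &&
        outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN)
    {
        // Either the caller lacks permission or the owner check failed.
        return false;
    }
    return outcome.IsSuccess();
}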
@@ -309,13 +313,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h
index 88865df794..a507cf6ad7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumMode.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API HeadObjectRequest : public S3Request
+ class HeadObjectRequest : public S3Request
{
public:
- HeadObjectRequest();
+ AWS_S3_API HeadObjectRequest();
    // Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,31 +37,35 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "HeadObject"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the object.</p> <p>When using this action
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -69,19 +74,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -90,19 +95,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -111,19 +116,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -132,19 +137,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -153,19 +158,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline HeadObjectRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -174,19 +179,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline HeadObjectRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -195,191 +200,191 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline HeadObjectRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
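For S3 on Outposts, the comments above say the Outposts bucket ARN goes where the bucket name would normally go. A minimal sketch under that assumption; the ARN, key, and helper name HeadOutpostsObject are illustrative placeholders following the documented form, not values from this patch:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadObjectRequest.h>

// HEAD an object in an S3 on Outposts bucket by passing the Outposts
// bucket ARN in place of the bucket name; the key is addressed as usual.
Aws::S3::Model::HeadObjectOutcome HeadOutpostsObject(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::HeadObjectRequest request;
    // Placeholder ARN following the form shown in the comment above.
    request.SetBucket("arn:aws:s3-outposts:us-west-2:123456789012:outpost/op-01234567890123456/bucket/my-outposts-bucket");
    request.SetKey("reports/2023/summary.csv");   // placeholder key
    return client.HeadObject(request);
}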
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline const Aws::String& GetIfMatch() const{ return m_ifMatch; }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline bool IfMatchHasBeenSet() const { return m_ifMatchHasBeenSet; }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfMatch(const Aws::String& value) { m_ifMatchHasBeenSet = true; m_ifMatch = value; }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfMatch(Aws::String&& value) { m_ifMatchHasBeenSet = true; m_ifMatch = std::move(value); }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfMatch(const char* value) { m_ifMatchHasBeenSet = true; m_ifMatch.assign(value); }
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline HeadObjectRequest& WithIfMatch(const Aws::String& value) { SetIfMatch(value); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline HeadObjectRequest& WithIfMatch(Aws::String&& value) { SetIfMatch(std::move(value)); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is the same as the one
- * specified, otherwise return a 412 (precondition failed).</p>
+ * specified; otherwise, return a 412 (precondition failed) error.</p>
*/
inline HeadObjectRequest& WithIfMatch(const char* value) { SetIfMatch(value); return *this;}
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline const Aws::Utils::DateTime& GetIfModifiedSince() const{ return m_ifModifiedSince; }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline bool IfModifiedSinceHasBeenSet() const { return m_ifModifiedSinceHasBeenSet; }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfModifiedSince(const Aws::Utils::DateTime& value) { m_ifModifiedSinceHasBeenSet = true; m_ifModifiedSince = value; }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfModifiedSince(Aws::Utils::DateTime&& value) { m_ifModifiedSinceHasBeenSet = true; m_ifModifiedSince = std::move(value); }
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline HeadObjectRequest& WithIfModifiedSince(const Aws::Utils::DateTime& value) { SetIfModifiedSince(value); return *this;}
/**
- * <p>Return the object only if it has been modified since the specified time,
- * otherwise return a 304 (not modified).</p>
+ * <p>Return the object only if it has been modified since the specified time;
+ * otherwise, return a 304 (not modified) error.</p>
*/
inline HeadObjectRequest& WithIfModifiedSince(Aws::Utils::DateTime&& value) { SetIfModifiedSince(std::move(value)); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline const Aws::String& GetIfNoneMatch() const{ return m_ifNoneMatch; }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline bool IfNoneMatchHasBeenSet() const { return m_ifNoneMatchHasBeenSet; }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfNoneMatch(const Aws::String& value) { m_ifNoneMatchHasBeenSet = true; m_ifNoneMatch = value; }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfNoneMatch(Aws::String&& value) { m_ifNoneMatchHasBeenSet = true; m_ifNoneMatch = std::move(value); }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline void SetIfNoneMatch(const char* value) { m_ifNoneMatchHasBeenSet = true; m_ifNoneMatch.assign(value); }
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline HeadObjectRequest& WithIfNoneMatch(const Aws::String& value) { SetIfNoneMatch(value); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline HeadObjectRequest& WithIfNoneMatch(Aws::String&& value) { SetIfNoneMatch(std::move(value)); return *this;}
/**
* <p>Return the object only if its entity tag (ETag) is different from the one
- * specified, otherwise return a 304 (not modified).</p>
+ * specified; otherwise, return a 304 (not modified) error.</p>
*/
inline HeadObjectRequest& WithIfNoneMatch(const char* value) { SetIfNoneMatch(value); return *this;}
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline const Aws::Utils::DateTime& GetIfUnmodifiedSince() const{ return m_ifUnmodifiedSince; }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline bool IfUnmodifiedSinceHasBeenSet() const { return m_ifUnmodifiedSinceHasBeenSet; }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfUnmodifiedSince(const Aws::Utils::DateTime& value) { m_ifUnmodifiedSinceHasBeenSet = true; m_ifUnmodifiedSince = value; }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline void SetIfUnmodifiedSince(Aws::Utils::DateTime&& value) { m_ifUnmodifiedSinceHasBeenSet = true; m_ifUnmodifiedSince = std::move(value); }
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline HeadObjectRequest& WithIfUnmodifiedSince(const Aws::Utils::DateTime& value) { SetIfUnmodifiedSince(value); return *this;}
/**
- * <p>Return the object only if it has not been modified since the specified time,
- * otherwise return a 412 (precondition failed).</p>
+ * <p>Return the object only if it has not been modified since the specified time;
+ * otherwise, return a 412 (precondition failed) error.</p>
*/
inline HeadObjectRequest& WithIfUnmodifiedSince(Aws::Utils::DateTime&& value) { SetIfUnmodifiedSince(std::move(value)); return *this;}
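The four conditional headers above map onto the standard HTTP preconditions: a failed If-None-Match or If-Modified-Since check yields 304 (not modified), a failed If-Match or If-Unmodified-Since check yields 412 (precondition failed). A cache-revalidation sketch, assuming the caller holds an ETag from an earlier response and that the 304 surfaces as a failed outcome carrying that response code; bucket, key, ETag, and the function name are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <aws/core/http/HttpResponse.h>

// Returns true when the cached copy identified by cachedETag is still
// current, i.e. S3 answers 304 Not Modified to the conditional HEAD.
bool CachedCopyIsCurrent(const Aws::S3::S3Client& client,
                         const Aws::String& bucket,
                         const Aws::String& key,
                         const Aws::String& cachedETag)
{
    Aws::S3::Model::HeadObjectRequest request;
    request.SetBucket(bucket);
    request.SetKey(key);
    request.SetIfNoneMatch(cachedETag);           // 304 if the ETag still matches

    auto outcome = client.HeadObject(request);
    return !outcome.IsSuccess() &&
           outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::NOT_MODIFIED;
}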
@@ -426,74 +431,50 @@ namespace Model
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline const Aws::String& GetRange() const{ return m_range; }
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline bool RangeHasBeenSet() const { return m_rangeHasBeenSet; }
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline void SetRange(const Aws::String& value) { m_rangeHasBeenSet = true; m_range = value; }
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline void SetRange(Aws::String&& value) { m_rangeHasBeenSet = true; m_range = std::move(value); }
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline void SetRange(const char* value) { m_rangeHasBeenSet = true; m_range.assign(value); }
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline HeadObjectRequest& WithRange(const Aws::String& value) { SetRange(value); return *this;}
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline HeadObjectRequest& WithRange(Aws::String&& value) { SetRange(std::move(value)); return *this;}
/**
- * <p>Downloads the specified range bytes of an object. For more information about
- * the HTTP Range header, see <a
- * href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35</a>.</p>
- * <p>Amazon S3 doesn't support retrieving multiple ranges of data per
- * <code>GET</code> request.</p>
+ * <p>Because <code>HeadObject</code> returns only the metadata for an object, this
+ * parameter has no effect.</p>
*/
inline HeadObjectRequest& WithRange(const char* value) { SetRange(value); return *this;}
@@ -772,61 +753,116 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline HeadObjectRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline HeadObjectRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline HeadObjectRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+ /**
+ * <p>To retrieve the checksum, this parameter must be enabled.</p> <p>In addition,
+ * if you enable <code>ChecksumMode</code> and the object is encrypted with Amazon
+ * Web Services Key Management Service (Amazon Web Services KMS), you must have
+ * permission to use the <code>kms:Decrypt</code> action for the request to
+ * succeed.</p>
+ */
+ inline const ChecksumMode& GetChecksumMode() const{ return m_checksumMode; }
+
+ /**
+ * <p>To retrieve the checksum, this parameter must be enabled.</p> <p>In addition,
+ * if you enable <code>ChecksumMode</code> and the object is encrypted with Amazon
+ * Web Services Key Management Service (Amazon Web Services KMS), you must have
+ * permission to use the <code>kms:Decrypt</code> action for the request to
+ * succeed.</p>
+ */
+ inline bool ChecksumModeHasBeenSet() const { return m_checksumModeHasBeenSet; }
+
+ /**
+ * <p>To retrieve the checksum, this parameter must be enabled.</p> <p>In addition,
+ * if you enable <code>ChecksumMode</code> and the object is encrypted with Amazon
+ * Web Services Key Management Service (Amazon Web Services KMS), you must have
+ * permission to use the <code>kms:Decrypt</code> action for the request to
+ * succeed.</p>
+ */
+ inline void SetChecksumMode(const ChecksumMode& value) { m_checksumModeHasBeenSet = true; m_checksumMode = value; }
+
+ /**
+ * <p>To retrieve the checksum, this parameter must be enabled.</p> <p>In addition,
+ * if you enable <code>ChecksumMode</code> and the object is encrypted with Amazon
+ * Web Services Key Management Service (Amazon Web Services KMS), you must have
+ * permission to use the <code>kms:Decrypt</code> action for the request to
+ * succeed.</p>
+ */
+ inline void SetChecksumMode(ChecksumMode&& value) { m_checksumModeHasBeenSet = true; m_checksumMode = std::move(value); }
+
+ /**
+ * <p>To retrieve the checksum, this parameter must be enabled.</p> <p>In addition,
+ * if you enable <code>ChecksumMode</code> and the object is encrypted with Amazon
+ * Web Services Key Management Service (Amazon Web Services KMS), you must have
+ * permission to use the <code>kms:Decrypt</code> action for the request to
+ * succeed.</p>
+ */
+ inline HeadObjectRequest& WithChecksumMode(const ChecksumMode& value) { SetChecksumMode(value); return *this;}
+
+ /**
+ * <p>To retrieve the checksum, this parameter must be enabled.</p> <p>In addition,
+ * if you enable <code>ChecksumMode</code> and the object is encrypted with Amazon
+ * Web Services Key Management Service (Amazon Web Services KMS), you must have
+ * permission to use the <code>kms:Decrypt</code> action for the request to
+ * succeed.</p>
+ */
+ inline HeadObjectRequest& WithChecksumMode(ChecksumMode&& value) { SetChecksumMode(std::move(value)); return *this;}
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -869,49 +905,52 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_ifMatch;
- bool m_ifMatchHasBeenSet;
+ bool m_ifMatchHasBeenSet = false;
Aws::Utils::DateTime m_ifModifiedSince;
- bool m_ifModifiedSinceHasBeenSet;
+ bool m_ifModifiedSinceHasBeenSet = false;
Aws::String m_ifNoneMatch;
- bool m_ifNoneMatchHasBeenSet;
+ bool m_ifNoneMatchHasBeenSet = false;
Aws::Utils::DateTime m_ifUnmodifiedSince;
- bool m_ifUnmodifiedSinceHasBeenSet;
+ bool m_ifUnmodifiedSinceHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_range;
- bool m_rangeHasBeenSet;
+ bool m_rangeHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
int m_partNumber;
- bool m_partNumberHasBeenSet;
+ bool m_partNumberHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ ChecksumMode m_checksumMode;
+ bool m_checksumModeHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
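
The hunks above add the ChecksumMode request field and reword the Range and ExpectedBucketOwner documentation for HeadObjectRequest. As a minimal sketch of how a caller might exercise the new field with this SDK version (not part of the patch; bucket name, key and account ID are placeholders):

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/HeadObjectRequest.h>
    #include <aws/s3/model/ChecksumMode.h>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client s3;

            Aws::S3::Model::HeadObjectRequest request;
            request.SetBucket("example-bucket");   // placeholder bucket
            request.SetKey("example-key");         // placeholder key
            // HeadObject returns only metadata, so Range is intentionally not set.
            // A mismatched owner makes the request fail with 403 Forbidden.
            request.SetExpectedBucketOwner("111122223333");  // placeholder account ID
            // Without ChecksumMode::ENABLED the checksum headers are not returned,
            // and kms:Decrypt permission is needed for SSE-KMS objects.
            request.SetChecksumMode(Aws::S3::Model::ChecksumMode::ENABLED);

            auto outcome = s3.HeadObject(request);
            if (!outcome.IsSuccess()) {
                // Error handling elided in this sketch.
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
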
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectResult.h
index e12fddb225..fe2fd7373f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/HeadObjectResult.h
@@ -33,12 +33,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API HeadObjectResult
+ class HeadObjectResult
{
public:
- HeadObjectResult();
- HeadObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- HeadObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API HeadObjectResult();
+ AWS_S3_API HeadObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API HeadObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -98,57 +98,57 @@ namespace Model
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline const Aws::String& GetExpiration() const{ return m_expiration; }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const Aws::String& value) { m_expiration = value; }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(Aws::String&& value) { m_expiration = std::move(value); }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const char* value) { m_expiration.assign(value); }
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline HeadObjectResult& WithExpiration(const Aws::String& value) { SetExpiration(value); return *this;}
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline HeadObjectResult& WithExpiration(Aws::String&& value) { SetExpiration(std::move(value)); return *this;}
/**
* <p>If the object expiration is configured (see PUT Bucket lifecycle), the
- * response includes this header. It includes the expiry-date and rule-id key-value
- * pairs providing object expiration information. The value of the rule-id is URL
- * encoded.</p>
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs providing object expiration information.
+ * The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline HeadObjectResult& WithExpiration(const char* value) { SetExpiration(value); return *this;}
@@ -342,44 +342,328 @@ namespace Model
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline HeadObjectResult& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
+ /**
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline const Aws::String& GetETag() const{ return m_eTag; }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline void SetETag(const Aws::String& value) { m_eTag = value; }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline void SetETag(Aws::String&& value) { m_eTag = std::move(value); }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline void SetETag(const char* value) { m_eTag.assign(value); }
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline HeadObjectResult& WithETag(const Aws::String& value) { SetETag(value); return *this;}
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline HeadObjectResult& WithETag(Aws::String&& value) { SetETag(std::move(value)); return *this;}
/**
- * <p>An ETag is an opaque identifier assigned by a web server to a specific
- * version of a resource found at a URL.</p>
+ * <p>An entity tag (ETag) is an opaque identifier assigned by a web server to a
+ * specific version of a resource found at a URL.</p>
*/
inline HeadObjectResult& WithETag(const char* value) { SetETag(value); return *this;}
@@ -719,40 +1003,40 @@ namespace Model
/**
- * <p>If the object is stored using server-side encryption either with an AWS KMS
- * customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ * <p>If the object is stored using server-side encryption either with an Amazon
+ * Web Services KMS key or an Amazon S3-managed encryption key, the response
* includes this header with the value of the server-side encryption algorithm used
* when storing this object in Amazon S3 (for example, AES256, aws:kms).</p>
*/
inline const ServerSideEncryption& GetServerSideEncryption() const{ return m_serverSideEncryption; }
/**
- * <p>If the object is stored using server-side encryption either with an AWS KMS
- * customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ * <p>If the object is stored using server-side encryption either with an Amazon
+ * Web Services KMS key or an Amazon S3-managed encryption key, the response
* includes this header with the value of the server-side encryption algorithm used
* when storing this object in Amazon S3 (for example, AES256, aws:kms).</p>
*/
inline void SetServerSideEncryption(const ServerSideEncryption& value) { m_serverSideEncryption = value; }
/**
- * <p>If the object is stored using server-side encryption either with an AWS KMS
- * customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ * <p>If the object is stored using server-side encryption either with an Amazon
+ * Web Services KMS key or an Amazon S3-managed encryption key, the response
* includes this header with the value of the server-side encryption algorithm used
* when storing this object in Amazon S3 (for example, AES256, aws:kms).</p>
*/
inline void SetServerSideEncryption(ServerSideEncryption&& value) { m_serverSideEncryption = std::move(value); }
/**
- * <p>If the object is stored using server-side encryption either with an AWS KMS
- * customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ * <p>If the object is stored using server-side encryption either with an Amazon
+ * Web Services KMS key or an Amazon S3-managed encryption key, the response
* includes this header with the value of the server-side encryption algorithm used
* when storing this object in Amazon S3 (for example, AES256, aws:kms).</p>
*/
inline HeadObjectResult& WithServerSideEncryption(const ServerSideEncryption& value) { SetServerSideEncryption(value); return *this;}
/**
- * <p>If the object is stored using server-side encryption either with an AWS KMS
- * customer master key (CMK) or an Amazon S3-managed encryption key, the response
+ * <p>If the object is stored using server-side encryption either with an Amazon
+ * Web Services KMS key or an Amazon S3-managed encryption key, the response
* includes this header with the value of the server-side encryption algorithm used
* when storing this object in Amazon S3 (for example, AES256, aws:kms).</p>
*/
@@ -921,70 +1205,70 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline HeadObjectResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline HeadObjectResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline HeadObjectResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>Indicates whether the object uses an S3 Bucket Key for server-side encryption
- * with AWS KMS (SSE-KMS).</p>
+ * with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the object uses an S3 Bucket Key for server-side encryption
- * with AWS KMS (SSE-KMS).</p>
+ * with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the object uses an S3 Bucket Key for server-side encryption
- * with AWS KMS (SSE-KMS).</p>
+ * with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline HeadObjectResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
@@ -1059,26 +1343,27 @@ namespace Model
* object (<code>GetObject</code>) or object metadata (<code>HeadObject</code>)
* from these buckets, Amazon S3 will return the
* <code>x-amz-replication-status</code> header in the response as follows:</p>
- * <ul> <li> <p>If requesting an object from the source bucket — Amazon S3 will
- * return the <code>x-amz-replication-status</code> header if the object in your
- * request is eligible for replication.</p> <p> For example, suppose that in your
- * replication configuration, you specify object prefix <code>TaxDocs</code>
+ * <ul> <li> <p> <b>If requesting an object from the source bucket</b>, Amazon S3
+ * will return the <code>x-amz-replication-status</code> header if the object in
+ * your request is eligible for replication.</p> <p> For example, suppose that in
+ * your replication configuration, you specify object prefix <code>TaxDocs</code>
* requesting Amazon S3 to replicate objects with key prefix <code>TaxDocs</code>.
* Any objects you upload with this key name prefix, for example
* <code>TaxDocs/document1.pdf</code>, are eligible for replication. For any object
* request with this key name prefix, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value PENDING, COMPLETED or
- * FAILED indicating object replication status.</p> </li> <li> <p>If requesting an
- * object from a destination bucket — Amazon S3 will return the
+ * FAILED indicating object replication status.</p> </li> <li> <p> <b>If requesting
+ * an object from a destination bucket</b>, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value REPLICA if the object in
* your request is a replica that Amazon S3 created and there is no replica
- * modification replication in progress.</p> </li> <li> <p>When replicating objects
- * to multiple destination buckets the <code>x-amz-replication-status</code> header
- * acts differently. The header of the source object will only return a value of
- * COMPLETED when replication is successful to all destinations. The header will
- * remain at value PENDING until replication has completed for all destinations. If
- * one or more destinations fails replication the header will return FAILED. </p>
- * </li> </ul> <p>For more information, see <a
+ * modification replication in progress.</p> </li> <li> <p> <b>When replicating
+ * objects to multiple destination buckets</b>, the
+ * <code>x-amz-replication-status</code> header acts differently. The header of the
+ * source object will only return a value of COMPLETED when replication is
+ * successful to all destinations. The header will remain at value PENDING until
+ * replication has completed for all destinations. If one or more destinations
+ * fails replication the header will return FAILED. </p> </li> </ul> <p>For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Replication</a>.</p>
*/
inline const ReplicationStatus& GetReplicationStatus() const{ return m_replicationStatus; }
@@ -1091,26 +1376,27 @@ namespace Model
* object (<code>GetObject</code>) or object metadata (<code>HeadObject</code>)
* from these buckets, Amazon S3 will return the
* <code>x-amz-replication-status</code> header in the response as follows:</p>
- * <ul> <li> <p>If requesting an object from the source bucket — Amazon S3 will
- * return the <code>x-amz-replication-status</code> header if the object in your
- * request is eligible for replication.</p> <p> For example, suppose that in your
- * replication configuration, you specify object prefix <code>TaxDocs</code>
+ * <ul> <li> <p> <b>If requesting an object from the source bucket</b>, Amazon S3
+ * will return the <code>x-amz-replication-status</code> header if the object in
+ * your request is eligible for replication.</p> <p> For example, suppose that in
+ * your replication configuration, you specify object prefix <code>TaxDocs</code>
* requesting Amazon S3 to replicate objects with key prefix <code>TaxDocs</code>.
* Any objects you upload with this key name prefix, for example
* <code>TaxDocs/document1.pdf</code>, are eligible for replication. For any object
* request with this key name prefix, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value PENDING, COMPLETED or
- * FAILED indicating object replication status.</p> </li> <li> <p>If requesting an
- * object from a destination bucket — Amazon S3 will return the
+ * FAILED indicating object replication status.</p> </li> <li> <p> <b>If requesting
+ * an object from a destination bucket</b>, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value REPLICA if the object in
* your request is a replica that Amazon S3 created and there is no replica
- * modification replication in progress.</p> </li> <li> <p>When replicating objects
- * to multiple destination buckets the <code>x-amz-replication-status</code> header
- * acts differently. The header of the source object will only return a value of
- * COMPLETED when replication is successful to all destinations. The header will
- * remain at value PENDING until replication has completed for all destinations. If
- * one or more destinations fails replication the header will return FAILED. </p>
- * </li> </ul> <p>For more information, see <a
+ * modification replication in progress.</p> </li> <li> <p> <b>When replicating
+ * objects to multiple destination buckets</b>, the
+ * <code>x-amz-replication-status</code> header acts differently. The header of the
+ * source object will only return a value of COMPLETED when replication is
+ * successful to all destinations. The header will remain at value PENDING until
+ * replication has completed for all destinations. If one or more destinations
+ * fails replication the header will return FAILED. </p> </li> </ul> <p>For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Replication</a>.</p>
*/
inline void SetReplicationStatus(const ReplicationStatus& value) { m_replicationStatus = value; }
@@ -1123,26 +1409,27 @@ namespace Model
* object (<code>GetObject</code>) or object metadata (<code>HeadObject</code>)
* from these buckets, Amazon S3 will return the
* <code>x-amz-replication-status</code> header in the response as follows:</p>
- * <ul> <li> <p>If requesting an object from the source bucket — Amazon S3 will
- * return the <code>x-amz-replication-status</code> header if the object in your
- * request is eligible for replication.</p> <p> For example, suppose that in your
- * replication configuration, you specify object prefix <code>TaxDocs</code>
+ * <ul> <li> <p> <b>If requesting an object from the source bucket</b>, Amazon S3
+ * will return the <code>x-amz-replication-status</code> header if the object in
+ * your request is eligible for replication.</p> <p> For example, suppose that in
+ * your replication configuration, you specify object prefix <code>TaxDocs</code>
* requesting Amazon S3 to replicate objects with key prefix <code>TaxDocs</code>.
* Any objects you upload with this key name prefix, for example
* <code>TaxDocs/document1.pdf</code>, are eligible for replication. For any object
* request with this key name prefix, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value PENDING, COMPLETED or
- * FAILED indicating object replication status.</p> </li> <li> <p>If requesting an
- * object from a destination bucket — Amazon S3 will return the
+ * FAILED indicating object replication status.</p> </li> <li> <p> <b>If requesting
+ * an object from a destination bucket</b>, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value REPLICA if the object in
* your request is a replica that Amazon S3 created and there is no replica
- * modification replication in progress.</p> </li> <li> <p>When replicating objects
- * to multiple destination buckets the <code>x-amz-replication-status</code> header
- * acts differently. The header of the source object will only return a value of
- * COMPLETED when replication is successful to all destinations. The header will
- * remain at value PENDING until replication has completed for all destinations. If
- * one or more destinations fails replication the header will return FAILED. </p>
- * </li> </ul> <p>For more information, see <a
+ * modification replication in progress.</p> </li> <li> <p> <b>When replicating
+ * objects to multiple destination buckets</b>, the
+ * <code>x-amz-replication-status</code> header acts differently. The header of the
+ * source object will only return a value of COMPLETED when replication is
+ * successful to all destinations. The header will remain at value PENDING until
+ * replication has completed for all destinations. If one or more destinations
+ * fails replication the header will return FAILED. </p> </li> </ul> <p>For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Replication</a>.</p>
*/
inline void SetReplicationStatus(ReplicationStatus&& value) { m_replicationStatus = std::move(value); }
@@ -1155,26 +1442,27 @@ namespace Model
* object (<code>GetObject</code>) or object metadata (<code>HeadObject</code>)
* from these buckets, Amazon S3 will return the
* <code>x-amz-replication-status</code> header in the response as follows:</p>
- * <ul> <li> <p>If requesting an object from the source bucket — Amazon S3 will
- * return the <code>x-amz-replication-status</code> header if the object in your
- * request is eligible for replication.</p> <p> For example, suppose that in your
- * replication configuration, you specify object prefix <code>TaxDocs</code>
+ * <ul> <li> <p> <b>If requesting an object from the source bucket</b>, Amazon S3
+ * will return the <code>x-amz-replication-status</code> header if the object in
+ * your request is eligible for replication.</p> <p> For example, suppose that in
+ * your replication configuration, you specify object prefix <code>TaxDocs</code>
* requesting Amazon S3 to replicate objects with key prefix <code>TaxDocs</code>.
* Any objects you upload with this key name prefix, for example
* <code>TaxDocs/document1.pdf</code>, are eligible for replication. For any object
* request with this key name prefix, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value PENDING, COMPLETED or
- * FAILED indicating object replication status.</p> </li> <li> <p>If requesting an
- * object from a destination bucket — Amazon S3 will return the
+ * FAILED indicating object replication status.</p> </li> <li> <p> <b>If requesting
+ * an object from a destination bucket</b>, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value REPLICA if the object in
* your request is a replica that Amazon S3 created and there is no replica
- * modification replication in progress.</p> </li> <li> <p>When replicating objects
- * to multiple destination buckets the <code>x-amz-replication-status</code> header
- * acts differently. The header of the source object will only return a value of
- * COMPLETED when replication is successful to all destinations. The header will
- * remain at value PENDING until replication has completed for all destinations. If
- * one or more destinations fails replication the header will return FAILED. </p>
- * </li> </ul> <p>For more information, see <a
+ * modification replication in progress.</p> </li> <li> <p> <b>When replicating
+ * objects to multiple destination buckets</b>, the
+ * <code>x-amz-replication-status</code> header acts differently. The header of the
+ * source object will only return a value of COMPLETED when replication is
+ * successful to all destinations. The header will remain at value PENDING until
+ * replication has completed for all destinations. If one or more destinations
+ * fails replication the header will return FAILED. </p> </li> </ul> <p>For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Replication</a>.</p>
*/
inline HeadObjectResult& WithReplicationStatus(const ReplicationStatus& value) { SetReplicationStatus(value); return *this;}
@@ -1187,43 +1475,50 @@ namespace Model
* object (<code>GetObject</code>) or object metadata (<code>HeadObject</code>)
* from these buckets, Amazon S3 will return the
* <code>x-amz-replication-status</code> header in the response as follows:</p>
- * <ul> <li> <p>If requesting an object from the source bucket — Amazon S3 will
- * return the <code>x-amz-replication-status</code> header if the object in your
- * request is eligible for replication.</p> <p> For example, suppose that in your
- * replication configuration, you specify object prefix <code>TaxDocs</code>
+ * <ul> <li> <p> <b>If requesting an object from the source bucket</b>, Amazon S3
+ * will return the <code>x-amz-replication-status</code> header if the object in
+ * your request is eligible for replication.</p> <p> For example, suppose that in
+ * your replication configuration, you specify object prefix <code>TaxDocs</code>
* requesting Amazon S3 to replicate objects with key prefix <code>TaxDocs</code>.
* Any objects you upload with this key name prefix, for example
* <code>TaxDocs/document1.pdf</code>, are eligible for replication. For any object
* request with this key name prefix, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value PENDING, COMPLETED or
- * FAILED indicating object replication status.</p> </li> <li> <p>If requesting an
- * object from a destination bucket — Amazon S3 will return the
+ * FAILED indicating object replication status.</p> </li> <li> <p> <b>If requesting
+ * an object from a destination bucket</b>, Amazon S3 will return the
* <code>x-amz-replication-status</code> header with value REPLICA if the object in
* your request is a replica that Amazon S3 created and there is no replica
- * modification replication in progress.</p> </li> <li> <p>When replicating objects
- * to multiple destination buckets the <code>x-amz-replication-status</code> header
- * acts differently. The header of the source object will only return a value of
- * COMPLETED when replication is successful to all destinations. The header will
- * remain at value PENDING until replication has completed for all destinations. If
- * one or more destinations fails replication the header will return FAILED. </p>
- * </li> </ul> <p>For more information, see <a
+ * modification replication in progress.</p> </li> <li> <p> <b>When replicating
+ * objects to multiple destination buckets</b>, the
+ * <code>x-amz-replication-status</code> header acts differently. The header of the
+ * source object will only return a value of COMPLETED when replication is
+ * successful to all destinations. The header will remain at value PENDING until
+ * replication has completed for all destinations. If one or more destinations
+ * fails replication the header will return FAILED. </p> </li> </ul> <p>For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Replication</a>.</p>
*/
inline HeadObjectResult& WithReplicationStatus(ReplicationStatus&& value) { SetReplicationStatus(std::move(value)); return *this;}
/**
- * <p>The count of parts this object has.</p>
+ * <p>The count of parts this object has. This value is only returned if you
+ * specify <code>partNumber</code> in your request and the object was uploaded as a
+ * multipart upload.</p>
*/
inline int GetPartsCount() const{ return m_partsCount; }
/**
- * <p>The count of parts this object has.</p>
+ * <p>The count of parts this object has. This value is only returned if you
+ * specify <code>partNumber</code> in your request and the object was uploaded as a
+ * multipart upload.</p>
*/
inline void SetPartsCount(int value) { m_partsCount = value; }
/**
- * <p>The count of parts this object has.</p>
+ * <p>The count of parts this object has. This value is only returned if you
+ * specify <code>partNumber</code> in your request and the object was uploaded as a
+ * multipart upload.</p>
*/
inline HeadObjectResult& WithPartsCount(int value) { SetPartsCount(value); return *this;}
@@ -1381,6 +1676,14 @@ namespace Model
long long m_contentLength;
+ Aws::String m_checksumCRC32;
+
+ Aws::String m_checksumCRC32C;
+
+ Aws::String m_checksumSHA1;
+
+ Aws::String m_checksumSHA256;
+
Aws::String m_eTag;
int m_missingMeta;
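
The result-side hunks above add the four checksum getters and expand the notes on ReplicationStatus and PartsCount. A small sketch, again not part of the patch, of reading those fields from a successful HeadObject outcome (the InspectHeadObject helper name is invented for illustration):

    #include <aws/s3/model/HeadObjectResult.h>
    #include <aws/s3/model/ReplicationStatus.h>
    #include <iostream>

    // Hypothetical helper: inspects fields added or re-documented by this update.
    void InspectHeadObject(const Aws::S3::Model::HeadObjectResult& result)
    {
        // Checksum getters return an empty string unless the object was uploaded
        // with that algorithm and the request enabled ChecksumMode.
        if (!result.GetChecksumSHA256().empty()) {
            std::cout << "sha256: " << result.GetChecksumSHA256() << "\n";
        }
        if (!result.GetChecksumCRC32().empty()) {
            std::cout << "crc32: " << result.GetChecksumCRC32() << "\n";
        }

        // Only meaningful when partNumber was sent and the object was uploaded
        // as a multipart upload.
        std::cout << "parts: " << result.GetPartsCount() << "\n";

        // x-amz-replication-status surfaces through the ReplicationStatus enum.
        if (result.GetReplicationStatus() == Aws::S3::Model::ReplicationStatus::PENDING) {
            std::cout << "replication still pending for at least one destination\n";
        }
    }
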
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h
index 8f303f7252..992c178465 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IndexDocument.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IndexDocument">AWS
* API Reference</a></p>
*/
- class AWS_S3_API IndexDocument
+ class IndexDocument
{
public:
- IndexDocument();
- IndexDocument(const Aws::Utils::Xml::XmlNode& xmlNode);
- IndexDocument& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IndexDocument();
+ AWS_S3_API IndexDocument(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IndexDocument& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -144,7 +144,7 @@ namespace Model
private:
Aws::String m_suffix;
- bool m_suffixHasBeenSet;
+ bool m_suffixHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Initiator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Initiator.h
index 7e831b799d..4d7d331f27 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Initiator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Initiator.h
@@ -28,61 +28,61 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Initiator">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Initiator
+ class Initiator
{
public:
- Initiator();
- Initiator(const Aws::Utils::Xml::XmlNode& xmlNode);
- Initiator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Initiator();
+ AWS_S3_API Initiator(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Initiator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline const Aws::String& GetID() const{ return m_iD; }
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline bool IDHasBeenSet() const { return m_iDHasBeenSet; }
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline void SetID(const Aws::String& value) { m_iDHasBeenSet = true; m_iD = value; }
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline void SetID(Aws::String&& value) { m_iDHasBeenSet = true; m_iD = std::move(value); }
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline void SetID(const char* value) { m_iDHasBeenSet = true; m_iD.assign(value); }
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline Initiator& WithID(const Aws::String& value) { SetID(value); return *this;}
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline Initiator& WithID(Aws::String&& value) { SetID(std::move(value)); return *this;}
/**
- * <p>If the principal is an AWS account, it provides the Canonical User ID. If the
- * principal is an IAM User, it provides a user ARN value.</p>
+ * <p>If the principal is an Amazon Web Services account, it provides the Canonical
+ * User ID. If the principal is an IAM User, it provides a user ARN value.</p>
*/
inline Initiator& WithID(const char* value) { SetID(value); return *this;}
@@ -130,10 +130,10 @@ namespace Model
private:
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
Aws::String m_displayName;
- bool m_displayNameHasBeenSet;
+ bool m_displayNameHasBeenSet = false;
};
} // namespace Model
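
Note (not part of the commit): the reworded Initiator documentation above (canonical user ID for an Amazon Web Services account, ARN for an IAM user) applies to multipart-upload listings. A minimal sketch, assuming the usual ListMultipartUploads call and an already-initialized SDK and client; the bucket name is a placeholder.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListMultipartUploadsRequest.h>
#include <iostream>

// Assumes Aws::InitAPI() has already been called, as in the earlier sketch.
void PrintUploadInitiators(const Aws::S3::S3Client& s3) {
    Aws::S3::Model::ListMultipartUploadsRequest req;
    req.SetBucket("example-bucket");  // placeholder
    auto outcome = s3.ListMultipartUploads(req);
    if (!outcome.IsSuccess()) return;
    for (const auto& upload : outcome.GetResult().GetUploads()) {
        const auto& initiator = upload.GetInitiator();
        // GetID() holds a canonical user ID for an account, or an ARN for an IAM user.
        std::cout << initiator.GetDisplayName() << " -> " << initiator.GetID() << "\n";
    }
}
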
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InputSerialization.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InputSerialization.h
index 332b4ec21d..217575cb50 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InputSerialization.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InputSerialization.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InputSerialization">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InputSerialization
+ class InputSerialization
{
public:
- InputSerialization();
- InputSerialization(const Aws::Utils::Xml::XmlNode& xmlNode);
- InputSerialization& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InputSerialization();
+ AWS_S3_API InputSerialization(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InputSerialization& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -173,16 +173,16 @@ namespace Model
private:
CSVInput m_cSV;
- bool m_cSVHasBeenSet;
+ bool m_cSVHasBeenSet = false;
CompressionType m_compressionType;
- bool m_compressionTypeHasBeenSet;
+ bool m_compressionTypeHasBeenSet = false;
JSONInput m_jSON;
- bool m_jSONHasBeenSet;
+ bool m_jSONHasBeenSet = false;
ParquetInput m_parquet;
- bool m_parquetHasBeenSet;
+ bool m_parquetHasBeenSet = false;
};
} // namespace Model
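
Note (not part of the commit): InputSerialization, with the CSV/compression/JSON/Parquet members listed above, is the input half of an S3 Select request. A hedged sketch of how these model objects might be populated; no request is sent, and the choice of gzip-compressed CSV in and newline-delimited JSON out is illustrative only.

#include <aws/s3/model/InputSerialization.h>
#include <aws/s3/model/OutputSerialization.h>
#include <aws/s3/model/CSVInput.h>
#include <aws/s3/model/JSONOutput.h>
#include <aws/s3/model/FileHeaderInfo.h>
#include <aws/s3/model/CompressionType.h>

// Input side: gzip-compressed CSV whose first row holds column names.
Aws::S3::Model::InputSerialization MakeCsvInput() {
    Aws::S3::Model::CSVInput csv;
    csv.SetFileHeaderInfo(Aws::S3::Model::FileHeaderInfo::USE);
    Aws::S3::Model::InputSerialization input;
    input.SetCSV(csv);
    input.SetCompressionType(Aws::S3::Model::CompressionType::GZIP);
    return input;
}

// Output side: one JSON record per line, matching the RecordDelimiter member above.
Aws::S3::Model::OutputSerialization MakeJsonOutput() {
    Aws::S3::Model::JSONOutput json;
    json.SetRecordDelimiter("\n");
    Aws::S3::Model::OutputSerialization output;
    output.SetJSON(json);
    return output;
}
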
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringAndOperator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringAndOperator.h
index f40740341b..e0931038fd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringAndOperator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringAndOperator.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringAndOperator">AWS
* API Reference</a></p>
*/
- class AWS_S3_API IntelligentTieringAndOperator
+ class IntelligentTieringAndOperator
{
public:
- IntelligentTieringAndOperator();
- IntelligentTieringAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
- IntelligentTieringAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IntelligentTieringAndOperator();
+ AWS_S3_API IntelligentTieringAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IntelligentTieringAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -141,10 +141,10 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::Vector<Tag> m_tags;
- bool m_tagsHasBeenSet;
+ bool m_tagsHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringConfiguration.h
index 5e89009326..044a68727b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringConfiguration.h
@@ -36,14 +36,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API IntelligentTieringConfiguration
+ class IntelligentTieringConfiguration
{
public:
- IntelligentTieringConfiguration();
- IntelligentTieringConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- IntelligentTieringConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IntelligentTieringConfiguration();
+ AWS_S3_API IntelligentTieringConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IntelligentTieringConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -206,16 +206,16 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
IntelligentTieringFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
IntelligentTieringStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
Aws::Vector<Tiering> m_tierings;
- bool m_tieringsHasBeenSet;
+ bool m_tieringsHasBeenSet = false;
};
} // namespace Model
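
Note (not part of the commit): the IntelligentTieringConfiguration members above (id, filter, status, tierings) are typically assembled and pushed with PutBucketIntelligentTieringConfiguration. A hedged sketch; the bucket name, configuration id, prefix, and the 90-day archive tier are placeholder choices.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h>
#include <aws/s3/model/IntelligentTieringConfiguration.h>
#include <aws/s3/model/IntelligentTieringFilter.h>
#include <aws/s3/model/Tiering.h>

// Assumes Aws::InitAPI() has already been called, as in the earlier sketch.
bool EnableArchiveTiering(const Aws::S3::S3Client& s3) {
    Aws::S3::Model::Tiering tiering;
    tiering.SetDays(90);
    tiering.SetAccessTier(Aws::S3::Model::IntelligentTieringAccessTier::ARCHIVE_ACCESS);

    Aws::S3::Model::IntelligentTieringFilter filter;
    filter.SetPrefix("archive/");  // placeholder prefix

    Aws::S3::Model::IntelligentTieringConfiguration config;
    config.SetId("archive-after-90-days");
    config.SetStatus(Aws::S3::Model::IntelligentTieringStatus::Enabled);
    config.SetFilter(filter);
    config.AddTierings(tiering);

    Aws::S3::Model::PutBucketIntelligentTieringConfigurationRequest req;
    req.SetBucket("example-bucket");  // placeholder
    req.SetId("archive-after-90-days");
    req.SetIntelligentTieringConfiguration(config);
    return s3.PutBucketIntelligentTieringConfiguration(req).IsSuccess();
}
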
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringFilter.h
index 9eb538ea13..a67a084744 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/IntelligentTieringFilter.h
@@ -30,14 +30,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/IntelligentTieringFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API IntelligentTieringFilter
+ class IntelligentTieringFilter
{
public:
- IntelligentTieringFilter();
- IntelligentTieringFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- IntelligentTieringFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IntelligentTieringFilter();
+ AWS_S3_API IntelligentTieringFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API IntelligentTieringFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -185,13 +185,13 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Tag m_tag;
- bool m_tagHasBeenSet;
+ bool m_tagHasBeenSet = false;
IntelligentTieringAndOperator m_and;
- bool m_andHasBeenSet;
+ bool m_andHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InvalidObjectState.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InvalidObjectState.h
index 2ce6a101d2..7f381ecaee 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InvalidObjectState.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InvalidObjectState.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InvalidObjectState">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InvalidObjectState
+ class InvalidObjectState
{
public:
- InvalidObjectState();
- InvalidObjectState(const Aws::Utils::Xml::XmlNode& xmlNode);
- InvalidObjectState& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InvalidObjectState();
+ AWS_S3_API InvalidObjectState(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InvalidObjectState& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -79,10 +79,10 @@ namespace Model
private:
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
IntelligentTieringAccessTier m_accessTier;
- bool m_accessTierHasBeenSet;
+ bool m_accessTierHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryConfiguration.h
index d48347ff65..fec504d7a7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryConfiguration.h
@@ -32,19 +32,19 @@ namespace Model
* <p>Specifies the inventory configuration for an Amazon S3 bucket. For more
* information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketGETInventoryConfig.html">GET
- * Bucket inventory</a> in the <i>Amazon Simple Storage Service API Reference</i>.
- * </p><p><h3>See Also:</h3> <a
+ * Bucket inventory</a> in the <i>Amazon S3 API Reference</i>. </p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InventoryConfiguration
+ class InventoryConfiguration
{
public:
- InventoryConfiguration();
- InventoryConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- InventoryConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryConfiguration();
+ AWS_S3_API InventoryConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -314,25 +314,25 @@ namespace Model
private:
InventoryDestination m_destination;
- bool m_destinationHasBeenSet;
+ bool m_destinationHasBeenSet = false;
bool m_isEnabled;
- bool m_isEnabledHasBeenSet;
+ bool m_isEnabledHasBeenSet = false;
InventoryFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
InventoryIncludedObjectVersions m_includedObjectVersions;
- bool m_includedObjectVersionsHasBeenSet;
+ bool m_includedObjectVersionsHasBeenSet = false;
Aws::Vector<InventoryOptionalField> m_optionalFields;
- bool m_optionalFieldsHasBeenSet;
+ bool m_optionalFieldsHasBeenSet = false;
InventorySchedule m_schedule;
- bool m_scheduleHasBeenSet;
+ bool m_scheduleHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryDestination.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryDestination.h
index 08f259cd74..8f56a7f92e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryDestination.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryDestination.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryDestination">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InventoryDestination
+ class InventoryDestination
{
public:
- InventoryDestination();
- InventoryDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
- InventoryDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryDestination();
+ AWS_S3_API InventoryDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -77,7 +77,7 @@ namespace Model
private:
InventoryS3BucketDestination m_s3BucketDestination;
- bool m_s3BucketDestinationHasBeenSet;
+ bool m_s3BucketDestinationHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryEncryption.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryEncryption.h
index 6c11c72634..aa0480d279 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryEncryption.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryEncryption.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryEncryption">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InventoryEncryption
+ class InventoryEncryption
{
public:
- InventoryEncryption();
- InventoryEncryption(const Aws::Utils::Xml::XmlNode& xmlNode);
- InventoryEncryption& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryEncryption();
+ AWS_S3_API InventoryEncryption(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryEncryption& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -103,10 +103,10 @@ namespace Model
private:
SSES3 m_sSES3;
- bool m_sSES3HasBeenSet;
+ bool m_sSES3HasBeenSet = false;
SSEKMS m_sSEKMS;
- bool m_sSEKMSHasBeenSet;
+ bool m_sSEKMSHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryFilter.h
index 919ccf6dd7..ba0662aded 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryFilter.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InventoryFilter
+ class InventoryFilter
{
public:
- InventoryFilter();
- InventoryFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- InventoryFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryFilter();
+ AWS_S3_API InventoryFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -89,7 +89,7 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryOptionalField.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryOptionalField.h
index 9959aa0bfb..c96e4bd859 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryOptionalField.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryOptionalField.h
@@ -26,7 +26,9 @@ namespace Model
ObjectLockRetainUntilDate,
ObjectLockMode,
ObjectLockLegalHoldStatus,
- IntelligentTieringAccessTier
+ IntelligentTieringAccessTier,
+ BucketKeyStatus,
+ ChecksumAlgorithm
};
namespace InventoryOptionalFieldMapper
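
Note (not part of the commit): the two optional fields added to the enum above, BucketKeyStatus and ChecksumAlgorithm, plug into the InventoryConfiguration shown a few hunks earlier. A rough sketch of one possible wiring through PutBucketInventoryConfiguration; the source bucket, destination bucket ARN, and report id are placeholders, and the weekly CSV settings are illustrative only.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketInventoryConfigurationRequest.h>
#include <aws/s3/model/InventoryConfiguration.h>
#include <aws/s3/model/InventoryDestination.h>
#include <aws/s3/model/InventoryS3BucketDestination.h>
#include <aws/s3/model/InventorySchedule.h>

// Assumes Aws::InitAPI() has already been called, as in the earlier sketch.
bool PutWeeklyInventory(const Aws::S3::S3Client& s3) {
    Aws::S3::Model::InventoryS3BucketDestination s3Dest;
    s3Dest.SetBucket("arn:aws:s3:::example-inventory-bucket");  // destination is a bucket ARN (placeholder)
    s3Dest.SetFormat(Aws::S3::Model::InventoryFormat::CSV);

    Aws::S3::Model::InventoryDestination dest;
    dest.SetS3BucketDestination(s3Dest);

    Aws::S3::Model::InventorySchedule schedule;
    schedule.SetFrequency(Aws::S3::Model::InventoryFrequency::Weekly);

    Aws::S3::Model::InventoryConfiguration config;
    config.SetId("weekly-report");  // placeholder id
    config.SetIsEnabled(true);
    config.SetIncludedObjectVersions(Aws::S3::Model::InventoryIncludedObjectVersions::Current);
    config.SetSchedule(schedule);
    config.SetDestination(dest);
    // The two optional fields introduced by this update:
    config.AddOptionalFields(Aws::S3::Model::InventoryOptionalField::BucketKeyStatus);
    config.AddOptionalFields(Aws::S3::Model::InventoryOptionalField::ChecksumAlgorithm);

    Aws::S3::Model::PutBucketInventoryConfigurationRequest req;
    req.SetBucket("example-source-bucket");  // placeholder
    req.SetId("weekly-report");
    req.SetInventoryConfiguration(config);
    return s3.PutBucketInventoryConfiguration(req).IsSuccess();
}
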
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryS3BucketDestination.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryS3BucketDestination.h
index d2f4f0027e..b907df63c7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryS3BucketDestination.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventoryS3BucketDestination.h
@@ -30,14 +30,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventoryS3BucketDestination">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InventoryS3BucketDestination
+ class InventoryS3BucketDestination
{
public:
- InventoryS3BucketDestination();
- InventoryS3BucketDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
- InventoryS3BucketDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryS3BucketDestination();
+ AWS_S3_API InventoryS3BucketDestination(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventoryS3BucketDestination& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -265,19 +265,19 @@ namespace Model
private:
Aws::String m_accountId;
- bool m_accountIdHasBeenSet;
+ bool m_accountIdHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
InventoryFormat m_format;
- bool m_formatHasBeenSet;
+ bool m_formatHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
InventoryEncryption m_encryption;
- bool m_encryptionHasBeenSet;
+ bool m_encryptionHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventorySchedule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventorySchedule.h
index 7a6933d2de..167b8e9f8a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventorySchedule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/InventorySchedule.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/InventorySchedule">AWS
* API Reference</a></p>
*/
- class AWS_S3_API InventorySchedule
+ class InventorySchedule
{
public:
- InventorySchedule();
- InventorySchedule(const Aws::Utils::Xml::XmlNode& xmlNode);
- InventorySchedule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventorySchedule();
+ AWS_S3_API InventorySchedule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API InventorySchedule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -71,7 +71,7 @@ namespace Model
private:
InventoryFrequency m_frequency;
- bool m_frequencyHasBeenSet;
+ bool m_frequencyHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONInput.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONInput.h
index be157654bd..99da519a63 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONInput.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONInput.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONInput">AWS API
* Reference</a></p>
*/
- class AWS_S3_API JSONInput
+ class JSONInput
{
public:
- JSONInput();
- JSONInput(const Aws::Utils::Xml::XmlNode& xmlNode);
- JSONInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API JSONInput();
+ AWS_S3_API JSONInput(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API JSONInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -71,7 +71,7 @@ namespace Model
private:
JSONType m_type;
- bool m_typeHasBeenSet;
+ bool m_typeHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONOutput.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONOutput.h
index 8a4865a7c0..6c895bb785 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONOutput.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/JSONOutput.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/JSONOutput">AWS API
* Reference</a></p>
*/
- class AWS_S3_API JSONOutput
+ class JSONOutput
{
public:
- JSONOutput();
- JSONOutput(const Aws::Utils::Xml::XmlNode& xmlNode);
- JSONOutput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API JSONOutput();
+ AWS_S3_API JSONOutput(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API JSONOutput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -89,7 +89,7 @@ namespace Model
private:
Aws::String m_recordDelimiter;
- bool m_recordDelimiterHasBeenSet;
+ bool m_recordDelimiterHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LambdaFunctionConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LambdaFunctionConfiguration.h
index 3e0041301e..160d9d6d90 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LambdaFunctionConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LambdaFunctionConfiguration.h
@@ -26,19 +26,19 @@ namespace Model
{
/**
- * <p>A container for specifying the configuration for AWS Lambda
+ * <p>A container for specifying the configuration for Lambda
* notifications.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LambdaFunctionConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LambdaFunctionConfiguration
+ class LambdaFunctionConfiguration
{
public:
- LambdaFunctionConfiguration();
- LambdaFunctionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- LambdaFunctionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LambdaFunctionConfiguration();
+ AWS_S3_API LambdaFunctionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LambdaFunctionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -67,113 +67,113 @@ namespace Model
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline const Aws::String& GetLambdaFunctionArn() const{ return m_lambdaFunctionArn; }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline bool LambdaFunctionArnHasBeenSet() const { return m_lambdaFunctionArnHasBeenSet; }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline void SetLambdaFunctionArn(const Aws::String& value) { m_lambdaFunctionArnHasBeenSet = true; m_lambdaFunctionArn = value; }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline void SetLambdaFunctionArn(Aws::String&& value) { m_lambdaFunctionArnHasBeenSet = true; m_lambdaFunctionArn = std::move(value); }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline void SetLambdaFunctionArn(const char* value) { m_lambdaFunctionArnHasBeenSet = true; m_lambdaFunctionArn.assign(value); }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline LambdaFunctionConfiguration& WithLambdaFunctionArn(const Aws::String& value) { SetLambdaFunctionArn(value); return *this;}
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline LambdaFunctionConfiguration& WithLambdaFunctionArn(Aws::String&& value) { SetLambdaFunctionArn(std::move(value)); return *this;}
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Lambda function that Amazon S3
- * invokes when the specified event type occurs.</p>
+ * <p>The Amazon Resource Name (ARN) of the Lambda function that Amazon S3 invokes
+ * when the specified event type occurs.</p>
*/
inline LambdaFunctionConfiguration& WithLambdaFunctionArn(const char* value) { SetLambdaFunctionArn(value); return *this;}
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::Vector<Event>& GetEvents() const{ return m_events; }
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool EventsHasBeenSet() const { return m_eventsHasBeenSet; }
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetEvents(const Aws::Vector<Event>& value) { m_eventsHasBeenSet = true; m_events = value; }
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetEvents(Aws::Vector<Event>&& value) { m_eventsHasBeenSet = true; m_events = std::move(value); }
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LambdaFunctionConfiguration& WithEvents(const Aws::Vector<Event>& value) { SetEvents(value); return *this;}
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LambdaFunctionConfiguration& WithEvents(Aws::Vector<Event>&& value) { SetEvents(std::move(value)); return *this;}
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LambdaFunctionConfiguration& AddEvents(const Event& value) { m_eventsHasBeenSet = true; m_events.push_back(value); return *this; }
/**
- * <p>The Amazon S3 bucket event for which to invoke the AWS Lambda function. For
- * more information, see <a
+ * <p>The Amazon S3 bucket event for which to invoke the Lambda function. For more
+ * information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/NotificationHowTo.html">Supported
* Event Types</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -201,16 +201,16 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_lambdaFunctionArn;
- bool m_lambdaFunctionArnHasBeenSet;
+ bool m_lambdaFunctionArnHasBeenSet = false;
Aws::Vector<Event> m_events;
- bool m_eventsHasBeenSet;
+ bool m_eventsHasBeenSet = false;
NotificationConfigurationFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
};
} // namespace Model
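
Note (not part of the commit): the LambdaFunctionConfiguration documented above (a Lambda function ARN plus the bucket events that invoke it) is attached to a bucket through its notification configuration. A hedged sketch; the bucket name and Lambda ARN are placeholders, and the s3_ObjectCreated_Put event value is assumed to keep its usual SDK spelling.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketNotificationConfigurationRequest.h>
#include <aws/s3/model/NotificationConfiguration.h>
#include <aws/s3/model/LambdaFunctionConfiguration.h>

// Assumes Aws::InitAPI() has already been called, as in the earlier sketch.
bool NotifyLambdaOnPut(const Aws::S3::S3Client& s3) {
    Aws::S3::Model::LambdaFunctionConfiguration lambdaCfg;
    lambdaCfg.SetLambdaFunctionArn(
        "arn:aws:lambda:us-east-1:111122223333:function:example");  // placeholder ARN
    lambdaCfg.AddEvents(Aws::S3::Model::Event::s3_ObjectCreated_Put);
    // A NotificationConfigurationFilter could be set here to scope by key prefix/suffix.

    Aws::S3::Model::NotificationConfiguration notification;
    notification.AddLambdaFunctionConfigurations(lambdaCfg);

    Aws::S3::Model::PutBucketNotificationConfigurationRequest req;
    req.SetBucket("example-bucket");  // placeholder
    req.SetNotificationConfiguration(notification);
    return s3.PutBucketNotificationConfiguration(req).IsSuccess();
}
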
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleConfiguration.h
index 3da3b89dd8..f6bf105d8c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleConfiguration.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LifecycleConfiguration
+ class LifecycleConfiguration
{
public:
- LifecycleConfiguration();
- LifecycleConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- LifecycleConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleConfiguration();
+ AWS_S3_API LifecycleConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -82,7 +82,7 @@ namespace Model
private:
Aws::Vector<Rule> m_rules;
- bool m_rulesHasBeenSet;
+ bool m_rulesHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleExpiration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleExpiration.h
index cb9628df58..d8e004401d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleExpiration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleExpiration.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleExpiration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LifecycleExpiration
+ class LifecycleExpiration
{
public:
- LifecycleExpiration();
- LifecycleExpiration(const Aws::Utils::Xml::XmlNode& xmlNode);
- LifecycleExpiration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleExpiration();
+ AWS_S3_API LifecycleExpiration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleExpiration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -135,13 +135,13 @@ namespace Model
private:
Aws::Utils::DateTime m_date;
- bool m_dateHasBeenSet;
+ bool m_dateHasBeenSet = false;
int m_days;
- bool m_daysHasBeenSet;
+ bool m_daysHasBeenSet = false;
bool m_expiredObjectDeleteMarker;
- bool m_expiredObjectDeleteMarkerHasBeenSet;
+ bool m_expiredObjectDeleteMarkerHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRule.h
index b26475ff83..0f5bcb70f5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRule.h
@@ -36,14 +36,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRule">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LifecycleRule
+ class LifecycleRule
{
public:
- LifecycleRule();
- LifecycleRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- LifecycleRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleRule();
+ AWS_S3_API LifecycleRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -136,7 +136,7 @@ namespace Model
* <p>The <code>Filter</code> is used to identify objects that a Lifecycle Rule
* applies to. A <code>Filter</code> must have exactly one of <code>Prefix</code>,
* <code>Tag</code>, or <code>And</code> specified. <code>Filter</code> is required
- * if the <code>LifecycleRule</code> does not containt a <code>Prefix</code>
+ * if the <code>LifecycleRule</code> does not contain a <code>Prefix</code>
* element.</p>
*/
inline const LifecycleRuleFilter& GetFilter() const{ return m_filter; }
@@ -145,7 +145,7 @@ namespace Model
* <p>The <code>Filter</code> is used to identify objects that a Lifecycle Rule
* applies to. A <code>Filter</code> must have exactly one of <code>Prefix</code>,
* <code>Tag</code>, or <code>And</code> specified. <code>Filter</code> is required
- * if the <code>LifecycleRule</code> does not containt a <code>Prefix</code>
+ * if the <code>LifecycleRule</code> does not contain a <code>Prefix</code>
* element.</p>
*/
inline bool FilterHasBeenSet() const { return m_filterHasBeenSet; }
@@ -154,7 +154,7 @@ namespace Model
* <p>The <code>Filter</code> is used to identify objects that a Lifecycle Rule
* applies to. A <code>Filter</code> must have exactly one of <code>Prefix</code>,
* <code>Tag</code>, or <code>And</code> specified. <code>Filter</code> is required
- * if the <code>LifecycleRule</code> does not containt a <code>Prefix</code>
+ * if the <code>LifecycleRule</code> does not contain a <code>Prefix</code>
* element.</p>
*/
inline void SetFilter(const LifecycleRuleFilter& value) { m_filterHasBeenSet = true; m_filter = value; }
@@ -163,7 +163,7 @@ namespace Model
* <p>The <code>Filter</code> is used to identify objects that a Lifecycle Rule
* applies to. A <code>Filter</code> must have exactly one of <code>Prefix</code>,
* <code>Tag</code>, or <code>And</code> specified. <code>Filter</code> is required
- * if the <code>LifecycleRule</code> does not containt a <code>Prefix</code>
+ * if the <code>LifecycleRule</code> does not contain a <code>Prefix</code>
* element.</p>
*/
inline void SetFilter(LifecycleRuleFilter&& value) { m_filterHasBeenSet = true; m_filter = std::move(value); }
@@ -172,7 +172,7 @@ namespace Model
* <p>The <code>Filter</code> is used to identify objects that a Lifecycle Rule
* applies to. A <code>Filter</code> must have exactly one of <code>Prefix</code>,
* <code>Tag</code>, or <code>And</code> specified. <code>Filter</code> is required
- * if the <code>LifecycleRule</code> does not containt a <code>Prefix</code>
+ * if the <code>LifecycleRule</code> does not contain a <code>Prefix</code>
* element.</p>
*/
inline LifecycleRule& WithFilter(const LifecycleRuleFilter& value) { SetFilter(value); return *this;}
@@ -181,7 +181,7 @@ namespace Model
* <p>The <code>Filter</code> is used to identify objects that a Lifecycle Rule
* applies to. A <code>Filter</code> must have exactly one of <code>Prefix</code>,
* <code>Tag</code>, or <code>And</code> specified. <code>Filter</code> is required
- * if the <code>LifecycleRule</code> does not containt a <code>Prefix</code>
+ * if the <code>LifecycleRule</code> does not contain a <code>Prefix</code>
* element.</p>
*/
inline LifecycleRule& WithFilter(LifecycleRuleFilter&& value) { SetFilter(std::move(value)); return *this;}
@@ -386,28 +386,28 @@ namespace Model
private:
LifecycleExpiration m_expiration;
- bool m_expirationHasBeenSet;
+ bool m_expirationHasBeenSet = false;
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
LifecycleRuleFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
ExpirationStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
Aws::Vector<Transition> m_transitions;
- bool m_transitionsHasBeenSet;
+ bool m_transitionsHasBeenSet = false;
Aws::Vector<NoncurrentVersionTransition> m_noncurrentVersionTransitions;
- bool m_noncurrentVersionTransitionsHasBeenSet;
+ bool m_noncurrentVersionTransitionsHasBeenSet = false;
NoncurrentVersionExpiration m_noncurrentVersionExpiration;
- bool m_noncurrentVersionExpirationHasBeenSet;
+ bool m_noncurrentVersionExpirationHasBeenSet = false;
AbortIncompleteMultipartUpload m_abortIncompleteMultipartUpload;
- bool m_abortIncompleteMultipartUploadHasBeenSet;
+ bool m_abortIncompleteMultipartUploadHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleAndOperator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleAndOperator.h
index 36ccbdd5f3..06b31f5221 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleAndOperator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleAndOperator.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleAndOperator">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LifecycleRuleAndOperator
+ class LifecycleRuleAndOperator
{
public:
- LifecycleRuleAndOperator();
- LifecycleRuleAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
- LifecycleRuleAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleRuleAndOperator();
+ AWS_S3_API LifecycleRuleAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleRuleAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -130,13 +130,61 @@ namespace Model
*/
inline LifecycleRuleAndOperator& AddTags(Tag&& value) { m_tagsHasBeenSet = true; m_tags.push_back(std::move(value)); return *this; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline long long GetObjectSizeGreaterThan() const{ return m_objectSizeGreaterThan; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline bool ObjectSizeGreaterThanHasBeenSet() const { return m_objectSizeGreaterThanHasBeenSet; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline void SetObjectSizeGreaterThan(long long value) { m_objectSizeGreaterThanHasBeenSet = true; m_objectSizeGreaterThan = value; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline LifecycleRuleAndOperator& WithObjectSizeGreaterThan(long long value) { SetObjectSizeGreaterThan(value); return *this;}
+
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline long long GetObjectSizeLessThan() const{ return m_objectSizeLessThan; }
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline bool ObjectSizeLessThanHasBeenSet() const { return m_objectSizeLessThanHasBeenSet; }
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline void SetObjectSizeLessThan(long long value) { m_objectSizeLessThanHasBeenSet = true; m_objectSizeLessThan = value; }
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline LifecycleRuleAndOperator& WithObjectSizeLessThan(long long value) { SetObjectSizeLessThan(value); return *this;}
+
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::Vector<Tag> m_tags;
- bool m_tagsHasBeenSet;
+ bool m_tagsHasBeenSet = false;
+
+ long long m_objectSizeGreaterThan;
+ bool m_objectSizeGreaterThanHasBeenSet = false;
+
+ long long m_objectSizeLessThan;
+ bool m_objectSizeLessThanHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleFilter.h
index 74e57e178a..f657b6264e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LifecycleRuleFilter.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LifecycleRuleFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LifecycleRuleFilter
+ class LifecycleRuleFilter
{
public:
- LifecycleRuleFilter();
- LifecycleRuleFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- LifecycleRuleFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleRuleFilter();
+ AWS_S3_API LifecycleRuleFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LifecycleRuleFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -159,6 +159,48 @@ namespace Model
inline LifecycleRuleFilter& WithTag(Tag&& value) { SetTag(std::move(value)); return *this;}
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline long long GetObjectSizeGreaterThan() const{ return m_objectSizeGreaterThan; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline bool ObjectSizeGreaterThanHasBeenSet() const { return m_objectSizeGreaterThanHasBeenSet; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline void SetObjectSizeGreaterThan(long long value) { m_objectSizeGreaterThanHasBeenSet = true; m_objectSizeGreaterThan = value; }
+
+ /**
+ * <p>Minimum object size to which the rule applies.</p>
+ */
+ inline LifecycleRuleFilter& WithObjectSizeGreaterThan(long long value) { SetObjectSizeGreaterThan(value); return *this;}
+
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline long long GetObjectSizeLessThan() const{ return m_objectSizeLessThan; }
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline bool ObjectSizeLessThanHasBeenSet() const { return m_objectSizeLessThanHasBeenSet; }
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline void SetObjectSizeLessThan(long long value) { m_objectSizeLessThanHasBeenSet = true; m_objectSizeLessThan = value; }
+
+ /**
+ * <p>Maximum object size to which the rule applies.</p>
+ */
+ inline LifecycleRuleFilter& WithObjectSizeLessThan(long long value) { SetObjectSizeLessThan(value); return *this;}
+
+
inline const LifecycleRuleAndOperator& GetAnd() const{ return m_and; }
@@ -180,13 +222,19 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Tag m_tag;
- bool m_tagHasBeenSet;
+ bool m_tagHasBeenSet = false;
+
+ long long m_objectSizeGreaterThan;
+ bool m_objectSizeGreaterThanHasBeenSet = false;
+
+ long long m_objectSizeLessThan;
+ bool m_objectSizeLessThanHasBeenSet = false;
LifecycleRuleAndOperator m_and;
- bool m_andHasBeenSet;
+ bool m_andHasBeenSet = false;
};
} // namespace Model
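
Note (not part of the commit): the ObjectSizeGreaterThan/ObjectSizeLessThan accessors added to LifecycleRuleAndOperator and LifecycleRuleFilter above enable size-scoped lifecycle rules. A sketch of one way they might be combined; following the Filter documentation ("exactly one of Prefix, Tag, or And"), the prefix and size bounds are grouped under And. The bucket name, prefix, size limits, and 30-day expiry are placeholders.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketLifecycleConfigurationRequest.h>
#include <aws/s3/model/BucketLifecycleConfiguration.h>
#include <aws/s3/model/LifecycleRule.h>

// Assumes Aws::InitAPI() has already been called, as in the earlier sketch.
bool ExpireMidSizedLogs(const Aws::S3::S3Client& s3) {
    Aws::S3::Model::LifecycleRuleAndOperator andOp;
    andOp.SetPrefix("logs/");                         // placeholder prefix
    andOp.SetObjectSizeGreaterThan(128 * 1024);       // new in this update: lower size bound, in bytes
    andOp.SetObjectSizeLessThan(64LL * 1024 * 1024);  // new in this update: upper size bound, in bytes

    Aws::S3::Model::LifecycleRuleFilter filter;
    filter.SetAnd(andOp);  // Filter carries exactly one of Prefix, Tag, or And

    Aws::S3::Model::LifecycleExpiration expiration;
    expiration.SetDays(30);

    Aws::S3::Model::LifecycleRule rule;
    rule.SetID("expire-mid-sized-logs");
    rule.SetStatus(Aws::S3::Model::ExpirationStatus::Enabled);
    rule.SetFilter(filter);
    rule.SetExpiration(expiration);

    Aws::S3::Model::BucketLifecycleConfiguration config;
    config.AddRules(rule);

    Aws::S3::Model::PutBucketLifecycleConfigurationRequest req;
    req.SetBucket("example-bucket");  // placeholder
    req.SetLifecycleConfiguration(config);
    return s3.PutBucketLifecycleConfiguration(req).IsSuccess();
}
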
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h
index 8985a2ad10..d8f7b869a7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListBucketAnalyticsConfigurationsRequest : public S3Request
+ class ListBucketAnalyticsConfigurationsRequest : public S3Request
{
public:
- ListBucketAnalyticsConfigurationsRequest();
+ AWS_S3_API ListBucketAnalyticsConfigurationsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListBucketAnalyticsConfigurations"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket from which analytics configurations are retrieved.</p>
@@ -133,57 +137,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketAnalyticsConfigurationsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketAnalyticsConfigurationsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketAnalyticsConfigurationsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -230,16 +234,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_continuationToken;
- bool m_continuationTokenHasBeenSet;
+ bool m_continuationTokenHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
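
Note (not part of the commit): the expected-bucket-owner wording above is tightened (the failure is now described as HTTP status code 403 Forbidden), and the continuation-token fields drive pagination. A sketch of a paging loop under those semantics; the bucket name and account ID are placeholders.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListBucketAnalyticsConfigurationsRequest.h>
#include <iostream>

// Assumes Aws::InitAPI() has already been called, as in the earlier sketch.
void ListAllAnalyticsConfigurations(const Aws::S3::S3Client& s3) {
    Aws::S3::Model::ListBucketAnalyticsConfigurationsRequest req;
    req.SetBucket("example-bucket");             // placeholder
    req.SetExpectedBucketOwner("111122223333");  // placeholder account ID; a mismatch yields 403 Forbidden
    Aws::String token;
    do {
        if (!token.empty()) req.SetContinuationToken(token);
        auto outcome = s3.ListBucketAnalyticsConfigurations(req);
        if (!outcome.IsSuccess()) break;
        const auto& result = outcome.GetResult();
        for (const auto& cfg : result.GetAnalyticsConfigurationList()) {
            std::cout << cfg.GetId() << "\n";
        }
        // Keep paging while the listing is truncated.
        token = result.GetIsTruncated() ? result.GetNextContinuationToken() : Aws::String();
    } while (!token.empty());
}
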
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsResult.h
index 9fc4f7cb24..a6aa78423e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketAnalyticsConfigurationsResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListBucketAnalyticsConfigurationsResult
+ class ListBucketAnalyticsConfigurationsResult
{
public:
- ListBucketAnalyticsConfigurationsResult();
- ListBucketAnalyticsConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListBucketAnalyticsConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketAnalyticsConfigurationsResult();
+ AWS_S3_API ListBucketAnalyticsConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketAnalyticsConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsRequest.h
index 5a20d97c15..d46195a985 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListBucketIntelligentTieringConfigurationsRequest : public S3Request
+ class ListBucketIntelligentTieringConfigurationsRequest : public S3Request
{
public:
- ListBucketIntelligentTieringConfigurationsRequest();
+ AWS_S3_API ListBucketIntelligentTieringConfigurationsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,10 +34,14 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListBucketIntelligentTieringConfigurations"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose configuration you want to modify or
@@ -89,50 +93,50 @@ namespace Model
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline const Aws::String& GetContinuationToken() const{ return m_continuationToken; }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline bool ContinuationTokenHasBeenSet() const { return m_continuationTokenHasBeenSet; }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline void SetContinuationToken(const Aws::String& value) { m_continuationTokenHasBeenSet = true; m_continuationToken = value; }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline void SetContinuationToken(Aws::String&& value) { m_continuationTokenHasBeenSet = true; m_continuationToken = std::move(value); }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline void SetContinuationToken(const char* value) { m_continuationTokenHasBeenSet = true; m_continuationToken.assign(value); }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline ListBucketIntelligentTieringConfigurationsRequest& WithContinuationToken(const Aws::String& value) { SetContinuationToken(value); return *this;}
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline ListBucketIntelligentTieringConfigurationsRequest& WithContinuationToken(Aws::String&& value) { SetContinuationToken(std::move(value)); return *this;}
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline ListBucketIntelligentTieringConfigurationsRequest& WithContinuationToken(const char* value) { SetContinuationToken(value); return *this;}
@@ -179,13 +183,13 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_continuationToken;
- bool m_continuationTokenHasBeenSet;
+ bool m_continuationTokenHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
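The third recurring addition is the GetEndpointContextParams() override on each Request class, which hands bucket-level and static, operation-specific values to the endpoint resolver. A simplified sketch with stand-in types; the SDK's real EndpointParameters and S3Request types are not shown in this diff:

#include <string>
#include <utility>
#include <vector>

// Stand-in for the SDK's EndpointParameters; the real type is richer.
using EndpointParameters = std::vector<std::pair<std::string, std::string>>;

struct RequestBase {
    virtual ~RequestBase() = default;
    virtual EndpointParameters GetEndpointContextParams() const { return {}; }
};

struct ListConfigurationsRequest : RequestBase {
    std::string m_bucket;

    // Surfaces the configurable parameters (here, the bucket name) that the
    // endpoint provider needs in order to compute the request URL.
    EndpointParameters GetEndpointContextParams() const override {
        return { { "Bucket", m_bucket } };
    }
};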
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h
index 0203f9ac43..5e6696c8de 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketIntelligentTieringConfigurationsResult.h
@@ -26,75 +26,78 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListBucketIntelligentTieringConfigurationsResult
+ class ListBucketIntelligentTieringConfigurationsResult
{
public:
- ListBucketIntelligentTieringConfigurationsResult();
- ListBucketIntelligentTieringConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListBucketIntelligentTieringConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketIntelligentTieringConfigurationsResult();
+ AWS_S3_API ListBucketIntelligentTieringConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketIntelligentTieringConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
* <p>Indicates whether the returned list of analytics configurations is complete.
- * A value of true indicates that the list is not complete and the
- * NextContinuationToken will be provided for a subsequent request.</p>
+ * A value of <code>true</code> indicates that the list is not complete and the
+ * <code>NextContinuationToken</code> will be provided for a subsequent
+ * request.</p>
*/
inline bool GetIsTruncated() const{ return m_isTruncated; }
/**
* <p>Indicates whether the returned list of analytics configurations is complete.
- * A value of true indicates that the list is not complete and the
- * NextContinuationToken will be provided for a subsequent request.</p>
+ * A value of <code>true</code> indicates that the list is not complete and the
+ * <code>NextContinuationToken</code> will be provided for a subsequent
+ * request.</p>
*/
inline void SetIsTruncated(bool value) { m_isTruncated = value; }
/**
* <p>Indicates whether the returned list of analytics configurations is complete.
- * A value of true indicates that the list is not complete and the
- * NextContinuationToken will be provided for a subsequent request.</p>
+ * A value of <code>true</code> indicates that the list is not complete and the
+ * <code>NextContinuationToken</code> will be provided for a subsequent
+ * request.</p>
*/
inline ListBucketIntelligentTieringConfigurationsResult& WithIsTruncated(bool value) { SetIsTruncated(value); return *this;}
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline const Aws::String& GetContinuationToken() const{ return m_continuationToken; }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline void SetContinuationToken(const Aws::String& value) { m_continuationToken = value; }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline void SetContinuationToken(Aws::String&& value) { m_continuationToken = std::move(value); }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline void SetContinuationToken(const char* value) { m_continuationToken.assign(value); }
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline ListBucketIntelligentTieringConfigurationsResult& WithContinuationToken(const Aws::String& value) { SetContinuationToken(value); return *this;}
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline ListBucketIntelligentTieringConfigurationsResult& WithContinuationToken(Aws::String&& value) { SetContinuationToken(std::move(value)); return *this;}
/**
- * <p>The ContinuationToken that represents a placeholder from where this request
- * should begin.</p>
+ * <p>The <code>ContinuationToken</code> that represents a placeholder from where
+ * this request should begin.</p>
*/
inline ListBucketIntelligentTieringConfigurationsResult& WithContinuationToken(const char* value) { SetContinuationToken(value); return *this;}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsRequest.h
index b6c8743dea..a5a5f48a55 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListBucketInventoryConfigurationsRequest : public S3Request
+ class ListBucketInventoryConfigurationsRequest : public S3Request
{
public:
- ListBucketInventoryConfigurationsRequest();
+ AWS_S3_API ListBucketInventoryConfigurationsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListBucketInventoryConfigurations"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the inventory configurations to
@@ -157,57 +161,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketInventoryConfigurationsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketInventoryConfigurationsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketInventoryConfigurationsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -254,16 +258,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_continuationToken;
- bool m_continuationTokenHasBeenSet;
+ bool m_continuationTokenHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
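The documentation rewrite above ("403 Forbidden (access denied)" instead of "403 (Access Denied)") only clarifies the failure mode; the ExpectedBucketOwner behaviour itself is unchanged. A usage sketch, assuming the SDK's usual client/outcome API, with hypothetical bucket and account values:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListBucketInventoryConfigurationsRequest.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;
        Aws::S3::Model::ListBucketInventoryConfigurationsRequest request;
        request.SetBucket("example-bucket");             // hypothetical bucket name
        request.SetExpectedBucketOwner("111122223333");  // hypothetical account ID
        auto outcome = client.ListBucketInventoryConfigurations(request);
        if (!outcome.IsSuccess()) {
            // A mismatched owner surfaces here as HTTP 403 Forbidden (access denied).
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}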
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsResult.h
index e28a0661e3..2864c41b25 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketInventoryConfigurationsResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListBucketInventoryConfigurationsResult
+ class ListBucketInventoryConfigurationsResult
{
public:
- ListBucketInventoryConfigurationsResult();
- ListBucketInventoryConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListBucketInventoryConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketInventoryConfigurationsResult();
+ AWS_S3_API ListBucketInventoryConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketInventoryConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsRequest.h
index 03ac696974..83790116fe 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsRequest.h
@@ -23,10 +23,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListBucketMetricsConfigurationsRequest : public S3Request
+ class ListBucketMetricsConfigurationsRequest : public S3Request
{
public:
- ListBucketMetricsConfigurationsRequest();
+ AWS_S3_API ListBucketMetricsConfigurationsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -34,12 +34,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListBucketMetricsConfigurations"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the metrics configurations to retrieve.</p>
@@ -149,57 +153,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketMetricsConfigurationsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketMetricsConfigurationsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListBucketMetricsConfigurationsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -246,16 +250,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_continuationToken;
- bool m_continuationTokenHasBeenSet;
+ bool m_continuationTokenHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsResult.h
index 4c070322b2..2580419c51 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketMetricsConfigurationsResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListBucketMetricsConfigurationsResult
+ class ListBucketMetricsConfigurationsResult
{
public:
- ListBucketMetricsConfigurationsResult();
- ListBucketMetricsConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListBucketMetricsConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketMetricsConfigurationsResult();
+ AWS_S3_API ListBucketMetricsConfigurationsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketMetricsConfigurationsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketsResult.h
index a580ac0aad..5596bd0330 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListBucketsResult.h
@@ -26,46 +26,46 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListBucketsResult
+ class ListBucketsResult
{
public:
- ListBucketsResult();
- ListBucketsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListBucketsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketsResult();
+ AWS_S3_API ListBucketsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListBucketsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline const Aws::Vector<Bucket>& GetBuckets() const{ return m_buckets; }
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline void SetBuckets(const Aws::Vector<Bucket>& value) { m_buckets = value; }
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline void SetBuckets(Aws::Vector<Bucket>&& value) { m_buckets = std::move(value); }
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline ListBucketsResult& WithBuckets(const Aws::Vector<Bucket>& value) { SetBuckets(value); return *this;}
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline ListBucketsResult& WithBuckets(Aws::Vector<Bucket>&& value) { SetBuckets(std::move(value)); return *this;}
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline ListBucketsResult& AddBuckets(const Bucket& value) { m_buckets.push_back(value); return *this; }
/**
- * <p>The list of buckets owned by the requestor.</p>
+ * <p>The list of buckets owned by the requester.</p>
*/
inline ListBucketsResult& AddBuckets(Bucket&& value) { m_buckets.push_back(std::move(value)); return *this; }
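The ListBucketsResult change above is wording only ("requester" for "requestor") plus the same export-macro move; the accessors are untouched. A short usage sketch of the result type, again assuming the usual client/outcome API:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListBucketsResult.h>
#include <iostream>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;
        auto outcome = client.ListBuckets();
        if (outcome.IsSuccess()) {
            // GetBuckets() returns the buckets owned by the requester.
            for (const auto& bucket : outcome.GetResult().GetBuckets()) {
                std::cout << bucket.GetName() << std::endl;
            }
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}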
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsRequest.h
index 97e0e566ad..589441329d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListMultipartUploadsRequest : public S3Request
+ class ListMultipartUploadsRequest : public S3Request
{
public:
- ListMultipartUploadsRequest();
+ AWS_S3_API ListMultipartUploadsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -35,31 +35,35 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListMultipartUploads"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket to which the multipart upload was initiated. </p>
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -68,19 +72,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -89,19 +93,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -110,19 +114,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -131,19 +135,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -152,19 +156,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListMultipartUploadsRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -173,19 +177,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListMultipartUploadsRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -194,19 +198,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListMultipartUploadsRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -585,57 +589,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListMultipartUploadsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListMultipartUploadsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListMultipartUploadsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -682,31 +686,31 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_delimiter;
- bool m_delimiterHasBeenSet;
+ bool m_delimiterHasBeenSet = false;
EncodingType m_encodingType;
- bool m_encodingTypeHasBeenSet;
+ bool m_encodingTypeHasBeenSet = false;
Aws::String m_keyMarker;
- bool m_keyMarkerHasBeenSet;
+ bool m_keyMarkerHasBeenSet = false;
int m_maxUploads;
- bool m_maxUploadsHasBeenSet;
+ bool m_maxUploadsHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::String m_uploadIdMarker;
- bool m_uploadIdMarkerHasBeenSet;
+ bool m_uploadIdMarkerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
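The long documentation blocks rewritten above spell out that, when going through an access point or S3 on Outposts, the ARN is supplied where the bucket name normally would be. A small sketch with a hypothetical access point ARN, assuming the same client API as in the earlier example:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListMultipartUploadsRequest.h>

// Lists in-progress multipart uploads through an access point. The ARN below
// is a made-up example; real code substitutes its own account ID and name.
auto ListViaAccessPoint(const Aws::S3::S3Client& client) {
    Aws::S3::Model::ListMultipartUploadsRequest request;
    request.SetBucket("arn:aws:s3:us-east-1:111122223333:accesspoint/example-ap");
    return client.ListMultipartUploads(request);
}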
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsResult.h
index 70cf65ba6d..1d1d6cd057 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListMultipartUploadsResult.h
@@ -28,46 +28,53 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListMultipartUploadsResult
+ class ListMultipartUploadsResult
{
public:
- ListMultipartUploadsResult();
- ListMultipartUploadsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListMultipartUploadsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListMultipartUploadsResult();
+ AWS_S3_API ListMultipartUploadsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListMultipartUploadsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucket = value; }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucket = std::move(value); }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline void SetBucket(const char* value) { m_bucket.assign(value); }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline ListMultipartUploadsResult& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline ListMultipartUploadsResult& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline ListMultipartUploadsResult& WithBucket(const char* value) { SetBucket(value); return *this;}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsRequest.h
index 9bad797dca..4a61c58927 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListObjectVersionsRequest : public S3Request
+ class ListObjectVersionsRequest : public S3Request
{
public:
- ListObjectVersionsRequest();
+ AWS_S3_API ListObjectVersionsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should has unique request name, so that we can get operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListObjectVersions"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name that contains the objects. </p>
@@ -373,57 +377,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectVersionsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectVersionsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectVersionsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -470,31 +474,31 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_delimiter;
- bool m_delimiterHasBeenSet;
+ bool m_delimiterHasBeenSet = false;
EncodingType m_encodingType;
- bool m_encodingTypeHasBeenSet;
+ bool m_encodingTypeHasBeenSet = false;
Aws::String m_keyMarker;
- bool m_keyMarkerHasBeenSet;
+ bool m_keyMarkerHasBeenSet = false;
int m_maxKeys;
- bool m_maxKeysHasBeenSet;
+ bool m_maxKeysHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::String m_versionIdMarker;
- bool m_versionIdMarkerHasBeenSet;
+ bool m_versionIdMarkerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
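The ExpectedBucketOwner hunks above change the documented failure mode from "403 (Access Denied)" to the HTTP status code 403 Forbidden. As a minimal sketch (not part of this patch) of how a caller might exercise that field against the updated headers; the bucket name and account ID below are made-up placeholders:

    #include <aws/core/Aws.h>
    #include <aws/core/http/HttpResponse.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListObjectVersionsRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListObjectVersionsRequest request;
            request.WithBucket("example-bucket")             // placeholder bucket name
                   .WithExpectedBucketOwner("111122223333"); // placeholder account ID

            auto outcome = client.ListObjectVersions(request);
            if (!outcome.IsSuccess()) {
                // Per the doc comment: a bucket owned by another account is rejected with 403 Forbidden.
                if (outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN) {
                    std::cerr << "Expected bucket owner check failed (403 Forbidden)" << std::endl;
                } else {
                    std::cerr << "ListObjectVersions failed: "
                              << outcome.GetError().GetMessage() << std::endl;
                }
            } else {
                for (const auto& version : outcome.GetResult().GetVersions()) {
                    std::cout << version.GetKey() << " " << version.GetVersionId() << std::endl;
                }
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }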
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsResult.h
index 20909af4ad..0d594a929b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectVersionsResult.h
@@ -29,12 +29,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListObjectVersionsResult
+ class ListObjectVersionsResult
{
public:
- ListObjectVersionsResult();
- ListObjectVersionsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListObjectVersionsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListObjectVersionsResult();
+ AWS_S3_API ListObjectVersionsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListObjectVersionsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsRequest.h
index 0c3c88b0a6..374dfc8f2f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsRequest.h
@@ -25,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListObjectsRequest : public S3Request
+ class ListObjectsRequest : public S3Request
{
public:
- ListObjectsRequest();
+ AWS_S3_API ListObjectsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,31 +36,35 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "ListObjects"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket containing the objects.</p> <p>When using this action
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -69,19 +73,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -90,19 +94,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -111,19 +115,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -132,19 +136,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -153,19 +157,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -174,19 +178,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -195,19 +199,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -273,42 +277,50 @@ namespace Model
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline const Aws::String& GetMarker() const{ return m_marker; }
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline bool MarkerHasBeenSet() const { return m_markerHasBeenSet; }
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline void SetMarker(const Aws::String& value) { m_markerHasBeenSet = true; m_marker = value; }
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline void SetMarker(Aws::String&& value) { m_markerHasBeenSet = true; m_marker = std::move(value); }
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline void SetMarker(const char* value) { m_markerHasBeenSet = true; m_marker.assign(value); }
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline ListObjectsRequest& WithMarker(const Aws::String& value) { SetMarker(value); return *this;}
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline ListObjectsRequest& WithMarker(Aws::String&& value) { SetMarker(std::move(value)); return *this;}
/**
- * <p>Specifies the key to start with when listing objects in a bucket.</p>
+ * <p>Marker is where you want Amazon S3 to start listing from. Amazon S3 starts
+ * listing after this specified key. Marker can be any key in the bucket.</p>
*/
inline ListObjectsRequest& WithMarker(const char* value) { SetMarker(value); return *this;}
@@ -428,57 +440,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -525,31 +537,31 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_delimiter;
- bool m_delimiterHasBeenSet;
+ bool m_delimiterHasBeenSet = false;
EncodingType m_encodingType;
- bool m_encodingTypeHasBeenSet;
+ bool m_encodingTypeHasBeenSet = false;
Aws::String m_marker;
- bool m_markerHasBeenSet;
+ bool m_markerHasBeenSet = false;
int m_maxKeys;
- bool m_maxKeysHasBeenSet;
+ bool m_maxKeysHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
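The rewritten Marker documentation above makes the pagination contract explicit: listing resumes after the supplied key, and the key can be any key in the bucket. A short pagination sketch, separate from this patch and assuming a placeholder bucket name; it reuses the last key of each truncated page as the next marker, since NextMarker is only populated when a Delimiter is set:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListObjectsRequest.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListObjectsRequest request;
            request.WithBucket("example-bucket")  // placeholder bucket name
                   .WithMaxKeys(1000);

            bool more = true;
            while (more) {
                auto outcome = client.ListObjects(request);
                if (!outcome.IsSuccess()) {
                    std::cerr << "ListObjects failed: "
                              << outcome.GetError().GetMessage() << std::endl;
                    break;
                }
                const auto& result = outcome.GetResult();
                for (const auto& object : result.GetContents()) {
                    std::cout << object.GetKey() << std::endl;
                }
                if (result.GetIsTruncated() && !result.GetContents().empty()) {
                    // The next page starts after this key, per the Marker doc comment.
                    request.SetMarker(result.GetContents().back().GetKey());
                } else {
                    more = false;
                }
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }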
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h
index cb3b03ca2e..f035833b33 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsResult.h
@@ -28,12 +28,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListObjectsResult
+ class ListObjectsResult
{
public:
- ListObjectsResult();
- ListObjectsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListObjectsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListObjectsResult();
+ AWS_S3_API ListObjectsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListObjectsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h
index 7a8bd556b3..1c6f1528b7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Request.h
@@ -25,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListObjectsV2Request : public S3Request
+ class ListObjectsV2Request : public S3Request
{
public:
- ListObjectsV2Request();
+ AWS_S3_API ListObjectsV2Request();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,31 +36,35 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "ListObjectsV2"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>Bucket name to list. </p> <p>When using this action with an access point, you
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -69,19 +73,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -90,19 +94,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -111,19 +115,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -132,19 +136,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -153,19 +157,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsV2Request& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -174,19 +178,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsV2Request& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -195,19 +199,19 @@ namespace Model
* must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsV2Request& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -542,57 +546,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectsV2Request& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectsV2Request& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListObjectsV2Request& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -639,37 +643,37 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_delimiter;
- bool m_delimiterHasBeenSet;
+ bool m_delimiterHasBeenSet = false;
EncodingType m_encodingType;
- bool m_encodingTypeHasBeenSet;
+ bool m_encodingTypeHasBeenSet = false;
int m_maxKeys;
- bool m_maxKeysHasBeenSet;
+ bool m_maxKeysHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::String m_continuationToken;
- bool m_continuationTokenHasBeenSet;
+ bool m_continuationTokenHasBeenSet = false;
bool m_fetchOwner;
- bool m_fetchOwnerHasBeenSet;
+ bool m_fetchOwnerHasBeenSet = false;
Aws::String m_startAfter;
- bool m_startAfterHasBeenSet;
+ bool m_startAfterHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
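ListObjectsV2Request carries the same updated guidance about directing requests at an access point and passing its ARN in place of the bucket name. A sketch, not taken from this patch, of paging with the continuation token; the access point ARN and prefix are invented placeholders, and the client's configured region has to match the ARN's region for endpoint resolution to work:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/ListObjectsV2Request.h>
    #include <iostream>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::ListObjectsV2Request request;
            // An access point ARN may stand in for the bucket name; this one is a placeholder.
            request.WithBucket("arn:aws:s3:us-east-1:111122223333:accesspoint/example-ap")
                   .WithPrefix("logs/");  // placeholder key prefix

            Aws::String token;
            do {
                if (!token.empty()) {
                    request.SetContinuationToken(token);
                }
                auto outcome = client.ListObjectsV2(request);
                if (!outcome.IsSuccess()) {
                    std::cerr << "ListObjectsV2 failed: "
                              << outcome.GetError().GetMessage() << std::endl;
                    break;
                }
                const auto& result = outcome.GetResult();
                for (const auto& object : result.GetContents()) {
                    std::cout << object.GetKey() << std::endl;
                }
                token = result.GetIsTruncated() ? result.GetNextContinuationToken()
                                                : Aws::String();
            } while (!token.empty());
        }
        Aws::ShutdownAPI(options);
        return 0;
    }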
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Result.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Result.h
index 14749c9259..e8bf41a8c8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Result.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListObjectsV2Result.h
@@ -28,12 +28,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListObjectsV2Result
+ class ListObjectsV2Result
{
public:
- ListObjectsV2Result();
- ListObjectsV2Result(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListObjectsV2Result& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListObjectsV2Result();
+ AWS_S3_API ListObjectsV2Result(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListObjectsV2Result& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -99,19 +99,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetName() const{ return m_name; }
@@ -120,19 +120,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetName(const Aws::String& value) { m_name = value; }
@@ -141,19 +141,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetName(Aws::String&& value) { m_name = std::move(value); }
@@ -162,19 +162,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetName(const char* value) { m_name.assign(value); }
@@ -183,19 +183,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsV2Result& WithName(const Aws::String& value) { SetName(value); return *this;}
@@ -204,19 +204,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsV2Result& WithName(Aws::String&& value) { SetName(std::move(value)); return *this;}
@@ -225,19 +225,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListObjectsV2Result& WithName(const char* value) { SetName(value); return *this;}
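
// Illustrative sketch (not part of the patch above): the Name documentation in
// ListObjectsV2Result describes how callers address a bucket through an access
// point or S3 on Outposts by passing the ARN in place of the bucket name. The
// S3Client call and the ARN value below are assumptions for illustration only.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListObjectsV2Request.h>

void ListViaAccessPoint(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::ListObjectsV2Request request;
    // Hypothetical access point ARN; a plain bucket name works the same way.
    request.SetBucket("arn:aws:s3:us-east-1:123456789012:accesspoint/my-access-point");

    auto outcome = client.ListObjectsV2(request);
    if (outcome.IsSuccess())
    {
        // GetName() is the accessor documented in the hunk above.
        const Aws::String& name = outcome.GetResult().GetName();
        (void)name;
    }
}
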
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsRequest.h
index fef7905b7a..27652328e3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API ListPartsRequest : public S3Request
+ class ListPartsRequest : public S3Request
{
public:
- ListPartsRequest();
+ AWS_S3_API ListPartsRequest();
    // Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,31 +35,35 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "ListParts"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+     * Helper function to collect parameters (both configurable and statically hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket to which the parts are being uploaded. </p> <p>When
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -68,19 +72,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -89,19 +93,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -110,19 +114,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -131,19 +135,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -152,19 +156,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListPartsRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -173,19 +177,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListPartsRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -194,19 +198,19 @@ namespace Model
* using this action with an access point, you must direct requests to the access
* point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ListPartsRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -360,61 +364,280 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListPartsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListPartsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline ListPartsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetSSECustomerAlgorithm() const{ return m_sSECustomerAlgorithm; }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool SSECustomerAlgorithmHasBeenSet() const { return m_sSECustomerAlgorithmHasBeenSet; }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerAlgorithm(const Aws::String& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = value; }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerAlgorithm(Aws::String&& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = std::move(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerAlgorithm(const char* value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm.assign(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerAlgorithm(const Aws::String& value) { SetSSECustomerAlgorithm(value); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerAlgorithm(Aws::String&& value) { SetSSECustomerAlgorithm(std::move(value)); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerAlgorithm(const char* value) { SetSSECustomerAlgorithm(value); return *this;}
+
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetSSECustomerKey() const{ return m_sSECustomerKey; }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool SSECustomerKeyHasBeenSet() const { return m_sSECustomerKeyHasBeenSet; }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKey(const Aws::String& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = value; }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKey(Aws::String&& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = std::move(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKey(const char* value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey.assign(value); }
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerKey(const Aws::String& value) { SetSSECustomerKey(value); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerKey(Aws::String&& value) { SetSSECustomerKey(std::move(value)); return *this;}
+
+ /**
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerKey(const char* value) { SetSSECustomerKey(value); return *this;}
+
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetSSECustomerKeyMD5() const{ return m_sSECustomerKeyMD5; }
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool SSECustomerKeyMD5HasBeenSet() const { return m_sSECustomerKeyMD5HasBeenSet; }
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKeyMD5(const Aws::String& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = value; }
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKeyMD5(Aws::String&& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = std::move(value); }
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetSSECustomerKeyMD5(const char* value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5.assign(value); }
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerKeyMD5(const Aws::String& value) { SetSSECustomerKeyMD5(value); return *this;}
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerKeyMD5(Aws::String&& value) { SetSSECustomerKeyMD5(std::move(value)); return *this;}
+
+ /**
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ListPartsRequest& WithSSECustomerKeyMD5(const char* value) { SetSSECustomerKeyMD5(value); return *this;}
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -457,28 +680,37 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
int m_maxParts;
- bool m_maxPartsHasBeenSet;
+ bool m_maxPartsHasBeenSet = false;
int m_partNumberMarker;
- bool m_partNumberMarkerHasBeenSet;
+ bool m_partNumberMarkerHasBeenSet = false;
Aws::String m_uploadId;
- bool m_uploadIdHasBeenSet;
+ bool m_uploadIdHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ Aws::String m_sSECustomerAlgorithm;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
+
+ Aws::String m_sSECustomerKey;
+ bool m_sSECustomerKeyHasBeenSet = false;
+
+ Aws::String m_sSECustomerKeyMD5;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
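
// Illustrative sketch (not part of the patch above): exercising the fields that
// this update adds or reworks on ListPartsRequest. SetKey() and SetUploadId()
// are assumed setters matching the m_key and m_uploadId members shown above,
// and the S3Client::ListParts call is likewise an assumption for illustration.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListPartsRequest.h>

void ListPartsWithSseC(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::ListPartsRequest request;
    request.SetBucket("my-bucket");
    request.SetKey("large-object.bin");                 // assumed setter
    request.SetUploadId("example-upload-id");           // assumed setter
    // Mismatched owner now fails with HTTP 403 Forbidden (access denied).
    request.SetExpectedBucketOwner("123456789012");
    // The new SSE-C fields are needed only when the object was created using a
    // checksum algorithm; the values here are placeholders.
    request.SetSSECustomerAlgorithm("AES256");
    request.SetSSECustomerKey("base64-encoded-256-bit-key");
    request.SetSSECustomerKeyMD5("base64-encoded-md5-of-key");

    auto outcome = client.ListParts(request);           // assumed client call
    (void)outcome;
}
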
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsResult.h
index 9cf2e6d514..c69dd6266b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ListPartsResult.h
@@ -12,6 +12,7 @@
#include <aws/s3/model/Owner.h>
#include <aws/s3/model/StorageClass.h>
#include <aws/s3/model/RequestCharged.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/Part.h>
#include <utility>
@@ -31,12 +32,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API ListPartsResult
+ class ListPartsResult
{
public:
- ListPartsResult();
- ListPartsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- ListPartsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListPartsResult();
+ AWS_S3_API ListPartsResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API ListPartsResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -156,37 +157,44 @@ namespace Model
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucket = value; }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucket = std::move(value); }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline void SetBucket(const char* value) { m_bucket.assign(value); }
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline ListPartsResult& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline ListPartsResult& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>The name of the bucket to which the multipart upload was initiated.</p>
+ * <p>The name of the bucket to which the multipart upload was initiated. Does not
+ * return the access point ARN or access point alias if used.</p>
*/
inline ListPartsResult& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -390,41 +398,41 @@ namespace Model
/**
* <p>Container element that identifies who initiated the multipart upload. If the
- * initiator is an AWS account, this element provides the same information as the
- * <code>Owner</code> element. If the initiator is an IAM User, this element
- * provides the user ARN and display name.</p>
+ * initiator is an Amazon Web Services account, this element provides the same
+ * information as the <code>Owner</code> element. If the initiator is an IAM User,
+ * this element provides the user ARN and display name.</p>
*/
inline const Initiator& GetInitiator() const{ return m_initiator; }
/**
* <p>Container element that identifies who initiated the multipart upload. If the
- * initiator is an AWS account, this element provides the same information as the
- * <code>Owner</code> element. If the initiator is an IAM User, this element
- * provides the user ARN and display name.</p>
+ * initiator is an Amazon Web Services account, this element provides the same
+ * information as the <code>Owner</code> element. If the initiator is an IAM User,
+ * this element provides the user ARN and display name.</p>
*/
inline void SetInitiator(const Initiator& value) { m_initiator = value; }
/**
* <p>Container element that identifies who initiated the multipart upload. If the
- * initiator is an AWS account, this element provides the same information as the
- * <code>Owner</code> element. If the initiator is an IAM User, this element
- * provides the user ARN and display name.</p>
+ * initiator is an Amazon Web Services account, this element provides the same
+ * information as the <code>Owner</code> element. If the initiator is an IAM User,
+ * this element provides the user ARN and display name.</p>
*/
inline void SetInitiator(Initiator&& value) { m_initiator = std::move(value); }
/**
* <p>Container element that identifies who initiated the multipart upload. If the
- * initiator is an AWS account, this element provides the same information as the
- * <code>Owner</code> element. If the initiator is an IAM User, this element
- * provides the user ARN and display name.</p>
+ * initiator is an Amazon Web Services account, this element provides the same
+ * information as the <code>Owner</code> element. If the initiator is an IAM User,
+ * this element provides the user ARN and display name.</p>
*/
inline ListPartsResult& WithInitiator(const Initiator& value) { SetInitiator(value); return *this;}
/**
* <p>Container element that identifies who initiated the multipart upload. If the
- * initiator is an AWS account, this element provides the same information as the
- * <code>Owner</code> element. If the initiator is an IAM User, this element
- * provides the user ARN and display name.</p>
+ * initiator is an Amazon Web Services account, this element provides the same
+ * information as the <code>Owner</code> element. If the initiator is an IAM User,
+ * this element provides the user ARN and display name.</p>
*/
inline ListPartsResult& WithInitiator(Initiator&& value) { SetInitiator(std::move(value)); return *this;}
@@ -511,6 +519,32 @@ namespace Model
inline ListPartsResult& WithRequestCharged(RequestCharged&& value) { SetRequestCharged(std::move(value)); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithm = value; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline ListPartsResult& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline ListPartsResult& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
private:
Aws::Utils::DateTime m_abortDate;
@@ -540,6 +574,8 @@ namespace Model
StorageClass m_storageClass;
RequestCharged m_requestCharged;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
};
} // namespace Model
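
// Illustrative sketch (not part of the patch above): reading the fields that this
// update touches on ListPartsResult. The surrounding outcome handling is assumed
// to have already produced a successful result.
#include <aws/s3/model/ListPartsResult.h>

void InspectParts(const Aws::S3::Model::ListPartsResult& result)
{
    // Per the updated comment, this is the bucket name, never the access point
    // ARN or alias that may have been used in the request.
    const Aws::String& bucket = result.GetBucket();

    // If the initiator is an Amazon Web Services account, this mirrors the Owner
    // element; for an IAM user it carries the user ARN and display name.
    const Aws::S3::Model::Initiator& initiator = result.GetInitiator();

    // New in this update: the algorithm used to checksum the object's parts.
    if (result.GetChecksumAlgorithm() == Aws::S3::Model::ChecksumAlgorithm::SHA256)
    {
        // e.g. validate SHA-256 part checksums here
    }
    (void)bucket;
    (void)initiator;
}
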
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LoggingEnabled.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LoggingEnabled.h
index 444fbf49f0..3cdf3e9c6f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LoggingEnabled.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/LoggingEnabled.h
@@ -28,19 +28,19 @@ namespace Model
* <p>Describes where logs are stored and the prefix that Amazon S3 assigns to all
* log object keys for a bucket. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlogging.html">PUT
- * Bucket logging</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p><p><h3>See Also:</h3> <a
+ * Bucket logging</a> in the <i>Amazon S3 API Reference</i>.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/LoggingEnabled">AWS
* API Reference</a></p>
*/
- class AWS_S3_API LoggingEnabled
+ class LoggingEnabled
{
public:
- LoggingEnabled();
- LoggingEnabled(const Aws::Utils::Xml::XmlNode& xmlNode);
- LoggingEnabled& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LoggingEnabled();
+ AWS_S3_API LoggingEnabled(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API LoggingEnabled& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -125,42 +125,74 @@ namespace Model
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::Vector<TargetGrant>& GetTargetGrants() const{ return m_targetGrants; }
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool TargetGrantsHasBeenSet() const { return m_targetGrantsHasBeenSet; }
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetTargetGrants(const Aws::Vector<TargetGrant>& value) { m_targetGrantsHasBeenSet = true; m_targetGrants = value; }
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetTargetGrants(Aws::Vector<TargetGrant>&& value) { m_targetGrantsHasBeenSet = true; m_targetGrants = std::move(value); }
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LoggingEnabled& WithTargetGrants(const Aws::Vector<TargetGrant>& value) { SetTargetGrants(value); return *this;}
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LoggingEnabled& WithTargetGrants(Aws::Vector<TargetGrant>&& value) { SetTargetGrants(std::move(value)); return *this;}
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LoggingEnabled& AddTargetGrants(const TargetGrant& value) { m_targetGrantsHasBeenSet = true; m_targetGrants.push_back(value); return *this; }
/**
- * <p>Container for granting information.</p>
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * for server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline LoggingEnabled& AddTargetGrants(TargetGrant&& value) { m_targetGrantsHasBeenSet = true; m_targetGrants.push_back(std::move(value)); return *this; }
@@ -224,13 +256,13 @@ namespace Model
private:
Aws::String m_targetBucket;
- bool m_targetBucketHasBeenSet;
+ bool m_targetBucketHasBeenSet = false;
Aws::Vector<TargetGrant> m_targetGrants;
- bool m_targetGrantsHasBeenSet;
+ bool m_targetGrantsHasBeenSet = false;
Aws::String m_targetPrefix;
- bool m_targetPrefixHasBeenSet;
+ bool m_targetPrefixHasBeenSet = false;
};
} // namespace Model
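
Not part of the patch: a minimal C++ sketch of how the updated LoggingEnabled element might be populated without target grants, which buckets using the bucket owner enforced setting for Object Ownership do not support. The WithTargetBucket/WithTargetPrefix builder setters and the bucket/prefix values are assumptions in the usual SDK style; only the TargetGrants accessors appear in this hunk.

    #include <aws/s3/model/LoggingEnabled.h>

    // Sketch only: server access logging without TargetGrants, since buckets
    // that use the bucket owner enforced setting do not support them.
    Aws::S3::Model::LoggingEnabled MakeLoggingEnabled()
    {
        Aws::S3::Model::LoggingEnabled logging;
        logging.WithTargetBucket("example-log-bucket")   // assumed builder setter, not shown in this hunk
               .WithTargetPrefix("access-logs/");        // assumed builder setter, not shown in this hunk
        return logging;
    }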
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetadataEntry.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetadataEntry.h
index b6d876e918..f944998953 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetadataEntry.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetadataEntry.h
@@ -27,14 +27,14 @@ namespace Model
* <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetadataEntry">AWS
* API Reference</a></p>
*/
- class AWS_S3_API MetadataEntry
+ class MetadataEntry
{
public:
- MetadataEntry();
- MetadataEntry(const Aws::Utils::Xml::XmlNode& xmlNode);
- MetadataEntry& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetadataEntry();
+ AWS_S3_API MetadataEntry(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetadataEntry& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -121,10 +121,10 @@ namespace Model
private:
Aws::String m_name;
- bool m_nameHasBeenSet;
+ bool m_nameHasBeenSet = false;
Aws::String m_value;
- bool m_valueHasBeenSet;
+ bool m_valueHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Metrics.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Metrics.h
index 815b2b556c..bfec4208b1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Metrics.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Metrics.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Metrics">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Metrics
+ class Metrics
{
public:
- Metrics();
- Metrics(const Aws::Utils::Xml::XmlNode& xmlNode);
- Metrics& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Metrics();
+ AWS_S3_API Metrics(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Metrics& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -109,10 +109,10 @@ namespace Model
private:
MetricsStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
ReplicationTimeValue m_eventThreshold;
- bool m_eventThresholdHasBeenSet;
+ bool m_eventThresholdHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsAndOperator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsAndOperator.h
index e413af11d9..952c5fd0b1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsAndOperator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsAndOperator.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsAndOperator">AWS
* API Reference</a></p>
*/
- class AWS_S3_API MetricsAndOperator
+ class MetricsAndOperator
{
public:
- MetricsAndOperator();
- MetricsAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
- MetricsAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetricsAndOperator();
+ AWS_S3_API MetricsAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetricsAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -123,13 +123,57 @@ namespace Model
*/
inline MetricsAndOperator& AddTags(Tag&& value) { m_tagsHasBeenSet = true; m_tags.push_back(std::move(value)); return *this; }
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline const Aws::String& GetAccessPointArn() const{ return m_accessPointArn; }
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline bool AccessPointArnHasBeenSet() const { return m_accessPointArnHasBeenSet; }
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline void SetAccessPointArn(const Aws::String& value) { m_accessPointArnHasBeenSet = true; m_accessPointArn = value; }
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline void SetAccessPointArn(Aws::String&& value) { m_accessPointArnHasBeenSet = true; m_accessPointArn = std::move(value); }
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline void SetAccessPointArn(const char* value) { m_accessPointArnHasBeenSet = true; m_accessPointArn.assign(value); }
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline MetricsAndOperator& WithAccessPointArn(const Aws::String& value) { SetAccessPointArn(value); return *this;}
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline MetricsAndOperator& WithAccessPointArn(Aws::String&& value) { SetAccessPointArn(std::move(value)); return *this;}
+
+ /**
+ * <p>The access point ARN used when evaluating an <code>AND</code> predicate.</p>
+ */
+ inline MetricsAndOperator& WithAccessPointArn(const char* value) { SetAccessPointArn(value); return *this;}
+
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::Vector<Tag> m_tags;
- bool m_tagsHasBeenSet;
+ bool m_tagsHasBeenSet = false;
+
+ Aws::String m_accessPointArn;
+ bool m_accessPointArnHasBeenSet = false;
};
} // namespace Model
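
Not part of the patch: a small sketch showing the new AccessPointArn member of MetricsAndOperator alongside the existing prefix. WithPrefix and the example prefix/ARN values are assumptions; only the tag and access-point-ARN accessors appear in this hunk.

    #include <aws/s3/model/MetricsAndOperator.h>

    // Sketch only: an AND predicate that scopes metrics by prefix and by the
    // newly added access point ARN.
    Aws::S3::Model::MetricsAndOperator MakeAndPredicate()
    {
        Aws::S3::Model::MetricsAndOperator andOp;
        andOp.WithPrefix("photos/")  // assumed setter for m_prefix, not shown in this hunk
             .WithAccessPointArn("arn:aws:s3:us-east-1:111122223333:accesspoint/example-ap");  // example ARN
        return andOp;
    }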
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsConfiguration.h
index 57abb1c653..8806bb12c3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsConfiguration.h
@@ -29,20 +29,19 @@ namespace Model
* updating an existing metrics configuration, note that this is a full replacement
* of the existing metrics configuration. If you don't include the elements you
* want to keep, they are erased. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html">
- * PUT Bucket metrics</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p><p><h3>See Also:</h3> <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTMetricConfiguration.html">PutBucketMetricsConfiguration</a>.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API MetricsConfiguration
+ class MetricsConfiguration
{
public:
- MetricsConfiguration();
- MetricsConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- MetricsConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetricsConfiguration();
+ AWS_S3_API MetricsConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetricsConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -88,53 +87,53 @@ namespace Model
/**
* <p>Specifies a metrics configuration filter. The metrics configuration will only
- * include objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p>
+ * include objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator).</p>
*/
inline const MetricsFilter& GetFilter() const{ return m_filter; }
/**
* <p>Specifies a metrics configuration filter. The metrics configuration will only
- * include objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p>
+ * include objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator).</p>
*/
inline bool FilterHasBeenSet() const { return m_filterHasBeenSet; }
/**
* <p>Specifies a metrics configuration filter. The metrics configuration will only
- * include objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p>
+ * include objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator).</p>
*/
inline void SetFilter(const MetricsFilter& value) { m_filterHasBeenSet = true; m_filter = value; }
/**
* <p>Specifies a metrics configuration filter. The metrics configuration will only
- * include objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p>
+ * include objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator).</p>
*/
inline void SetFilter(MetricsFilter&& value) { m_filterHasBeenSet = true; m_filter = std::move(value); }
/**
* <p>Specifies a metrics configuration filter. The metrics configuration will only
- * include objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p>
+ * include objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator).</p>
*/
inline MetricsConfiguration& WithFilter(const MetricsFilter& value) { SetFilter(value); return *this;}
/**
* <p>Specifies a metrics configuration filter. The metrics configuration will only
- * include objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p>
+ * include objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator).</p>
*/
inline MetricsConfiguration& WithFilter(MetricsFilter&& value) { SetFilter(std::move(value)); return *this;}
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
MetricsFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
};
} // namespace Model
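
Not part of the patch: a sketch of attaching a filter to a MetricsConfiguration; as the class documentation notes, PutBucketMetricsConfiguration fully replaces any existing configuration with the same ID. WithId on the configuration and WithPrefix on the filter are assumed builder setters not shown in these hunks, and the ID/prefix values are placeholders.

    #include <aws/s3/model/MetricsConfiguration.h>
    #include <aws/s3/model/MetricsFilter.h>

    // Sketch only: a named metrics configuration whose filter is a single prefix.
    Aws::S3::Model::MetricsConfiguration MakeMetricsConfiguration()
    {
        Aws::S3::Model::MetricsFilter filter;
        filter.WithPrefix("images/");                // assumed setter, not shown in this hunk

        Aws::S3::Model::MetricsConfiguration config;
        config.WithId("ImagesMetrics")               // assumed setter for m_id, not shown in this hunk
              .WithFilter(filter);
        return config;
    }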
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsFilter.h
index 1305abdf21..b0086b9eca 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MetricsFilter.h
@@ -26,19 +26,22 @@ namespace Model
/**
* <p>Specifies a metrics configuration filter. The metrics configuration only
- * includes objects that meet the filter's criteria. A filter must be a prefix, a
- * tag, or a conjunction (MetricsAndOperator).</p><p><h3>See Also:</h3> <a
+ * includes objects that meet the filter's criteria. A filter must be a prefix, an
+ * object tag, an access point ARN, or a conjunction (MetricsAndOperator). For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketMetricsConfiguration.html">PutBucketMetricsConfiguration</a>.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MetricsFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API MetricsFilter
+ class MetricsFilter
{
public:
- MetricsFilter();
- MetricsFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- MetricsFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetricsFilter();
+ AWS_S3_API MetricsFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MetricsFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -114,6 +117,47 @@ namespace Model
/**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline const Aws::String& GetAccessPointArn() const{ return m_accessPointArn; }
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline bool AccessPointArnHasBeenSet() const { return m_accessPointArnHasBeenSet; }
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline void SetAccessPointArn(const Aws::String& value) { m_accessPointArnHasBeenSet = true; m_accessPointArn = value; }
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline void SetAccessPointArn(Aws::String&& value) { m_accessPointArnHasBeenSet = true; m_accessPointArn = std::move(value); }
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline void SetAccessPointArn(const char* value) { m_accessPointArnHasBeenSet = true; m_accessPointArn.assign(value); }
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline MetricsFilter& WithAccessPointArn(const Aws::String& value) { SetAccessPointArn(value); return *this;}
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline MetricsFilter& WithAccessPointArn(Aws::String&& value) { SetAccessPointArn(std::move(value)); return *this;}
+
+ /**
+ * <p>The access point ARN used when evaluating a metrics filter.</p>
+ */
+ inline MetricsFilter& WithAccessPointArn(const char* value) { SetAccessPointArn(value); return *this;}
+
+
+ /**
* <p>A conjunction (logical AND) of predicates, which is used in evaluating a
* metrics filter. The operator must have at least two predicates, and an object
* must match all of the predicates in order for the filter to apply.</p>
@@ -158,13 +202,16 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Tag m_tag;
- bool m_tagHasBeenSet;
+ bool m_tagHasBeenSet = false;
+
+ Aws::String m_accessPointArn;
+ bool m_accessPointArnHasBeenSet = false;
MetricsAndOperator m_and;
- bool m_andHasBeenSet;
+ bool m_andHasBeenSet = false;
};
} // namespace Model
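
Not part of the patch: with this update a MetricsFilter may consist of an access point ARN on its own, in addition to a prefix, an object tag, or a MetricsAndOperator conjunction. The ARN below is a placeholder.

    #include <aws/s3/model/MetricsFilter.h>

    // Sketch only: a filter that selects objects accessed through one access point.
    Aws::S3::Model::MetricsFilter MakeAccessPointFilter()
    {
        Aws::S3::Model::MetricsFilter filter;
        filter.WithAccessPointArn("arn:aws:s3:us-east-1:111122223333:accesspoint/example-ap");
        return filter;
    }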
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MultipartUpload.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MultipartUpload.h
index 29eafa4222..b0dad12ce0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MultipartUpload.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/MultipartUpload.h
@@ -10,6 +10,7 @@
#include <aws/s3/model/StorageClass.h>
#include <aws/s3/model/Owner.h>
#include <aws/s3/model/Initiator.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <utility>
namespace Aws
@@ -32,14 +33,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/MultipartUpload">AWS
* API Reference</a></p>
*/
- class AWS_S3_API MultipartUpload
+ class MultipartUpload
{
public:
- MultipartUpload();
- MultipartUpload(const Aws::Utils::Xml::XmlNode& xmlNode);
- MultipartUpload& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MultipartUpload();
+ AWS_S3_API MultipartUpload(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API MultipartUpload& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -247,25 +248,59 @@ namespace Model
*/
inline MultipartUpload& WithInitiator(Initiator&& value) { SetInitiator(std::move(value)); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline MultipartUpload& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline MultipartUpload& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
private:
Aws::String m_uploadId;
- bool m_uploadIdHasBeenSet;
+ bool m_uploadIdHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::Utils::DateTime m_initiated;
- bool m_initiatedHasBeenSet;
+ bool m_initiatedHasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
Owner m_owner;
- bool m_ownerHasBeenSet;
+ bool m_ownerHasBeenSet = false;
Initiator m_initiator;
- bool m_initiatorHasBeenSet;
+ bool m_initiatorHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
};
} // namespace Model
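
Not part of the patch: a sketch of reading the new ChecksumAlgorithm member from a MultipartUpload entry (for example, one element of a ListMultipartUploads result). Comparing against a concrete enum value such as SHA256 is left as a comment because the enum's values are not part of this excerpt.

    #include <aws/s3/model/MultipartUpload.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>

    // Sketch only: check whether an in-progress upload reports a checksum algorithm.
    bool UsesChecksum(const Aws::S3::Model::MultipartUpload& upload)
    {
        if (!upload.ChecksumAlgorithmHasBeenSet())
        {
            return false;  // upload was started without a checksum algorithm
        }
        const Aws::S3::Model::ChecksumAlgorithm& algorithm = upload.GetChecksumAlgorithm();
        (void)algorithm;   // e.g. compare with ChecksumAlgorithm::SHA256 (enum values assumed)
        return true;
    }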
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h
index 9e966d7722..c039ca9950 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionExpiration.h
@@ -29,60 +29,100 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionExpiration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API NoncurrentVersionExpiration
+ class NoncurrentVersionExpiration
{
public:
- NoncurrentVersionExpiration();
- NoncurrentVersionExpiration(const Aws::Utils::Xml::XmlNode& xmlNode);
- NoncurrentVersionExpiration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NoncurrentVersionExpiration();
+ AWS_S3_API NoncurrentVersionExpiration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NoncurrentVersionExpiration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p>Specifies the number of days an object is noncurrent before Amazon S3 can
- * perform the associated action. For information about the noncurrent days
- * calculations, see <a
+ * perform the associated action. The value must be a non-zero positive integer.
+ * For information about the noncurrent days calculations, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations">How
- * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon
- * Simple Storage Service Developer Guide</i>.</p>
+ * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline int GetNoncurrentDays() const{ return m_noncurrentDays; }
/**
* <p>Specifies the number of days an object is noncurrent before Amazon S3 can
- * perform the associated action. For information about the noncurrent days
- * calculations, see <a
+ * perform the associated action. The value must be a non-zero positive integer.
+ * For information about the noncurrent days calculations, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations">How
- * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon
- * Simple Storage Service Developer Guide</i>.</p>
+ * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline bool NoncurrentDaysHasBeenSet() const { return m_noncurrentDaysHasBeenSet; }
/**
* <p>Specifies the number of days an object is noncurrent before Amazon S3 can
- * perform the associated action. For information about the noncurrent days
- * calculations, see <a
+ * perform the associated action. The value must be a non-zero positive integer.
+ * For information about the noncurrent days calculations, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations">How
- * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon
- * Simple Storage Service Developer Guide</i>.</p>
+ * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline void SetNoncurrentDays(int value) { m_noncurrentDaysHasBeenSet = true; m_noncurrentDays = value; }
/**
* <p>Specifies the number of days an object is noncurrent before Amazon S3 can
- * perform the associated action. For information about the noncurrent days
- * calculations, see <a
+ * perform the associated action. The value must be a non-zero positive integer.
+ * For information about the noncurrent days calculations, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/intro-lifecycle-rules.html#non-current-days-calculations">How
- * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon
- * Simple Storage Service Developer Guide</i>.</p>
+ * Amazon S3 Calculates When an Object Became Noncurrent</a> in the <i>Amazon S3
+ * User Guide</i>.</p>
*/
inline NoncurrentVersionExpiration& WithNoncurrentDays(int value) { SetNoncurrentDays(value); return *this;}
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline int GetNewerNoncurrentVersions() const{ return m_newerNoncurrentVersions; }
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool NewerNoncurrentVersionsHasBeenSet() const { return m_newerNoncurrentVersionsHasBeenSet; }
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetNewerNoncurrentVersions(int value) { m_newerNoncurrentVersionsHasBeenSet = true; m_newerNoncurrentVersions = value; }
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline NoncurrentVersionExpiration& WithNewerNoncurrentVersions(int value) { SetNewerNoncurrentVersions(value); return *this;}
+
private:
int m_noncurrentDays;
- bool m_noncurrentDaysHasBeenSet;
+ bool m_noncurrentDaysHasBeenSet = false;
+
+ int m_newerNoncurrentVersions;
+ bool m_newerNoncurrentVersionsHasBeenSet = false;
};
} // namespace Model
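
Not part of the patch: both setters used below appear in this hunk, so the sketch only supplies placeholder values. It expires noncurrent versions after 30 days while always retaining the 5 most recent noncurrent versions via the new NewerNoncurrentVersions field.

    #include <aws/s3/model/NoncurrentVersionExpiration.h>

    // Sketch only: expire noncurrent versions after 30 days, keeping the 5 newest.
    Aws::S3::Model::NoncurrentVersionExpiration MakeNoncurrentExpiration()
    {
        Aws::S3::Model::NoncurrentVersionExpiration expiration;
        expiration.WithNoncurrentDays(30)
                  .WithNewerNoncurrentVersions(5);
        return expiration;
    }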
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h
index f6ea66e084..340d82a597 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NoncurrentVersionTransition.h
@@ -25,24 +25,25 @@ namespace Model
/**
* <p>Container for the transition rule that describes when noncurrent objects
* transition to the <code>STANDARD_IA</code>, <code>ONEZONE_IA</code>,
- * <code>INTELLIGENT_TIERING</code>, <code>GLACIER</code>, or
- * <code>DEEP_ARCHIVE</code> storage class. If your bucket is versioning-enabled
+ * <code>INTELLIGENT_TIERING</code>, <code>GLACIER_IR</code>, <code>GLACIER</code>,
+ * or <code>DEEP_ARCHIVE</code> storage class. If your bucket is versioning-enabled
* (or versioning is suspended), you can set this action to request that Amazon S3
* transition noncurrent object versions to the <code>STANDARD_IA</code>,
- * <code>ONEZONE_IA</code>, <code>INTELLIGENT_TIERING</code>, <code>GLACIER</code>,
- * or <code>DEEP_ARCHIVE</code> storage class at a specific period in the object's
- * lifetime.</p><p><h3>See Also:</h3> <a
+ * <code>ONEZONE_IA</code>, <code>INTELLIGENT_TIERING</code>,
+ * <code>GLACIER_IR</code>, <code>GLACIER</code>, or <code>DEEP_ARCHIVE</code>
+ * storage class at a specific period in the object's lifetime.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NoncurrentVersionTransition">AWS
* API Reference</a></p>
*/
- class AWS_S3_API NoncurrentVersionTransition
+ class NoncurrentVersionTransition
{
public:
- NoncurrentVersionTransition();
- NoncurrentVersionTransition(const Aws::Utils::Xml::XmlNode& xmlNode);
- NoncurrentVersionTransition& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NoncurrentVersionTransition();
+ AWS_S3_API NoncurrentVersionTransition(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NoncurrentVersionTransition& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -116,13 +117,53 @@ namespace Model
*/
inline NoncurrentVersionTransition& WithStorageClass(TransitionStorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline int GetNewerNoncurrentVersions() const{ return m_newerNoncurrentVersions; }
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool NewerNoncurrentVersionsHasBeenSet() const { return m_newerNoncurrentVersionsHasBeenSet; }
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetNewerNoncurrentVersions(int value) { m_newerNoncurrentVersionsHasBeenSet = true; m_newerNoncurrentVersions = value; }
+
+ /**
+ * <p>Specifies how many noncurrent versions Amazon S3 will retain. If there are
+ * this many more recent noncurrent versions, Amazon S3 will take the associated
+ * action. For more information about noncurrent versions, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/intro-lifecycle-rules.html">Lifecycle
+ * configuration elements</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline NoncurrentVersionTransition& WithNewerNoncurrentVersions(int value) { SetNewerNoncurrentVersions(value); return *this;}
+
private:
int m_noncurrentDays;
- bool m_noncurrentDaysHasBeenSet;
+ bool m_noncurrentDaysHasBeenSet = false;
TransitionStorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
+
+ int m_newerNoncurrentVersions;
+ bool m_newerNoncurrentVersionsHasBeenSet = false;
};
} // namespace Model
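
Not part of the patch: the same NewerNoncurrentVersions field is added to NoncurrentVersionTransition. In the sketch, WithNoncurrentDays and the GLACIER_IR enum value are assumptions (only the storage-class and NewerNoncurrentVersions accessors appear in this hunk); the day and version counts are placeholders.

    #include <aws/s3/model/NoncurrentVersionTransition.h>
    #include <aws/s3/model/TransitionStorageClass.h>

    // Sketch only: transition noncurrent versions after 30 days, keeping the 2 newest
    // in their current storage class.
    Aws::S3::Model::NoncurrentVersionTransition MakeNoncurrentTransition()
    {
        Aws::S3::Model::NoncurrentVersionTransition transition;
        transition.WithNoncurrentDays(30)                                                // assumed setter, not shown in this hunk
                  .WithStorageClass(Aws::S3::Model::TransitionStorageClass::GLACIER_IR)  // enum value assumed
                  .WithNewerNoncurrentVersions(2);
        return transition;
    }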
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfiguration.h
index f479b46490..24f0e6cbc0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfiguration.h
@@ -6,6 +6,7 @@
#pragma once
#include <aws/s3/S3_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSVector.h>
+#include <aws/s3/model/EventBridgeConfiguration.h>
#include <aws/s3/model/TopicConfiguration.h>
#include <aws/s3/model/QueueConfiguration.h>
#include <aws/s3/model/LambdaFunctionConfiguration.h>
@@ -32,14 +33,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API NotificationConfiguration
+ class NotificationConfiguration
{
public:
- NotificationConfiguration();
- NotificationConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- NotificationConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NotificationConfiguration();
+ AWS_S3_API NotificationConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NotificationConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -141,63 +142,97 @@ namespace Model
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline const Aws::Vector<LambdaFunctionConfiguration>& GetLambdaFunctionConfigurations() const{ return m_lambdaFunctionConfigurations; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline bool LambdaFunctionConfigurationsHasBeenSet() const { return m_lambdaFunctionConfigurationsHasBeenSet; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline void SetLambdaFunctionConfigurations(const Aws::Vector<LambdaFunctionConfiguration>& value) { m_lambdaFunctionConfigurationsHasBeenSet = true; m_lambdaFunctionConfigurations = value; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline void SetLambdaFunctionConfigurations(Aws::Vector<LambdaFunctionConfiguration>&& value) { m_lambdaFunctionConfigurationsHasBeenSet = true; m_lambdaFunctionConfigurations = std::move(value); }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline NotificationConfiguration& WithLambdaFunctionConfigurations(const Aws::Vector<LambdaFunctionConfiguration>& value) { SetLambdaFunctionConfigurations(value); return *this;}
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline NotificationConfiguration& WithLambdaFunctionConfigurations(Aws::Vector<LambdaFunctionConfiguration>&& value) { SetLambdaFunctionConfigurations(std::move(value)); return *this;}
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline NotificationConfiguration& AddLambdaFunctionConfigurations(const LambdaFunctionConfiguration& value) { m_lambdaFunctionConfigurationsHasBeenSet = true; m_lambdaFunctionConfigurations.push_back(value); return *this; }
/**
- * <p>Describes the AWS Lambda functions to invoke and the events for which to
- * invoke them.</p>
+ * <p>Describes the Lambda functions to invoke and the events for which to invoke
+ * them.</p>
*/
inline NotificationConfiguration& AddLambdaFunctionConfigurations(LambdaFunctionConfiguration&& value) { m_lambdaFunctionConfigurationsHasBeenSet = true; m_lambdaFunctionConfigurations.push_back(std::move(value)); return *this; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline const EventBridgeConfiguration& GetEventBridgeConfiguration() const{ return m_eventBridgeConfiguration; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline bool EventBridgeConfigurationHasBeenSet() const { return m_eventBridgeConfigurationHasBeenSet; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline void SetEventBridgeConfiguration(const EventBridgeConfiguration& value) { m_eventBridgeConfigurationHasBeenSet = true; m_eventBridgeConfiguration = value; }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline void SetEventBridgeConfiguration(EventBridgeConfiguration&& value) { m_eventBridgeConfigurationHasBeenSet = true; m_eventBridgeConfiguration = std::move(value); }
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline NotificationConfiguration& WithEventBridgeConfiguration(const EventBridgeConfiguration& value) { SetEventBridgeConfiguration(value); return *this;}
+
+ /**
+ * <p>Enables delivery of events to Amazon EventBridge.</p>
+ */
+ inline NotificationConfiguration& WithEventBridgeConfiguration(EventBridgeConfiguration&& value) { SetEventBridgeConfiguration(std::move(value)); return *this;}
+
private:
Aws::Vector<TopicConfiguration> m_topicConfigurations;
- bool m_topicConfigurationsHasBeenSet;
+ bool m_topicConfigurationsHasBeenSet = false;
Aws::Vector<QueueConfiguration> m_queueConfigurations;
- bool m_queueConfigurationsHasBeenSet;
+ bool m_queueConfigurationsHasBeenSet = false;
Aws::Vector<LambdaFunctionConfiguration> m_lambdaFunctionConfigurations;
- bool m_lambdaFunctionConfigurationsHasBeenSet;
+ bool m_lambdaFunctionConfigurationsHasBeenSet = false;
+
+ EventBridgeConfiguration m_eventBridgeConfiguration;
+ bool m_eventBridgeConfigurationHasBeenSet = false;
};
} // namespace Model
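
Not part of the patch: a sketch of the new EventBridge integration. It assumes EventBridgeConfiguration is default-constructible and carries no members of its own (its definition is not part of this excerpt); setting it on the NotificationConfiguration is what enables event delivery to Amazon EventBridge.

    #include <aws/s3/model/NotificationConfiguration.h>
    #include <aws/s3/model/EventBridgeConfiguration.h>

    // Sketch only: turn on delivery of bucket events to Amazon EventBridge.
    Aws::S3::Model::NotificationConfiguration MakeNotificationConfiguration()
    {
        Aws::S3::Model::NotificationConfiguration config;
        config.WithEventBridgeConfiguration(Aws::S3::Model::EventBridgeConfiguration());
        return config;
    }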
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationDeprecated.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationDeprecated.h
index fabb46afcf..2f6618fc4e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationDeprecated.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationDeprecated.h
@@ -24,14 +24,14 @@ namespace S3
namespace Model
{
- class AWS_S3_API NotificationConfigurationDeprecated
+ class NotificationConfigurationDeprecated
{
public:
- NotificationConfigurationDeprecated();
- NotificationConfigurationDeprecated(const Aws::Utils::Xml::XmlNode& xmlNode);
- NotificationConfigurationDeprecated& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NotificationConfigurationDeprecated();
+ AWS_S3_API NotificationConfigurationDeprecated(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NotificationConfigurationDeprecated& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -121,45 +121,45 @@ namespace Model
/**
- * <p>Container for specifying the AWS Lambda notification configuration.</p>
+ * <p>Container for specifying the Lambda notification configuration.</p>
*/
inline const CloudFunctionConfiguration& GetCloudFunctionConfiguration() const{ return m_cloudFunctionConfiguration; }
/**
- * <p>Container for specifying the AWS Lambda notification configuration.</p>
+ * <p>Container for specifying the Lambda notification configuration.</p>
*/
inline bool CloudFunctionConfigurationHasBeenSet() const { return m_cloudFunctionConfigurationHasBeenSet; }
/**
- * <p>Container for specifying the AWS Lambda notification configuration.</p>
+ * <p>Container for specifying the Lambda notification configuration.</p>
*/
inline void SetCloudFunctionConfiguration(const CloudFunctionConfiguration& value) { m_cloudFunctionConfigurationHasBeenSet = true; m_cloudFunctionConfiguration = value; }
/**
- * <p>Container for specifying the AWS Lambda notification configuration.</p>
+ * <p>Container for specifying the Lambda notification configuration.</p>
*/
inline void SetCloudFunctionConfiguration(CloudFunctionConfiguration&& value) { m_cloudFunctionConfigurationHasBeenSet = true; m_cloudFunctionConfiguration = std::move(value); }
/**
- * <p>Container for specifying the AWS Lambda notification configuration.</p>
+ * <p>Container for specifying the Lambda notification configuration.</p>
*/
inline NotificationConfigurationDeprecated& WithCloudFunctionConfiguration(const CloudFunctionConfiguration& value) { SetCloudFunctionConfiguration(value); return *this;}
/**
- * <p>Container for specifying the AWS Lambda notification configuration.</p>
+ * <p>Container for specifying the Lambda notification configuration.</p>
*/
inline NotificationConfigurationDeprecated& WithCloudFunctionConfiguration(CloudFunctionConfiguration&& value) { SetCloudFunctionConfiguration(std::move(value)); return *this;}
private:
TopicConfigurationDeprecated m_topicConfiguration;
- bool m_topicConfigurationHasBeenSet;
+ bool m_topicConfigurationHasBeenSet = false;
QueueConfigurationDeprecated m_queueConfiguration;
- bool m_queueConfigurationHasBeenSet;
+ bool m_queueConfigurationHasBeenSet = false;
CloudFunctionConfiguration m_cloudFunctionConfiguration;
- bool m_cloudFunctionConfigurationHasBeenSet;
+ bool m_cloudFunctionConfigurationHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationFilter.h
index 496d5d756f..0bfe347533 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/NotificationConfigurationFilter.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/NotificationConfigurationFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API NotificationConfigurationFilter
+ class NotificationConfigurationFilter
{
public:
- NotificationConfigurationFilter();
- NotificationConfigurationFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- NotificationConfigurationFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NotificationConfigurationFilter();
+ AWS_S3_API NotificationConfigurationFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API NotificationConfigurationFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -62,7 +62,7 @@ namespace Model
private:
S3KeyFilter m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Object.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Object.h
index 2deeed80c9..520cf869c6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Object.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Object.h
@@ -7,8 +7,10 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/core/utils/DateTime.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/s3/model/ObjectStorageClass.h>
#include <aws/s3/model/Owner.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <utility>
namespace Aws
@@ -31,14 +33,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Object">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Object
+ class Object
{
public:
- Object();
- Object(const Aws::Utils::Xml::XmlNode& xmlNode);
- Object& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Object();
+ AWS_S3_API Object(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Object& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -126,14 +128,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline const Aws::String& GetETag() const{ return m_eTag; }
@@ -142,14 +147,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline bool ETagHasBeenSet() const { return m_eTagHasBeenSet; }
@@ -158,14 +166,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline void SetETag(const Aws::String& value) { m_eTagHasBeenSet = true; m_eTag = value; }
@@ -174,14 +185,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline void SetETag(Aws::String&& value) { m_eTagHasBeenSet = true; m_eTag = std::move(value); }
@@ -190,14 +204,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline void SetETag(const char* value) { m_eTagHasBeenSet = true; m_eTag.assign(value); }
@@ -206,14 +223,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline Object& WithETag(const Aws::String& value) { SetETag(value); return *this;}
@@ -222,14 +242,17 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline Object& WithETag(Aws::String&& value) { SetETag(std::move(value)); return *this;}
@@ -238,19 +261,63 @@ namespace Model
* contents of an object, not its metadata. The ETag may or may not be an MD5
* digest of the object data. Whether or not it is depends on how the object was
* created and how it is encrypted as described below:</p> <ul> <li> <p>Objects
- * created by the PUT Object, POST Object, or Copy operation, or through the AWS
- * Management Console, and are encrypted by SSE-S3 or plaintext, have ETags that
- * are an MD5 digest of their object data.</p> </li> <li> <p>Objects created by the
- * PUT Object, POST Object, or Copy operation, or through the AWS Management
- * Console, and are encrypted by SSE-C or SSE-KMS, have ETags that are not an MD5
- * digest of their object data.</p> </li> <li> <p>If an object is created by either
- * the Multipart Upload or Part Copy operation, the ETag is not an MD5 digest,
- * regardless of the method of encryption.</p> </li> </ul>
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-S3 or plaintext, have
+ * ETags that are an MD5 digest of their object data.</p> </li> <li> <p>Objects
+ * created by the PUT Object, POST Object, or Copy operation, or through the Amazon
+ * Web Services Management Console, and are encrypted by SSE-C or SSE-KMS, have
+ * ETags that are not an MD5 digest of their object data.</p> </li> <li> <p>If an
+ * object is created by either the Multipart Upload or Part Copy operation, the
+ * ETag is not an MD5 digest, regardless of the method of encryption. If an object
+ * is larger than 16 MB, the Amazon Web Services Management Console will upload or
+ * copy that object as a Multipart Upload, and therefore the ETag will not be an
+ * MD5 digest.</p> </li> </ul>
*/
inline Object& WithETag(const char* value) { SetETag(value); return *this;}
/**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline const Aws::Vector<ChecksumAlgorithm>& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(const Aws::Vector<ChecksumAlgorithm>& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(Aws::Vector<ChecksumAlgorithm>&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline Object& WithChecksumAlgorithm(const Aws::Vector<ChecksumAlgorithm>& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline Object& WithChecksumAlgorithm(Aws::Vector<ChecksumAlgorithm>&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline Object& AddChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm.push_back(value); return *this; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline Object& AddChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm.push_back(std::move(value)); return *this; }
+
+
+ /**
* <p>Size in bytes of the object</p>
*/
inline long long GetSize() const{ return m_size; }
@@ -335,22 +402,25 @@ namespace Model
private:
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
+
+ Aws::Vector<ChecksumAlgorithm> m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
long long m_size;
- bool m_sizeHasBeenSet;
+ bool m_sizeHasBeenSet = false;
ObjectStorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
Owner m_owner;
- bool m_ownerHasBeenSet;
+ bool m_ownerHasBeenSet = false;
};
} // namespace Model
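
Editor's note (not part of the upstream diff): the Object model above now carries a ChecksumAlgorithm list alongside the ETag. Below is a minimal sketch of reading it from a bucket listing, assuming a default-constructed S3Client and a placeholder bucket name; SDK setup (Aws::InitAPI) and error handling are elided.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListObjectsV2Request.h>

void InspectChecksumAlgorithms()
{
    Aws::S3::S3Client client;                              // default credentials/region
    Aws::S3::Model::ListObjectsV2Request request;
    request.SetBucket("example-bucket");                   // placeholder bucket name

    auto outcome = client.ListObjectsV2(request);
    if (!outcome.IsSuccess()) {
        return;                                            // error handling elided
    }
    for (const auto& object : outcome.GetResult().GetContents()) {
        // New in this update: the algorithm(s) used to checksum the object;
        // empty for objects uploaded without a checksum.
        const auto& algorithms = object.GetChecksumAlgorithm();
        (void)algorithms;
    }
}
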
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectAttributes.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectAttributes.h
new file mode 100644
index 0000000000..1fd094cba8
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectAttributes.h
@@ -0,0 +1,34 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+ enum class ObjectAttributes
+ {
+ NOT_SET,
+ ETag,
+ Checksum,
+ ObjectParts,
+ StorageClass,
+ ObjectSize
+ };
+
+namespace ObjectAttributesMapper
+{
+AWS_S3_API ObjectAttributes GetObjectAttributesForName(const Aws::String& name);
+
+AWS_S3_API Aws::String GetNameForObjectAttributes(ObjectAttributes value);
+} // namespace ObjectAttributesMapper
+} // namespace Model
+} // namespace S3
+} // namespace Aws
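
Editor's note (not part of the upstream diff): ObjectAttributes is the attribute selector used by the GetObjectAttributes operation, and the mapper namespace converts between the enum and its wire names. A small sketch using only the declarations in the new header above:

#include <aws/s3/model/ObjectAttributes.h>

void RoundTripAttributeName()
{
    using namespace Aws::S3::Model;
    // Map a wire name to the enum and back; "ObjectSize" is one of the
    // values listed in the enum above.
    ObjectAttributes attr = ObjectAttributesMapper::GetObjectAttributesForName("ObjectSize");
    Aws::String name = ObjectAttributesMapper::GetNameForObjectAttributes(attr);
    (void)name;  // expected to round-trip to "ObjectSize"
}
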
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectIdentifier.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectIdentifier.h
index 82e3c611da..26ab8081a9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectIdentifier.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectIdentifier.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectIdentifier">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ObjectIdentifier
+ class ObjectIdentifier
{
public:
- ObjectIdentifier();
- ObjectIdentifier(const Aws::Utils::Xml::XmlNode& xmlNode);
- ObjectIdentifier& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectIdentifier();
+ AWS_S3_API ObjectIdentifier(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectIdentifier& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -154,10 +154,10 @@ namespace Model
private:
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockConfiguration.h
index 441e475856..a17f87a820 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockConfiguration.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ObjectLockConfiguration
+ class ObjectLockConfiguration
{
public:
- ObjectLockConfiguration();
- ObjectLockConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- ObjectLockConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockConfiguration();
+ AWS_S3_API ObjectLockConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -139,10 +139,10 @@ namespace Model
private:
ObjectLockEnabled m_objectLockEnabled;
- bool m_objectLockEnabledHasBeenSet;
+ bool m_objectLockEnabledHasBeenSet = false;
ObjectLockRule m_rule;
- bool m_ruleHasBeenSet;
+ bool m_ruleHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockLegalHold.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockLegalHold.h
index fdeba38dda..5d2bb9039c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockLegalHold.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockLegalHold.h
@@ -23,54 +23,54 @@ namespace Model
{
/**
- * <p>A Legal Hold configuration for an object.</p><p><h3>See Also:</h3> <a
+ * <p>A legal hold configuration for an object.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockLegalHold">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ObjectLockLegalHold
+ class ObjectLockLegalHold
{
public:
- ObjectLockLegalHold();
- ObjectLockLegalHold(const Aws::Utils::Xml::XmlNode& xmlNode);
- ObjectLockLegalHold& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockLegalHold();
+ AWS_S3_API ObjectLockLegalHold(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockLegalHold& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p>Indicates whether the specified object has a Legal Hold in place.</p>
+ * <p>Indicates whether the specified object has a legal hold in place.</p>
*/
inline const ObjectLockLegalHoldStatus& GetStatus() const{ return m_status; }
/**
- * <p>Indicates whether the specified object has a Legal Hold in place.</p>
+ * <p>Indicates whether the specified object has a legal hold in place.</p>
*/
inline bool StatusHasBeenSet() const { return m_statusHasBeenSet; }
/**
- * <p>Indicates whether the specified object has a Legal Hold in place.</p>
+ * <p>Indicates whether the specified object has a legal hold in place.</p>
*/
inline void SetStatus(const ObjectLockLegalHoldStatus& value) { m_statusHasBeenSet = true; m_status = value; }
/**
- * <p>Indicates whether the specified object has a Legal Hold in place.</p>
+ * <p>Indicates whether the specified object has a legal hold in place.</p>
*/
inline void SetStatus(ObjectLockLegalHoldStatus&& value) { m_statusHasBeenSet = true; m_status = std::move(value); }
/**
- * <p>Indicates whether the specified object has a Legal Hold in place.</p>
+ * <p>Indicates whether the specified object has a legal hold in place.</p>
*/
inline ObjectLockLegalHold& WithStatus(const ObjectLockLegalHoldStatus& value) { SetStatus(value); return *this;}
/**
- * <p>Indicates whether the specified object has a Legal Hold in place.</p>
+ * <p>Indicates whether the specified object has a legal hold in place.</p>
*/
inline ObjectLockLegalHold& WithStatus(ObjectLockLegalHoldStatus&& value) { SetStatus(std::move(value)); return *this;}
private:
ObjectLockLegalHoldStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRetention.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRetention.h
index 26c60fdb78..caa01d58de 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRetention.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRetention.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockRetention">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ObjectLockRetention
+ class ObjectLockRetention
{
public:
- ObjectLockRetention();
- ObjectLockRetention(const Aws::Utils::Xml::XmlNode& xmlNode);
- ObjectLockRetention& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockRetention();
+ AWS_S3_API ObjectLockRetention(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockRetention& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -102,10 +102,10 @@ namespace Model
private:
ObjectLockRetentionMode m_mode;
- bool m_modeHasBeenSet;
+ bool m_modeHasBeenSet = false;
Aws::Utils::DateTime m_retainUntilDate;
- bool m_retainUntilDateHasBeenSet;
+ bool m_retainUntilDateHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRule.h
index e373cc3dba..0eda426212 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectLockRule.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectLockRule">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ObjectLockRule
+ class ObjectLockRule
{
public:
- ObjectLockRule();
- ObjectLockRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- ObjectLockRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockRule();
+ AWS_S3_API ObjectLockRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectLockRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -94,7 +94,7 @@ namespace Model
private:
DefaultRetention m_defaultRetention;
- bool m_defaultRetentionHasBeenSet;
+ bool m_defaultRetentionHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectOwnership.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectOwnership.h
index 06ba5f9daa..08957d956e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectOwnership.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectOwnership.h
@@ -17,7 +17,8 @@ namespace Model
{
NOT_SET,
BucketOwnerPreferred,
- ObjectWriter
+ ObjectWriter,
+ BucketOwnerEnforced
};
namespace ObjectOwnershipMapper
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectPart.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectPart.h
new file mode 100644
index 0000000000..cb913c6b43
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectPart.h
@@ -0,0 +1,433 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#pragma once
+#include <aws/s3/S3_EXPORTS.h>
+#include <aws/core/utils/memory/stl/AWSString.h>
+#include <utility>
+
+namespace Aws
+{
+namespace Utils
+{
+namespace Xml
+{
+ class XmlNode;
+} // namespace Xml
+} // namespace Utils
+namespace S3
+{
+namespace Model
+{
+
+ /**
+ * <p>A container for elements related to an individual part.</p><p><h3>See
+ * Also:</h3> <a
+ * href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectPart">AWS API
+ * Reference</a></p>
+ */
+ class ObjectPart
+ {
+ public:
+ AWS_S3_API ObjectPart();
+ AWS_S3_API ObjectPart(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectPart& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+
+
+ /**
+ * <p>The part number identifying the part. This value is a positive integer
+ * between 1 and 10,000.</p>
+ */
+ inline int GetPartNumber() const{ return m_partNumber; }
+
+ /**
+ * <p>The part number identifying the part. This value is a positive integer
+ * between 1 and 10,000.</p>
+ */
+ inline bool PartNumberHasBeenSet() const { return m_partNumberHasBeenSet; }
+
+ /**
+ * <p>The part number identifying the part. This value is a positive integer
+ * between 1 and 10,000.</p>
+ */
+ inline void SetPartNumber(int value) { m_partNumberHasBeenSet = true; m_partNumber = value; }
+
+ /**
+ * <p>The part number identifying the part. This value is a positive integer
+ * between 1 and 10,000.</p>
+ */
+ inline ObjectPart& WithPartNumber(int value) { SetPartNumber(value); return *this;}
+
+
+ /**
+ * <p>The size of the uploaded part in bytes.</p>
+ */
+ inline long long GetSize() const{ return m_size; }
+
+ /**
+ * <p>The size of the uploaded part in bytes.</p>
+ */
+ inline bool SizeHasBeenSet() const { return m_sizeHasBeenSet; }
+
+ /**
+ * <p>The size of the uploaded part in bytes.</p>
+ */
+ inline void SetSize(long long value) { m_sizeHasBeenSet = true; m_size = value; }
+
+ /**
+ * <p>The size of the uploaded part in bytes.</p>
+ */
+ inline ObjectPart& WithSize(long long value) { SetSize(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline ObjectPart& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+ private:
+
+ int m_partNumber;
+ bool m_partNumberHasBeenSet = false;
+
+ long long m_size;
+ bool m_sizeHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
+ };
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
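
Editor's note (not part of the upstream diff): ObjectPart values are normally produced by the SDK when deserializing GetObjectAttributes responses. The sketch below only exercises the fluent accessors declared in the new header above, with dummy values.

#include <aws/s3/model/ObjectPart.h>

Aws::S3::Model::ObjectPart MakeExamplePart()
{
    Aws::S3::Model::ObjectPart part;
    part.WithPartNumber(1)                                  // valid range is 1 to 10,000
        .WithSize(5LL * 1024 * 1024)                        // part size in bytes (5 MiB here)
        .WithChecksumSHA256("placeholderBase64Digest==");   // base64-encoded digest (dummy)
    return part;
}
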
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectStorageClass.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectStorageClass.h
index 814e5de398..6f2d4dacc7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectStorageClass.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectStorageClass.h
@@ -23,7 +23,8 @@ namespace Model
ONEZONE_IA,
INTELLIGENT_TIERING,
DEEP_ARCHIVE,
- OUTPOSTS
+ OUTPOSTS,
+ GLACIER_IR
};
namespace ObjectStorageClassMapper
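
Editor's note (not part of the upstream diff): GLACIER_IR (S3 Glacier Instant Retrieval) is added to the storage class enum. A hedged sketch, assuming the usual GetObjectStorageClassForName helper generated in the mapper namespace shown above:

#include <aws/s3/model/ObjectStorageClass.h>

bool IsGlacierInstantRetrieval(const Aws::String& wireName)
{
    using namespace Aws::S3::Model;
    // "GLACIER_IR" is the wire name corresponding to the new enum value.
    return ObjectStorageClassMapper::GetObjectStorageClassForName(wireName)
        == ObjectStorageClass::GLACIER_IR;
}
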
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectVersion.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectVersion.h
index d084d24b1e..47ddbe008d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectVersion.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ObjectVersion.h
@@ -6,9 +6,11 @@
#pragma once
#include <aws/s3/S3_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/core/utils/memory/stl/AWSVector.h>
#include <aws/s3/model/ObjectVersionStorageClass.h>
#include <aws/core/utils/DateTime.h>
#include <aws/s3/model/Owner.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <utility>
namespace Aws
@@ -30,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ObjectVersion">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ObjectVersion
+ class ObjectVersion
{
public:
- ObjectVersion();
- ObjectVersion(const Aws::Utils::Xml::XmlNode& xmlNode);
- ObjectVersion& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectVersion();
+ AWS_S3_API ObjectVersion(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ObjectVersion& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -82,6 +84,47 @@ namespace Model
/**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline const Aws::Vector<ChecksumAlgorithm>& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(const Aws::Vector<ChecksumAlgorithm>& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline void SetChecksumAlgorithm(Aws::Vector<ChecksumAlgorithm>&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline ObjectVersion& WithChecksumAlgorithm(const Aws::Vector<ChecksumAlgorithm>& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline ObjectVersion& WithChecksumAlgorithm(Aws::Vector<ChecksumAlgorithm>&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline ObjectVersion& AddChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm.push_back(value); return *this; }
+
+ /**
+ * <p>The algorithm that was used to create a checksum of the object.</p>
+ */
+ inline ObjectVersion& AddChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm.push_back(std::move(value)); return *this; }
+
+
+ /**
* <p>Size in bytes of the object.</p>
*/
inline long long GetSize() const{ return m_size; }
@@ -304,28 +347,31 @@ namespace Model
private:
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
+
+ Aws::Vector<ChecksumAlgorithm> m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
long long m_size;
- bool m_sizeHasBeenSet;
+ bool m_sizeHasBeenSet = false;
ObjectVersionStorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
bool m_isLatest;
- bool m_isLatestHasBeenSet;
+ bool m_isLatestHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
Owner m_owner;
- bool m_ownerHasBeenSet;
+ bool m_ownerHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputLocation.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputLocation.h
index 4240773137..a716b1b0d0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputLocation.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputLocation.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputLocation">AWS
* API Reference</a></p>
*/
- class AWS_S3_API OutputLocation
+ class OutputLocation
{
public:
- OutputLocation();
- OutputLocation(const Aws::Utils::Xml::XmlNode& xmlNode);
- OutputLocation& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OutputLocation();
+ AWS_S3_API OutputLocation(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OutputLocation& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -77,7 +77,7 @@ namespace Model
private:
S3Location m_s3;
- bool m_s3HasBeenSet;
+ bool m_s3HasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputSerialization.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputSerialization.h
index 92331d902c..e5af3e64fc 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputSerialization.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OutputSerialization.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OutputSerialization">AWS
* API Reference</a></p>
*/
- class AWS_S3_API OutputSerialization
+ class OutputSerialization
{
public:
- OutputSerialization();
- OutputSerialization(const Aws::Utils::Xml::XmlNode& xmlNode);
- OutputSerialization& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OutputSerialization();
+ AWS_S3_API OutputSerialization(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OutputSerialization& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -103,10 +103,10 @@ namespace Model
private:
CSVOutput m_cSV;
- bool m_cSVHasBeenSet;
+ bool m_cSVHasBeenSet = false;
JSONOutput m_jSON;
- bool m_jSONHasBeenSet;
+ bool m_jSONHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Owner.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Owner.h
index 4f3f5d6aaa..8dd45ff6d1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Owner.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Owner.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Owner">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Owner
+ class Owner
{
public:
- Owner();
- Owner(const Aws::Utils::Xml::XmlNode& xmlNode);
- Owner& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Owner();
+ AWS_S3_API Owner(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Owner& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -121,10 +121,10 @@ namespace Model
private:
Aws::String m_displayName;
- bool m_displayNameHasBeenSet;
+ bool m_displayNameHasBeenSet = false;
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControls.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControls.h
index a0d1a3542f..0585d8db91 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControls.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControls.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OwnershipControls">AWS
* API Reference</a></p>
*/
- class AWS_S3_API OwnershipControls
+ class OwnershipControls
{
public:
- OwnershipControls();
- OwnershipControls(const Aws::Utils::Xml::XmlNode& xmlNode);
- OwnershipControls& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OwnershipControls();
+ AWS_S3_API OwnershipControls(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OwnershipControls& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -82,7 +82,7 @@ namespace Model
private:
Aws::Vector<OwnershipControlsRule> m_rules;
- bool m_rulesHasBeenSet;
+ bool m_rulesHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControlsRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControlsRule.h
index 2e512c810c..bd11bbe5f1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControlsRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/OwnershipControlsRule.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/OwnershipControlsRule">AWS
* API Reference</a></p>
*/
- class AWS_S3_API OwnershipControlsRule
+ class OwnershipControlsRule
{
public:
- OwnershipControlsRule();
- OwnershipControlsRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- OwnershipControlsRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OwnershipControlsRule();
+ AWS_S3_API OwnershipControlsRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API OwnershipControlsRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -59,7 +59,7 @@ namespace Model
private:
ObjectOwnership m_objectOwnership;
- bool m_objectOwnershipHasBeenSet;
+ bool m_objectOwnershipHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ParquetInput.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ParquetInput.h
index 3bc15a8481..8cc0df422b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ParquetInput.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ParquetInput.h
@@ -25,14 +25,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ParquetInput">AWS API
* Reference</a></p>
*/
- class AWS_S3_API ParquetInput
+ class ParquetInput
{
public:
- ParquetInput();
- ParquetInput(const Aws::Utils::Xml::XmlNode& xmlNode);
- ParquetInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ParquetInput();
+ AWS_S3_API ParquetInput(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ParquetInput& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
};
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Part.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Part.h
index 6b83d534f1..b3a96fc351 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Part.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Part.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Part">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Part
+ class Part
{
public:
- Part();
- Part(const Aws::Utils::Xml::XmlNode& xmlNode);
- Part& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Part();
+ AWS_S3_API Part(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Part& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -155,19 +155,355 @@ namespace Model
*/
inline Part& WithSize(long long value) { SetSize(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline Part& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
private:
int m_partNumber;
- bool m_partNumberHasBeenSet;
+ bool m_partNumberHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
long long m_size;
- bool m_sizeHasBeenSet;
+ bool m_sizeHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
};
} // namespace Model
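
For reference, the checksum accessors added to Part above follow the SDK's usual Get/Set/With plus HasBeenSet pattern. A minimal sketch that exercises only the accessors visible in this header; the base64 strings are placeholder sample values, not digests of any real object:

    #include <aws/s3/model/Part.h>
    #include <iostream>

    int main() {
        Aws::S3::Model::Part part;
        // Each With* setter marks the matching *HasBeenSet flag and returns *this,
        // so the calls can be chained.
        part.WithChecksumCRC32("9zEknA==")
            .WithChecksumCRC32C("yZRlqg==")
            .WithChecksumSHA1("L9ThxnotKPzthJ7hu3bnORuT6xI=")
            .WithChecksumSHA256("47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=");

        if (part.ChecksumSHA256HasBeenSet()) {
            std::cout << "SHA-256: " << part.GetChecksumSHA256() << std::endl;
        }
        return 0;
    }

In real use these fields arrive already populated on Part objects returned by the service (for example in a ListParts result); setting them by hand here is only to exercise the new accessors.
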
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PolicyStatus.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PolicyStatus.h
index cd0cfd393b..bf90bc0db7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PolicyStatus.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PolicyStatus.h
@@ -25,14 +25,14 @@ namespace Model
* <a href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PolicyStatus">AWS
* API Reference</a></p>
*/
- class AWS_S3_API PolicyStatus
+ class PolicyStatus
{
public:
- PolicyStatus();
- PolicyStatus(const Aws::Utils::Xml::XmlNode& xmlNode);
- PolicyStatus& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API PolicyStatus();
+ AWS_S3_API PolicyStatus(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API PolicyStatus& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -66,7 +66,7 @@ namespace Model
private:
bool m_isPublic;
- bool m_isPublicHasBeenSet;
+ bool m_isPublicHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Progress.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Progress.h
index a188e5b9c7..8279bf50bd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Progress.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Progress.h
@@ -26,14 +26,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Progress">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Progress
+ class Progress
{
public:
- Progress();
- Progress(const Aws::Utils::Xml::XmlNode& xmlNode);
- Progress& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Progress();
+ AWS_S3_API Progress(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Progress& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -101,13 +101,13 @@ namespace Model
private:
long long m_bytesScanned;
- bool m_bytesScannedHasBeenSet;
+ bool m_bytesScannedHasBeenSet = false;
long long m_bytesProcessed;
- bool m_bytesProcessedHasBeenSet;
+ bool m_bytesProcessedHasBeenSet = false;
long long m_bytesReturned;
- bool m_bytesReturnedHasBeenSet;
+ bool m_bytesReturnedHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ProgressEvent.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ProgressEvent.h
index 9b3b8edf85..c7a317a260 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ProgressEvent.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ProgressEvent.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ProgressEvent">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ProgressEvent
+ class ProgressEvent
{
public:
- ProgressEvent();
- ProgressEvent(const Aws::Utils::Xml::XmlNode& xmlNode);
- ProgressEvent& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ProgressEvent();
+ AWS_S3_API ProgressEvent(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ProgressEvent& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -71,7 +71,7 @@ namespace Model
private:
Progress m_details;
- bool m_detailsHasBeenSet;
+ bool m_detailsHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PublicAccessBlockConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PublicAccessBlockConfiguration.h
index b78baafaf4..b22aba130d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PublicAccessBlockConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PublicAccessBlockConfiguration.h
@@ -30,21 +30,21 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/PublicAccessBlockConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API PublicAccessBlockConfiguration
+ class PublicAccessBlockConfiguration
{
public:
- PublicAccessBlockConfiguration();
- PublicAccessBlockConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- PublicAccessBlockConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API PublicAccessBlockConfiguration();
+ AWS_S3_API PublicAccessBlockConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API PublicAccessBlockConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p>Specifies whether Amazon S3 should block public access control lists (ACLs)
* for this bucket and objects in this bucket. Setting this element to
- * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket acl
- * and PUT Object acl calls fail if the specified ACL is public.</p> </li> <li>
+ * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket ACL
+ * and PUT Object ACL calls fail if the specified ACL is public.</p> </li> <li>
* <p>PUT Object calls fail if the request includes a public ACL.</p> </li> <li>
* <p>PUT Bucket calls fail if the request includes a public ACL.</p> </li> </ul>
* <p>Enabling this setting doesn't affect existing policies or ACLs.</p>
@@ -54,8 +54,8 @@ namespace Model
/**
* <p>Specifies whether Amazon S3 should block public access control lists (ACLs)
* for this bucket and objects in this bucket. Setting this element to
- * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket acl
- * and PUT Object acl calls fail if the specified ACL is public.</p> </li> <li>
+ * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket ACL
+ * and PUT Object ACL calls fail if the specified ACL is public.</p> </li> <li>
* <p>PUT Object calls fail if the request includes a public ACL.</p> </li> <li>
* <p>PUT Bucket calls fail if the request includes a public ACL.</p> </li> </ul>
* <p>Enabling this setting doesn't affect existing policies or ACLs.</p>
@@ -65,8 +65,8 @@ namespace Model
/**
* <p>Specifies whether Amazon S3 should block public access control lists (ACLs)
* for this bucket and objects in this bucket. Setting this element to
- * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket acl
- * and PUT Object acl calls fail if the specified ACL is public.</p> </li> <li>
+ * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket ACL
+ * and PUT Object ACL calls fail if the specified ACL is public.</p> </li> <li>
* <p>PUT Object calls fail if the request includes a public ACL.</p> </li> <li>
* <p>PUT Bucket calls fail if the request includes a public ACL.</p> </li> </ul>
* <p>Enabling this setting doesn't affect existing policies or ACLs.</p>
@@ -76,8 +76,8 @@ namespace Model
/**
* <p>Specifies whether Amazon S3 should block public access control lists (ACLs)
* for this bucket and objects in this bucket. Setting this element to
- * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket acl
- * and PUT Object acl calls fail if the specified ACL is public.</p> </li> <li>
+ * <code>TRUE</code> causes the following behavior:</p> <ul> <li> <p>PUT Bucket ACL
+ * and PUT Object ACL calls fail if the specified ACL is public.</p> </li> <li>
* <p>PUT Object calls fail if the request includes a public ACL.</p> </li> <li>
* <p>PUT Bucket calls fail if the request includes a public ACL.</p> </li> </ul>
* <p>Enabling this setting doesn't affect existing policies or ACLs.</p>
@@ -158,60 +158,60 @@ namespace Model
/**
* <p>Specifies whether Amazon S3 should restrict public bucket policies for this
* bucket. Setting this element to <code>TRUE</code> restricts access to this
- * bucket to only AWS service principals and authorized users within this account
- * if the bucket has a public policy.</p> <p>Enabling this setting doesn't affect
- * previously stored bucket policies, except that public and cross-account access
- * within any public bucket policy, including non-public delegation to specific
- * accounts, is blocked.</p>
+ * bucket to only Amazon Web Service principals and authorized users within this
+ * account if the bucket has a public policy.</p> <p>Enabling this setting doesn't
+ * affect previously stored bucket policies, except that public and cross-account
+ * access within any public bucket policy, including non-public delegation to
+ * specific accounts, is blocked.</p>
*/
inline bool GetRestrictPublicBuckets() const{ return m_restrictPublicBuckets; }
/**
* <p>Specifies whether Amazon S3 should restrict public bucket policies for this
* bucket. Setting this element to <code>TRUE</code> restricts access to this
- * bucket to only AWS service principals and authorized users within this account
- * if the bucket has a public policy.</p> <p>Enabling this setting doesn't affect
- * previously stored bucket policies, except that public and cross-account access
- * within any public bucket policy, including non-public delegation to specific
- * accounts, is blocked.</p>
+ * bucket to only Amazon Web Service principals and authorized users within this
+ * account if the bucket has a public policy.</p> <p>Enabling this setting doesn't
+ * affect previously stored bucket policies, except that public and cross-account
+ * access within any public bucket policy, including non-public delegation to
+ * specific accounts, is blocked.</p>
*/
inline bool RestrictPublicBucketsHasBeenSet() const { return m_restrictPublicBucketsHasBeenSet; }
/**
* <p>Specifies whether Amazon S3 should restrict public bucket policies for this
* bucket. Setting this element to <code>TRUE</code> restricts access to this
- * bucket to only AWS service principals and authorized users within this account
- * if the bucket has a public policy.</p> <p>Enabling this setting doesn't affect
- * previously stored bucket policies, except that public and cross-account access
- * within any public bucket policy, including non-public delegation to specific
- * accounts, is blocked.</p>
+ * bucket to only Amazon Web Service principals and authorized users within this
+ * account if the bucket has a public policy.</p> <p>Enabling this setting doesn't
+ * affect previously stored bucket policies, except that public and cross-account
+ * access within any public bucket policy, including non-public delegation to
+ * specific accounts, is blocked.</p>
*/
inline void SetRestrictPublicBuckets(bool value) { m_restrictPublicBucketsHasBeenSet = true; m_restrictPublicBuckets = value; }
/**
* <p>Specifies whether Amazon S3 should restrict public bucket policies for this
* bucket. Setting this element to <code>TRUE</code> restricts access to this
- * bucket to only AWS service principals and authorized users within this account
- * if the bucket has a public policy.</p> <p>Enabling this setting doesn't affect
- * previously stored bucket policies, except that public and cross-account access
- * within any public bucket policy, including non-public delegation to specific
- * accounts, is blocked.</p>
+ * bucket to only Amazon Web Service principals and authorized users within this
+ * account if the bucket has a public policy.</p> <p>Enabling this setting doesn't
+ * affect previously stored bucket policies, except that public and cross-account
+ * access within any public bucket policy, including non-public delegation to
+ * specific accounts, is blocked.</p>
*/
inline PublicAccessBlockConfiguration& WithRestrictPublicBuckets(bool value) { SetRestrictPublicBuckets(value); return *this;}
private:
bool m_blockPublicAcls;
- bool m_blockPublicAclsHasBeenSet;
+ bool m_blockPublicAclsHasBeenSet = false;
bool m_ignorePublicAcls;
- bool m_ignorePublicAclsHasBeenSet;
+ bool m_ignorePublicAclsHasBeenSet = false;
bool m_blockPublicPolicy;
- bool m_blockPublicPolicyHasBeenSet;
+ bool m_blockPublicPolicyHasBeenSet = false;
bool m_restrictPublicBuckets;
- bool m_restrictPublicBucketsHasBeenSet;
+ bool m_restrictPublicBucketsHasBeenSet = false;
};
} // namespace Model
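
Taken together, the four flags documented above make up the bucket-level public access block. A minimal sketch of building the configuration with every restriction enabled; only WithRestrictPublicBuckets appears in this hunk, and the other three setters are assumed to follow the same generated Set/With pattern:

    #include <aws/s3/model/PublicAccessBlockConfiguration.h>

    Aws::S3::Model::PublicAccessBlockConfiguration MakeStrictPublicAccessBlock() {
        Aws::S3::Model::PublicAccessBlockConfiguration config;
        config.WithBlockPublicAcls(true)        // reject PUT requests that carry a public ACL (assumed setter)
              .WithIgnorePublicAcls(true)       // ignore public ACLs already on the bucket or its objects (assumed setter)
              .WithBlockPublicPolicy(true)      // reject public bucket policies (assumed setter)
              .WithRestrictPublicBuckets(true); // limit an existing public policy to service principals
        return config;
    }

In practice the configuration would be attached to a PutPublicAccessBlock request; that request type is outside this hunk.
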
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAccelerateConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAccelerateConfigurationRequest.h
index f2c3b36790..e880387fd6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAccelerateConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAccelerateConfigurationRequest.h
@@ -8,6 +8,7 @@
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/AccelerateConfiguration.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketAccelerateConfigurationRequest : public S3Request
+ class PutBucketAccelerateConfigurationRequest : public S3Request
{
public:
- PutBucketAccelerateConfigurationRequest();
+ AWS_S3_API PutBucketAccelerateConfigurationRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +36,18 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketAccelerateConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which the accelerate configuration is set.</p>
@@ -116,61 +123,146 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAccelerateConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAccelerateConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAccelerateConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketAccelerateConfigurationRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketAccelerateConfigurationRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -213,16 +305,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
AccelerateConfiguration m_accelerateConfiguration;
- bool m_accelerateConfigurationHasBeenSet;
+ bool m_accelerateConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
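
The new ChecksumAlgorithm member described above selects which checksum the SDK computes for the request payload. A minimal sketch, assuming the usual generated WithBucket and WithAccelerateConfiguration setters and the BucketAccelerateStatus enum, none of which appear in this hunk; the bucket name and account ID are placeholders:

    #include <aws/s3/model/PutBucketAccelerateConfigurationRequest.h>
    #include <aws/s3/model/AccelerateConfiguration.h>
    #include <aws/s3/model/BucketAccelerateStatus.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>

    Aws::S3::Model::PutBucketAccelerateConfigurationRequest BuildAccelerateRequest() {
        Aws::S3::Model::PutBucketAccelerateConfigurationRequest request;
        request.WithBucket("example-bucket")                      // assumed setter, placeholder bucket
               .WithAccelerateConfiguration(                      // assumed setter
                   Aws::S3::Model::AccelerateConfiguration().WithStatus(
                       Aws::S3::Model::BucketAccelerateStatus::Enabled))
               .WithChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32)
               .WithExpectedBucketOwner("111122223333");          // placeholder account ID
        return request;
    }
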
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAclRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAclRequest.h
index 86c5cc7d51..5d7dacb7b2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAclRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAclRequest.h
@@ -9,6 +9,7 @@
#include <aws/s3/model/BucketCannedACL.h>
#include <aws/s3/model/AccessControlPolicy.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketAclRequest : public S3Request
+ class PutBucketAclRequest : public S3Request
{
public:
- PutBucketAclRequest();
+ AWS_S3_API PutBucketAclRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,14 +37,18 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketAcl"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The canned ACL to apply to the bucket.</p>
@@ -159,8 +164,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -169,8 +174,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -179,8 +184,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -189,8 +194,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -199,8 +204,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -209,8 +214,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketAclRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -219,8 +224,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketAclRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -229,13 +234,98 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketAclRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketAclRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketAclRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Allows grantee the read, write, read ACP, and write ACP permissions on the
* bucket.</p>
*/
@@ -367,42 +457,58 @@ namespace Model
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline const Aws::String& GetGrantWrite() const{ return m_grantWrite; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline bool GrantWriteHasBeenSet() const { return m_grantWriteHasBeenSet; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(const Aws::String& value) { m_grantWriteHasBeenSet = true; m_grantWrite = value; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(Aws::String&& value) { m_grantWriteHasBeenSet = true; m_grantWrite = std::move(value); }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(const char* value) { m_grantWriteHasBeenSet = true; m_grantWrite.assign(value); }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline PutBucketAclRequest& WithGrantWrite(const Aws::String& value) { SetGrantWrite(value); return *this;}
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline PutBucketAclRequest& WithGrantWrite(Aws::String&& value) { SetGrantWrite(std::move(value)); return *this;}
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline PutBucketAclRequest& WithGrantWrite(const char* value) { SetGrantWrite(value); return *this;}
@@ -450,57 +556,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAclRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAclRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAclRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -547,37 +653,40 @@ namespace Model
private:
BucketCannedACL m_aCL;
- bool m_aCLHasBeenSet;
+ bool m_aCLHasBeenSet = false;
AccessControlPolicy m_accessControlPolicy;
- bool m_accessControlPolicyHasBeenSet;
+ bool m_accessControlPolicyHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_grantFullControl;
- bool m_grantFullControlHasBeenSet;
+ bool m_grantFullControlHasBeenSet = false;
Aws::String m_grantRead;
- bool m_grantReadHasBeenSet;
+ bool m_grantReadHasBeenSet = false;
Aws::String m_grantReadACP;
- bool m_grantReadACPHasBeenSet;
+ bool m_grantReadACPHasBeenSet = false;
Aws::String m_grantWrite;
- bool m_grantWriteHasBeenSet;
+ bool m_grantWriteHasBeenSet = false;
Aws::String m_grantWriteACP;
- bool m_grantWriteACPHasBeenSet;
+ bool m_grantWriteACPHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
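
The grant headers and the new ChecksumAlgorithm member above combine into a single ACL update request. A minimal sketch using an explicit grant header rather than a canned ACL (the S3 API accepts one or the other, not both); WithBucket is an assumed setter outside this hunk, and ContentMD5 is left unset because, per the comment above, the SDK computes it automatically:

    #include <aws/s3/model/PutBucketAclRequest.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>

    Aws::S3::Model::PutBucketAclRequest BuildAclRequest() {
        Aws::S3::Model::PutBucketAclRequest request;
        request.WithBucket("example-bucket")                                              // assumed setter, placeholder bucket
               .WithGrantWrite("uri=\"http://acs.amazonaws.com/groups/s3/LogDelivery\"")  // write access for the log delivery group
               .WithChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256)
               .WithExpectedBucketOwner("111122223333");                                  // placeholder account ID
        return request;
    }
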
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAnalyticsConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAnalyticsConfigurationRequest.h
index 72baf8a3ad..4403269bfe 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAnalyticsConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketAnalyticsConfigurationRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketAnalyticsConfigurationRequest : public S3Request
+ class PutBucketAnalyticsConfigurationRequest : public S3Request
{
public:
- PutBucketAnalyticsConfigurationRequest();
+ AWS_S3_API PutBucketAnalyticsConfigurationRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketAnalyticsConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket to which an analytics configuration is stored.</p>
@@ -157,57 +161,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketAnalyticsConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -254,19 +258,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
AnalyticsConfiguration m_analyticsConfiguration;
- bool m_analyticsConfigurationHasBeenSet;
+ bool m_analyticsConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
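
Note: the private-member hunks in this and the surrounding files replace constructor-initialized HasBeenSet flags with C++11 in-class default member initializers (bool m_bucketHasBeenSet = false;). A hand-written analogue, not SDK code, of what that buys for generated types with many optional fields:

    // Before: every flag has to be listed in the constructor's init list,
    // and a newly added member that is forgotten there starts out indeterminate.
    struct RequestOld {
        RequestOld() : m_bucketHasBeenSet(false) {}
        bool m_bucketHasBeenSet;
        bool m_idHasBeenSet; // oops: left uninitialized because it was omitted above
    };

    // After: the default lives next to the declaration, so adding a member can
    // never leave its flag uninitialized and the default constructor stays trivial.
    struct RequestNew {
        bool m_bucketHasBeenSet = false;
        bool m_idHasBeenSet = false;
    };
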
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketCorsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketCorsRequest.h
index fd468b39c0..bf7b752844 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketCorsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketCorsRequest.h
@@ -8,6 +8,7 @@
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/CORSConfiguration.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketCorsRequest : public S3Request
+ class PutBucketCorsRequest : public S3Request
{
public:
- PutBucketCorsRequest();
+ AWS_S3_API PutBucketCorsRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketCors"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
 * <p>Specifies the bucket impacted by the <code>cors</code> configuration.</p>
@@ -139,8 +144,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -149,8 +154,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -159,8 +164,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -169,8 +174,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -179,8 +184,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -189,8 +194,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketCorsRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -199,8 +204,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketCorsRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -209,65 +214,150 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.</a> </p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketCorsRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketCorsRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketCorsRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketCorsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketCorsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketCorsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -314,19 +404,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
CORSConfiguration m_cORSConfiguration;
- bool m_cORSConfigurationHasBeenSet;
+ bool m_cORSConfigurationHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
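
Note: PutBucketCorsRequest drops ShouldComputeContentMd5() in favour of GetChecksumAlgorithmName() plus the new ChecksumAlgorithm field documented above. A short sketch of opting into a specific checksum, assuming the generated accessors AddCORSRules, AddAllowedMethods and AddAllowedOrigins and a placeholder bucket name:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketCorsRequest.h>
    #include <aws/s3/model/CORSConfiguration.h>
    #include <aws/s3/model/CORSRule.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            // One permissive rule: allow PUT from any origin.
            Aws::S3::Model::CORSRule rule;
            rule.AddAllowedMethods("PUT");
            rule.AddAllowedOrigins("*");

            Aws::S3::Model::CORSConfiguration configuration;
            configuration.AddCORSRules(rule);

            Aws::S3::Model::PutBucketCorsRequest request;
            request.SetBucket("example-bucket"); // placeholder
            request.SetCORSConfiguration(configuration);
            // New request-level knob in this SDK revision: choose which payload
            // checksum the SDK computes for the request.
            request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);

            auto outcome = client.PutBucketCors(request);
            (void)outcome; // inspect outcome.IsSuccess() / outcome.GetError() in real code
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
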
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketEncryptionRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketEncryptionRequest.h
index 312c50e251..b56a8bde6f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketEncryptionRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketEncryptionRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/ServerSideEncryptionConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketEncryptionRequest : public S3Request
+ class PutBucketEncryptionRequest : public S3Request
{
public:
- PutBucketEncryptionRequest();
+ AWS_S3_API PutBucketEncryptionRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,20 +36,23 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketEncryption"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -56,9 +60,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -66,9 +69,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -76,9 +78,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -86,9 +87,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -96,9 +96,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -106,9 +105,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -116,9 +114,8 @@ namespace Model
/**
* <p>Specifies default encryption for a bucket using server-side encryption with
- * Amazon S3-managed keys (SSE-S3) or customer master keys stored in AWS KMS
- * (SSE-KMS). For information about the Amazon S3 default encryption feature, see
- * <a
+ * Amazon S3-managed keys (SSE-S3) or customer managed keys (SSE-KMS). For
+ * information about the Amazon S3 default encryption feature, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html">Amazon
* S3 Default Bucket Encryption</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -127,61 +124,154 @@ namespace Model
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline PutBucketEncryptionRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline PutBucketEncryptionRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
* <p>The base64-encoded 128-bit MD5 digest of the server-side encryption
- * configuration.</p> <p>For requests made using the AWS Command Line Interface
- * (CLI) or AWS SDKs, this field is calculated automatically.</p>
+ * configuration.</p> <p>For requests made using the Amazon Web Services Command
+ * Line Interface (CLI) or Amazon Web Services SDKs, this field is calculated
+ * automatically.</p>
*/
inline PutBucketEncryptionRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketEncryptionRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketEncryptionRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
inline const ServerSideEncryptionConfiguration& GetServerSideEncryptionConfiguration() const{ return m_serverSideEncryptionConfiguration; }
@@ -203,57 +293,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketEncryptionRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketEncryptionRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketEncryptionRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -300,19 +390,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
ServerSideEncryptionConfiguration m_serverSideEncryptionConfiguration;
- bool m_serverSideEncryptionConfigurationHasBeenSet;
+ bool m_serverSideEncryptionConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
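
Note: the PutBucketEncryptionRequest doc changes above only reword the SSE-KMS description ("customer managed keys"); the API surface is unchanged. A minimal sketch of setting SSE-S3 default encryption, assuming the generated ServerSideEncryptionConfiguration, ServerSideEncryptionRule and ServerSideEncryptionByDefault accessors and a placeholder bucket name:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketEncryptionRequest.h>
    #include <aws/s3/model/ServerSideEncryptionConfiguration.h>
    #include <aws/s3/model/ServerSideEncryptionRule.h>
    #include <aws/s3/model/ServerSideEncryptionByDefault.h>

    int main() {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            // Default every new object to SSE-S3 (AES256);
            // switch to aws_kms plus a key ID for SSE-KMS.
            Aws::S3::Model::ServerSideEncryptionByDefault byDefault;
            byDefault.SetSSEAlgorithm(Aws::S3::Model::ServerSideEncryption::AES256);

            Aws::S3::Model::ServerSideEncryptionRule sseRule;
            sseRule.SetApplyServerSideEncryptionByDefault(byDefault);

            Aws::S3::Model::ServerSideEncryptionConfiguration sseConfig;
            sseConfig.AddRules(sseRule);

            Aws::S3::Model::PutBucketEncryptionRequest request;
            request.SetBucket("example-bucket"); // placeholder
            request.SetServerSideEncryptionConfiguration(sseConfig);

            auto outcome = client.PutBucketEncryption(request);
            (void)outcome; // check outcome.IsSuccess() in real code
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
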
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h
index 9055e5c1d8..91e9d79382 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketIntelligentTieringConfigurationRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketIntelligentTieringConfigurationRequest : public S3Request
+ class PutBucketIntelligentTieringConfigurationRequest : public S3Request
{
public:
- PutBucketIntelligentTieringConfigurationRequest();
+ AWS_S3_API PutBucketIntelligentTieringConfigurationRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,10 +35,14 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketIntelligentTieringConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose configuration you want to modify or
@@ -203,16 +207,16 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
IntelligentTieringConfiguration m_intelligentTieringConfiguration;
- bool m_intelligentTieringConfigurationHasBeenSet;
+ bool m_intelligentTieringConfigurationHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketInventoryConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketInventoryConfigurationRequest.h
index 88997a4a66..e4b342626d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketInventoryConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketInventoryConfigurationRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketInventoryConfigurationRequest : public S3Request
+ class PutBucketInventoryConfigurationRequest : public S3Request
{
public:
- PutBucketInventoryConfigurationRequest();
+ AWS_S3_API PutBucketInventoryConfigurationRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketInventoryConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket where the inventory configuration will be stored.</p>
@@ -157,57 +161,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketInventoryConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketInventoryConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketInventoryConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -254,19 +258,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
InventoryConfiguration m_inventoryConfiguration;
- bool m_inventoryConfigurationHasBeenSet;
+ bool m_inventoryConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLifecycleConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLifecycleConfigurationRequest.h
index 177b0434b4..2f08cb2e04 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLifecycleConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLifecycleConfigurationRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/BucketLifecycleConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketLifecycleConfigurationRequest : public S3Request
+ class PutBucketLifecycleConfigurationRequest : public S3Request
{
public:
- PutBucketLifecycleConfigurationRequest();
+ AWS_S3_API PutBucketLifecycleConfigurationRequest();
// Service request name is the Operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketLifecycleConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to set the configuration.</p>
@@ -86,6 +91,91 @@ namespace Model
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketLifecycleConfigurationRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketLifecycleConfigurationRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Container for lifecycle rules. You can add as many as 1,000 rules.</p>
*/
inline const BucketLifecycleConfiguration& GetLifecycleConfiguration() const{ return m_lifecycleConfiguration; }
@@ -118,57 +208,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketLifecycleConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketLifecycleConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketLifecycleConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -215,16 +305,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
BucketLifecycleConfiguration m_lifecycleConfiguration;
- bool m_lifecycleConfigurationHasBeenSet;
+ bool m_lifecycleConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
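
For orientation, a minimal caller-side sketch of PutBucketLifecycleConfigurationRequest after this change, showing the new ChecksumAlgorithm field next to the existing ExpectedBucketOwner guard. The bucket name, account ID, rule ID, and prefix are placeholders, the client is assumed to pick up credentials and region from the default provider chain, and this is an illustration rather than code from the SDK or from this patch.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketLifecycleConfigurationRequest.h>
#include <aws/s3/model/BucketLifecycleConfiguration.h>
#include <aws/s3/model/LifecycleRule.h>
#include <aws/s3/model/LifecycleRuleFilter.h>
#include <aws/s3/model/LifecycleExpiration.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client s3;

        // Single rule: expire objects under the "logs/" prefix after 30 days.
        Aws::S3::Model::LifecycleRule rule;
        rule.SetID("expire-logs");
        rule.SetStatus(Aws::S3::Model::ExpirationStatus::Enabled);
        rule.SetFilter(Aws::S3::Model::LifecycleRuleFilter().WithPrefix("logs/"));
        rule.SetExpiration(Aws::S3::Model::LifecycleExpiration().WithDays(30));

        Aws::S3::Model::BucketLifecycleConfiguration lifecycle;
        lifecycle.AddRules(rule);

        Aws::S3::Model::PutBucketLifecycleConfigurationRequest request;
        request.SetBucket("example-bucket");                                    // placeholder
        request.SetLifecycleConfiguration(lifecycle);
        // New field from this update: the SDK computes the selected checksum for the
        // request body (this request no longer forces Content-MD5 unconditionally).
        request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);
        // S3 rejects the call with 403 Forbidden if the bucket owner differs.
        request.SetExpectedBucketOwner("111122223333");                         // placeholder

        auto outcome = s3.PutBucketLifecycleConfiguration(request);
        if (!outcome.IsSuccess()) {
            std::cerr << "PutBucketLifecycleConfiguration failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}

The same ChecksumAlgorithm and ExpectedBucketOwner pattern recurs in the other Put* bucket requests touched below.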
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLoggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLoggingRequest.h
index befa6e8f1a..dd1f1c0ef1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLoggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketLoggingRequest.h
@@ -8,6 +8,7 @@
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/BucketLoggingStatus.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketLoggingRequest : public S3Request
+ class PutBucketLoggingRequest : public S3Request
{
public:
- PutBucketLoggingRequest();
+ AWS_S3_API PutBucketLoggingRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketLogging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which to set the logging parameters.</p>
@@ -118,114 +123,199 @@ namespace Model
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketLoggingRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketLoggingRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
* <p>The MD5 hash of the <code>PutBucketLogging</code> request body.</p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketLoggingRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketLoggingRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketLoggingRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketLoggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketLoggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketLoggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -272,19 +362,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
BucketLoggingStatus m_bucketLoggingStatus;
- bool m_bucketLoggingStatusHasBeenSet;
+ bool m_bucketLoggingStatusHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
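
A short sketch for PutBucketLoggingRequest along the same lines, assuming an already constructed S3Client; the bucket names and prefix are placeholders and the SHA256 choice is arbitrary. Per the generated docs above, Content-MD5 is filled in automatically for SDK callers, so only the optional checksum algorithm is set explicitly.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketLoggingRequest.h>
#include <aws/s3/model/BucketLoggingStatus.h>
#include <aws/s3/model/LoggingEnabled.h>
#include <aws/s3/model/ChecksumAlgorithm.h>

// Enables server access logging for "example-bucket", delivering logs to
// "example-log-bucket" under the "access-logs/" prefix (all names are placeholders).
bool EnableAccessLogging(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::LoggingEnabled logging;
    logging.SetTargetBucket("example-log-bucket");
    logging.SetTargetPrefix("access-logs/");

    Aws::S3::Model::BucketLoggingStatus status;
    status.SetLoggingEnabled(logging);

    Aws::S3::Model::PutBucketLoggingRequest request;
    request.SetBucket("example-bucket");
    request.SetBucketLoggingStatus(status);
    // Optional: pick the checksum the SDK should compute for the request body.
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);

    return s3.PutBucketLogging(request).IsSuccess();
}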
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketMetricsConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketMetricsConfigurationRequest.h
index 94791d58d5..5b6f3ebf6d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketMetricsConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketMetricsConfigurationRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketMetricsConfigurationRequest : public S3Request
+ class PutBucketMetricsConfigurationRequest : public S3Request
{
public:
- PutBucketMetricsConfigurationRequest();
+ AWS_S3_API PutBucketMetricsConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketMetricsConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket for which the metrics configuration is set.</p>
@@ -157,57 +161,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketMetricsConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketMetricsConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketMetricsConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -254,19 +258,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
MetricsConfiguration m_metricsConfiguration;
- bool m_metricsConfigurationHasBeenSet;
+ bool m_metricsConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
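
A comparable sketch for PutBucketMetricsConfigurationRequest; the configuration ID, bucket, and account ID are placeholders. The point of interest here is the ExpectedBucketOwner guard described in the comments above.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketMetricsConfigurationRequest.h>
#include <aws/s3/model/MetricsConfiguration.h>

// Creates (or replaces) a metrics configuration named "EntireBucket" on a
// placeholder bucket, pinning the request to an expected owner account.
bool PutEntireBucketMetrics(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::MetricsConfiguration metrics;
    metrics.SetId("EntireBucket");          // no filter: metrics cover the whole bucket

    Aws::S3::Model::PutBucketMetricsConfigurationRequest request;
    request.SetBucket("example-bucket");    // placeholder
    request.SetId("EntireBucket");          // must match the configuration ID
    request.SetMetricsConfiguration(metrics);
    // If "example-bucket" belongs to a different account, S3 answers 403 Forbidden.
    request.SetExpectedBucketOwner("111122223333");  // placeholder account ID

    return s3.PutBucketMetricsConfiguration(request).IsSuccess();
}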
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketNotificationConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketNotificationConfigurationRequest.h
index 016bba49c9..4ce686f86b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketNotificationConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketNotificationConfigurationRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketNotificationConfigurationRequest : public S3Request
+ class PutBucketNotificationConfigurationRequest : public S3Request
{
public:
- PutBucketNotificationConfigurationRequest();
+ AWS_S3_API PutBucketNotificationConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketNotificationConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket.</p>
@@ -104,61 +108,86 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketNotificationConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketNotificationConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketNotificationConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
+ /**
+ * <p>Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or
+ * false value.</p>
+ */
+ inline bool GetSkipDestinationValidation() const{ return m_skipDestinationValidation; }
+
+ /**
+ * <p>Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or
+ * false value.</p>
+ */
+ inline bool SkipDestinationValidationHasBeenSet() const { return m_skipDestinationValidationHasBeenSet; }
+
+ /**
+ * <p>Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or
+ * false value.</p>
+ */
+ inline void SetSkipDestinationValidation(bool value) { m_skipDestinationValidationHasBeenSet = true; m_skipDestinationValidation = value; }
+
+ /**
+ * <p>Skips validation of Amazon SQS, Amazon SNS, and Lambda destinations. True or
+ * false value.</p>
+ */
+ inline PutBucketNotificationConfigurationRequest& WithSkipDestinationValidation(bool value) { SetSkipDestinationValidation(value); return *this;}
+
+
inline const Aws::Map<Aws::String, Aws::String>& GetCustomizedAccessLogTag() const{ return m_customizedAccessLogTag; }
@@ -201,16 +230,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
NotificationConfiguration m_notificationConfiguration;
- bool m_notificationConfigurationHasBeenSet;
+ bool m_notificationConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
+
+ bool m_skipDestinationValidation;
+ bool m_skipDestinationValidationHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
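
A sketch of the new SkipDestinationValidation flag on PutBucketNotificationConfigurationRequest; the queue ARN, bucket, and event selection are placeholders, and a real deployment would normally let S3 validate the destination instead of skipping it.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketNotificationConfigurationRequest.h>
#include <aws/s3/model/NotificationConfiguration.h>
#include <aws/s3/model/QueueConfiguration.h>

// Routes object-created events to an SQS queue (ARN is a placeholder). The new
// SkipDestinationValidation flag tells S3 not to probe the queue before accepting
// the configuration.
bool PutQueueNotification(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::QueueConfiguration queueConfig;
    queueConfig.SetQueueArn("arn:aws:sqs:us-east-1:111122223333:example-queue");
    queueConfig.AddEvents(Aws::S3::Model::Event::s3_ObjectCreated_All);

    Aws::S3::Model::NotificationConfiguration notification;
    notification.AddQueueConfigurations(queueConfig);

    Aws::S3::Model::PutBucketNotificationConfigurationRequest request;
    request.SetBucket("example-bucket");                 // placeholder
    request.SetNotificationConfiguration(notification);
    request.SetSkipDestinationValidation(true);          // new field in this update

    return s3.PutBucketNotificationConfiguration(request).IsSuccess();
}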
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketOwnershipControlsRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketOwnershipControlsRequest.h
index 76f46d672a..bdcf6c4754 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketOwnershipControlsRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketOwnershipControlsRequest.h
@@ -24,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketOwnershipControlsRequest : public S3Request
+ class PutBucketOwnershipControlsRequest : public S3Request
{
public:
- PutBucketOwnershipControlsRequest();
+ AWS_S3_API PutBucketOwnershipControlsRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketOwnershipControls"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
-
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose <code>OwnershipControls</code> you
@@ -95,151 +97,157 @@ namespace Model
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketOwnershipControlsRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketOwnershipControlsRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
* <p>The MD5 hash of the <code>OwnershipControls</code> request body. </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketOwnershipControlsRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketOwnershipControlsRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketOwnershipControlsRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketOwnershipControlsRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * that you want to apply to this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3
+ * bucket.</p>
*/
inline const OwnershipControls& GetOwnershipControls() const{ return m_ownershipControls; }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * that you want to apply to this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3
+ * bucket.</p>
*/
inline bool OwnershipControlsHasBeenSet() const { return m_ownershipControlsHasBeenSet; }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * that you want to apply to this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3
+ * bucket.</p>
*/
inline void SetOwnershipControls(const OwnershipControls& value) { m_ownershipControlsHasBeenSet = true; m_ownershipControls = value; }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * that you want to apply to this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3
+ * bucket.</p>
*/
inline void SetOwnershipControls(OwnershipControls&& value) { m_ownershipControlsHasBeenSet = true; m_ownershipControls = std::move(value); }
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * that you want to apply to this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3
+ * bucket.</p>
*/
inline PutBucketOwnershipControlsRequest& WithOwnershipControls(const OwnershipControls& value) { SetOwnershipControls(value); return *this;}
/**
- * <p>The <code>OwnershipControls</code> (BucketOwnerPreferred or ObjectWriter)
- * that you want to apply to this Amazon S3 bucket.</p>
+ * <p>The <code>OwnershipControls</code> (BucketOwnerEnforced,
+ * BucketOwnerPreferred, or ObjectWriter) that you want to apply to this Amazon S3
+ * bucket.</p>
*/
inline PutBucketOwnershipControlsRequest& WithOwnershipControls(OwnershipControls&& value) { SetOwnershipControls(std::move(value)); return *this;}
@@ -286,19 +294,19 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
OwnershipControls m_ownershipControls;
- bool m_ownershipControlsHasBeenSet;
+ bool m_ownershipControlsHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
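
A sketch applying the BucketOwnerEnforced value now mentioned in the OwnershipControls docs above (alongside BucketOwnerPreferred and ObjectWriter); the bucket and account ID are placeholders. BucketOwnerEnforced disables ACLs on the bucket entirely.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketOwnershipControlsRequest.h>
#include <aws/s3/model/OwnershipControls.h>
#include <aws/s3/model/OwnershipControlsRule.h>
#include <aws/s3/model/ObjectOwnership.h>

// Enforces bucket-owner ownership for every object in a placeholder bucket.
bool EnforceBucketOwnerOwnership(Aws::S3::S3Client& s3)
{
    Aws::S3::Model::OwnershipControlsRule rule;
    rule.SetObjectOwnership(Aws::S3::Model::ObjectOwnership::BucketOwnerEnforced);

    Aws::S3::Model::OwnershipControls controls;
    controls.AddRules(rule);

    Aws::S3::Model::PutBucketOwnershipControlsRequest request;
    request.SetBucket("example-bucket");                // placeholder
    request.SetOwnershipControls(controls);
    request.SetExpectedBucketOwner("111122223333");     // placeholder account ID

    return s3.PutBucketOwnershipControls(request).IsSuccess();
}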
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketPolicyRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketPolicyRequest.h
index 8cf3d4129a..553987a836 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketPolicyRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketPolicyRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -23,10 +24,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketPolicyRequest : public StreamingS3Request
+ class PutBucketPolicyRequest : public StreamingS3Request
{
public:
- PutBucketPolicyRequest();
+ AWS_S3_API PutBucketPolicyRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -34,12 +35,16 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketPolicy"; }
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket.</p>
@@ -83,63 +88,148 @@ namespace Model
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutBucketPolicyRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutBucketPolicyRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
- * <p>The MD5 hash of the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash of the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutBucketPolicyRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketPolicyRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketPolicyRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Set this parameter to true to confirm that you want to remove your
* permissions to change this bucket policy in the future.</p>
*/
@@ -166,57 +256,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketPolicyRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketPolicyRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketPolicyRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -263,20 +353,23 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
bool m_confirmRemoveSelfBucketAccess;
- bool m_confirmRemoveSelfBucketAccessHasBeenSet;
+ bool m_confirmRemoveSelfBucketAccessHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
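
For reference, a minimal consumer-side sketch of the checksum support added above (not part of this change): bucket name, account ID, and the policy document are placeholders, the SetPolicy accessor and default S3Client construction are assumed from the upstream SDK rather than shown in this diff, and error handling is reduced to a single check.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketPolicyRequest.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;                        // default credential chain and region
            Aws::S3::Model::PutBucketPolicyRequest request;
            request.SetBucket("example-bucket");             // placeholder bucket name
            // Per this diff, GetChecksumAlgorithmName() now drives checksum computation
            // instead of ShouldComputeContentMd5(); opt into a SHA-256 checksum here.
            request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
            request.SetExpectedBucketOwner("111122223333");  // placeholder account ID
            request.SetPolicy(R"({"Version":"2012-10-17","Statement":[]})"); // assumed accessor
            auto outcome = client.PutBucketPolicy(request);
            if (!outcome.IsSuccess()) {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
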
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketReplicationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketReplicationRequest.h
index 1c8d3e16d8..026e05efa1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketReplicationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketReplicationRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/ReplicationConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketReplicationRequest : public S3Request
+ class PutBucketReplicationRequest : public S3Request
{
public:
- PutBucketReplicationRequest();
+ AWS_S3_API PutBucketReplicationRequest();
    // The service request name is the operation name that will be used to send this request out;
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketReplication"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket</p>
@@ -90,8 +95,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -100,8 +105,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -110,8 +115,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -120,8 +125,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -130,8 +135,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -140,8 +145,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketReplicationRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -150,8 +155,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketReplicationRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -160,12 +165,97 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketReplicationRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketReplicationRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketReplicationRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
inline const ReplicationConfiguration& GetReplicationConfiguration() const{ return m_replicationConfiguration; }
@@ -228,57 +318,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketReplicationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketReplicationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketReplicationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -325,22 +415,25 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
ReplicationConfiguration m_replicationConfiguration;
- bool m_replicationConfigurationHasBeenSet;
+ bool m_replicationConfigurationHasBeenSet = false;
Aws::String m_token;
- bool m_tokenHasBeenSet;
+ bool m_tokenHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
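
A similar hedged sketch for the replication request, showing the new SetChecksumAlgorithm alongside SetExpectedBucketOwner. The role ARN and bucket names are placeholders, the ReplicationConfiguration accessors are assumed from the generated model, and the required ReplicationRule entries are deliberately omitted.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketReplicationRequest.h>
    #include <aws/s3/model/ReplicationConfiguration.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;
            Aws::S3::Model::ReplicationConfiguration config;
            config.SetRole("arn:aws:iam::111122223333:role/replication-role"); // placeholder role ARN
            // At least one ReplicationRule must be added to the configuration; omitted in this sketch.

            Aws::S3::Model::PutBucketReplicationRequest request;
            request.SetBucket("example-source-bucket");      // placeholder bucket name
            request.SetReplicationConfiguration(config);
            request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);
            request.SetExpectedBucketOwner("111122223333");  // mismatch yields 403 Forbidden

            auto outcome = client.PutBucketReplication(request);
            if (!outcome.IsSuccess()) {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
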
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketRequestPaymentRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketRequestPaymentRequest.h
index cf61658395..27e831c728 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketRequestPaymentRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketRequestPaymentRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/RequestPaymentConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketRequestPaymentRequest : public S3Request
+ class PutBucketRequestPaymentRequest : public S3Request
{
public:
- PutBucketRequestPaymentRequest();
+ AWS_S3_API PutBucketRequestPaymentRequest();
    // The service request name is the operation name that will be used to send this request out;
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketRequestPayment"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p>
@@ -90,8 +95,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -100,8 +105,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -110,8 +115,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -120,8 +125,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -130,8 +135,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -140,8 +145,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketRequestPaymentRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -150,8 +155,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketRequestPaymentRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -160,13 +165,98 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketRequestPaymentRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketRequestPaymentRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketRequestPaymentRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Container for Payer.</p>
*/
inline const RequestPaymentConfiguration& GetRequestPaymentConfiguration() const{ return m_requestPaymentConfiguration; }
@@ -199,57 +289,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketRequestPaymentRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketRequestPaymentRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketRequestPaymentRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -296,19 +386,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
RequestPaymentConfiguration m_requestPaymentConfiguration;
- bool m_requestPaymentConfigurationHasBeenSet;
+ bool m_requestPaymentConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
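
Since ShouldComputeContentMd5() is replaced by GetChecksumAlgorithmName() in this revision, a caller can opt into SDK-computed checksums explicitly. A small sketch under the assumption of a default-constructed S3Client and a placeholder bucket name:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketRequestPaymentRequest.h>
    #include <aws/s3/model/RequestPaymentConfiguration.h>
    #include <aws/s3/model/Payer.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;
            Aws::S3::Model::RequestPaymentConfiguration payment;
            payment.SetPayer(Aws::S3::Model::Payer::Requester); // requester pays for data transfer

            Aws::S3::Model::PutBucketRequestPaymentRequest request;
            request.SetBucket("example-bucket");                // placeholder bucket name
            request.SetRequestPaymentConfiguration(payment);
            // Optional: have the SDK attach a CRC32 checksum for the XML payload.
            request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);

            auto outcome = client.PutBucketRequestPayment(request);
            if (!outcome.IsSuccess()) {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
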
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketTaggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketTaggingRequest.h
index 9fab196905..05a97a14b0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketTaggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketTaggingRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/Tagging.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketTaggingRequest : public S3Request
+ class PutBucketTaggingRequest : public S3Request
{
public:
- PutBucketTaggingRequest();
+ AWS_S3_API PutBucketTaggingRequest();
    // The service request name is the operation name that will be used to send this request out;
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketTagging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p>
@@ -90,8 +95,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -100,8 +105,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -110,8 +115,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -120,8 +125,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -130,8 +135,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -140,8 +145,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketTaggingRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -150,8 +155,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketTaggingRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -160,13 +165,98 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketTaggingRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketTaggingRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketTaggingRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Container for the <code>TagSet</code> and <code>Tag</code> elements.</p>
*/
inline const Tagging& GetTagging() const{ return m_tagging; }
@@ -199,57 +289,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketTaggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketTaggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketTaggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -296,19 +386,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Tagging m_tagging;
- bool m_taggingHasBeenSet;
+ bool m_taggingHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
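
One last illustrative sketch, for the tagging request: the tag key/value and bucket name are placeholders, and the Tag/Tagging accessors are assumed from the generated S3 model rather than shown in this diff.

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutBucketTaggingRequest.h>
    #include <aws/s3/model/Tagging.h>
    #include <aws/s3/model/Tag.h>
    #include <aws/s3/model/ChecksumAlgorithm.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;

            Aws::S3::Model::Tag tag;
            tag.SetKey("environment");   // placeholder tag key
            tag.SetValue("test");        // placeholder tag value

            Aws::S3::Model::Tagging tagging;
            tagging.AddTagSet(tag);

            Aws::S3::Model::PutBucketTaggingRequest request;
            request.SetBucket("example-bucket");             // placeholder bucket name
            request.SetTagging(tagging);
            request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);

            auto outcome = client.PutBucketTagging(request);
            if (!outcome.IsSuccess()) {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
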
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketVersioningRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketVersioningRequest.h
index c9adb345bc..8b515fcf68 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketVersioningRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketVersioningRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/VersioningConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketVersioningRequest : public S3Request
+ class PutBucketVersioningRequest : public S3Request
{
public:
- PutBucketVersioningRequest();
+ AWS_S3_API PutBucketVersioningRequest();
    // The service request name is the operation name that will be used to send this request out;
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketVersioning"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (both configurable and statically hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p>
@@ -90,8 +95,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -100,8 +105,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -110,8 +115,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -120,8 +125,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -130,8 +135,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -140,8 +145,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketVersioningRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -150,8 +155,8 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketVersioningRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -160,13 +165,98 @@ namespace Model
* header as a message integrity check to verify that the request body was not
* corrupted in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketVersioningRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketVersioningRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketVersioningRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The concatenation of the authentication device's serial number, a space, and
* the value that is displayed on your authentication device.</p>
*/
@@ -248,57 +338,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketVersioningRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketVersioningRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketVersioningRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -345,22 +435,25 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_mFA;
- bool m_mFAHasBeenSet;
+ bool m_mFAHasBeenSet = false;
VersioningConfiguration m_versioningConfiguration;
- bool m_versioningConfigurationHasBeenSet;
+ bool m_versioningConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
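
To make the new ChecksumAlgorithm setter and the MFA field documented above more concrete, here is a minimal sketch of enabling versioning on a bucket. It assumes an already-initialized S3Client; the bucket name and MFA device string are placeholders, and CRC32 is just one of the ChecksumAlgorithm values, chosen arbitrarily for illustration.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketVersioningRequest.h>
#include <aws/s3/model/VersioningConfiguration.h>
#include <aws/s3/model/BucketVersioningStatus.h>
#include <aws/s3/model/ChecksumAlgorithm.h>

void EnableVersioning(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::VersioningConfiguration config;
    config.SetStatus(Aws::S3::Model::BucketVersioningStatus::Enabled);

    Aws::S3::Model::PutBucketVersioningRequest request;
    request.SetBucket("example-bucket");   // placeholder bucket name
    request.SetVersioningConfiguration(config);

    // Opt in to a CRC32 request checksum; the corresponding x-amz-checksum
    // value mentioned in the comments above is then produced by the SDK,
    // so no checksum string is written by hand here.
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);

    // Only relevant when MFA delete is enabled on the bucket: the value is
    // "<device serial> <current code>" as described above (placeholders).
    request.SetMFA("arn:aws:iam::111122223333:mfa/my-device 123456");

    auto outcome = client.PutBucketVersioning(request);
    (void)outcome;  // a real caller would check outcome.IsSuccess()
}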
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketWebsiteRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketWebsiteRequest.h
index f95f12c950..64064f7bce 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketWebsiteRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutBucketWebsiteRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/WebsiteConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutBucketWebsiteRequest : public S3Request
+ class PutBucketWebsiteRequest : public S3Request
{
public:
- PutBucketWebsiteRequest();
+ AWS_S3_API PutBucketWebsiteRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutBucketWebsite"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (both configurable and statically hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p>
@@ -90,8 +95,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -100,8 +105,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -110,8 +115,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -120,8 +125,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -130,8 +135,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -140,8 +145,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketWebsiteRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -150,8 +155,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketWebsiteRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -160,13 +165,98 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, see <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864</a>.</p> <p>For requests
- * made using the AWS Command Line Interface (CLI) or AWS SDKs, this field is
- * calculated automatically.</p>
+ * made using the Amazon Web Services Command Line Interface (CLI) or Amazon Web
+ * Services SDKs, this field is calculated automatically.</p>
*/
inline PutBucketWebsiteRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketWebsiteRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutBucketWebsiteRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Container for the request.</p>
*/
inline const WebsiteConfiguration& GetWebsiteConfiguration() const{ return m_websiteConfiguration; }
@@ -199,57 +289,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketWebsiteRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketWebsiteRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutBucketWebsiteRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -296,19 +386,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
WebsiteConfiguration m_websiteConfiguration;
- bool m_websiteConfigurationHasBeenSet;
+ bool m_websiteConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
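
The same pattern applies to PutBucketWebsiteRequest; the short sketch below wires up a WebsiteConfiguration with an index and error document. Again this is only an illustration: the bucket name and document keys are placeholders, and ContentMD5 is left unset because, as the comments above note, the SDK calculates it automatically.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketWebsiteRequest.h>
#include <aws/s3/model/WebsiteConfiguration.h>
#include <aws/s3/model/IndexDocument.h>
#include <aws/s3/model/ErrorDocument.h>

void ConfigureStaticWebsite(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::IndexDocument index;
    index.SetSuffix("index.html");           // suffix appended to directory-style requests

    Aws::S3::Model::ErrorDocument error;
    error.SetKey("error.html");              // placeholder error page key

    Aws::S3::Model::WebsiteConfiguration website;
    website.SetIndexDocument(index);
    website.SetErrorDocument(error);

    Aws::S3::Model::PutBucketWebsiteRequest request;
    request.SetBucket("example-bucket");     // placeholder bucket name
    request.SetWebsiteConfiguration(website);

    auto outcome = client.PutBucketWebsite(request);
    (void)outcome;                           // a real caller would check outcome.IsSuccess()
}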
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclRequest.h
index 8ee776e41a..d289ca8aff 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclRequest.h
@@ -9,6 +9,7 @@
#include <aws/s3/model/ObjectCannedACL.h>
#include <aws/s3/model/AccessControlPolicy.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/RequestPayer.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -26,10 +27,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutObjectAclRequest : public S3Request
+ class PutObjectAclRequest : public S3Request
{
public:
- PutObjectAclRequest();
+ AWS_S3_API PutObjectAclRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -37,14 +38,18 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutObjectAcl"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (both configurable and statically hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The canned ACL to apply to the object. For more information, see <a
@@ -131,11 +136,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -144,11 +149,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -157,11 +162,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -170,11 +175,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -183,11 +188,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -196,11 +201,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectAclRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -209,11 +214,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectAclRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -222,11 +227,11 @@ namespace Model
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectAclRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
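
Since the reworded comments above stress that the access point ARN goes where the bucket name normally would, a small usage sketch may help; the ARN, object key, and canned ACL below are placeholders, and the same substitution applies to S3 on Outposts bucket ARNs.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectAclRequest.h>
#include <aws/s3/model/ObjectCannedACL.h>

void MakeObjectPublicViaAccessPoint(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::PutObjectAclRequest request;
    // The access point ARN is passed in place of the bucket name (placeholder ARN).
    request.SetBucket("arn:aws:s3:us-west-2:111122223333:accesspoint/my-access-point");
    request.SetKey("photos/2023/example.jpg");                     // placeholder object key
    request.SetACL(Aws::S3::Model::ObjectCannedACL::public_read);  // canned ACL for the object

    auto outcome = client.PutObjectAcl(request);
    (void)outcome;                                                 // a real caller would check outcome.IsSuccess()
}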
@@ -236,8 +241,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
@@ -246,8 +251,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
@@ -256,8 +261,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
@@ -266,8 +271,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
@@ -276,8 +281,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
@@ -286,8 +291,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutObjectAclRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
@@ -296,8 +301,8 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutObjectAclRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
@@ -306,13 +311,98 @@ namespace Model
* as a message integrity check to verify that the request body was not corrupted
* in transit. For more information, go to <a
* href="http://www.ietf.org/rfc/rfc1864.txt">RFC 1864.&gt;</a> </p> <p>For
- * requests made using the AWS Command Line Interface (CLI) or AWS SDKs, this field
- * is calculated automatically.</p>
+ * requests made using the Amazon Web Services Command Line Interface (CLI) or
+ * Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutObjectAclRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectAclRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectAclRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Allows grantee the read, write, read ACP, and write ACP permissions on the
* bucket.</p> <p>This action is not supported by Amazon S3 on Outposts.</p>
*/
@@ -460,42 +550,58 @@ namespace Model
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline const Aws::String& GetGrantWrite() const{ return m_grantWrite; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline bool GrantWriteHasBeenSet() const { return m_grantWriteHasBeenSet; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(const Aws::String& value) { m_grantWriteHasBeenSet = true; m_grantWrite = value; }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(Aws::String&& value) { m_grantWriteHasBeenSet = true; m_grantWrite = std::move(value); }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline void SetGrantWrite(const char* value) { m_grantWriteHasBeenSet = true; m_grantWrite.assign(value); }
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline PutObjectAclRequest& WithGrantWrite(const Aws::String& value) { SetGrantWrite(value); return *this;}
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline PutObjectAclRequest& WithGrantWrite(Aws::String&& value) { SetGrantWrite(std::move(value)); return *this;}
/**
- * <p>Allows grantee to create, overwrite, and delete any object in the bucket.</p>
+ * <p>Allows grantee to create new objects in the bucket.</p> <p>For the bucket and
+ * object owners of existing objects, also allows deletions and overwrites of those
+ * objects.</p>
*/
inline PutObjectAclRequest& WithGrantWrite(const char* value) { SetGrantWrite(value); return *this;}
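
For the revised GrantWrite wording above, the sketch below shows the grant being passed as a header-style grantee string; the id="..." format and the canonical user ID are assumptions for illustration and are not part of this change.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectAclRequest.h>

void GrantWriteOnObject(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::PutObjectAclRequest request;
    request.SetBucket("example-bucket");         // placeholder bucket name
    request.SetKey("reports/2023/summary.csv");  // placeholder object key

    // The grantee is expressed the same way as the x-amz-grant-write header,
    // e.g. id="<canonical user id>" (placeholder canonical user ID below).
    request.SetGrantWrite("id=\"79a59df900b949e55d96a1e698fbacedfd6e09d98eacf8f8d5218e7cd47ef2be\"");

    auto outcome = client.PutObjectAcl(request);
    (void)outcome;                               // a real caller would check outcome.IsSuccess()
}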
@@ -554,19 +660,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetKey() const{ return m_key; }
@@ -575,19 +681,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool KeyHasBeenSet() const { return m_keyHasBeenSet; }
@@ -596,19 +702,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetKey(const Aws::String& value) { m_keyHasBeenSet = true; m_key = value; }
@@ -617,19 +723,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetKey(Aws::String&& value) { m_keyHasBeenSet = true; m_key = std::move(value); }
@@ -638,19 +744,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetKey(const char* value) { m_keyHasBeenSet = true; m_key.assign(value); }
@@ -659,19 +765,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectAclRequest& WithKey(const Aws::String& value) { SetKey(value); return *this;}
@@ -680,19 +786,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectAclRequest& WithKey(Aws::String&& value) { SetKey(std::move(value)); return *this;}
@@ -701,19 +807,19 @@ namespace Model
* with an access point, you must direct requests to the access point hostname. The
* access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectAclRequest& WithKey(const char* value) { SetKey(value); return *this;}
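The doc text above describes addressing the request through an access point; a short sketch of that pattern follows, with a placeholder access point ARN supplied where the bucket name would normally go.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ObjectCannedACL.h>
#include <aws/s3/model/PutObjectAclRequest.h>

// Sketch: when going through an access point, the access point ARN stands in
// for the bucket name, as described above (all identifiers are placeholders).
void PutAclViaAccessPoint(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::PutObjectAclRequest request;
    request.SetBucket("arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point");
    request.SetKey("logs/app.log");
    request.SetACL(Aws::S3::Model::ObjectCannedACL::bucket_owner_full_control);

    auto outcome = client.PutObjectAcl(request);
    (void)outcome; // inspect outcome.IsSuccess() / outcome.GetError() as needed
}
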
@@ -780,57 +886,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectAclRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectAclRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectAclRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
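A hedged sketch of the owner check just documented: pin the expected account and treat a 403 Forbidden outcome as an ownership mismatch. The account ID is a placeholder, and the response-code accessor used here is the generic AWSError API rather than anything specific to this request.

#include <aws/core/http/HttpResponse.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectAclRequest.h>

// Sketch: if a different account owns the bucket, S3 answers 403 Forbidden,
// which surfaces through the error on the outcome (IDs are placeholders).
bool PutAclWithOwnerCheck(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::PutObjectAclRequest request;
    request.SetBucket("my-bucket");
    request.SetKey("data.bin");
    request.SetExpectedBucketOwner("111122223333");

    auto outcome = client.PutObjectAcl(request);
    if (!outcome.IsSuccess() &&
        outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN)
    {
        // Bucket is owned by a different account (access denied).
        return false;
    }
    return outcome.IsSuccess();
}
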
@@ -877,46 +983,49 @@ namespace Model
private:
ObjectCannedACL m_aCL;
- bool m_aCLHasBeenSet;
+ bool m_aCLHasBeenSet = false;
AccessControlPolicy m_accessControlPolicy;
- bool m_accessControlPolicyHasBeenSet;
+ bool m_accessControlPolicyHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_grantFullControl;
- bool m_grantFullControlHasBeenSet;
+ bool m_grantFullControlHasBeenSet = false;
Aws::String m_grantRead;
- bool m_grantReadHasBeenSet;
+ bool m_grantReadHasBeenSet = false;
Aws::String m_grantReadACP;
- bool m_grantReadACPHasBeenSet;
+ bool m_grantReadACPHasBeenSet = false;
Aws::String m_grantWrite;
- bool m_grantWriteHasBeenSet;
+ bool m_grantWriteHasBeenSet = false;
Aws::String m_grantWriteACP;
- bool m_grantWriteACPHasBeenSet;
+ bool m_grantWriteACPHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
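Pulling the pieces of this request type together, a self-contained sketch of one end-to-end call, assuming network access and default credentials; the region, bucket, key, and version ID are placeholders and error handling is reduced to a message print.

#include <aws/core/Aws.h>
#include <aws/core/client/ClientConfiguration.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ObjectCannedACL.h>
#include <aws/s3/model/PutObjectAclRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);                       // required once per process
    {
        Aws::Client::ClientConfiguration config;
        config.region = "us-east-1";             // placeholder region
        Aws::S3::S3Client client(config);

        Aws::S3::Model::PutObjectAclRequest request;
        request.SetBucket("my-bucket");
        request.SetKey("docs/readme.txt");
        request.SetVersionId("3HL4kqtJlcpXroDTDmJ");                       // placeholder version ID
        request.SetACL(Aws::S3::Model::ObjectCannedACL::private_);

        auto outcome = client.PutObjectAcl(request);
        if (!outcome.IsSuccess())
        {
            std::cerr << "PutObjectAcl failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
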
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclResult.h
index 0047bbca99..a2d4799c34 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectAclResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API PutObjectAclResult
+ class PutObjectAclResult
{
public:
- PutObjectAclResult();
- PutObjectAclResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- PutObjectAclResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectAclResult();
+ AWS_S3_API PutObjectAclResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectAclResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
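The AWS_S3_API moves above concern shared-library symbol visibility: in 1.11.x the macro is applied to the out-of-line members instead of the whole class. A rough, illustrative sketch of that per-member export pattern; the macro name and conditions here are approximations, not the SDK's literal definitions from S3_EXPORTS.h / Core_EXPORTS.h.

// Illustration only -- not the SDK's actual macro definition.
#if defined(_MSC_VER) && defined(USE_IMPORT_EXPORT)
  #ifdef BUILDING_MY_S3_LIB
    #define MY_S3_API __declspec(dllexport)   // building the shared library
  #else
    #define MY_S3_API __declspec(dllimport)   // consuming the shared library
  #endif
#else
  #define MY_S3_API                           // static or non-MSVC builds
#endif

// Old style: export the entire class. New style (as in the hunk above):
// annotate only the members that actually cross the library boundary.
class SomeResult
{
public:
    MY_S3_API SomeResult();
    MY_S3_API SomeResult& operator=(const SomeResult& other);
};
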
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldRequest.h
index ad7edb3d02..31814effa7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/ObjectLockLegalHold.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutObjectLegalHoldRequest : public S3Request
+ class PutObjectLegalHoldRequest : public S3Request
{
public:
- PutObjectLegalHoldRequest();
+ AWS_S3_API PutObjectLegalHoldRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,193 +37,197 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutObjectLegalHold"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectLegalHoldRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectLegalHoldRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
/**
- * <p>The bucket name containing the object that you want to place a Legal Hold on.
+ * <p>The bucket name containing the object that you want to place a legal hold on.
* </p> <p>When using this action with an access point, you must direct requests to
* the access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectLegalHoldRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline const Aws::String& GetKey() const{ return m_key; }
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline bool KeyHasBeenSet() const { return m_keyHasBeenSet; }
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline void SetKey(const Aws::String& value) { m_keyHasBeenSet = true; m_key = value; }
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline void SetKey(Aws::String&& value) { m_keyHasBeenSet = true; m_key = std::move(value); }
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline void SetKey(const char* value) { m_keyHasBeenSet = true; m_key.assign(value); }
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline PutObjectLegalHoldRequest& WithKey(const Aws::String& value) { SetKey(value); return *this;}
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline PutObjectLegalHoldRequest& WithKey(Aws::String&& value) { SetKey(std::move(value)); return *this;}
/**
- * <p>The key name for the object that you want to place a Legal Hold on.</p>
+ * <p>The key name for the object that you want to place a legal hold on.</p>
*/
inline PutObjectLegalHoldRequest& WithKey(const char* value) { SetKey(value); return *this;}
/**
- * <p>Container element for the Legal Hold configuration you want to apply to the
+ * <p>Container element for the legal hold configuration you want to apply to the
* specified object.</p>
*/
inline const ObjectLockLegalHold& GetLegalHold() const{ return m_legalHold; }
/**
- * <p>Container element for the Legal Hold configuration you want to apply to the
+ * <p>Container element for the legal hold configuration you want to apply to the
* specified object.</p>
*/
inline bool LegalHoldHasBeenSet() const { return m_legalHoldHasBeenSet; }
/**
- * <p>Container element for the Legal Hold configuration you want to apply to the
+ * <p>Container element for the legal hold configuration you want to apply to the
* specified object.</p>
*/
inline void SetLegalHold(const ObjectLockLegalHold& value) { m_legalHoldHasBeenSet = true; m_legalHold = value; }
/**
- * <p>Container element for the Legal Hold configuration you want to apply to the
+ * <p>Container element for the legal hold configuration you want to apply to the
* specified object.</p>
*/
inline void SetLegalHold(ObjectLockLegalHold&& value) { m_legalHoldHasBeenSet = true; m_legalHold = std::move(value); }
/**
- * <p>Container element for the Legal Hold configuration you want to apply to the
+ * <p>Container element for the legal hold configuration you want to apply to the
* specified object.</p>
*/
inline PutObjectLegalHoldRequest& WithLegalHold(const ObjectLockLegalHold& value) { SetLegalHold(value); return *this;}
/**
- * <p>Container element for the Legal Hold configuration you want to apply to the
+ * <p>Container element for the legal hold configuration you want to apply to the
* specified object.</p>
*/
inline PutObjectLegalHoldRequest& WithLegalHold(ObjectLockLegalHold&& value) { SetLegalHold(std::move(value)); return *this;}
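A small sketch of building the ObjectLockLegalHold container these setters accept; the bucket and key names are placeholders, and the ON status comes from the ObjectLockLegalHoldStatus enum.

#include <aws/s3/model/ObjectLockLegalHold.h>
#include <aws/s3/model/ObjectLockLegalHoldStatus.h>
#include <aws/s3/model/PutObjectLegalHoldRequest.h>

// Sketch: build the legal-hold container and attach it to the request
// (bucket and key are placeholders).
Aws::S3::Model::PutObjectLegalHoldRequest MakeLegalHoldRequest()
{
    Aws::S3::Model::ObjectLockLegalHold hold;
    hold.SetStatus(Aws::S3::Model::ObjectLockLegalHoldStatus::ON);

    Aws::S3::Model::PutObjectLegalHoldRequest request;
    request.SetBucket("my-bucket");
    request.SetKey("contracts/2023/nda.pdf");
    request.SetLegalHold(hold);
    return request;
}
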
@@ -248,156 +253,241 @@ namespace Model
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline const Aws::String& GetVersionId() const{ return m_versionId; }
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline bool VersionIdHasBeenSet() const { return m_versionIdHasBeenSet; }
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline void SetVersionId(const Aws::String& value) { m_versionIdHasBeenSet = true; m_versionId = value; }
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline void SetVersionId(Aws::String&& value) { m_versionIdHasBeenSet = true; m_versionId = std::move(value); }
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline void SetVersionId(const char* value) { m_versionIdHasBeenSet = true; m_versionId.assign(value); }
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline PutObjectLegalHoldRequest& WithVersionId(const Aws::String& value) { SetVersionId(value); return *this;}
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline PutObjectLegalHoldRequest& WithVersionId(Aws::String&& value) { SetVersionId(std::move(value)); return *this;}
/**
- * <p>The version ID of the object that you want to place a Legal Hold on.</p>
+ * <p>The version ID of the object that you want to place a legal hold on.</p>
*/
inline PutObjectLegalHoldRequest& WithVersionId(const char* value) { SetVersionId(value); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectLegalHoldRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectLegalHoldRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectLegalHoldRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectLegalHoldRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectLegalHoldRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectLegalHoldRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectLegalHoldRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectLegalHoldRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
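A hedged sketch combining the new checksum selector with the owner check documented above; per the doc text, selecting SHA256 is meant to let the SDK derive the matching x-amz-checksum value for the request body. All identifiers are placeholders.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/ObjectLockLegalHold.h>
#include <aws/s3/model/ObjectLockLegalHoldStatus.h>
#include <aws/s3/model/PutObjectLegalHoldRequest.h>

// Sketch: ask the SDK to checksum the request body with SHA-256 and pin the
// expected bucket owner (identifiers are placeholders).
void PutLegalHoldWithChecksum(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::ObjectLockLegalHold hold;
    hold.SetStatus(Aws::S3::Model::ObjectLockLegalHoldStatus::ON);

    Aws::S3::Model::PutObjectLegalHoldRequest request;
    request.SetBucket("my-bucket");
    request.SetKey("contracts/2023/nda.pdf");
    request.SetLegalHold(hold);
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
    request.SetExpectedBucketOwner("111122223333");

    auto outcome = client.PutObjectLegalHold(request);
    (void)outcome; // check outcome.IsSuccess() in real code
}
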
@@ -444,28 +534,31 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
ObjectLockLegalHold m_legalHold;
- bool m_legalHoldHasBeenSet;
+ bool m_legalHoldHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
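Closing out this request type, a sketch that releases a legal hold from one specific object version and reports the error message on failure; the bucket, key, and version ID argument are placeholders.

#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ObjectLockLegalHold.h>
#include <aws/s3/model/ObjectLockLegalHoldStatus.h>
#include <aws/s3/model/PutObjectLegalHoldRequest.h>
#include <iostream>

// Sketch: release a legal hold from one specific object version
// (bucket, key, and version ID are placeholders).
bool ReleaseLegalHold(const Aws::S3::S3Client& client,
                      const Aws::String& versionId)
{
    Aws::S3::Model::ObjectLockLegalHold hold;
    hold.SetStatus(Aws::S3::Model::ObjectLockLegalHoldStatus::OFF);

    Aws::S3::Model::PutObjectLegalHoldRequest request;
    request.SetBucket("my-bucket");
    request.SetKey("contracts/2023/nda.pdf");
    request.SetVersionId(versionId);
    request.SetLegalHold(hold);

    auto outcome = client.PutObjectLegalHold(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << "PutObjectLegalHold failed: "
                  << outcome.GetError().GetMessage() << std::endl;
        return false;
    }
    return true;
}
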
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldResult.h
index f4322c70f9..85301e413a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLegalHoldResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API PutObjectLegalHoldResult
+ class PutObjectLegalHoldResult
{
public:
- PutObjectLegalHoldResult();
- PutObjectLegalHoldResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- PutObjectLegalHoldResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectLegalHoldResult();
+ AWS_S3_API PutObjectLegalHoldResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectLegalHoldResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationRequest.h
index 4ffc75c531..40a9a1287f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/ObjectLockConfiguration.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutObjectLockConfigurationRequest : public S3Request
+ class PutObjectLockConfigurationRequest : public S3Request
{
public:
- PutObjectLockConfigurationRequest();
+ AWS_S3_API PutObjectLockConfigurationRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,14 +37,18 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutObjectLockConfiguration"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket whose Object Lock configuration you want to create or replace.</p>
@@ -184,115 +189,200 @@ namespace Model
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectLockConfigurationRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectLockConfigurationRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectLockConfigurationRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectLockConfigurationRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectLockConfigurationRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectLockConfigurationRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectLockConfigurationRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectLockConfigurationRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -339,25 +429,28 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
ObjectLockConfiguration m_objectLockConfiguration;
- bool m_objectLockConfigurationHasBeenSet;
+ bool m_objectLockConfigurationHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_token;
- bool m_tokenHasBeenSet;
+ bool m_tokenHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
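
Aside from the wording updates, the hunks above replace the unconditional ShouldComputeContentMd5() override with the generic checksum plumbing: GetChecksumAlgorithmName() plus the new ChecksumAlgorithm accessors and the m_checksumAlgorithm / m_checksumAlgorithmHasBeenSet pair. A minimal caller-side sketch of the new member follows; it assumes Aws::InitAPI has already run, a configured Aws::S3::S3Client named s3, and a placeholder bucket name, none of which come from this diff.

// Sketch only: enable Object Lock and opt into an SDK-computed SHA-256
// request checksum via the ChecksumAlgorithm member added in this revision.
// Assumptions (not part of the diff): Aws::InitAPI has run, `s3` is a
// configured client, and "example-bucket" is a placeholder.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectLockConfigurationRequest.h>
#include <aws/s3/model/ObjectLockConfiguration.h>
#include <aws/s3/model/ObjectLockEnabled.h>
#include <aws/s3/model/ChecksumAlgorithm.h>

bool EnableObjectLock(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::ObjectLockConfiguration lockConfig;
    lockConfig.SetObjectLockEnabled(Aws::S3::Model::ObjectLockEnabled::Enabled);

    Aws::S3::Model::PutObjectLockConfigurationRequest request;
    request.SetBucket("example-bucket");            // placeholder value
    request.SetObjectLockConfiguration(lockConfig);
    // Replaces reliance on the removed ShouldComputeContentMd5() == true path:
    // ask for a SHA-256 checksum of the request body instead.
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);

    return s3.PutObjectLockConfiguration(request).IsSuccess();
}

The storage pattern matches the rest of the generated model: each value member is paired with a HasBeenSet flag, now explicitly default-initialized to false in the private section above.
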
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationResult.h
index c3b86b53d5..85017a3dd2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectLockConfigurationResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API PutObjectLockConfigurationResult
+ class PutObjectLockConfigurationResult
{
public:
- PutObjectLockConfigurationResult();
- PutObjectLockConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- PutObjectLockConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectLockConfigurationResult();
+ AWS_S3_API PutObjectLockConfigurationResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectLockConfigurationResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
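
The next file, PutObjectRequest.h, picks up the same ChecksumAlgorithm member plus explicit ChecksumCRC32, ChecksumCRC32C, ChecksumSHA1 and ChecksumSHA256 setters for callers that precompute a digest themselves. A hedged sketch of that precomputed-checksum path, assuming a configured client and an externally produced base64-encoded SHA-256 value; the function name, bucket, key and payload below are illustrative, not taken from the diff.

// Sketch only: upload an object and attach a caller-supplied, base64-encoded
// SHA-256 digest using the ChecksumSHA256 setter added in the diff below.
#include <aws/core/utils/memory/stl/AWSAllocator.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>

bool UploadWithPrecomputedChecksum(const Aws::S3::S3Client& s3,
                                   const Aws::String& base64Sha256)
{
    Aws::S3::Model::PutObjectRequest request;
    request.SetBucket("example-bucket");   // placeholder
    request.SetKey("example-key");         // placeholder

    auto body = Aws::MakeShared<Aws::StringStream>("PutObjectBody");
    *body << "example payload";
    request.SetBody(body);

    // New in this revision: pass the caller-computed digest so S3 can verify
    // the uploaded bytes (see the "Checking object integrity" links repeated
    // in the doc comments below).
    request.SetChecksumSHA256(base64Sha256);

    return s3.PutObject(request).IsSuccess();
}

As the doc comments added below spell out, the two paths are alternatives: if an individual checksum is provided, Amazon S3 ignores any ChecksumAlgorithm parameter.
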
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRequest.h
index b2c872004c..d38708d197 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRequest.h
@@ -9,6 +9,7 @@
#include <aws/s3/model/ObjectCannedACL.h>
#include <aws/core/utils/Array.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/DateTime.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <aws/s3/model/ServerSideEncryption.h>
@@ -31,10 +32,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutObjectRequest : public StreamingS3Request
+ class PutObjectRequest : public StreamingS3Request
{
public:
- PutObjectRequest();
+ AWS_S3_API PutObjectRequest();
// The service request name is the operation name that will send this request out;
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -42,10 +43,16 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutObject"; }
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+
+ /**
+ * Helper function to collect the parameters (both configurable and statically hard-coded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The canned ACL to apply to the object. For more information, see <a
@@ -95,19 +102,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -116,19 +123,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -137,19 +144,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -158,19 +165,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -179,19 +186,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -200,19 +207,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -221,19 +228,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -242,19 +249,19 @@ namespace Model
* this action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -598,6 +605,407 @@ namespace Model
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectRequest& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
+ /**
* <p>The date and time at which the object is no longer cacheable. For more
* information, see <a
* href="http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21">http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.21</a>.</p>
@@ -987,7 +1395,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const StorageClass& GetStorageClass() const{ return m_storageClass; }
@@ -998,7 +1406,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool StorageClassHasBeenSet() const { return m_storageClassHasBeenSet; }
@@ -1009,7 +1417,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetStorageClass(const StorageClass& value) { m_storageClassHasBeenSet = true; m_storageClass = value; }
@@ -1020,7 +1428,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetStorageClass(StorageClass&& value) { m_storageClassHasBeenSet = true; m_storageClass = std::move(value); }
@@ -1031,7 +1439,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRequest& WithStorageClass(const StorageClass& value) { SetStorageClass(value); return *this;}
@@ -1042,7 +1450,7 @@ namespace Model
* Storage Class. Amazon S3 on Outposts only uses the OUTPOSTS Storage Class. For
* more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
- * Classes</a> in the <i>Amazon S3 Service Developer Guide</i>.</p>
+ * Classes</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRequest& WithStorageClass(StorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
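
Selecting one of these storage classes is a single setter call; a short sketch with a placeholder bucket and key, using the STANDARD_IA value from StorageClass.h:

#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/StorageClass.h>

// Store the new object in the infrequent-access tier instead of the STANDARD default.
Aws::S3::Model::PutObjectRequest MakeInfrequentAccessPut()
{
    Aws::S3::Model::PutObjectRequest request;
    request.WithBucket("example-bucket")                                  // placeholder
           .WithKey("logs/2023/archive.bin")                              // placeholder
           .WithStorageClass(Aws::S3::Model::StorageClass::STANDARD_IA);  // x-amz-storage-class
    return request;
}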
@@ -1389,170 +1797,162 @@ namespace Model
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline bool SSEKMSKeyIdHasBeenSet() const { return m_sSEKMSKeyIdHasBeenSet; }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = value; }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = std::move(value); }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId.assign(value); }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline PutObjectRequest& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline PutObjectRequest& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetrical customer managed customer master key (CMK) that
- * was used for the object.</p> <p> If the value of
- * <code>x-amz-server-side-encryption</code> is <code>aws:kms</code>, this header
- * specifies the ID of the symmetric customer managed AWS KMS CMK that will be used
- * for the object. If you specify
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetrical customer managed
+ * key that was used for the object. If you specify
* <code>x-amz-server-side-encryption:aws:kms</code>, but do not provide<code>
- * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the AWS
- * managed CMK in AWS to protect the data.</p>
+ * x-amz-server-side-encryption-aws-kms-key-id</code>, Amazon S3 uses the Amazon
+ * Web Services managed key to protect the data. If the KMS key does not exist in
+ * the same account issuing the command, you must use the full ARN and not just the
+ * ID. </p>
*/
inline PutObjectRequest& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
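
A hedged sketch of requesting SSE-KMS with an explicit customer managed key; the key ARN is a placeholder, and a full ARN is shown because, per the note above, an ID alone is not enough when the key belongs to a different account:

#include <aws/s3/model/PutObjectRequest.h>
#include <aws/s3/model/ServerSideEncryption.h>

// Encrypt the object under a specific customer managed KMS key instead of the
// Amazon Web Services managed key.
Aws::S3::Model::PutObjectRequest MakeKmsEncryptedPut()
{
    Aws::S3::Model::PutObjectRequest request;
    request.WithBucket("example-bucket")  // placeholder
           .WithKey("example-key")        // placeholder
           // x-amz-server-side-encryption: aws:kms
           .WithServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms)
           // x-amz-server-side-encryption-aws-kms-key-id (placeholder ARN)
           .WithSSEKMSKeyId("arn:aws:kms:us-east-1:111122223333:key/EXAMPLE-KEY-ID");
    return request;
}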
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline const Aws::String& GetSSEKMSEncryptionContext() const{ return m_sSEKMSEncryptionContext; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline bool SSEKMSEncryptionContextHasBeenSet() const { return m_sSEKMSEncryptionContextHasBeenSet; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const Aws::String& value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext = value; }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(Aws::String&& value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext = std::move(value); }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const char* value) { m_sSEKMSEncryptionContextHasBeenSet = true; m_sSEKMSEncryptionContext.assign(value); }
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline PutObjectRequest& WithSSEKMSEncryptionContext(const Aws::String& value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline PutObjectRequest& WithSSEKMSEncryptionContext(Aws::String&& value) { SetSSEKMSEncryptionContext(std::move(value)); return *this;}
/**
- * <p>Specifies the AWS KMS Encryption Context to use for object encryption. The
- * value of this header is a base64-encoded UTF-8 string holding JSON with the
- * encryption context key-value pairs.</p>
+ * <p>Specifies the Amazon Web Services KMS Encryption Context to use for object
+ * encryption. The value of this header is a base64-encoded UTF-8 string holding
+ * JSON with the encryption context key-value pairs.</p>
*/
inline PutObjectRequest& WithSSEKMSEncryptionContext(const char* value) { SetSSEKMSEncryptionContext(value); return *this;}
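
Since the header carries base64-encoded UTF-8 JSON, the context is typically built as a JSON string and encoded before being set. A small sketch under that assumption, with placeholder key-value pairs:

#include <aws/core/utils/Array.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/s3/model/PutObjectRequest.h>

// Pass an encryption context in the form the header expects:
// base64 of a UTF-8 JSON object of key-value pairs.
void AddEncryptionContext(Aws::S3::Model::PutObjectRequest& request)
{
    const Aws::String contextJson = R"({"department":"finance"})";  // placeholder pairs

    const Aws::Utils::ByteBuffer raw(
        reinterpret_cast<const unsigned char*>(contextJson.c_str()), contextJson.size());

    request.WithSSEKMSEncryptionContext(Aws::Utils::HashingUtils::Base64Encode(raw));
}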
@@ -1694,32 +2094,38 @@ namespace Model
/**
- * <p>The date and time when you want this object's Object Lock to expire.</p>
+ * <p>The date and time when you want this object's Object Lock to expire. Must be
+ * formatted as a timestamp parameter.</p>
*/
inline const Aws::Utils::DateTime& GetObjectLockRetainUntilDate() const{ return m_objectLockRetainUntilDate; }
/**
- * <p>The date and time when you want this object's Object Lock to expire.</p>
+ * <p>The date and time when you want this object's Object Lock to expire. Must be
+ * formatted as a timestamp parameter.</p>
*/
inline bool ObjectLockRetainUntilDateHasBeenSet() const { return m_objectLockRetainUntilDateHasBeenSet; }
/**
- * <p>The date and time when you want this object's Object Lock to expire.</p>
+ * <p>The date and time when you want this object's Object Lock to expire. Must be
+ * formatted as a timestamp parameter.</p>
*/
inline void SetObjectLockRetainUntilDate(const Aws::Utils::DateTime& value) { m_objectLockRetainUntilDateHasBeenSet = true; m_objectLockRetainUntilDate = value; }
/**
- * <p>The date and time when you want this object's Object Lock to expire.</p>
+ * <p>The date and time when you want this object's Object Lock to expire. Must be
+ * formatted as a timestamp parameter.</p>
*/
inline void SetObjectLockRetainUntilDate(Aws::Utils::DateTime&& value) { m_objectLockRetainUntilDateHasBeenSet = true; m_objectLockRetainUntilDate = std::move(value); }
/**
- * <p>The date and time when you want this object's Object Lock to expire.</p>
+ * <p>The date and time when you want this object's Object Lock to expire. Must be
+ * formatted as a timestamp parameter.</p>
*/
inline PutObjectRequest& WithObjectLockRetainUntilDate(const Aws::Utils::DateTime& value) { SetObjectLockRetainUntilDate(value); return *this;}
/**
- * <p>The date and time when you want this object's Object Lock to expire.</p>
+ * <p>The date and time when you want this object's Object Lock to expire. Must be
+ * formatted as a timestamp parameter.</p>
*/
inline PutObjectRequest& WithObjectLockRetainUntilDate(Aws::Utils::DateTime&& value) { SetObjectLockRetainUntilDate(std::move(value)); return *this;}
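
A sketch of pairing the retain-until date with an Object Lock mode, assuming aws-core's DateTime(int64_t millisSinceEpoch) constructor and CurrentTimeMillis() helper; the 30-day window is arbitrary:

#include <aws/core/utils/DateTime.h>
#include <aws/s3/model/ObjectLockMode.h>
#include <aws/s3/model/PutObjectRequest.h>

// Retain the object under COMPLIANCE mode for roughly 30 days from now.
void AddRetention(Aws::S3::Model::PutObjectRequest& request)
{
    const long long thirtyDaysMs = 30LL * 24 * 60 * 60 * 1000;
    const Aws::Utils::DateTime retainUntil(
        Aws::Utils::DateTime::CurrentTimeMillis() + thirtyDaysMs);

    request.WithObjectLockMode(Aws::S3::Model::ObjectLockMode::COMPLIANCE)
           .WithObjectLockRetainUntilDate(retainUntil);  // sent as a timestamp parameter
}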
@@ -1775,57 +2181,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
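
Pinning the owner is a one-liner; the account ID below is a placeholder:

#include <aws/s3/model/PutObjectRequest.h>

// Make the upload fail with 403 Forbidden if the bucket is owned by anyone else.
void PinBucketOwner(Aws::S3::Model::PutObjectRequest& request)
{
    request.WithExpectedBucketOwner("111122223333");  // placeholder 12-digit account ID
}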
@@ -1872,98 +2278,113 @@ namespace Model
private:
ObjectCannedACL m_aCL;
- bool m_aCLHasBeenSet;
+ bool m_aCLHasBeenSet = false;
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_cacheControl;
- bool m_cacheControlHasBeenSet;
+ bool m_cacheControlHasBeenSet = false;
Aws::String m_contentDisposition;
- bool m_contentDispositionHasBeenSet;
+ bool m_contentDispositionHasBeenSet = false;
Aws::String m_contentEncoding;
- bool m_contentEncodingHasBeenSet;
+ bool m_contentEncodingHasBeenSet = false;
Aws::String m_contentLanguage;
- bool m_contentLanguageHasBeenSet;
+ bool m_contentLanguageHasBeenSet = false;
long long m_contentLength;
- bool m_contentLengthHasBeenSet;
+ bool m_contentLengthHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
Aws::Utils::DateTime m_expires;
- bool m_expiresHasBeenSet;
+ bool m_expiresHasBeenSet = false;
Aws::String m_grantFullControl;
- bool m_grantFullControlHasBeenSet;
+ bool m_grantFullControlHasBeenSet = false;
Aws::String m_grantRead;
- bool m_grantReadHasBeenSet;
+ bool m_grantReadHasBeenSet = false;
Aws::String m_grantReadACP;
- bool m_grantReadACPHasBeenSet;
+ bool m_grantReadACPHasBeenSet = false;
Aws::String m_grantWriteACP;
- bool m_grantWriteACPHasBeenSet;
+ bool m_grantWriteACPHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_metadata;
- bool m_metadataHasBeenSet;
+ bool m_metadataHasBeenSet = false;
ServerSideEncryption m_serverSideEncryption;
- bool m_serverSideEncryptionHasBeenSet;
+ bool m_serverSideEncryptionHasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
Aws::String m_websiteRedirectLocation;
- bool m_websiteRedirectLocationHasBeenSet;
+ bool m_websiteRedirectLocationHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::String m_sSEKMSKeyId;
- bool m_sSEKMSKeyIdHasBeenSet;
+ bool m_sSEKMSKeyIdHasBeenSet = false;
Aws::String m_sSEKMSEncryptionContext;
- bool m_sSEKMSEncryptionContextHasBeenSet;
+ bool m_sSEKMSEncryptionContextHasBeenSet = false;
bool m_bucketKeyEnabled;
- bool m_bucketKeyEnabledHasBeenSet;
+ bool m_bucketKeyEnabledHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_tagging;
- bool m_taggingHasBeenSet;
+ bool m_taggingHasBeenSet = false;
ObjectLockMode m_objectLockMode;
- bool m_objectLockModeHasBeenSet;
+ bool m_objectLockModeHasBeenSet = false;
Aws::Utils::DateTime m_objectLockRetainUntilDate;
- bool m_objectLockRetainUntilDateHasBeenSet;
+ bool m_objectLockRetainUntilDateHasBeenSet = false;
ObjectLockLegalHoldStatus m_objectLockLegalHoldStatus;
- bool m_objectLockLegalHoldStatusHasBeenSet;
+ bool m_objectLockLegalHoldStatusHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
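
Tying the request options above together, a minimal end-to-end sketch of sending the request and checking the outcome; the client relies on the default credential and region configuration, and the bucket and key remain placeholders:

#include <aws/core/Aws.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default credential chain and region

        auto body = Aws::MakeShared<Aws::StringStream>("put-sample");
        *body << "hello";

        Aws::S3::Model::PutObjectRequest request;
        request.WithBucket("example-bucket")  // placeholder
               .WithKey("example-key");       // placeholder
        request.SetBody(body);

        const auto outcome = client.PutObject(request);
        if (outcome.IsSuccess())
        {
            std::cout << "ETag: " << outcome.GetResult().GetETag() << std::endl;
        }
        else
        {
            std::cerr << "PutObject failed: " << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}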
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectResult.h
index f8b5bf13e7..71eac15963 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectResult.h
@@ -26,74 +26,74 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API PutObjectResult
+ class PutObjectResult
{
public:
- PutObjectResult();
- PutObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- PutObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectResult();
+ AWS_S3_API PutObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline const Aws::String& GetExpiration() const{ return m_expiration; }
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const Aws::String& value) { m_expiration = value; }
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(Aws::String&& value) { m_expiration = std::move(value); }
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline void SetExpiration(const char* value) { m_expiration.assign(value); }
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline PutObjectResult& WithExpiration(const Aws::String& value) { SetExpiration(value); return *this;}
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline PutObjectResult& WithExpiration(Aws::String&& value) { SetExpiration(std::move(value)); return *this;}
/**
- * <p> If the expiration is configured for the object (see <a
+ * <p>If the expiration is configured for the object (see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html">PutBucketLifecycleConfiguration</a>),
- * the response includes this header. It includes the expiry-date and rule-id
- * key-value pairs that provide information about object expiration. The value of
- * the rule-id is URL encoded.</p>
+ * the response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide information about object
+ * expiration. The value of the <code>rule-id</code> is URL-encoded.</p>
*/
inline PutObjectResult& WithExpiration(const char* value) { SetExpiration(value); return *this;}
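
On the reading side, the header surfaces as one opaque string; a short sketch that simply reports it when a lifecycle rule applies:

#include <aws/s3/model/PutObjectResult.h>
#include <iostream>

// The x-amz-expiration header arrives as a single string of the form
// expiry-date="...", rule-id="..." (the rule-id is URL-encoded).
void PrintExpiration(const Aws::S3::Model::PutObjectResult& result)
{
    const Aws::String& expiration = result.GetExpiration();
    if (!expiration.empty())
    {
        std::cout << "Lifecycle expiration: " << expiration << std::endl;
    }
}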
@@ -135,42 +135,326 @@ namespace Model
/**
- * <p>If you specified server-side encryption either with an AWS KMS customer
- * master key (CMK) or Amazon S3-managed encryption key in your PUT request, the
- * response includes this header. It confirms the encryption algorithm that Amazon
- * S3 used to encrypt the object.</p>
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline PutObjectResult& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
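
Since each checksum header is present only when that algorithm was used on upload, a reader can probe all four getters and report whichever one is populated; a small sketch:

#include <aws/s3/model/PutObjectResult.h>
#include <iostream>

// Only the checksum matching the algorithm used on upload is populated;
// the remaining getters return empty strings.
void PrintChecksums(const Aws::S3::Model::PutObjectResult& result)
{
    if (!result.GetChecksumCRC32().empty())
        std::cout << "CRC32:   " << result.GetChecksumCRC32() << std::endl;
    if (!result.GetChecksumCRC32C().empty())
        std::cout << "CRC32C:  " << result.GetChecksumCRC32C() << std::endl;
    if (!result.GetChecksumSHA1().empty())
        std::cout << "SHA-1:   " << result.GetChecksumSHA1() << std::endl;
    if (!result.GetChecksumSHA256().empty())
        std::cout << "SHA-256: " << result.GetChecksumSHA256() << std::endl;
}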
+ /**
+ * <p>If you specified server-side encryption either with an Amazon Web Services
+ * KMS key or Amazon S3-managed encryption key in your PUT request, the response
+ * includes this header. It confirms the encryption algorithm that Amazon S3 used
+ * to encrypt the object.</p>
*/
inline const ServerSideEncryption& GetServerSideEncryption() const{ return m_serverSideEncryption; }
/**
- * <p>If you specified server-side encryption either with an AWS KMS customer
- * master key (CMK) or Amazon S3-managed encryption key in your PUT request, the
- * response includes this header. It confirms the encryption algorithm that Amazon
- * S3 used to encrypt the object.</p>
+ * <p>If you specified server-side encryption either with an Amazon Web Services
+ * KMS key or Amazon S3-managed encryption key in your PUT request, the response
+ * includes this header. It confirms the encryption algorithm that Amazon S3 used
+ * to encrypt the object.</p>
*/
inline void SetServerSideEncryption(const ServerSideEncryption& value) { m_serverSideEncryption = value; }
/**
- * <p>If you specified server-side encryption either with an AWS KMS customer
- * master key (CMK) or Amazon S3-managed encryption key in your PUT request, the
- * response includes this header. It confirms the encryption algorithm that Amazon
- * S3 used to encrypt the object.</p>
+ * <p>If you specified server-side encryption either with an Amazon Web Services
+ * KMS key or Amazon S3-managed encryption key in your PUT request, the response
+ * includes this header. It confirms the encryption algorithm that Amazon S3 used
+ * to encrypt the object.</p>
*/
inline void SetServerSideEncryption(ServerSideEncryption&& value) { m_serverSideEncryption = std::move(value); }
/**
- * <p>If you specified server-side encryption either with an AWS KMS customer
- * master key (CMK) or Amazon S3-managed encryption key in your PUT request, the
- * response includes this header. It confirms the encryption algorithm that Amazon
- * S3 used to encrypt the object.</p>
+ * <p>If you specified server-side encryption either with an Amazon Web Services
+ * KMS key or Amazon S3-managed encryption key in your PUT request, the response
+ * includes this header. It confirms the encryption algorithm that Amazon S3 used
+ * to encrypt the object.</p>
*/
inline PutObjectResult& WithServerSideEncryption(const ServerSideEncryption& value) { SetServerSideEncryption(value); return *this;}
/**
- * <p>If you specified server-side encryption either with an AWS KMS customer
- * master key (CMK) or Amazon S3-managed encryption key in your PUT request, the
- * response includes this header. It confirms the encryption algorithm that Amazon
- * S3 used to encrypt the object.</p>
+ * <p>If you specified server-side encryption either with an Amazon Web Services
+ * KMS key or Amazon S3-managed encryption key in your PUT request, the response
+ * includes this header. It confirms the encryption algorithm that Amazon S3 used
+ * to encrypt the object.</p>
*/
inline PutObjectResult& WithServerSideEncryption(ServerSideEncryption&& value) { SetServerSideEncryption(std::move(value)); return *this;}
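
A one-line check that the stored object really ended up under SSE-KMS rather than the SSE-S3 default, using the enum from ServerSideEncryption.h:

#include <aws/s3/model/PutObjectResult.h>
#include <aws/s3/model/ServerSideEncryption.h>

// True if S3 reports the object was encrypted with SSE-KMS.
bool WasKmsEncrypted(const Aws::S3::Model::PutObjectResult& result)
{
    return result.GetServerSideEncryption() == Aws::S3::Model::ServerSideEncryption::aws_kms;
}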
@@ -313,126 +597,126 @@ namespace Model
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline PutObjectResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline PutObjectResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
* <p>If <code>x-amz-server-side-encryption</code> is present and has the value of
- * <code>aws:kms</code>, this header specifies the ID of the AWS Key Management
- * Service (AWS KMS) symmetric customer managed customer master key (CMK) that was
- * used for the object. </p>
+ * <code>aws:kms</code>, this header specifies the ID of the Amazon Web Services
+ * Key Management Service (Amazon Web Services KMS) symmetric customer managed key
+ * that was used for the object. </p>
*/
inline PutObjectResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline const Aws::String& GetSSEKMSEncryptionContext() const{ return m_sSEKMSEncryptionContext; }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const Aws::String& value) { m_sSEKMSEncryptionContext = value; }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(Aws::String&& value) { m_sSEKMSEncryptionContext = std::move(value); }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline void SetSSEKMSEncryptionContext(const char* value) { m_sSEKMSEncryptionContext.assign(value); }
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline PutObjectResult& WithSSEKMSEncryptionContext(const Aws::String& value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline PutObjectResult& WithSSEKMSEncryptionContext(Aws::String&& value) { SetSSEKMSEncryptionContext(std::move(value)); return *this;}
/**
- * <p>If present, specifies the AWS KMS Encryption Context to use for object
- * encryption. The value of this header is a base64-encoded UTF-8 string holding
- * JSON with the encryption context key-value pairs.</p>
+ * <p>If present, specifies the Amazon Web Services KMS Encryption Context to use
+ * for object encryption. The value of this header is a base64-encoded UTF-8 string
+ * holding JSON with the encryption context key-value pairs.</p>
*/
inline PutObjectResult& WithSSEKMSEncryptionContext(const char* value) { SetSSEKMSEncryptionContext(value); return *this;}
/**
* <p>Indicates whether the uploaded object uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the uploaded object uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the uploaded object uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline PutObjectResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
@@ -458,6 +742,14 @@ namespace Model
Aws::String m_eTag;
+ Aws::String m_checksumCRC32;
+
+ Aws::String m_checksumCRC32C;
+
+ Aws::String m_checksumSHA1;
+
+ Aws::String m_checksumSHA256;
+
ServerSideEncryption m_serverSideEncryption;
Aws::String m_versionId;
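
Not part of the vendored diff, purely illustrative: a minimal sketch of reading the SSE-KMS and new checksum fields that PutObjectResult carries after this update. The Get* accessors are assumed to mirror the Set*/With* methods and member additions shown above, and client/request stand for an already-configured S3Client and a prepared PutObjectRequest.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <iostream>

// Prints the server-side-encryption and checksum details returned by PutObject.
// Accessor names are assumed from the setters and members added above.
void InspectPutResult(const Aws::S3::S3Client& client,
                      const Aws::S3::Model::PutObjectRequest& request)
{
    auto outcome = client.PutObject(request);
    if (!outcome.IsSuccess()) {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
        return;
    }
    const auto& result = outcome.GetResult();
    // KMS key that protected the object when x-amz-server-side-encryption is aws:kms.
    std::cout << "SSE-KMS key id:     " << result.GetSSEKMSKeyId() << "\n";
    // Base64-encoded JSON encryption context, if one was supplied.
    std::cout << "Encryption context: " << result.GetSSEKMSEncryptionContext() << "\n";
    // Whether an S3 Bucket Key was used for SSE-KMS.
    std::cout << "Bucket key enabled: " << std::boolalpha << result.GetBucketKeyEnabled() << "\n";
    // One of the checksum members introduced by this update.
    std::cout << "SHA-256 checksum:   " << result.GetChecksumSHA256() << "\n";
}
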
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionRequest.h
index e11e2568a1..9447737e66 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/ObjectLockRetention.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutObjectRetentionRequest : public S3Request
+ class PutObjectRetentionRequest : public S3Request
{
public:
- PutObjectRetentionRequest();
+ AWS_S3_API PutObjectRetentionRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,14 +37,18 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutObjectRetention"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name that contains the object you want to apply this Object
@@ -51,11 +56,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -65,11 +70,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -79,11 +84,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -93,11 +98,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -107,11 +112,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -121,11 +126,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRetentionRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -135,11 +140,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRetentionRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -149,11 +154,11 @@ namespace Model
* you must direct requests to the access point hostname. The access point hostname
* takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectRetentionRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
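
Illustrative aside on the bucket-name comments above: when requests go through an access point, the access point ARN is supplied where the bucket name would be. The ARN, key, and helper below are made up for the sketch.

#include <aws/s3/model/PutObjectRetentionRequest.h>

// Hypothetical access point ARN; it goes wherever SetBucket()/WithBucket()
// would otherwise take a plain bucket name.
Aws::S3::Model::PutObjectRetentionRequest MakeAccessPointRequest()
{
    Aws::S3::Model::PutObjectRetentionRequest request;
    request.SetBucket("arn:aws:s3:us-east-1:111122223333:accesspoint/my-access-point");
    request.SetKey("example-object");
    return request;
}
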
@@ -328,115 +333,200 @@ namespace Model
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectRetentionRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectRetentionRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectRetentionRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectRetentionRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectRetentionRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectRetentionRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectRetentionRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectRetentionRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -483,31 +573,34 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
ObjectLockRetention m_retention;
- bool m_retentionHasBeenSet;
+ bool m_retentionHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
bool m_bypassGovernanceRetention;
- bool m_bypassGovernanceRetentionHasBeenSet;
+ bool m_bypassGovernanceRetentionHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
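
Again illustrative only: a sketch of how PutObjectRetentionRequest might be used with the ChecksumAlgorithm and ExpectedBucketOwner fields from this header. Bucket, key, date, and account ID are placeholders, and SetKey/SetRetention are assumed from the parts of the header not shown in this hunk.

#include <aws/core/utils/DateTime.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRetentionRequest.h>
#include <aws/s3/model/ObjectLockRetention.h>
#include <aws/s3/model/ObjectLockRetentionMode.h>
#include <aws/s3/model/ChecksumAlgorithm.h>

// Applies a governance-mode retention date to one object.
bool ApplyRetention(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::ObjectLockRetention retention;
    retention.SetMode(Aws::S3::Model::ObjectLockRetentionMode::GOVERNANCE);
    retention.SetRetainUntilDate(
        Aws::Utils::DateTime("2025-01-01T00:00:00Z", Aws::Utils::DateFormat::ISO_8601));

    Aws::S3::Model::PutObjectRetentionRequest request;
    request.SetBucket("example-bucket");
    request.SetKey("example-object");
    request.SetRetention(retention);
    // New in this update: ask the SDK to add a trailing SHA-256 checksum header.
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);
    // Fails with 403 Forbidden if the bucket is owned by a different account.
    request.SetExpectedBucketOwner("111122223333");

    return client.PutObjectRetention(request).IsSuccess();
}
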
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionResult.h
index c35bdbd110..fdd21a4524 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectRetentionResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API PutObjectRetentionResult
+ class PutObjectRetentionResult
{
public:
- PutObjectRetentionResult();
- PutObjectRetentionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- PutObjectRetentionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectRetentionResult();
+ AWS_S3_API PutObjectRetentionResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectRetentionResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingRequest.h
index 9bd467ebf1..2ce5195e29 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/Tagging.h>
#include <aws/s3/model/RequestPayer.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutObjectTaggingRequest : public S3Request
+ class PutObjectTaggingRequest : public S3Request
{
public:
- PutObjectTaggingRequest();
+ AWS_S3_API PutObjectTaggingRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,33 +37,37 @@ namespace Model
// so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "PutObjectTagging"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the object. </p> <p>When using this action with an
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -71,19 +76,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -92,19 +97,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -113,19 +118,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -134,19 +139,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -155,19 +160,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectTaggingRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -176,19 +181,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectTaggingRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -197,19 +202,19 @@ namespace Model
* access point, you must direct requests to the access point hostname. The access
* point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline PutObjectTaggingRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -297,63 +302,148 @@ namespace Model
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectTaggingRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectTaggingRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
- * <p>The MD5 hash for the request body.</p> <p>For requests made using the AWS
- * Command Line Interface (CLI) or AWS SDKs, this field is calculated
- * automatically.</p>
+ * <p>The MD5 hash for the request body.</p> <p>For requests made using the Amazon
+ * Web Services Command Line Interface (CLI) or Amazon Web Services SDKs, this
+ * field is calculated automatically.</p>
*/
inline PutObjectTaggingRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectTaggingRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutObjectTaggingRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>Container for the <code>TagSet</code> and <code>Tag</code> elements</p>
*/
inline const Tagging& GetTagging() const{ return m_tagging; }
@@ -386,57 +476,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectTaggingRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectTaggingRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutObjectTaggingRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -502,28 +592,31 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Tagging m_tagging;
- bool m_taggingHasBeenSet;
+ bool m_taggingHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
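
The hunks above mean `PutObjectTaggingRequest` now carries an optional `ChecksumAlgorithm` next to the existing `ExpectedBucketOwner` field. A minimal usage sketch under stated assumptions: bucket, key, tag values and the account ID are placeholders, and client setup follows the usual `Aws::InitAPI` pattern rather than anything specific to this patch.

```cpp
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectTaggingRequest.h>
#include <aws/s3/model/Tagging.h>
#include <aws/s3/model/Tag.h>
#include <aws/s3/model/ChecksumAlgorithm.h>

int main() {
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default credential/provider chain

        Aws::S3::Model::Tagging tagging;
        tagging.AddTagSet(Aws::S3::Model::Tag().WithKey("env").WithValue("prod"));

        Aws::S3::Model::PutObjectTaggingRequest request;
        request.SetBucket("example-bucket");             // placeholder
        request.SetKey("example-key");                   // placeholder
        request.SetTagging(tagging);
        // New member in this update: lets the SDK attach the checksum header.
        request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);
        // Per the updated docs, a mismatch now yields HTTP 403 Forbidden.
        request.SetExpectedBucketOwner("111122223333");  // placeholder account ID

        auto outcome = client.PutObjectTagging(request);
        if (!outcome.IsSuccess()) {
            // Inspect outcome.GetError() as appropriate for the application.
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```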
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingResult.h
index 7a48440e89..4df7212374 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutObjectTaggingResult.h
@@ -24,12 +24,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API PutObjectTaggingResult
+ class PutObjectTaggingResult
{
public:
- PutObjectTaggingResult();
- PutObjectTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- PutObjectTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectTaggingResult();
+ AWS_S3_API PutObjectTaggingResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API PutObjectTaggingResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutPublicAccessBlockRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutPublicAccessBlockRequest.h
index fd369ccfac..74862b9eaa 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutPublicAccessBlockRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/PutPublicAccessBlockRequest.h
@@ -7,6 +7,7 @@
#include <aws/s3/S3_EXPORTS.h>
#include <aws/s3/S3Request.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/PublicAccessBlockConfiguration.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -24,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API PutPublicAccessBlockRequest : public S3Request
+ class PutPublicAccessBlockRequest : public S3Request
{
public:
- PutPublicAccessBlockRequest();
+ AWS_S3_API PutPublicAccessBlockRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -35,14 +36,18 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "PutPublicAccessBlock"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- inline bool ShouldComputeContentMd5() const override { return true; }
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the Amazon S3 bucket whose <code>PublicAccessBlock</code>
@@ -95,62 +100,147 @@ namespace Model
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline const Aws::String& GetContentMD5() const{ return m_contentMD5; }
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline bool ContentMD5HasBeenSet() const { return m_contentMD5HasBeenSet; }
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const Aws::String& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = value; }
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(Aws::String&& value) { m_contentMD5HasBeenSet = true; m_contentMD5 = std::move(value); }
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline void SetContentMD5(const char* value) { m_contentMD5HasBeenSet = true; m_contentMD5.assign(value); }
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutPublicAccessBlockRequest& WithContentMD5(const Aws::String& value) { SetContentMD5(value); return *this;}
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutPublicAccessBlockRequest& WithContentMD5(Aws::String&& value) { SetContentMD5(std::move(value)); return *this;}
/**
* <p>The MD5 hash of the <code>PutPublicAccessBlock</code> request body. </p>
- * <p>For requests made using the AWS Command Line Interface (CLI) or AWS SDKs,
- * this field is calculated automatically.</p>
+ * <p>For requests made using the Amazon Web Services Command Line Interface (CLI)
+ * or Amazon Web Services SDKs, this field is calculated automatically.</p>
*/
inline PutPublicAccessBlockRequest& WithContentMD5(const char* value) { SetContentMD5(value); return *this;}
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutPublicAccessBlockRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline PutPublicAccessBlockRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The <code>PublicAccessBlock</code> configuration that you want to apply to
* this Amazon S3 bucket. You can enable the configuration options in any
* combination. For more information about when Amazon S3 considers a bucket or
@@ -213,57 +303,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutPublicAccessBlockRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutPublicAccessBlockRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline PutPublicAccessBlockRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -310,19 +400,22 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
PublicAccessBlockConfiguration m_publicAccessBlockConfiguration;
- bool m_publicAccessBlockConfigurationHasBeenSet;
+ bool m_publicAccessBlockConfigurationHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
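
`PutPublicAccessBlockRequest` picks up the same additions: a `ChecksumAlgorithm` member, and Content-MD5 documentation noting that SDK requests compute the hash automatically. A short hedged sketch of populating the updated request; the bucket name is a placeholder and the caller is assumed to have initialized the SDK already.

```cpp
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutPublicAccessBlockRequest.h>
#include <aws/s3/model/PublicAccessBlockConfiguration.h>
#include <aws/s3/model/ChecksumAlgorithm.h>

// Blocks every form of public access on a bucket; returns true on success.
bool BlockAllPublicAccess(const Aws::S3::S3Client& client, const Aws::String& bucket) {
    Aws::S3::Model::PublicAccessBlockConfiguration config;
    config.SetBlockPublicAcls(true);
    config.SetIgnorePublicAcls(true);
    config.SetBlockPublicPolicy(true);
    config.SetRestrictPublicBuckets(true);

    Aws::S3::Model::PutPublicAccessBlockRequest request;
    request.SetBucket(bucket);
    request.SetPublicAccessBlockConfiguration(config);
    // Optional: request a trailing checksum; Content-MD5 itself is computed
    // automatically for SDK requests, as the updated comment states.
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);

    return client.PutPublicAccessBlock(request).IsSuccess();
}
```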
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfiguration.h
index 41cc3d2d70..6904890b57 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfiguration.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API QueueConfiguration
+ class QueueConfiguration
{
public:
- QueueConfiguration();
- QueueConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- QueueConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API QueueConfiguration();
+ AWS_S3_API QueueConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API QueueConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -178,16 +178,16 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_queueArn;
- bool m_queueArnHasBeenSet;
+ bool m_queueArnHasBeenSet = false;
Aws::Vector<Event> m_events;
- bool m_eventsHasBeenSet;
+ bool m_eventsHasBeenSet = false;
NotificationConfigurationFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
};
} // namespace Model
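
Most of the remaining model headers in this update carry only the two mechanical changes visible in the `QueueConfiguration` hunk: the `AWS_S3_API` export macro moves from the class declaration onto the members that have out-of-line definitions, and every `m_*HasBeenSet` flag gains an in-class `= false` initializer. An illustrative (not verbatim) before/after, with hypothetical class names:

```cpp
#include <aws/s3/S3_EXPORTS.h>
#include <aws/core/utils/memory/stl/AWSString.h>

// Before 1.11.x: the whole class was exported, and the flag was left
// uninitialized until the out-of-line constructor ran.
class AWS_S3_API QueueConfigurationOld
{
public:
    QueueConfigurationOld();
private:
    Aws::String m_id;
    bool m_idHasBeenSet;
};

// After: only the out-of-line members are exported, and a default-constructed
// object has well-defined "has been set" flags.
class QueueConfigurationNew
{
public:
    AWS_S3_API QueueConfigurationNew();
private:
    Aws::String m_id;
    bool m_idHasBeenSet = false;
};
```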
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfigurationDeprecated.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfigurationDeprecated.h
index 05c80b2db1..2edd7a1a4d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfigurationDeprecated.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/QueueConfigurationDeprecated.h
@@ -33,14 +33,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/QueueConfigurationDeprecated">AWS
* API Reference</a></p>
*/
- class AWS_S3_API QueueConfigurationDeprecated
+ class QueueConfigurationDeprecated
{
public:
- QueueConfigurationDeprecated();
- QueueConfigurationDeprecated(const Aws::Utils::Xml::XmlNode& xmlNode);
- QueueConfigurationDeprecated& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API QueueConfigurationDeprecated();
+ AWS_S3_API QueueConfigurationDeprecated(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API QueueConfigurationDeprecated& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -160,13 +160,13 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::Vector<Event> m_events;
- bool m_eventsHasBeenSet;
+ bool m_eventsHasBeenSet = false;
Aws::String m_queue;
- bool m_queueHasBeenSet;
+ bool m_queueHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RecordsEvent.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RecordsEvent.h
index 30ef211adb..345c0426e2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RecordsEvent.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RecordsEvent.h
@@ -19,11 +19,11 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RecordsEvent">AWS API
* Reference</a></p>
*/
- class AWS_S3_API RecordsEvent
+ class RecordsEvent
{
public:
- RecordsEvent() = default;
- RecordsEvent(Aws::Vector<unsigned char>&& value) { m_payload = std::move(value); }
+ AWS_S3_API RecordsEvent() = default;
+ AWS_S3_API RecordsEvent(Aws::Vector<unsigned char>&& value) { m_payload = std::move(value); }
/**
* <p>The byte array of partial, one or more result records.</p>
@@ -58,7 +58,7 @@ namespace Model
private:
Aws::Vector<unsigned char> m_payload;
- bool m_payloadHasBeenSet;
+ bool m_payloadHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Redirect.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Redirect.h
index 3a21bec482..a974475fe4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Redirect.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Redirect.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Redirect">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Redirect
+ class Redirect
{
public:
- Redirect();
- Redirect(const Aws::Utils::Xml::XmlNode& xmlNode);
- Redirect& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Redirect();
+ AWS_S3_API Redirect(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Redirect& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -386,19 +386,19 @@ namespace Model
private:
Aws::String m_hostName;
- bool m_hostNameHasBeenSet;
+ bool m_hostNameHasBeenSet = false;
Aws::String m_httpRedirectCode;
- bool m_httpRedirectCodeHasBeenSet;
+ bool m_httpRedirectCodeHasBeenSet = false;
Protocol m_protocol;
- bool m_protocolHasBeenSet;
+ bool m_protocolHasBeenSet = false;
Aws::String m_replaceKeyPrefixWith;
- bool m_replaceKeyPrefixWithHasBeenSet;
+ bool m_replaceKeyPrefixWithHasBeenSet = false;
Aws::String m_replaceKeyWith;
- bool m_replaceKeyWithHasBeenSet;
+ bool m_replaceKeyWithHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RedirectAllRequestsTo.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RedirectAllRequestsTo.h
index 73d5a96f6d..b704f28bf1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RedirectAllRequestsTo.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RedirectAllRequestsTo.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RedirectAllRequestsTo">AWS
* API Reference</a></p>
*/
- class AWS_S3_API RedirectAllRequestsTo
+ class RedirectAllRequestsTo
{
public:
- RedirectAllRequestsTo();
- RedirectAllRequestsTo(const Aws::Utils::Xml::XmlNode& xmlNode);
- RedirectAllRequestsTo& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RedirectAllRequestsTo();
+ AWS_S3_API RedirectAllRequestsTo(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RedirectAllRequestsTo& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -119,10 +119,10 @@ namespace Model
private:
Aws::String m_hostName;
- bool m_hostNameHasBeenSet;
+ bool m_hostNameHasBeenSet = false;
Protocol m_protocol;
- bool m_protocolHasBeenSet;
+ bool m_protocolHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicaModifications.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicaModifications.h
index e9685d6258..f8b35fc69c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicaModifications.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicaModifications.h
@@ -34,14 +34,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicaModifications">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicaModifications
+ class ReplicaModifications
{
public:
- ReplicaModifications();
- ReplicaModifications(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicaModifications& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicaModifications();
+ AWS_S3_API ReplicaModifications(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicaModifications& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -77,7 +77,7 @@ namespace Model
private:
ReplicaModificationsStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationConfiguration.h
index e138c79cba..9ce58fd351 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationConfiguration.h
@@ -30,83 +30,83 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicationConfiguration
+ class ReplicationConfiguration
{
public:
- ReplicationConfiguration();
- ReplicationConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicationConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationConfiguration();
+ AWS_S3_API ReplicationConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetRole() const{ return m_role; }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool RoleHasBeenSet() const { return m_roleHasBeenSet; }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetRole(const Aws::String& value) { m_roleHasBeenSet = true; m_role = value; }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetRole(Aws::String&& value) { m_roleHasBeenSet = true; m_role = std::move(value); }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetRole(const char* value) { m_roleHasBeenSet = true; m_role.assign(value); }
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ReplicationConfiguration& WithRole(const Aws::String& value) { SetRole(value); return *this;}
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline ReplicationConfiguration& WithRole(Aws::String&& value) { SetRole(std::move(value)); return *this;}
/**
- * <p>The Amazon Resource Name (ARN) of the AWS Identity and Access Management
- * (IAM) role that Amazon S3 assumes when replicating objects. For more
- * information, see <a
+ * <p>The Amazon Resource Name (ARN) of the Identity and Access Management (IAM)
+ * role that Amazon S3 assumes when replicating objects. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/replication-how-setup.html">How
* to Set Up Replication</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
@@ -164,10 +164,10 @@ namespace Model
private:
Aws::String m_role;
- bool m_roleHasBeenSet;
+ bool m_roleHasBeenSet = false;
Aws::Vector<ReplicationRule> m_rules;
- bool m_rulesHasBeenSet;
+ bool m_rulesHasBeenSet = false;
};
} // namespace Model
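
The wording change in `ReplicationConfiguration` only drops the "AWS" branding from the IAM role description; the role itself remains required. A hedged sketch of assembling a single-rule configuration with this model, where the bucket and role ARNs are placeholders and the result would be applied through the usual `PutBucketReplicationRequest`:

```cpp
#include <aws/s3/model/ReplicationConfiguration.h>
#include <aws/s3/model/ReplicationRule.h>
#include <aws/s3/model/ReplicationRuleFilter.h>
#include <aws/s3/model/Destination.h>
#include <aws/s3/model/DeleteMarkerReplication.h>

// Builds a minimal replication configuration. ARNs are placeholders.
Aws::S3::Model::ReplicationConfiguration MakeReplicationConfig() {
    using namespace Aws::S3::Model;

    Destination destination;
    destination.SetBucket("arn:aws:s3:::example-destination-bucket");

    ReplicationRule rule;
    rule.SetStatus(ReplicationRuleStatus::Enabled);
    rule.SetPriority(1);
    rule.SetFilter(ReplicationRuleFilter().WithPrefix(""));  // replicate everything
    // The service expects DeleteMarkerReplication when a Filter is used
    // (an S3 API requirement, not something introduced by this diff).
    rule.SetDeleteMarkerReplication(
        DeleteMarkerReplication().WithStatus(DeleteMarkerReplicationStatus::Disabled));
    rule.SetDestination(destination);

    ReplicationConfiguration config;
    // IAM role that Amazon S3 assumes when replicating objects, per the comment above.
    config.SetRole("arn:aws:iam::111122223333:role/example-replication-role");
    config.AddRules(rule);
    return config;
}
```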
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRule.h
index 2003426a67..e9d85afdd3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRule.h
@@ -34,14 +34,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRule">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicationRule
+ class ReplicationRule
{
public:
- ReplicationRule();
- ReplicationRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicationRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationRule();
+ AWS_S3_API ReplicationRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -189,7 +189,7 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p>
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).</p>
*/
inline const SourceSelectionCriteria& GetSourceSelectionCriteria() const{ return m_sourceSelectionCriteria; }
@@ -198,7 +198,7 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p>
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).</p>
*/
inline bool SourceSelectionCriteriaHasBeenSet() const { return m_sourceSelectionCriteriaHasBeenSet; }
@@ -207,7 +207,7 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p>
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).</p>
*/
inline void SetSourceSelectionCriteria(const SourceSelectionCriteria& value) { m_sourceSelectionCriteriaHasBeenSet = true; m_sourceSelectionCriteria = value; }
@@ -216,7 +216,7 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p>
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).</p>
*/
inline void SetSourceSelectionCriteria(SourceSelectionCriteria&& value) { m_sourceSelectionCriteriaHasBeenSet = true; m_sourceSelectionCriteria = std::move(value); }
@@ -225,7 +225,7 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p>
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).</p>
*/
inline ReplicationRule& WithSourceSelectionCriteria(const SourceSelectionCriteria& value) { SetSourceSelectionCriteria(value); return *this;}
@@ -234,7 +234,7 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p>
+ * managed key stored in Amazon Web Services Key Management Service (SSE-KMS).</p>
*/
inline ReplicationRule& WithSourceSelectionCriteria(SourceSelectionCriteria&& value) { SetSourceSelectionCriteria(std::move(value)); return *this;}
@@ -328,28 +328,28 @@ namespace Model
private:
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
int m_priority;
- bool m_priorityHasBeenSet;
+ bool m_priorityHasBeenSet = false;
ReplicationRuleFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
ReplicationRuleStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
SourceSelectionCriteria m_sourceSelectionCriteria;
- bool m_sourceSelectionCriteriaHasBeenSet;
+ bool m_sourceSelectionCriteriaHasBeenSet = false;
ExistingObjectReplication m_existingObjectReplication;
- bool m_existingObjectReplicationHasBeenSet;
+ bool m_existingObjectReplicationHasBeenSet = false;
Destination m_destination;
- bool m_destinationHasBeenSet;
+ bool m_destinationHasBeenSet = false;
DeleteMarkerReplication m_deleteMarkerReplication;
- bool m_deleteMarkerReplicationHasBeenSet;
+ bool m_deleteMarkerReplicationHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleAndOperator.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleAndOperator.h
index bf78d3af98..f19f496f5c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleAndOperator.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleAndOperator.h
@@ -35,14 +35,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRuleAndOperator">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicationRuleAndOperator
+ class ReplicationRuleAndOperator
{
public:
- ReplicationRuleAndOperator();
- ReplicationRuleAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicationRuleAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationRuleAndOperator();
+ AWS_S3_API ReplicationRuleAndOperator(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationRuleAndOperator& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -137,10 +137,10 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Aws::Vector<Tag> m_tags;
- bool m_tagsHasBeenSet;
+ bool m_tagsHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleFilter.h
index 630c385d9e..8d64d1cdd1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationRuleFilter.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationRuleFilter">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicationRuleFilter
+ class ReplicationRuleFilter
{
public:
- ReplicationRuleFilter();
- ReplicationRuleFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicationRuleFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationRuleFilter();
+ AWS_S3_API ReplicationRuleFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationRuleFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -229,13 +229,13 @@ namespace Model
private:
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Tag m_tag;
- bool m_tagHasBeenSet;
+ bool m_tagHasBeenSet = false;
ReplicationRuleAndOperator m_and;
- bool m_andHasBeenSet;
+ bool m_andHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTime.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTime.h
index fa5cfd6008..80a05eba50 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTime.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTime.h
@@ -31,14 +31,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationTime">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicationTime
+ class ReplicationTime
{
public:
- ReplicationTime();
- ReplicationTime(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicationTime& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationTime();
+ AWS_S3_API ReplicationTime(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationTime& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -111,10 +111,10 @@ namespace Model
private:
ReplicationTimeStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
ReplicationTimeValue m_time;
- bool m_timeHasBeenSet;
+ bool m_timeHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTimeValue.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTimeValue.h
index 846b5f88b9..eaabfecc7b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTimeValue.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ReplicationTimeValue.h
@@ -27,44 +27,40 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ReplicationTimeValue">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ReplicationTimeValue
+ class ReplicationTimeValue
{
public:
- ReplicationTimeValue();
- ReplicationTimeValue(const Aws::Utils::Xml::XmlNode& xmlNode);
- ReplicationTimeValue& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationTimeValue();
+ AWS_S3_API ReplicationTimeValue(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ReplicationTimeValue& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p> Contains an integer specifying time in minutes. </p> <p> Valid values: 15
- * minutes. </p>
+ * <p> Contains an integer specifying time in minutes. </p> <p> Valid value: 15</p>
*/
inline int GetMinutes() const{ return m_minutes; }
/**
- * <p> Contains an integer specifying time in minutes. </p> <p> Valid values: 15
- * minutes. </p>
+ * <p> Contains an integer specifying time in minutes. </p> <p> Valid value: 15</p>
*/
inline bool MinutesHasBeenSet() const { return m_minutesHasBeenSet; }
/**
- * <p> Contains an integer specifying time in minutes. </p> <p> Valid values: 15
- * minutes. </p>
+ * <p> Contains an integer specifying time in minutes. </p> <p> Valid value: 15</p>
*/
inline void SetMinutes(int value) { m_minutesHasBeenSet = true; m_minutes = value; }
/**
- * <p> Contains an integer specifying time in minutes. </p> <p> Valid values: 15
- * minutes. </p>
+ * <p> Contains an integer specifying time in minutes. </p> <p> Valid value: 15</p>
*/
inline ReplicationTimeValue& WithMinutes(int value) { SetMinutes(value); return *this;}
private:
int m_minutes;
- bool m_minutesHasBeenSet;
+ bool m_minutesHasBeenSet = false;
};
} // namespace Model
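
The documentation fix above tightens "Valid values: 15 minutes" to "Valid value: 15", i.e. 15 is currently the only accepted `Minutes` setting. A small hedged sketch of wiring it into a replication destination; pairing it with matching `Metrics` is an assumption based on how S3 Replication Time Control is normally enabled, not something shown in this diff.

```cpp
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/Destination.h>
#include <aws/s3/model/ReplicationTime.h>
#include <aws/s3/model/ReplicationTimeValue.h>
#include <aws/s3/model/Metrics.h>

// Enables Replication Time Control on a destination; 15 is the only valid
// Minutes value, per the comment above.
Aws::S3::Model::Destination MakeRtcDestination(const Aws::String& bucketArn) {
    using namespace Aws::S3::Model;

    ReplicationTimeValue fifteenMinutes;
    fifteenMinutes.SetMinutes(15);

    Destination destination;
    destination.SetBucket(bucketArn);
    destination.SetReplicationTime(
        ReplicationTime().WithStatus(ReplicationTimeStatus::Enabled)
                         .WithTime(fifteenMinutes));
    // Assumed: RTC is accompanied by replication metrics with the same threshold.
    destination.SetMetrics(
        Metrics().WithStatus(MetricsStatus::Enabled)
                 .WithEventThreshold(fifteenMinutes));
    return destination;
}
```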
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestPaymentConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestPaymentConfiguration.h
index 72861dbe7b..b6c5bc4789 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestPaymentConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestPaymentConfiguration.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestPaymentConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API RequestPaymentConfiguration
+ class RequestPaymentConfiguration
{
public:
- RequestPaymentConfiguration();
- RequestPaymentConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- RequestPaymentConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RequestPaymentConfiguration();
+ AWS_S3_API RequestPaymentConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RequestPaymentConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -70,7 +70,7 @@ namespace Model
private:
Payer m_payer;
- bool m_payerHasBeenSet;
+ bool m_payerHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestProgress.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestProgress.h
index 717ad47c6f..b4a08d0a7c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestProgress.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RequestProgress.h
@@ -26,14 +26,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RequestProgress">AWS
* API Reference</a></p>
*/
- class AWS_S3_API RequestProgress
+ class RequestProgress
{
public:
- RequestProgress();
- RequestProgress(const Aws::Utils::Xml::XmlNode& xmlNode);
- RequestProgress& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RequestProgress();
+ AWS_S3_API RequestProgress(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RequestProgress& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -63,7 +63,7 @@ namespace Model
private:
bool m_enabled;
- bool m_enabledHasBeenSet;
+ bool m_enabledHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectRequest.h
index f588c84270..05e08111be 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectRequest.h
@@ -9,6 +9,7 @@
#include <aws/core/utils/memory/stl/AWSString.h>
#include <aws/s3/model/RestoreRequest.h>
#include <aws/s3/model/RequestPayer.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API RestoreObjectRequest : public S3Request
+ class RestoreObjectRequest : public S3Request
{
public:
- RestoreObjectRequest();
+ AWS_S3_API RestoreObjectRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,31 +37,37 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "RestoreObject"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name containing the object to restore. </p> <p>When using this
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -69,19 +76,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -90,19 +97,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -111,19 +118,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -132,19 +139,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -153,19 +160,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline RestoreObjectRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -174,19 +181,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline RestoreObjectRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -195,19 +202,19 @@ namespace Model
* action with an access point, you must direct requests to the access point
* hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline RestoreObjectRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -333,58 +340,143 @@ namespace Model
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline RestoreObjectRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p>
+ */
+ inline RestoreObjectRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline RestoreObjectRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline RestoreObjectRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline RestoreObjectRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -431,25 +523,28 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
RestoreRequest m_restoreRequest;
- bool m_restoreRequestHasBeenSet;
+ bool m_restoreRequestHasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
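The hunks above touch three request fields: the bucket (which, per the revised comments, may be an access point ARN or an S3 on Outposts ARN), the newly added <code>ChecksumAlgorithm</code> accessors, and <code>ExpectedBucketOwner</code> (now documented as failing with <code>403 Forbidden</code> on a mismatch). Below is a minimal, hedged sketch of wiring these together with the AWS SDK for C++; the bucket, key, account ID, and the 7-day Standard-tier restore parameters are placeholders, not values taken from this diff.

```cpp
#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/RestoreObjectRequest.h>
#include <aws/s3/model/RestoreRequest.h>
#include <aws/s3/model/GlacierJobParameters.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/Tier.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default credential chain and region

        Aws::S3::Model::GlacierJobParameters glacierParams;
        glacierParams.SetTier(Aws::S3::Model::Tier::Standard);

        Aws::S3::Model::RestoreRequest restore;
        restore.SetDays(7);                        // keep the restored copy for 7 days
        restore.SetGlacierJobParameters(glacierParams);

        Aws::S3::Model::RestoreObjectRequest request;
        request.SetBucket("my-example-bucket");    // may also be an access point or Outposts ARN
        request.SetKey("archive/object.bin");
        request.SetRestoreRequest(restore);
        request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::SHA256);  // field added in this update
        request.SetExpectedBucketOwner("111122223333");  // mismatch now yields 403 Forbidden

        auto outcome = client.RestoreObject(request);
        if (!outcome.IsSuccess()) {
            std::cerr << "RestoreObject failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
```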
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectResult.h
index 2cadd53189..703a41037b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreObjectResult.h
@@ -25,12 +25,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API RestoreObjectResult
+ class RestoreObjectResult
{
public:
- RestoreObjectResult();
- RestoreObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- RestoreObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API RestoreObjectResult();
+ AWS_S3_API RestoreObjectResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API RestoreObjectResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreRequest.h
index f2460fbc85..9324a713ad 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RestoreRequest.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RestoreRequest">AWS
* API Reference</a></p>
*/
- class AWS_S3_API RestoreRequest
+ class RestoreRequest
{
public:
- RestoreRequest();
- RestoreRequest(const Aws::Utils::Xml::XmlNode& xmlNode);
- RestoreRequest& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RestoreRequest();
+ AWS_S3_API RestoreRequest(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RestoreRequest& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -275,25 +275,25 @@ namespace Model
private:
int m_days;
- bool m_daysHasBeenSet;
+ bool m_daysHasBeenSet = false;
GlacierJobParameters m_glacierJobParameters;
- bool m_glacierJobParametersHasBeenSet;
+ bool m_glacierJobParametersHasBeenSet = false;
RestoreRequestType m_type;
- bool m_typeHasBeenSet;
+ bool m_typeHasBeenSet = false;
Tier m_tier;
- bool m_tierHasBeenSet;
+ bool m_tierHasBeenSet = false;
Aws::String m_description;
- bool m_descriptionHasBeenSet;
+ bool m_descriptionHasBeenSet = false;
SelectParameters m_selectParameters;
- bool m_selectParametersHasBeenSet;
+ bool m_selectParametersHasBeenSet = false;
OutputLocation m_outputLocation;
- bool m_outputLocationHasBeenSet;
+ bool m_outputLocationHasBeenSet = false;
};
} // namespace Model
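A mechanical change repeated throughout this diff replaces uninitialized flags such as <code>bool m_daysHasBeenSet;</code> with in-class initializers (<code>= false</code>). The standalone toy class below (not the SDK's own code) sketches the "HasBeenSet" pattern these headers rely on and why default-initializing the flag matters even without an explicit constructor.

```cpp
#include <string>

// Minimal sketch of the generated accessor pattern: only fields whose flag was
// flipped by a setter are considered "present" when the request is serialized.
class ExampleRequest
{
public:
    const std::string& GetPrefix() const { return m_prefix; }
    bool PrefixHasBeenSet() const { return m_prefixHasBeenSet; }
    void SetPrefix(const std::string& value) { m_prefixHasBeenSet = true; m_prefix = value; }

private:
    std::string m_prefix;
    bool m_prefixHasBeenSet = false;  // the in-class "= false" guarantees a cleared flag
};

int main()
{
    ExampleRequest req;
    bool before = req.PrefixHasBeenSet();  // false: nothing set yet
    req.SetPrefix("logs/");
    bool after = req.PrefixHasBeenSet();   // true: field will be emitted
    return (before == false && after == true) ? 0 : 1;
}
```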
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RoutingRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RoutingRule.h
index e87bdb7958..127ad69117 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RoutingRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/RoutingRule.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/RoutingRule">AWS API
* Reference</a></p>
*/
- class AWS_S3_API RoutingRule
+ class RoutingRule
{
public:
- RoutingRule();
- RoutingRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- RoutingRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RoutingRule();
+ AWS_S3_API RoutingRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API RoutingRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -142,10 +142,10 @@ namespace Model
private:
Condition m_condition;
- bool m_conditionHasBeenSet;
+ bool m_conditionHasBeenSet = false;
Redirect m_redirect;
- bool m_redirectHasBeenSet;
+ bool m_redirectHasBeenSet = false;
};
} // namespace Model
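Another recurring change moves the <code>AWS_S3_API</code> export macro off the class declaration and onto the individual out-of-line members, leaving header-only inline accessors unannotated. The rough standalone illustration below shows the two styles; <code>MY_LIB_API</code> stands in for <code>AWS_S3_API</code> and the macro definition is deliberately simplified (the real one also handles the dllimport side).

```cpp
// Simplified stand-in for the SDK's export macro.
#if defined(_WIN32) && defined(USE_IMPORT_EXPORT)
  #define MY_LIB_API __declspec(dllexport)
#else
  #define MY_LIB_API
#endif

// Before: class MY_LIB_API RoutingRuleStyle { ... };
// After:  only the out-of-line members carry the annotation.
class RoutingRuleStyle
{
public:
    MY_LIB_API RoutingRuleStyle();
    MY_LIB_API void AddToNode() const;

    // Inline accessors defined in the header need no export annotation.
    inline bool ConditionHasBeenSet() const { return m_conditionHasBeenSet; }

private:
    bool m_conditionHasBeenSet = false;
};

RoutingRuleStyle::RoutingRuleStyle() = default;
void RoutingRuleStyle::AddToNode() const {}

int main()
{
    RoutingRuleStyle rule;
    return rule.ConditionHasBeenSet() ? 1 : 0;
}
```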
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Rule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Rule.h
index 7613ef3226..d0065042f3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Rule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Rule.h
@@ -32,21 +32,21 @@ namespace Model
* <p>Specifies lifecycle rules for an Amazon S3 bucket. For more information, see
* <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTlifecycle.html">Put
- * Bucket Lifecycle Configuration</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>. For examples, see <a
+ * Bucket Lifecycle Configuration</a> in the <i>Amazon S3 API Reference</i>. For
+ * examples, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/API_PutBucketLifecycleConfiguration.html#API_PutBucketLifecycleConfiguration_Examples">Put
* Bucket Lifecycle Configuration Examples</a>.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Rule">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Rule
+ class Rule
{
public:
- Rule();
- Rule(const Aws::Utils::Xml::XmlNode& xmlNode);
- Rule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Rule();
+ AWS_S3_API Rule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Rule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -355,28 +355,28 @@ namespace Model
private:
LifecycleExpiration m_expiration;
- bool m_expirationHasBeenSet;
+ bool m_expirationHasBeenSet = false;
Aws::String m_iD;
- bool m_iDHasBeenSet;
+ bool m_iDHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
ExpirationStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
Transition m_transition;
- bool m_transitionHasBeenSet;
+ bool m_transitionHasBeenSet = false;
NoncurrentVersionTransition m_noncurrentVersionTransition;
- bool m_noncurrentVersionTransitionHasBeenSet;
+ bool m_noncurrentVersionTransitionHasBeenSet = false;
NoncurrentVersionExpiration m_noncurrentVersionExpiration;
- bool m_noncurrentVersionExpirationHasBeenSet;
+ bool m_noncurrentVersionExpirationHasBeenSet = false;
AbortIncompleteMultipartUpload m_abortIncompleteMultipartUpload;
- bool m_abortIncompleteMultipartUploadHasBeenSet;
+ bool m_abortIncompleteMultipartUploadHasBeenSet = false;
};
} // namespace Model
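The <code>Rule</code> class above models a legacy bucket lifecycle rule (its comment now points at the <i>Amazon S3 API Reference</i>). A hedged sketch of populating one follows; the setter names are assumed from the SDK's usual generated pattern for the members listed above, the ID/prefix/day values are placeholders, and attaching the rule to a put-lifecycle request is not shown.

```cpp
#include <aws/s3/model/Rule.h>
#include <aws/s3/model/LifecycleExpiration.h>
#include <aws/s3/model/ExpirationStatus.h>

int main()
{
    Aws::S3::Model::LifecycleExpiration expiration;
    expiration.SetDays(30);                        // expire matching objects after 30 days

    Aws::S3::Model::Rule rule;
    rule.SetID("expire-old-logs");                 // placeholder rule name
    rule.SetPrefix("logs/");                       // applies to keys under logs/
    rule.SetStatus(Aws::S3::Model::ExpirationStatus::Enabled);
    rule.SetExpiration(expiration);

    // The populated rule would then be added to the lifecycle configuration of a
    // put-bucket-lifecycle request (omitted here).
    return 0;
}
```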
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3KeyFilter.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3KeyFilter.h
index 0f22a9524e..de9827621b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3KeyFilter.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3KeyFilter.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3KeyFilter">AWS API
* Reference</a></p>
*/
- class AWS_S3_API S3KeyFilter
+ class S3KeyFilter
{
public:
- S3KeyFilter();
- S3KeyFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
- S3KeyFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API S3KeyFilter();
+ AWS_S3_API S3KeyFilter(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API S3KeyFilter& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -66,7 +66,7 @@ namespace Model
private:
Aws::Vector<FilterRule> m_filterRules;
- bool m_filterRulesHasBeenSet;
+ bool m_filterRulesHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3Location.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3Location.h
index 8f0eab338b..a2614cc2c8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3Location.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/S3Location.h
@@ -35,14 +35,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/S3Location">AWS API
* Reference</a></p>
*/
- class AWS_S3_API S3Location
+ class S3Location
{
public:
- S3Location();
- S3Location(const Aws::Utils::Xml::XmlNode& xmlNode);
- S3Location& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API S3Location();
+ AWS_S3_API S3Location(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API S3Location& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -323,28 +323,28 @@ namespace Model
private:
Aws::String m_bucketName;
- bool m_bucketNameHasBeenSet;
+ bool m_bucketNameHasBeenSet = false;
Aws::String m_prefix;
- bool m_prefixHasBeenSet;
+ bool m_prefixHasBeenSet = false;
Encryption m_encryption;
- bool m_encryptionHasBeenSet;
+ bool m_encryptionHasBeenSet = false;
ObjectCannedACL m_cannedACL;
- bool m_cannedACLHasBeenSet;
+ bool m_cannedACLHasBeenSet = false;
Aws::Vector<Grant> m_accessControlList;
- bool m_accessControlListHasBeenSet;
+ bool m_accessControlListHasBeenSet = false;
Tagging m_tagging;
- bool m_taggingHasBeenSet;
+ bool m_taggingHasBeenSet = false;
Aws::Vector<MetadataEntry> m_userMetadata;
- bool m_userMetadataHasBeenSet;
+ bool m_userMetadataHasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
};
} // namespace Model
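<code>S3Location</code> describes where the output of a SELECT-type restore is written. A hedged sketch of assembling it into a <code>RestoreRequest</code> via <code>OutputLocation</code>; the bucket name and prefix are placeholders, and the setter names follow the generated pattern implied by the members shown above.

```cpp
#include <aws/s3/model/S3Location.h>
#include <aws/s3/model/OutputLocation.h>
#include <aws/s3/model/RestoreRequest.h>
#include <aws/s3/model/RestoreRequestType.h>
#include <aws/s3/model/StorageClass.h>

int main()
{
    Aws::S3::Model::S3Location location;
    location.SetBucketName("my-results-bucket");   // placeholder destination bucket
    location.SetPrefix("restore-output/");
    location.SetStorageClass(Aws::S3::Model::StorageClass::STANDARD);

    Aws::S3::Model::OutputLocation output;
    output.SetS3(location);

    Aws::S3::Model::RestoreRequest restore;
    restore.SetType(Aws::S3::Model::RestoreRequestType::SELECT);
    restore.SetOutputLocation(output);             // query results land in the location above
    return 0;
}
```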
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSEKMS.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSEKMS.h
index b7b1ac25be..eaae8f2319 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSEKMS.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSEKMS.h
@@ -28,68 +28,68 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSEKMS">AWS API
* Reference</a></p>
*/
- class AWS_S3_API SSEKMS
+ class SSEKMS
{
public:
- SSEKMS();
- SSEKMS(const Aws::Utils::Xml::XmlNode& xmlNode);
- SSEKMS& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SSEKMS();
+ AWS_S3_API SSEKMS(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SSEKMS& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline const Aws::String& GetKeyId() const{ return m_keyId; }
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline bool KeyIdHasBeenSet() const { return m_keyIdHasBeenSet; }
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline void SetKeyId(const Aws::String& value) { m_keyIdHasBeenSet = true; m_keyId = value; }
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline void SetKeyId(Aws::String&& value) { m_keyIdHasBeenSet = true; m_keyId = std::move(value); }
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline void SetKeyId(const char* value) { m_keyIdHasBeenSet = true; m_keyId.assign(value); }
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline SSEKMS& WithKeyId(const Aws::String& value) { SetKeyId(value); return *this;}
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline SSEKMS& WithKeyId(Aws::String&& value) { SetKeyId(std::move(value)); return *this;}
/**
- * <p>Specifies the ID of the AWS Key Management Service (AWS KMS) symmetric
- * customer managed customer master key (CMK) to use for encrypting inventory
+ * <p>Specifies the ID of the Amazon Web Services Key Management Service (Amazon
+ * Web Services KMS) symmetric customer managed key to use for encrypting inventory
* reports.</p>
*/
inline SSEKMS& WithKeyId(const char* value) { SetKeyId(value); return *this;}
@@ -97,7 +97,7 @@ namespace Model
private:
Aws::String m_keyId;
- bool m_keyIdHasBeenSet;
+ bool m_keyIdHasBeenSet = false;
};
} // namespace Model
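For <code>SSEKMS</code>, the only documented field is the customer managed KMS key used to encrypt inventory reports (the rewording above drops the retired "customer master key (CMK)" phrasing). A tiny sketch, with a placeholder key ARN; attaching the object to an inventory configuration's encryption settings is not shown.

```cpp
#include <aws/s3/model/SSEKMS.h>

int main()
{
    Aws::S3::Model::SSEKMS sseKms;
    // Key ID or full ARN of the customer managed key; placeholder value.
    sseKms.SetKeyId("arn:aws:kms:us-east-1:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab");
    return sseKms.KeyIdHasBeenSet() ? 0 : 1;
}
```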
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSES3.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSES3.h
index 7312fd2917..3311fc7879 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSES3.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SSES3.h
@@ -26,14 +26,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SSES3">AWS API
* Reference</a></p>
*/
- class AWS_S3_API SSES3
+ class SSES3
{
public:
- SSES3();
- SSES3(const Aws::Utils::Xml::XmlNode& xmlNode);
- SSES3& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SSES3();
+ AWS_S3_API SSES3(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SSES3& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
};
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ScanRange.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ScanRange.h
index dd964d0441..9daee6c68d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ScanRange.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ScanRange.h
@@ -29,20 +29,21 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ScanRange">AWS API
* Reference</a></p>
*/
- class AWS_S3_API ScanRange
+ class ScanRange
{
public:
- ScanRange();
- ScanRange(const Aws::Utils::Xml::XmlNode& xmlNode);
- ScanRange& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ScanRange();
+ AWS_S3_API ScanRange(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ScanRange& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p>Specifies the start of the byte range. This parameter is optional. Valid
- * values: non-negative integers. The default value is 0. If only start is
- * supplied, it means scan from that point to the end of the file.For example;
+ * values: non-negative integers. The default value is 0. If only
+ * <code>start</code> is supplied, it means scan from that point to the end of the
+ * file. For example,
* <code>&lt;scanrange&gt;&lt;start&gt;50&lt;/start&gt;&lt;/scanrange&gt;</code>
* means scan from byte 50 until the end of the file.</p>
*/
@@ -50,8 +51,9 @@ namespace Model
/**
* <p>Specifies the start of the byte range. This parameter is optional. Valid
- * values: non-negative integers. The default value is 0. If only start is
- * supplied, it means scan from that point to the end of the file.For example;
+ * values: non-negative integers. The default value is 0. If only
+ * <code>start</code> is supplied, it means scan from that point to the end of the
+ * file. For example,
* <code>&lt;scanrange&gt;&lt;start&gt;50&lt;/start&gt;&lt;/scanrange&gt;</code>
* means scan from byte 50 until the end of the file.</p>
*/
@@ -59,8 +61,9 @@ namespace Model
/**
* <p>Specifies the start of the byte range. This parameter is optional. Valid
- * values: non-negative integers. The default value is 0. If only start is
- * supplied, it means scan from that point to the end of the file.For example;
+ * values: non-negative integers. The default value is 0. If only
+ * <code>start</code> is supplied, it means scan from that point to the end of the
+ * file. For example,
* <code>&lt;scanrange&gt;&lt;start&gt;50&lt;/start&gt;&lt;/scanrange&gt;</code>
* means scan from byte 50 until the end of the file.</p>
*/
@@ -68,8 +71,9 @@ namespace Model
/**
* <p>Specifies the start of the byte range. This parameter is optional. Valid
- * values: non-negative integers. The default value is 0. If only start is
- * supplied, it means scan from that point to the end of the file.For example;
+ * values: non-negative integers. The default value is 0. If only
+ * <code>start</code> is supplied, it means scan from that point to the end of the
+ * file. For example,
* <code>&lt;scanrange&gt;&lt;start&gt;50&lt;/start&gt;&lt;/scanrange&gt;</code>
* means scan from byte 50 until the end of the file.</p>
*/
@@ -119,10 +123,10 @@ namespace Model
private:
long long m_start;
- bool m_startHasBeenSet;
+ bool m_startHasBeenSet = false;
long long m_end;
- bool m_endHasBeenSet;
+ bool m_endHasBeenSet = false;
};
} // namespace Model
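The reworded <code>ScanRange</code> comment clarifies that supplying only <code>start</code> scans from that byte to the end of the object. A short sketch under that reading; the setter names are assumed from the <code>long long m_start</code>/<code>m_end</code> members above, and the range would normally be passed to a select-object-content request.

```cpp
#include <aws/s3/model/ScanRange.h>

int main()
{
    Aws::S3::Model::ScanRange range;
    range.SetStart(50);   // begin scanning at byte 50
    range.SetEnd(100);    // omit this call to scan from byte 50 to the end of the file
    return (range.StartHasBeenSet() && range.EndHasBeenSet()) ? 0 : 1;
}
```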
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentHandler.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentHandler.h
index 508785ba3b..6abef89df7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentHandler.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentHandler.h
@@ -30,7 +30,7 @@ namespace Model
UNKNOWN
};
- class AWS_S3_API SelectObjectContentHandler : public Aws::Utils::Event::EventStreamHandler
+ class SelectObjectContentHandler : public Aws::Utils::Event::EventStreamHandler
{
typedef std::function<void(const RecordsEvent&)> RecordsEventCallback;
typedef std::function<void(const StatsEvent&)> StatsEventCallback;
@@ -40,10 +40,10 @@ namespace Model
typedef std::function<void(const Aws::Client::AWSError<S3Errors>& error)> ErrorCallback;
public:
- SelectObjectContentHandler();
- SelectObjectContentHandler& operator=(const SelectObjectContentHandler&) = default;
+ AWS_S3_API SelectObjectContentHandler();
+ AWS_S3_API SelectObjectContentHandler& operator=(const SelectObjectContentHandler&) = default;
- virtual void OnEvent() override;
+ AWS_S3_API virtual void OnEvent() override;
inline void SetRecordsEventCallback(const RecordsEventCallback& callback) { m_onRecordsEvent = callback; }
inline void SetStatsEventCallback(const StatsEventCallback& callback) { m_onStatsEvent = callback; }
@@ -53,9 +53,9 @@ namespace Model
inline void SetOnErrorCallback(const ErrorCallback& callback) { m_onError = callback; }
private:
- void HandleEventInMessage();
- void HandleErrorInMessage();
- void MarshallError(const Aws::String& errorCode, const Aws::String& errorMessage);
+ AWS_S3_API void HandleEventInMessage();
+ AWS_S3_API void HandleErrorInMessage();
+ AWS_S3_API void MarshallError(const Aws::String& errorCode, const Aws::String& errorMessage);
RecordsEventCallback m_onRecordsEvent;
StatsEventCallback m_onStatsEvent;
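The handler above exposes the callback setters shown in this hunk (<code>SetRecordsEventCallback</code>, <code>SetStatsEventCallback</code>, <code>SetOnErrorCallback</code>). A minimal sketch of registering them; the <code>GetPayload()</code> accessor on <code>RecordsEvent</code> is assumed from the generated event model rather than shown in this diff.

```cpp
#include <aws/s3/model/SelectObjectContentHandler.h>
#include <aws/s3/model/RecordsEvent.h>
#include <aws/s3/model/StatsEvent.h>
#include <aws/s3/S3Errors.h>
#include <aws/core/client/AWSError.h>
#include <iostream>

int main()
{
    Aws::S3::Model::SelectObjectContentHandler handler;

    handler.SetRecordsEventCallback([](const Aws::S3::Model::RecordsEvent& event) {
        // Payload accessor name assumed; each event carries a chunk of query results.
        std::cout << "received " << event.GetPayload().size() << " bytes of records\n";
    });
    handler.SetStatsEventCallback([](const Aws::S3::Model::StatsEvent&) {
        std::cout << "query stats event received\n";
    });
    handler.SetOnErrorCallback([](const Aws::Client::AWSError<Aws::S3::S3Errors>& error) {
        std::cerr << "select failed: " << error.GetMessage() << "\n";
    });

    // The handler would be attached to a SelectObjectContentRequest with
    // SetEventStreamHandler(handler) before issuing the call.
    return 0;
}
```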
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentRequest.h
index 73dc58023f..6d032445ef 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectObjectContentRequest.h
@@ -40,10 +40,10 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectObjectContentRequest">AWS
* API Reference</a></p>
*/
- class AWS_S3_API SelectObjectContentRequest : public S3Request
+ class SelectObjectContentRequest : public S3Request
{
public:
- SelectObjectContentRequest();
+ AWS_S3_API SelectObjectContentRequest();
    // The service request name is the operation name that will send this request out;
    // each operation should have a unique request name, so that we can get the operation's name from this request.
    // The service request name is the operation name that will send this request out;
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -51,11 +51,11 @@ namespace Model
    // so we cannot get the operation's name from the response.
inline virtual const char* GetServiceRequestName() const override { return "SelectObjectContent"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
/**
* Underlying Event Stream Decoder.
@@ -77,6 +77,10 @@ namespace Model
*/
inline SelectObjectContentRequest& WithEventStreamHandler(const SelectObjectContentHandler& value) { SetEventStreamHandler(value); return *this; }
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The S3 bucket.</p>
@@ -161,172 +165,220 @@ namespace Model
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetSSECustomerAlgorithm() const{ return m_sSECustomerAlgorithm; }
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool SSECustomerAlgorithmHasBeenSet() const { return m_sSECustomerAlgorithmHasBeenSet; }
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerAlgorithm(const Aws::String& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = value; }
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerAlgorithm(Aws::String&& value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm = std::move(value); }
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerAlgorithm(const char* value) { m_sSECustomerAlgorithmHasBeenSet = true; m_sSECustomerAlgorithm.assign(value); }
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerAlgorithm(const Aws::String& value) { SetSSECustomerAlgorithm(value); return *this;}
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerAlgorithm(Aws::String&& value) { SetSSECustomerAlgorithm(std::move(value)); return *this;}
/**
- * <p>The SSE Algorithm used to encrypt the object. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) algorithm used to encrypt the object. This
+ * parameter is needed only when the object was created using a checksum algorithm.
+ * For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerAlgorithm(const char* value) { SetSSECustomerAlgorithm(value); return *this;}
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetSSECustomerKey() const{ return m_sSECustomerKey; }
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool SSECustomerKeyHasBeenSet() const { return m_sSECustomerKeyHasBeenSet; }
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerKey(const Aws::String& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = value; }
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerKey(Aws::String&& value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey = std::move(value); }
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerKey(const char* value) { m_sSECustomerKeyHasBeenSet = true; m_sSECustomerKey.assign(value); }
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerKey(const Aws::String& value) { SetSSECustomerKey(value); return *this;}
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerKey(Aws::String&& value) { SetSSECustomerKey(std::move(value)); return *this;}
/**
- * <p>The SSE Customer Key. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerKey(const char* value) { SetSSECustomerKey(value); return *this;}
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetSSECustomerKeyMD5() const{ return m_sSECustomerKeyMD5; }
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool SSECustomerKeyMD5HasBeenSet() const { return m_sSECustomerKeyMD5HasBeenSet; }
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerKeyMD5(const Aws::String& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = value; }
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerKeyMD5(Aws::String&& value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5 = std::move(value); }
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetSSECustomerKeyMD5(const char* value) { m_sSECustomerKeyMD5HasBeenSet = true; m_sSECustomerKeyMD5.assign(value); }
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerKeyMD5(const Aws::String& value) { SetSSECustomerKeyMD5(value); return *this;}
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerKeyMD5(Aws::String&& value) { SetSSECustomerKeyMD5(std::move(value)); return *this;}
/**
- * <p>The SSE Customer Key MD5. For more information, see <a
- * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Server-Side
- * Encryption (Using Customer-Provided Encryption Keys</a>. </p>
+ * <p>The MD5 server-side encryption (SSE) customer managed key. This parameter is
+ * needed only when the object was created using a checksum algorithm. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/ServerSideEncryptionCustomerKeys.html">Protecting
+ * data using SSE-C keys</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline SelectObjectContentRequest& WithSSECustomerKeyMD5(const char* value) { SetSSECustomerKeyMD5(value); return *this;}
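As a usage sketch for the SSE-C documentation above: a caller supplies the same customer-provided key the object was written with, base64-encoded, plus the base64 of its MD5 digest. The bucket/key names and raw key bytes below are placeholders, and the aws-core HashingUtils helpers are assumed to be available; none of this is part of the patch itself.

#include <aws/core/Aws.h>
#include <aws/core/utils/HashingUtils.h>
#include <aws/s3/model/SelectObjectContentRequest.h>

using namespace Aws::Utils;
using namespace Aws::S3::Model;

// Sketch: attach SSE-C parameters to an S3 Select request.
// "key" must be the same 256-bit key the object was encrypted with.
SelectObjectContentRequest MakeSseCSelectRequest(const Aws::String& bucket,
                                                 const Aws::String& object,
                                                 const ByteBuffer& key)
{
    SelectObjectContentRequest req;
    req.SetBucket(bucket);
    req.SetKey(object);
    req.SetSSECustomerAlgorithm("AES256");                   // AES256 is the only SSE-C algorithm
    req.SetSSECustomerKey(HashingUtils::Base64Encode(key));  // base64-encoded key material
    // base64 of the MD5 digest of the raw key, used by S3 as an integrity check
    Aws::String rawKey(reinterpret_cast<const char*>(key.GetUnderlyingData()), key.GetLength());
    req.SetSSECustomerKeyMD5(HashingUtils::Base64Encode(HashingUtils::CalculateMD5(rawKey)));
    return req;
}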
@@ -601,57 +653,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline SelectObjectContentRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline SelectObjectContentRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline SelectObjectContentRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
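A short illustration of the ExpectedBucketOwner guard documented above: when the supplied account ID does not own the bucket, the call returns 403 Forbidden instead of reading any data. The account ID and error handling are placeholders, not part of this patch.

#include <aws/core/http/HttpResponse.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/SelectObjectContentRequest.h>

// Sketch: fail fast if the bucket is owned by a different account.
void SelectWithOwnerCheck(Aws::S3::S3Client& s3, Aws::S3::Model::SelectObjectContentRequest req)
{
    req.SetExpectedBucketOwner("111122223333");   // hypothetical account ID
    auto outcome = s3.SelectObjectContent(req);
    if (!outcome.IsSuccess() &&
        outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::FORBIDDEN)
    {
        // The bucket exists but belongs to another account; nothing was scanned.
    }
}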
@@ -698,45 +750,45 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::String m_expression;
- bool m_expressionHasBeenSet;
+ bool m_expressionHasBeenSet = false;
ExpressionType m_expressionType;
- bool m_expressionTypeHasBeenSet;
+ bool m_expressionTypeHasBeenSet = false;
RequestProgress m_requestProgress;
- bool m_requestProgressHasBeenSet;
+ bool m_requestProgressHasBeenSet = false;
InputSerialization m_inputSerialization;
- bool m_inputSerializationHasBeenSet;
+ bool m_inputSerializationHasBeenSet = false;
OutputSerialization m_outputSerialization;
- bool m_outputSerializationHasBeenSet;
+ bool m_outputSerializationHasBeenSet = false;
ScanRange m_scanRange;
- bool m_scanRangeHasBeenSet;
+ bool m_scanRangeHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
- Aws::Utils::Event::EventStreamDecoder m_decoder;
+ bool m_customizedAccessLogTagHasBeenSet = false;
SelectObjectContentHandler m_handler;
+ Aws::Utils::Event::EventStreamDecoder m_decoder;
};
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectParameters.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectParameters.h
index b9ddc3073d..450fabed41 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectParameters.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SelectParameters.h
@@ -30,14 +30,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SelectParameters">AWS
* API Reference</a></p>
*/
- class AWS_S3_API SelectParameters
+ class SelectParameters
{
public:
- SelectParameters();
- SelectParameters(const Aws::Utils::Xml::XmlNode& xmlNode);
- SelectParameters& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SelectParameters();
+ AWS_S3_API SelectParameters(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SelectParameters& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -176,16 +176,16 @@ namespace Model
private:
InputSerialization m_inputSerialization;
- bool m_inputSerializationHasBeenSet;
+ bool m_inputSerializationHasBeenSet = false;
ExpressionType m_expressionType;
- bool m_expressionTypeHasBeenSet;
+ bool m_expressionTypeHasBeenSet = false;
Aws::String m_expression;
- bool m_expressionHasBeenSet;
+ bool m_expressionHasBeenSet = false;
OutputSerialization m_outputSerialization;
- bool m_outputSerializationHasBeenSet;
+ bool m_outputSerializationHasBeenSet = false;
};
} // namespace Model
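Throughout this update the AWS_S3_API export macro moves from the class declarations onto the out-of-line members (constructors and XML serializers), which keeps inline accessors out of the shared-library interface. Roughly, the macro expands along the following lines on Windows shared builds and to nothing elsewhere; the real definition lives in the SDK's *_EXPORTS.h headers, so treat this as an approximation only.

// Approximate shape of the export macro (see aws/s3/S3_EXPORTS.h for the real definition).
#if defined(_MSC_VER) && defined(USE_IMPORT_EXPORT)
    #ifdef AWS_S3_EXPORTS
        #define AWS_S3_API __declspec(dllexport)   // building the S3 DLL itself
    #else
        #define AWS_S3_API __declspec(dllimport)   // consuming a prebuilt DLL
    #endif
#else
    #define AWS_S3_API                             // static or non-MSVC builds: no decoration
#endif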
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionByDefault.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionByDefault.h
index 273cb34fbe..69fc7b2dd7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionByDefault.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionByDefault.h
@@ -26,21 +26,25 @@ namespace Model
/**
* <p>Describes the default server-side encryption to apply to new objects in the
* bucket. If a PUT Object request doesn't specify any server-side encryption, this
- * default encryption will be applied. For more information, see <a
+ * default encryption will be applied. If you don't specify a customer managed key
+ * at configuration, Amazon S3 automatically creates an Amazon Web Services KMS key
+ * in your Amazon Web Services account the first time that you add an object
+ * encrypted with SSE-KMS to a bucket. By default, Amazon S3 uses this KMS key for
+ * SSE-KMS. For more information, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTencryption.html">PUT
- * Bucket encryption</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p><p><h3>See Also:</h3> <a
+ * Bucket encryption</a> in the <i>Amazon S3 API Reference</i>.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionByDefault">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ServerSideEncryptionByDefault
+ class ServerSideEncryptionByDefault
{
public:
- ServerSideEncryptionByDefault();
- ServerSideEncryptionByDefault(const Aws::Utils::Xml::XmlNode& xmlNode);
- ServerSideEncryptionByDefault& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ServerSideEncryptionByDefault();
+ AWS_S3_API ServerSideEncryptionByDefault(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ServerSideEncryptionByDefault& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -75,172 +79,180 @@ namespace Model
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline const Aws::String& GetKMSMasterKeyID() const{ return m_kMSMasterKeyID; }
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline bool KMSMasterKeyIDHasBeenSet() const { return m_kMSMasterKeyIDHasBeenSet; }
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetKMSMasterKeyID(const Aws::String& value) { m_kMSMasterKeyIDHasBeenSet = true; m_kMSMasterKeyID = value; }
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetKMSMasterKeyID(Aws::String&& value) { m_kMSMasterKeyIDHasBeenSet = true; m_kMSMasterKeyID = std::move(value); }
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline void SetKMSMasterKeyID(const char* value) { m_kMSMasterKeyIDHasBeenSet = true; m_kMSMasterKeyID.assign(value); }
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline ServerSideEncryptionByDefault& WithKMSMasterKeyID(const Aws::String& value) { SetKMSMasterKeyID(value); return *this;}
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline ServerSideEncryptionByDefault& WithKMSMasterKeyID(Aws::String&& value) { SetKMSMasterKeyID(std::move(value)); return *this;}
/**
- * <p>AWS Key Management Service (KMS) customer master key ID to use for the
- * default encryption. This parameter is allowed if and only if
- * <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can specify
- * the key ID or the Amazon Resource Name (ARN) of the CMK. However, if you are
- * using encryption with cross-account operations, you must use a fully qualified
- * CMK ARN. For more information, see <a
+ * <p>Amazon Web Services Key Management Service (KMS) customer Amazon Web Services
+ * KMS key ID to use for the default encryption. This parameter is allowed if and
+ * only if <code>SSEAlgorithm</code> is set to <code>aws:kms</code>.</p> <p>You can
+ * specify the key ID or the Amazon Resource Name (ARN) of the KMS key. However, if
+ * you are using encryption with cross-account or Amazon Web Services service
+ * operations you must use a fully qualified KMS key ARN. For more information, see
+ * <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/dev/bucket-encryption.html#bucket-encryption-update-bucket-policy">Using
* encryption for cross-account operations</a>. </p> <p> <b>For example:</b> </p>
* <ul> <li> <p>Key ID: <code>1234abcd-12ab-34cd-56ef-1234567890ab</code> </p>
* </li> <li> <p>Key ARN:
* <code>arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab</code>
- * </p> </li> </ul> <p>Amazon S3 only supports symmetric CMKs and not
- * asymmetric CMKs. For more information, see <a
+ * </p> </li> </ul> <p>Amazon S3 only supports symmetric KMS keys and
+ * not asymmetric KMS keys. For more information, see <a
* href="https://docs.aws.amazon.com/kms/latest/developerguide/symmetric-asymmetric.html">Using
- * Symmetric and Asymmetric Keys</a> in the <i>AWS Key Management Service Developer
- * Guide</i>.</p>
+ * symmetric and asymmetric keys</a> in the <i>Amazon Web Services Key Management
+ * Service Developer Guide</i>.</p>
*/
inline ServerSideEncryptionByDefault& WithKMSMasterKeyID(const char* value) { SetKMSMasterKeyID(value); return *this;}
private:
ServerSideEncryption m_sSEAlgorithm;
- bool m_sSEAlgorithmHasBeenSet;
+ bool m_sSEAlgorithmHasBeenSet = false;
Aws::String m_kMSMasterKeyID;
- bool m_kMSMasterKeyIDHasBeenSet;
+ bool m_kMSMasterKeyIDHasBeenSet = false;
};
} // namespace Model
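As a usage sketch for the default-encryption documentation above: the snippet builds a ServerSideEncryptionByDefault entry that points at a customer managed KMS key (a full ARN, as required for cross-account use, reusing the documentation's example ARN) and wraps it in the rule/configuration containers that appear later in this patch. PutBucketEncryptionRequest is assumed from the same S3 model; nothing here is introduced by the patch.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketEncryptionRequest.h>
#include <aws/s3/model/ServerSideEncryptionConfiguration.h>
#include <aws/s3/model/ServerSideEncryptionRule.h>
#include <aws/s3/model/ServerSideEncryptionByDefault.h>

using namespace Aws::S3::Model;

// Sketch: make SSE-KMS with a specific symmetric KMS key the bucket default.
void SetDefaultKmsEncryption(Aws::S3::S3Client& s3, const Aws::String& bucket)
{
    ServerSideEncryptionByDefault byDefault;
    byDefault.SetSSEAlgorithm(ServerSideEncryption::aws_kms);
    byDefault.SetKMSMasterKeyID(
        "arn:aws:kms:us-east-2:111122223333:key/1234abcd-12ab-34cd-56ef-1234567890ab");

    ServerSideEncryptionRule rule;
    rule.SetApplyServerSideEncryptionByDefault(byDefault);
    rule.SetBucketKeyEnabled(true);   // optional: S3 Bucket Keys reduce KMS request volume

    ServerSideEncryptionConfiguration config;
    config.AddRules(rule);

    PutBucketEncryptionRequest req;
    req.SetBucket(bucket);
    req.SetServerSideEncryptionConfiguration(config);
    s3.PutBucketEncryption(req);
}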
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionConfiguration.h
index 7b96441c89..d3d7a4ef1a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionConfiguration.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ServerSideEncryptionConfiguration
+ class ServerSideEncryptionConfiguration
{
public:
- ServerSideEncryptionConfiguration();
- ServerSideEncryptionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- ServerSideEncryptionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ServerSideEncryptionConfiguration();
+ AWS_S3_API ServerSideEncryptionConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ServerSideEncryptionConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -90,7 +90,7 @@ namespace Model
private:
Aws::Vector<ServerSideEncryptionRule> m_rules;
- bool m_rulesHasBeenSet;
+ bool m_rulesHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionRule.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionRule.h
index 0ef3c70cf4..7aa9db7bdd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionRule.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/ServerSideEncryptionRule.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/ServerSideEncryptionRule">AWS
* API Reference</a></p>
*/
- class AWS_S3_API ServerSideEncryptionRule
+ class ServerSideEncryptionRule
{
public:
- ServerSideEncryptionRule();
- ServerSideEncryptionRule(const Aws::Utils::Xml::XmlNode& xmlNode);
- ServerSideEncryptionRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ServerSideEncryptionRule();
+ AWS_S3_API ServerSideEncryptionRule(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API ServerSideEncryptionRule& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -128,10 +128,10 @@ namespace Model
private:
ServerSideEncryptionByDefault m_applyServerSideEncryptionByDefault;
- bool m_applyServerSideEncryptionByDefaultHasBeenSet;
+ bool m_applyServerSideEncryptionByDefaultHasBeenSet = false;
bool m_bucketKeyEnabled;
- bool m_bucketKeyEnabledHasBeenSet;
+ bool m_bucketKeyEnabledHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SourceSelectionCriteria.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SourceSelectionCriteria.h
index 0395f9089b..619c115ad5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SourceSelectionCriteria.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SourceSelectionCriteria.h
@@ -28,60 +28,66 @@ namespace Model
* objects that you want to replicate. You can choose to enable or disable the
* replication of these objects. Currently, Amazon S3 supports only the filter that
* you can specify for objects created with server-side encryption using a customer
- * master key (CMK) stored in AWS Key Management Service (SSE-KMS).</p><p><h3>See
- * Also:</h3> <a
+ * managed key stored in Amazon Web Services Key Management Service
+ * (SSE-KMS).</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SourceSelectionCriteria">AWS
* API Reference</a></p>
*/
- class AWS_S3_API SourceSelectionCriteria
+ class SourceSelectionCriteria
{
public:
- SourceSelectionCriteria();
- SourceSelectionCriteria(const Aws::Utils::Xml::XmlNode& xmlNode);
- SourceSelectionCriteria& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SourceSelectionCriteria();
+ AWS_S3_API SourceSelectionCriteria(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SourceSelectionCriteria& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p> A container for filter information for the selection of Amazon S3 objects
- * encrypted with AWS KMS. If you include <code>SourceSelectionCriteria</code> in
- * the replication configuration, this element is required. </p>
+ * encrypted with Amazon Web Services KMS. If you include
+ * <code>SourceSelectionCriteria</code> in the replication configuration, this
+ * element is required. </p>
*/
inline const SseKmsEncryptedObjects& GetSseKmsEncryptedObjects() const{ return m_sseKmsEncryptedObjects; }
/**
* <p> A container for filter information for the selection of Amazon S3 objects
- * encrypted with AWS KMS. If you include <code>SourceSelectionCriteria</code> in
- * the replication configuration, this element is required. </p>
+ * encrypted with Amazon Web Services KMS. If you include
+ * <code>SourceSelectionCriteria</code> in the replication configuration, this
+ * element is required. </p>
*/
inline bool SseKmsEncryptedObjectsHasBeenSet() const { return m_sseKmsEncryptedObjectsHasBeenSet; }
/**
* <p> A container for filter information for the selection of Amazon S3 objects
- * encrypted with AWS KMS. If you include <code>SourceSelectionCriteria</code> in
- * the replication configuration, this element is required. </p>
+ * encrypted with Amazon Web Services KMS. If you include
+ * <code>SourceSelectionCriteria</code> in the replication configuration, this
+ * element is required. </p>
*/
inline void SetSseKmsEncryptedObjects(const SseKmsEncryptedObjects& value) { m_sseKmsEncryptedObjectsHasBeenSet = true; m_sseKmsEncryptedObjects = value; }
/**
* <p> A container for filter information for the selection of Amazon S3 objects
- * encrypted with AWS KMS. If you include <code>SourceSelectionCriteria</code> in
- * the replication configuration, this element is required. </p>
+ * encrypted with Amazon Web Services KMS. If you include
+ * <code>SourceSelectionCriteria</code> in the replication configuration, this
+ * element is required. </p>
*/
inline void SetSseKmsEncryptedObjects(SseKmsEncryptedObjects&& value) { m_sseKmsEncryptedObjectsHasBeenSet = true; m_sseKmsEncryptedObjects = std::move(value); }
/**
* <p> A container for filter information for the selection of Amazon S3 objects
- * encrypted with AWS KMS. If you include <code>SourceSelectionCriteria</code> in
- * the replication configuration, this element is required. </p>
+ * encrypted with Amazon Web Services KMS. If you include
+ * <code>SourceSelectionCriteria</code> in the replication configuration, this
+ * element is required. </p>
*/
inline SourceSelectionCriteria& WithSseKmsEncryptedObjects(const SseKmsEncryptedObjects& value) { SetSseKmsEncryptedObjects(value); return *this;}
/**
* <p> A container for filter information for the selection of Amazon S3 objects
- * encrypted with AWS KMS. If you include <code>SourceSelectionCriteria</code> in
- * the replication configuration, this element is required. </p>
+ * encrypted with Amazon Web Services KMS. If you include
+ * <code>SourceSelectionCriteria</code> in the replication configuration, this
+ * element is required. </p>
*/
inline SourceSelectionCriteria& WithSseKmsEncryptedObjects(SseKmsEncryptedObjects&& value) { SetSseKmsEncryptedObjects(std::move(value)); return *this;}
@@ -161,10 +167,10 @@ namespace Model
private:
SseKmsEncryptedObjects m_sseKmsEncryptedObjects;
- bool m_sseKmsEncryptedObjectsHasBeenSet;
+ bool m_sseKmsEncryptedObjectsHasBeenSet = false;
ReplicaModifications m_replicaModifications;
- bool m_replicaModificationsHasBeenSet;
+ bool m_replicaModificationsHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SseKmsEncryptedObjects.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SseKmsEncryptedObjects.h
index 451d3b5e69..ffc763eb20 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SseKmsEncryptedObjects.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/SseKmsEncryptedObjects.h
@@ -24,66 +24,66 @@ namespace Model
/**
* <p>A container for filter information for the selection of S3 objects encrypted
- * with AWS KMS.</p><p><h3>See Also:</h3> <a
+ * with Amazon Web Services KMS.</p><p><h3>See Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/SseKmsEncryptedObjects">AWS
* API Reference</a></p>
*/
- class AWS_S3_API SseKmsEncryptedObjects
+ class SseKmsEncryptedObjects
{
public:
- SseKmsEncryptedObjects();
- SseKmsEncryptedObjects(const Aws::Utils::Xml::XmlNode& xmlNode);
- SseKmsEncryptedObjects& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SseKmsEncryptedObjects();
+ AWS_S3_API SseKmsEncryptedObjects(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API SseKmsEncryptedObjects& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
* <p>Specifies whether Amazon S3 replicates objects created with server-side
- * encryption using a customer master key (CMK) stored in AWS Key Management
- * Service.</p>
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.</p>
*/
inline const SseKmsEncryptedObjectsStatus& GetStatus() const{ return m_status; }
/**
* <p>Specifies whether Amazon S3 replicates objects created with server-side
- * encryption using a customer master key (CMK) stored in AWS Key Management
- * Service.</p>
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.</p>
*/
inline bool StatusHasBeenSet() const { return m_statusHasBeenSet; }
/**
* <p>Specifies whether Amazon S3 replicates objects created with server-side
- * encryption using a customer master key (CMK) stored in AWS Key Management
- * Service.</p>
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.</p>
*/
inline void SetStatus(const SseKmsEncryptedObjectsStatus& value) { m_statusHasBeenSet = true; m_status = value; }
/**
* <p>Specifies whether Amazon S3 replicates objects created with server-side
- * encryption using a customer master key (CMK) stored in AWS Key Management
- * Service.</p>
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.</p>
*/
inline void SetStatus(SseKmsEncryptedObjectsStatus&& value) { m_statusHasBeenSet = true; m_status = std::move(value); }
/**
* <p>Specifies whether Amazon S3 replicates objects created with server-side
- * encryption using a customer master key (CMK) stored in AWS Key Management
- * Service.</p>
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.</p>
*/
inline SseKmsEncryptedObjects& WithStatus(const SseKmsEncryptedObjectsStatus& value) { SetStatus(value); return *this;}
/**
* <p>Specifies whether Amazon S3 replicates objects created with server-side
- * encryption using a customer master key (CMK) stored in AWS Key Management
- * Service.</p>
+ * encryption using an Amazon Web Services KMS key stored in Amazon Web Services
+ * Key Management Service.</p>
*/
inline SseKmsEncryptedObjects& WithStatus(SseKmsEncryptedObjectsStatus&& value) { SetStatus(std::move(value)); return *this;}
private:
SseKmsEncryptedObjectsStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
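Tying the two classes above together, a minimal sketch of a replication filter that selects only SSE-KMS-encrypted objects; the enclosing ReplicationRule wiring is assumed from the same model and not shown in this hunk.

#include <aws/s3/model/SourceSelectionCriteria.h>
#include <aws/s3/model/SseKmsEncryptedObjects.h>

using namespace Aws::S3::Model;

// Sketch: replicate only objects encrypted with SSE-KMS.
SourceSelectionCriteria MakeSseKmsOnlyCriteria()
{
    SseKmsEncryptedObjects kmsObjects;
    kmsObjects.SetStatus(SseKmsEncryptedObjectsStatus::Enabled);

    SourceSelectionCriteria criteria;
    criteria.SetSseKmsEncryptedObjects(kmsObjects);   // required once SourceSelectionCriteria is present
    return criteria;
}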
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Stats.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Stats.h
index fd077b8872..49448d5c3c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Stats.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Stats.h
@@ -25,14 +25,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Stats">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Stats
+ class Stats
{
public:
- Stats();
- Stats(const Aws::Utils::Xml::XmlNode& xmlNode);
- Stats& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Stats();
+ AWS_S3_API Stats(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Stats& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -100,13 +100,13 @@ namespace Model
private:
long long m_bytesScanned;
- bool m_bytesScannedHasBeenSet;
+ bool m_bytesScannedHasBeenSet = false;
long long m_bytesProcessed;
- bool m_bytesProcessedHasBeenSet;
+ bool m_bytesProcessedHasBeenSet = false;
long long m_bytesReturned;
- bool m_bytesReturnedHasBeenSet;
+ bool m_bytesReturnedHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StatsEvent.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StatsEvent.h
index 02674cafca..8f757b9157 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StatsEvent.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StatsEvent.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StatsEvent">AWS API
* Reference</a></p>
*/
- class AWS_S3_API StatsEvent
+ class StatsEvent
{
public:
- StatsEvent();
- StatsEvent(const Aws::Utils::Xml::XmlNode& xmlNode);
- StatsEvent& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API StatsEvent();
+ AWS_S3_API StatsEvent(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API StatsEvent& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -70,7 +70,7 @@ namespace Model
private:
Stats m_details;
- bool m_detailsHasBeenSet;
+ bool m_detailsHasBeenSet = false;
};
} // namespace Model
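The Stats/StatsEvent pair above is what S3 Select reports when a scan finishes. A brief sketch of consuming it through the request's event-stream handler; the callback setter names follow the generated SelectObjectContentHandler and are assumed rather than shown in this hunk.

#include <aws/s3/model/SelectObjectContentRequest.h>
#include <aws/s3/model/SelectObjectContentHandler.h>
#include <aws/s3/model/StatsEvent.h>
#include <iostream>

using namespace Aws::S3::Model;

// Sketch: log scan statistics when the Stats event arrives.
void AttachStatsLogging(SelectObjectContentRequest& req)
{
    SelectObjectContentHandler handler;
    handler.SetStatsEventCallback([](const StatsEvent& event) {
        const Stats& s = event.GetDetails();
        std::cout << "scanned="    << s.GetBytesScanned()
                  << " processed=" << s.GetBytesProcessed()
                  << " returned="  << s.GetBytesReturned() << "\n";
    });
    req.SetEventStreamHandler(handler);
}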
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClass.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClass.h
index b0cda25f4d..1fabe7ddea 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClass.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClass.h
@@ -23,7 +23,8 @@ namespace Model
INTELLIGENT_TIERING,
GLACIER,
DEEP_ARCHIVE,
- OUTPOSTS
+ OUTPOSTS,
+ GLACIER_IR
};
namespace StorageClassMapper
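This update adds the S3 Glacier Instant Retrieval storage class to the enum. A quick illustration of round-tripping the new value through the generated mapper; the GetXForName/GetNameForX function names follow the SDK's usual mapper convention and are assumptions here.

#include <aws/s3/model/StorageClass.h>
#include <cassert>

using namespace Aws::S3::Model;

int main()
{
    // Round-trip the new enum value through the generated mapper.
    StorageClass sc = StorageClassMapper::GetStorageClassForName("GLACIER_IR");
    assert(sc == StorageClass::GLACIER_IR);
    assert(StorageClassMapper::GetNameForStorageClass(sc) == "GLACIER_IR");
    return 0;
}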
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysis.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysis.h
index 741db0e5f1..e07b66db1a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysis.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysis.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysis">AWS
* API Reference</a></p>
*/
- class AWS_S3_API StorageClassAnalysis
+ class StorageClassAnalysis
{
public:
- StorageClassAnalysis();
- StorageClassAnalysis(const Aws::Utils::Xml::XmlNode& xmlNode);
- StorageClassAnalysis& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API StorageClassAnalysis();
+ AWS_S3_API StorageClassAnalysis(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API StorageClassAnalysis& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -78,7 +78,7 @@ namespace Model
private:
StorageClassAnalysisDataExport m_dataExport;
- bool m_dataExportHasBeenSet;
+ bool m_dataExportHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysisDataExport.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysisDataExport.h
index 4a6382b3ec..718ce024a4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysisDataExport.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/StorageClassAnalysisDataExport.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/StorageClassAnalysisDataExport">AWS
* API Reference</a></p>
*/
- class AWS_S3_API StorageClassAnalysisDataExport
+ class StorageClassAnalysisDataExport
{
public:
- StorageClassAnalysisDataExport();
- StorageClassAnalysisDataExport(const Aws::Utils::Xml::XmlNode& xmlNode);
- StorageClassAnalysisDataExport& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API StorageClassAnalysisDataExport();
+ AWS_S3_API StorageClassAnalysisDataExport(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API StorageClassAnalysisDataExport& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -109,10 +109,10 @@ namespace Model
private:
StorageClassAnalysisSchemaVersion m_outputSchemaVersion;
- bool m_outputSchemaVersionHasBeenSet;
+ bool m_outputSchemaVersionHasBeenSet = false;
AnalyticsExportDestination m_destination;
- bool m_destinationHasBeenSet;
+ bool m_destinationHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tag.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tag.h
index 68d57a3ec0..07e37f7bcc 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tag.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tag.h
@@ -27,14 +27,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tag">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Tag
+ class Tag
{
public:
- Tag();
- Tag(const Aws::Utils::Xml::XmlNode& xmlNode);
- Tag& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Tag();
+ AWS_S3_API Tag(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Tag& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -121,10 +121,10 @@ namespace Model
private:
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
Aws::String m_value;
- bool m_valueHasBeenSet;
+ bool m_valueHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tagging.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tagging.h
index 198cc2669b..f14b2904cf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tagging.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tagging.h
@@ -28,14 +28,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tagging">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Tagging
+ class Tagging
{
public:
- Tagging();
- Tagging(const Aws::Utils::Xml::XmlNode& xmlNode);
- Tagging& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Tagging();
+ AWS_S3_API Tagging(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Tagging& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -81,7 +81,7 @@ namespace Model
private:
Aws::Vector<Tag> m_tagSet;
- bool m_tagSetHasBeenSet;
+ bool m_tagSetHasBeenSet = false;
};
} // namespace Model
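A small sketch tying Tag and Tagging together; PutObjectTaggingRequest is assumed from the same model, and the tag key/value are placeholders rather than anything in this patch.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectTaggingRequest.h>
#include <aws/s3/model/Tagging.h>
#include <aws/s3/model/Tag.h>

using namespace Aws::S3::Model;

// Sketch: replace an object's tag set with a single key/value pair.
void TagObject(Aws::S3::S3Client& s3, const Aws::String& bucket, const Aws::String& key)
{
    Tag tag;
    tag.SetKey("project");     // hypothetical tag key
    tag.SetValue("ydb");       // hypothetical tag value

    Tagging tagging;
    tagging.AddTagSet(tag);

    PutObjectTaggingRequest req;
    req.SetBucket(bucket);
    req.SetKey(key);
    req.SetTagging(tagging);
    s3.PutObjectTagging(req);
}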
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TargetGrant.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TargetGrant.h
index c8d1edafbd..f0a3b99be9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TargetGrant.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TargetGrant.h
@@ -24,18 +24,23 @@ namespace Model
{
/**
- * <p>Container for granting information.</p><p><h3>See Also:</h3> <a
+ * <p>Container for granting information.</p> <p>Buckets that use the bucket owner
+ * enforced setting for Object Ownership don't support target grants. For more
+ * information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/enable-server-access-logging.html#grant-log-delivery-permissions-general">Permissions
+ * server access log delivery</a> in the <i>Amazon S3 User Guide</i>.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TargetGrant">AWS API
* Reference</a></p>
*/
- class AWS_S3_API TargetGrant
+ class TargetGrant
{
public:
- TargetGrant();
- TargetGrant(const Aws::Utils::Xml::XmlNode& xmlNode);
- TargetGrant& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API TargetGrant();
+ AWS_S3_API TargetGrant(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API TargetGrant& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -102,10 +107,10 @@ namespace Model
private:
Grantee m_grantee;
- bool m_granteeHasBeenSet;
+ bool m_granteeHasBeenSet = false;
BucketLogsPermission m_permission;
- bool m_permissionHasBeenSet;
+ bool m_permissionHasBeenSet = false;
};
} // namespace Model
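For buckets that still use ACL-based log delivery (the note above explains that bucket-owner-enforced buckets ignore target grants), a TargetGrant might be assembled as below. The Grantee type and the log-delivery group URI are standard S3 values supplied here as assumptions, not part of the patch.

#include <aws/s3/model/TargetGrant.h>
#include <aws/s3/model/Grantee.h>

using namespace Aws::S3::Model;

// Sketch: grant READ on server access logs to the S3 log-delivery group.
TargetGrant MakeLogDeliveryGrant()
{
    Grantee grantee;
    grantee.SetType(Type::Group);
    grantee.SetURI("http://acs.amazonaws.com/groups/s3/LogDelivery");

    TargetGrant grant;
    grant.SetGrantee(grantee);
    grant.SetPermission(BucketLogsPermission::READ);
    return grant;
}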
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tiering.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tiering.h
index 0d8f5b8802..32681deded 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tiering.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Tiering.h
@@ -29,14 +29,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Tiering">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Tiering
+ class Tiering
{
public:
- Tiering();
- Tiering(const Aws::Utils::Xml::XmlNode& xmlNode);
- Tiering& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Tiering();
+ AWS_S3_API Tiering(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Tiering& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -133,10 +133,10 @@ namespace Model
private:
int m_days;
- bool m_daysHasBeenSet;
+ bool m_daysHasBeenSet = false;
IntelligentTieringAccessTier m_accessTier;
- bool m_accessTierHasBeenSet;
+ bool m_accessTierHasBeenSet = false;
};
} // namespace Model
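A minimal sketch of a Tiering entry as used inside an Intelligent-Tiering configuration: objects not accessed for the given number of days move to the archive access tier. The 90-day value is illustrative only.

#include <aws/s3/model/Tiering.h>

using namespace Aws::S3::Model;

// Sketch: move objects to the Archive Access tier after 90 days without access.
Tiering MakeArchiveTier()
{
    Tiering tier;
    tier.SetDays(90);
    tier.SetAccessTier(IntelligentTieringAccessTier::ARCHIVE_ACCESS);
    return tier;
}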
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfiguration.h
index f57be251bb..12c1909b96 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfiguration.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API TopicConfiguration
+ class TopicConfiguration
{
public:
- TopicConfiguration();
- TopicConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- TopicConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API TopicConfiguration();
+ AWS_S3_API TopicConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API TopicConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -202,16 +202,16 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::String m_topicArn;
- bool m_topicArnHasBeenSet;
+ bool m_topicArnHasBeenSet = false;
Aws::Vector<Event> m_events;
- bool m_eventsHasBeenSet;
+ bool m_eventsHasBeenSet = false;
NotificationConfigurationFilter m_filter;
- bool m_filterHasBeenSet;
+ bool m_filterHasBeenSet = false;
};
} // namespace Model
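A sketch of a TopicConfiguration entry for bucket notifications; the SNS topic ARN is a placeholder, and the Event enum value name follows the generated S3 model, so treat it as an assumption.

#include <aws/s3/model/TopicConfiguration.h>

using namespace Aws::S3::Model;

// Sketch: publish a notification to SNS whenever an object is PUT into the bucket.
TopicConfiguration MakePutNotification()
{
    TopicConfiguration config;
    config.SetId("notify-on-put");                                      // optional rule id
    config.SetTopicArn("arn:aws:sns:us-east-2:111122223333:my-topic");  // hypothetical topic
    config.AddEvents(Event::s3_ObjectCreated_Put);
    return config;
}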
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfigurationDeprecated.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfigurationDeprecated.h
index 23592ee28e..fb2c5090e0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfigurationDeprecated.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TopicConfigurationDeprecated.h
@@ -33,14 +33,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/TopicConfigurationDeprecated">AWS
* API Reference</a></p>
*/
- class AWS_S3_API TopicConfigurationDeprecated
+ class TopicConfigurationDeprecated
{
public:
- TopicConfigurationDeprecated();
- TopicConfigurationDeprecated(const Aws::Utils::Xml::XmlNode& xmlNode);
- TopicConfigurationDeprecated& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API TopicConfigurationDeprecated();
+ AWS_S3_API TopicConfigurationDeprecated(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API TopicConfigurationDeprecated& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
@@ -160,13 +160,13 @@ namespace Model
private:
Aws::String m_id;
- bool m_idHasBeenSet;
+ bool m_idHasBeenSet = false;
Aws::Vector<Event> m_events;
- bool m_eventsHasBeenSet;
+ bool m_eventsHasBeenSet = false;
Aws::String m_topic;
- bool m_topicHasBeenSet;
+ bool m_topicHasBeenSet = false;
};
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Transition.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Transition.h
index bc7a534e8b..bacd3e8db6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Transition.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/Transition.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/Transition">AWS API
* Reference</a></p>
*/
- class AWS_S3_API Transition
+ class Transition
{
public:
- Transition();
- Transition(const Aws::Utils::Xml::XmlNode& xmlNode);
- Transition& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Transition();
+ AWS_S3_API Transition(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API Transition& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -137,13 +137,13 @@ namespace Model
private:
Aws::Utils::DateTime m_date;
- bool m_dateHasBeenSet;
+ bool m_dateHasBeenSet = false;
int m_days;
- bool m_daysHasBeenSet;
+ bool m_daysHasBeenSet = false;
TransitionStorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
};
} // namespace Model
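
A lifecycle Transition above is just a date or day count plus a target TransitionStorageClass. A minimal sketch that exercises the GLACIER_IR value added later in this update, assuming the usual generated SetDays/SetStorageClass setters for the members listed in this hunk:

    #include <aws/s3/model/Transition.h>
    #include <aws/s3/model/TransitionStorageClass.h>

    namespace {

    // Sketch only: transition objects to Glacier Instant Retrieval after 30 days.
    // SetDays/SetStorageClass are assumed generated setters for m_days/m_storageClass.
    Aws::S3::Model::Transition MakeGlacierIrTransition()
    {
        Aws::S3::Model::Transition transition;
        transition.SetDays(30);
        transition.SetStorageClass(Aws::S3::Model::TransitionStorageClass::GLACIER_IR);
        return transition;
    }

    } // namespace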
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TransitionStorageClass.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TransitionStorageClass.h
index 7eb5efa762..564934a864 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TransitionStorageClass.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/TransitionStorageClass.h
@@ -20,7 +20,8 @@ namespace Model
STANDARD_IA,
ONEZONE_IA,
INTELLIGENT_TIERING,
- DEEP_ARCHIVE
+ DEEP_ARCHIVE,
+ GLACIER_IR
};
namespace TransitionStorageClassMapper
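
The mapper namespace referenced above conventionally converts between this enum and its wire name, so the new value should round-trip as "GLACIER_IR". A hedged sketch, assuming the standard GetTransitionStorageClassForName/GetNameForTransitionStorageClass helpers that such mapper namespaces normally declare:

    #include <aws/s3/model/TransitionStorageClass.h>
    #include <cassert>

    namespace {

    // Sketch only: the mapper functions are assumed to follow the usual
    // GetXForName/GetNameForX pattern used across the SDK model enums.
    void CheckGlacierIrRoundTrip()
    {
        using namespace Aws::S3::Model;
        const TransitionStorageClass parsed =
            TransitionStorageClassMapper::GetTransitionStorageClassForName("GLACIER_IR");
        assert(parsed == TransitionStorageClass::GLACIER_IR);
        assert(TransitionStorageClassMapper::GetNameForTransitionStorageClass(parsed) == "GLACIER_IR");
    }

    } // namespace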
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyRequest.h
index 015c869675..dd47ddc36c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyRequest.h
@@ -25,10 +25,10 @@ namespace Model
/**
*/
- class AWS_S3_API UploadPartCopyRequest : public S3Request
+ class UploadPartCopyRequest : public S3Request
{
public:
- UploadPartCopyRequest();
+ AWS_S3_API UploadPartCopyRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,31 +36,36 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "UploadPartCopy"; }
- Aws::String SerializePayload() const override;
+ AWS_S3_API Aws::String SerializePayload() const override;
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API bool HasEmbeddedError(IOStream &body, const Http::HeaderValueCollection &header) const override;
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The bucket name.</p> <p>When using this action with an access point, you must
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -69,19 +74,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -90,19 +95,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -111,19 +116,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -132,19 +137,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -153,19 +158,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline UploadPartCopyRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -174,19 +179,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline UploadPartCopyRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -195,19 +200,19 @@ namespace Model
* direct requests to the access point hostname. The access point hostname takes
* the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline UploadPartCopyRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
@@ -221,10 +226,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -232,14 +237,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -257,10 +263,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -268,14 +274,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -293,10 +300,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -304,14 +311,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -329,10 +337,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -340,14 +348,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -365,10 +374,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -376,14 +385,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -401,10 +411,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -412,14 +422,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -437,10 +448,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -448,14 +459,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -473,10 +485,10 @@ namespace Model
* specify the name of the source bucket and key of the source object, separated by
* a slash (/). For example, to copy the object <code>reports/january.pdf</code>
* from the bucket <code>awsexamplebucket</code>, use
- * <code>awsexamplebucket/reports/january.pdf</code>. The value must be URL
- * encoded.</p> </li> <li> <p>For objects accessed through access points, specify
- * the Amazon Resource Name (ARN) of the object as accessed through the access
- * point, in the format
+ * <code>awsexamplebucket/reports/january.pdf</code>. The value must be
+ * URL-encoded.</p> </li> <li> <p>For objects accessed through access points,
+ * specify the Amazon Resource Name (ARN) of the object as accessed through the
+ * access point, in the format
* <code>arn:aws:s3:&lt;Region&gt;:&lt;account-id&gt;:accesspoint/&lt;access-point-name&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through access
* point <code>my-access-point</code> owned by account <code>123456789012</code> in
@@ -484,14 +496,15 @@ namespace Model
* <code>arn:aws:s3:us-west-2:123456789012:accesspoint/my-access-point/object/reports/january.pdf</code>.
* The value must be URL encoded.</p> <p>Amazon S3 supports copy operations
* using access points only when the source and destination buckets are in the same
- * AWS Region.</p> <p>Alternatively, for objects accessed through Amazon S3
- * on Outposts, specify the ARN of the object as accessed in the format
+ * Amazon Web Services Region.</p> <p>Alternatively, for objects accessed
+ * through Amazon S3 on Outposts, specify the ARN of the object as accessed in the
+ * format
* <code>arn:aws:s3-outposts:&lt;Region&gt;:&lt;account-id&gt;:outpost/&lt;outpost-id&gt;/object/&lt;key&gt;</code>.
* For example, to copy the object <code>reports/january.pdf</code> through outpost
* <code>my-outpost</code> owned by account <code>123456789012</code> in Region
* <code>us-west-2</code>, use the URL encoding of
* <code>arn:aws:s3-outposts:us-west-2:123456789012:outpost/my-outpost/object/reports/january.pdf</code>.
- * The value must be URL encoded. </p> </li> </ul> <p>To copy a specific version of
+ * The value must be URL-encoded. </p> </li> </ul> <p>To copy a specific version of
* an object, append <code>?versionId=&lt;version-id&gt;</code> to the value (for
* example,
* <code>awsexamplebucket/reports/january.pdf?versionId=QUpfdndhfd8438MNFDN93jdnJFkdmqnh893</code>).
@@ -1212,114 +1225,114 @@ namespace Model
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline UploadPartCopyRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline UploadPartCopyRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected destination bucket owner. If the destination
- * bucket is owned by a different account, the request will fail with an HTTP
- * <code>403 (Access Denied)</code> error.</p>
+ * bucket is owned by a different account, the request fails with the HTTP status
+ * code <code>403 Forbidden</code> (access denied).</p>
*/
inline UploadPartCopyRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedSourceBucketOwner() const{ return m_expectedSourceBucketOwner; }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline bool ExpectedSourceBucketOwnerHasBeenSet() const { return m_expectedSourceBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedSourceBucketOwner(const Aws::String& value) { m_expectedSourceBucketOwnerHasBeenSet = true; m_expectedSourceBucketOwner = value; }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedSourceBucketOwner(Aws::String&& value) { m_expectedSourceBucketOwnerHasBeenSet = true; m_expectedSourceBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline void SetExpectedSourceBucketOwner(const char* value) { m_expectedSourceBucketOwnerHasBeenSet = true; m_expectedSourceBucketOwner.assign(value); }
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline UploadPartCopyRequest& WithExpectedSourceBucketOwner(const Aws::String& value) { SetExpectedSourceBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline UploadPartCopyRequest& WithExpectedSourceBucketOwner(Aws::String&& value) { SetExpectedSourceBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected source bucket owner. If the source bucket is
- * owned by a different account, the request will fail with an HTTP <code>403
- * (Access Denied)</code> error.</p>
+ * owned by a different account, the request fails with the HTTP status code
+ * <code>403 Forbidden</code> (access denied).</p>
*/
inline UploadPartCopyRequest& WithExpectedSourceBucketOwner(const char* value) { SetExpectedSourceBucketOwner(value); return *this;}
@@ -1366,64 +1379,64 @@ namespace Model
private:
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
Aws::String m_copySource;
- bool m_copySourceHasBeenSet;
+ bool m_copySourceHasBeenSet = false;
Aws::String m_copySourceIfMatch;
- bool m_copySourceIfMatchHasBeenSet;
+ bool m_copySourceIfMatchHasBeenSet = false;
Aws::Utils::DateTime m_copySourceIfModifiedSince;
- bool m_copySourceIfModifiedSinceHasBeenSet;
+ bool m_copySourceIfModifiedSinceHasBeenSet = false;
Aws::String m_copySourceIfNoneMatch;
- bool m_copySourceIfNoneMatchHasBeenSet;
+ bool m_copySourceIfNoneMatchHasBeenSet = false;
Aws::Utils::DateTime m_copySourceIfUnmodifiedSince;
- bool m_copySourceIfUnmodifiedSinceHasBeenSet;
+ bool m_copySourceIfUnmodifiedSinceHasBeenSet = false;
Aws::String m_copySourceRange;
- bool m_copySourceRangeHasBeenSet;
+ bool m_copySourceRangeHasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
int m_partNumber;
- bool m_partNumberHasBeenSet;
+ bool m_partNumberHasBeenSet = false;
Aws::String m_uploadId;
- bool m_uploadIdHasBeenSet;
+ bool m_uploadIdHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
Aws::String m_copySourceSSECustomerAlgorithm;
- bool m_copySourceSSECustomerAlgorithmHasBeenSet;
+ bool m_copySourceSSECustomerAlgorithmHasBeenSet = false;
Aws::String m_copySourceSSECustomerKey;
- bool m_copySourceSSECustomerKeyHasBeenSet;
+ bool m_copySourceSSECustomerKeyHasBeenSet = false;
Aws::String m_copySourceSSECustomerKeyMD5;
- bool m_copySourceSSECustomerKeyMD5HasBeenSet;
+ bool m_copySourceSSECustomerKeyMD5HasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::String m_expectedSourceBucketOwner;
- bool m_expectedSourceBucketOwnerHasBeenSet;
+ bool m_expectedSourceBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
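
Taken together, the comments above spell out what a caller supplies for UploadPartCopy: the destination bucket and key, a URL-encoded copy source, the part number and upload ID, optionally a byte range, and the expected bucket owners (mismatches fail with 403 Forbidden). A hedged sketch of wiring that up through S3Client::UploadPartCopy, assuming the generated Set* mutators for the members listed above; every bucket name, key, ID, and account number below is a placeholder:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/UploadPartCopyRequest.h>
    #include <iostream>

    namespace {

    // Sketch only: copy the first 5 MiB of an existing object in as part 1 of an
    // already-started multipart upload. The Set* calls are the standard generated
    // mutators for the members declared above; all names and IDs are placeholders.
    void CopyFirstPart(const Aws::S3::S3Client& s3, const Aws::String& uploadId)
    {
        Aws::S3::Model::UploadPartCopyRequest request;
        request.SetBucket("destination-bucket");
        request.SetKey("target/object.bin");
        // "<source-bucket>/<source-key>", URL-encoded as the comments above require.
        request.SetCopySource("awsexamplebucket/reports/january.pdf");
        request.SetCopySourceRange("bytes=0-5242879");
        request.SetPartNumber(1);
        request.SetUploadId(uploadId);
        // Either request fails with 403 Forbidden if the owner does not match.
        request.SetExpectedBucketOwner("111122223333");
        request.SetExpectedSourceBucketOwner("111122223333");

        auto outcome = s3.UploadPartCopy(request);
        if (!outcome.IsSuccess()) {
            std::cerr << "UploadPartCopy failed: "
                      << outcome.GetError().GetMessage() << std::endl;
        }
    }

    } // namespace

The same request shape applies when an access-point or Outposts ARN stands in for the bucket name; per the comments above, only the strings passed to SetBucket and SetCopySource change.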
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyResult.h
index ee8933482f..3e2bfc6b45 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartCopyResult.h
@@ -27,12 +27,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API UploadPartCopyResult
+ class UploadPartCopyResult
{
public:
- UploadPartCopyResult();
- UploadPartCopyResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- UploadPartCopyResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API UploadPartCopyResult();
+ AWS_S3_API UploadPartCopyResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API UploadPartCopyResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -236,70 +236,70 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline UploadPartCopyResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline UploadPartCopyResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object.</p>
*/
inline UploadPartCopyResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline UploadPartCopyResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
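
On success, the accessors above expose the server-side-encryption details described in these comments. A minimal sketch of inspecting them, using only the GetSSEKMSKeyId/GetBucketKeyEnabled getters shown in this hunk; the result is assumed to come from an UploadPartCopy outcome such as the request sketch earlier:

    #include <aws/s3/model/UploadPartCopyResult.h>
    #include <iostream>

    namespace {

    // Sketch only: log which KMS key (if any) encrypted the copied part and
    // whether an S3 Bucket Key was used for SSE-KMS.
    void LogEncryptionInfo(const Aws::S3::Model::UploadPartCopyResult& result)
    {
        if (!result.GetSSEKMSKeyId().empty()) {
            std::cout << "Part encrypted with KMS key: " << result.GetSSEKMSKeyId() << std::endl;
        }
        std::cout << "S3 Bucket Key enabled: "
                  << (result.GetBucketKeyEnabled() ? "yes" : "no") << std::endl;
    }

    } // namespace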
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartRequest.h
index 49060c0d65..01711b9120 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartRequest.h
@@ -8,6 +8,7 @@
#include <aws/s3/S3Request.h>
#include <aws/core/utils/Array.h>
#include <aws/core/utils/memory/stl/AWSString.h>
+#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/RequestPayer.h>
#include <aws/core/utils/memory/stl/AWSMap.h>
#include <utility>
@@ -25,10 +26,10 @@ namespace Model
/**
*/
- class AWS_S3_API UploadPartRequest : public StreamingS3Request
+ class UploadPartRequest : public StreamingS3Request
{
public:
- UploadPartRequest();
+ AWS_S3_API UploadPartRequest();
// Service request name is the Operation name which will send this request out,
// each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -36,29 +37,35 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "UploadPart"; }
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::String GetChecksumAlgorithmName() const override;
+
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>The name of the bucket to which the multipart upload was initiated.</p>
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline const Aws::String& GetBucket() const{ return m_bucket; }
@@ -67,19 +74,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline bool BucketHasBeenSet() const { return m_bucketHasBeenSet; }
@@ -88,19 +95,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const Aws::String& value) { m_bucketHasBeenSet = true; m_bucket = value; }
@@ -109,19 +116,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(Aws::String&& value) { m_bucketHasBeenSet = true; m_bucket = std::move(value); }
@@ -130,19 +137,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline void SetBucket(const char* value) { m_bucketHasBeenSet = true; m_bucket.assign(value); }
@@ -151,19 +158,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline UploadPartRequest& WithBucket(const Aws::String& value) { SetBucket(value); return *this;}
@@ -172,19 +179,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline UploadPartRequest& WithBucket(Aws::String&& value) { SetBucket(std::move(value)); return *this;}
@@ -193,19 +200,19 @@ namespace Model
* <p>When using this action with an access point, you must direct requests to the
* access point hostname. The access point hostname takes the form
* <i>AccessPointName</i>-<i>AccountId</i>.s3-accesspoint.<i>Region</i>.amazonaws.com.
- * When using this action with an access point through the AWS SDKs, you provide
- * the access point ARN in place of the bucket name. For more information about
- * access point ARNs, see <a
+ * When using this action with an access point through the Amazon Web Services
+ * SDKs, you provide the access point ARN in place of the bucket name. For more
+ * information about access point ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/using-access-points.html">Using
- * Access Points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
+ * access points</a> in the <i>Amazon S3 User Guide</i>.</p> <p>When using this
* action with Amazon S3 on Outposts, you must direct requests to the S3 on
- * Outposts hostname. The S3 on Outposts hostname takes the form
- * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com.
- * When using this action using S3 on Outposts through the AWS SDKs, you provide
- * the Outposts bucket ARN in place of the bucket name. For more information about
- * S3 on Outposts ARNs, see <a
+ * Outposts hostname. The S3 on Outposts hostname takes the form <code>
+ * <i>AccessPointName</i>-<i>AccountId</i>.<i>outpostID</i>.s3-outposts.<i>Region</i>.amazonaws.com</code>.
+ * When using this action with S3 on Outposts through the Amazon Web Services SDKs,
+ * you provide the Outposts bucket ARN in place of the bucket name. For more
+ * information about S3 on Outposts ARNs, see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/S3onOutposts.html">Using
- * S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
+ * Amazon S3 on Outposts</a> in the <i>Amazon S3 User Guide</i>.</p>
*/
inline UploadPartRequest& WithBucket(const char* value) { SetBucket(value); return *this;}
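The comments above repeat, for every Bucket accessor, that an access point or S3 on Outposts ARN is accepted in place of a plain bucket name. A small editorial sketch of that usage (the account ID and access point name are placeholders, not taken from this change):

#include <aws/s3/model/UploadPartRequest.h>

// Editorial sketch: target an access point by passing its ARN as the bucket.
Aws::S3::Model::UploadPartRequest MakeAccessPointPartRequest(const Aws::String& uploadId)
{
    Aws::S3::Model::UploadPartRequest req;
    req.SetBucket("arn:aws:s3:us-east-1:111122223333:accesspoint/my-access-point");
    req.SetKey("my-object");
    req.SetUploadId(uploadId);  // returned by CreateMultipartUpload
    req.SetPartNumber(1);
    return req;
}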
@@ -293,6 +300,419 @@ namespace Model
/**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline const ChecksumAlgorithm& GetChecksumAlgorithm() const{ return m_checksumAlgorithm; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline bool ChecksumAlgorithmHasBeenSet() const { return m_checksumAlgorithmHasBeenSet; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline void SetChecksumAlgorithm(const ChecksumAlgorithm& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = value; }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline void SetChecksumAlgorithm(ChecksumAlgorithm&& value) { m_checksumAlgorithmHasBeenSet = true; m_checksumAlgorithm = std::move(value); }
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline UploadPartRequest& WithChecksumAlgorithm(const ChecksumAlgorithm& value) { SetChecksumAlgorithm(value); return *this;}
+
+ /**
+ * <p>Indicates the algorithm used to create the checksum for the object when using
+ * the SDK. This header will not provide any additional functionality if not using
+ * the SDK. When sending this header, there must be a corresponding
+ * <code>x-amz-checksum</code> or <code>x-amz-trailer</code> header sent.
+ * Otherwise, Amazon S3 fails the request with the HTTP status code <code>400 Bad
+ * Request</code>. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>If you provide
+ * an individual checksum, Amazon S3 ignores any provided
+ * <code>ChecksumAlgorithm</code> parameter.</p> <p>This checksum algorithm must be
+ * the same for all parts and it must match the checksum value supplied in the
+ * <code>CreateMultipartUpload</code> request.</p>
+ */
+ inline UploadPartRequest& WithChecksumAlgorithm(ChecksumAlgorithm&& value) { SetChecksumAlgorithm(std::move(value)); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object. For more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This header specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object. For more information, see
+ * <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartRequest& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
+ /**
* <p>Object key for which the multipart upload was initiated.</p>
*/
inline const Aws::String& GetKey() const{ return m_key; }
@@ -615,57 +1035,57 @@ namespace Model
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline const Aws::String& GetExpectedBucketOwner() const{ return m_expectedBucketOwner; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline bool ExpectedBucketOwnerHasBeenSet() const { return m_expectedBucketOwnerHasBeenSet; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const Aws::String& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = value; }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(Aws::String&& value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner = std::move(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline void SetExpectedBucketOwner(const char* value) { m_expectedBucketOwnerHasBeenSet = true; m_expectedBucketOwner.assign(value); }
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline UploadPartRequest& WithExpectedBucketOwner(const Aws::String& value) { SetExpectedBucketOwner(value); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline UploadPartRequest& WithExpectedBucketOwner(Aws::String&& value) { SetExpectedBucketOwner(std::move(value)); return *this;}
/**
* <p>The account ID of the expected bucket owner. If the bucket is owned by a
- * different account, the request will fail with an HTTP <code>403 (Access
- * Denied)</code> error.</p>
+ * different account, the request fails with the HTTP status code <code>403
+ * Forbidden</code> (access denied).</p>
*/
inline UploadPartRequest& WithExpectedBucketOwner(const char* value) { SetExpectedBucketOwner(value); return *this;}
@@ -713,40 +1133,55 @@ namespace Model
Aws::String m_bucket;
- bool m_bucketHasBeenSet;
+ bool m_bucketHasBeenSet = false;
long long m_contentLength;
- bool m_contentLengthHasBeenSet;
+ bool m_contentLengthHasBeenSet = false;
Aws::String m_contentMD5;
- bool m_contentMD5HasBeenSet;
+ bool m_contentMD5HasBeenSet = false;
+
+ ChecksumAlgorithm m_checksumAlgorithm;
+ bool m_checksumAlgorithmHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
Aws::String m_key;
- bool m_keyHasBeenSet;
+ bool m_keyHasBeenSet = false;
int m_partNumber;
- bool m_partNumberHasBeenSet;
+ bool m_partNumberHasBeenSet = false;
Aws::String m_uploadId;
- bool m_uploadIdHasBeenSet;
+ bool m_uploadIdHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSECustomerKey;
- bool m_sSECustomerKeyHasBeenSet;
+ bool m_sSECustomerKeyHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
RequestPayer m_requestPayer;
- bool m_requestPayerHasBeenSet;
+ bool m_requestPayerHasBeenSet = false;
Aws::String m_expectedBucketOwner;
- bool m_expectedBucketOwnerHasBeenSet;
+ bool m_expectedBucketOwnerHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
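Putting the new request members together, here is a minimal editorial sketch of an UploadPart call that opts into SDK-computed checksums; the client, identifiers, and account ID below are placeholders and are not part of this change.

#include <aws/core/Aws.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/UploadPartRequest.h>

// Editorial sketch: upload one part with a CRC32C checksum computed by the SDK.
bool UploadOnePart(Aws::S3::S3Client& client,
                   const Aws::String& bucket,
                   const Aws::String& key,
                   const Aws::String& uploadId,
                   int partNumber,
                   const Aws::String& partData)
{
    Aws::S3::Model::UploadPartRequest req;
    req.SetBucket(bucket);
    req.SetKey(key);
    req.SetUploadId(uploadId);
    req.SetPartNumber(partNumber);
    // New in this SDK version: ask the SDK to compute and send a CRC32C
    // checksum for the part body instead of supplying one by hand.
    req.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32C);
    // The service rejects the request with 403 Forbidden if the bucket is
    // owned by a different account (placeholder account ID).
    req.SetExpectedBucketOwner("111122223333");

    auto body = Aws::MakeShared<Aws::StringStream>("UploadOnePart");
    *body << partData;
    req.SetBody(body);

    return client.UploadPart(req).IsSuccess();
}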
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartResult.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartResult.h
index 146a25021b..22c25cb6c4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartResult.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/UploadPartResult.h
@@ -26,12 +26,12 @@ namespace S3
{
namespace Model
{
- class AWS_S3_API UploadPartResult
+ class UploadPartResult
{
public:
- UploadPartResult();
- UploadPartResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
- UploadPartResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API UploadPartResult();
+ AWS_S3_API UploadPartResult(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
+ AWS_S3_API UploadPartResult& operator=(const Aws::AmazonWebServiceResult<Aws::Utils::Xml::XmlDocument>& result);
/**
@@ -102,6 +102,290 @@ namespace Model
/**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32 = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32 checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32C = value; }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 32-bit CRC32C checksum of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1 = value; }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 160-bit SHA-1 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256 = value; }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>The base64-encoded, 256-bit SHA-256 digest of the object. This will only be
+ * present if it was uploaded with the object. With multipart uploads, this may not
+ * be a checksum value of the object. For more information about how checksums are
+ * calculated with multipart uploads, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html#large-object-checksums">
+ * Checking object integrity</a> in the <i>Amazon S3 User Guide</i>.</p>
+ */
+ inline UploadPartResult& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
+ /**
* <p>If server-side encryption with a customer-provided encryption key was
* requested, the response will include this header confirming the encryption
* algorithm used.</p>
@@ -202,70 +486,70 @@ namespace Model
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyId = value; }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyId = std::move(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyId.assign(value); }
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline UploadPartResult& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline UploadPartResult& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p>If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) was used for the
- * object.</p>
+ * <p>If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was
+ * used for the object.</p>
*/
inline UploadPartResult& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabled = value; }
/**
* <p>Indicates whether the multipart upload uses an S3 Bucket Key for server-side
- * encryption with AWS KMS (SSE-KMS).</p>
+ * encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline UploadPartResult& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
@@ -291,6 +575,14 @@ namespace Model
Aws::String m_eTag;
+ Aws::String m_checksumCRC32;
+
+ Aws::String m_checksumCRC32C;
+
+ Aws::String m_checksumSHA1;
+
+ Aws::String m_checksumSHA256;
+
Aws::String m_sSECustomerAlgorithm;
Aws::String m_sSECustomerKeyMD5;
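
The checksum members added to UploadPartResult above surface the per-part checksum that S3 echoes back when a part is uploaded with a checksum algorithm. A minimal sketch of reading them after an UploadPart call follows; the helper name, the bucket/key/upload-ID parameters, and the opt-in to CRC32C via SetChecksumAlgorithm are illustrative assumptions, not part of this diff.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/UploadPartRequest.h>
#include <iostream>
#include <memory>

// Uploads one part and prints the checksum S3 returned for it (sketch).
void UploadOnePart(const Aws::S3::S3Client& client,
                   const Aws::String& bucket, const Aws::String& key,
                   const Aws::String& uploadId, int partNumber,
                   const std::shared_ptr<Aws::IOStream>& body)
{
    Aws::S3::Model::UploadPartRequest request;
    request.SetBucket(bucket);
    request.SetKey(key);
    request.SetUploadId(uploadId);
    request.SetPartNumber(partNumber);
    request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32C); // assumed opt-in
    request.SetBody(body);

    auto outcome = client.UploadPart(request);
    if (outcome.IsSuccess())
    {
        const auto& result = outcome.GetResult();
        // The checksum accessors return empty strings unless the part was
        // uploaded with the matching checksum algorithm.
        std::cout << "ETag: " << result.GetETag()
                  << " CRC32C: " << result.GetChecksumCRC32C() << std::endl;
    }
}
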
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/VersioningConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/VersioningConfiguration.h
index b30cfbd698..1350a698a3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/VersioningConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/VersioningConfiguration.h
@@ -27,19 +27,19 @@ namespace Model
* <p>Describes the versioning state of an Amazon S3 bucket. For more information,
* see <a
* href="https://docs.aws.amazon.com/AmazonS3/latest/API/RESTBucketPUTVersioningStatus.html">PUT
- * Bucket versioning</a> in the <i>Amazon Simple Storage Service API
- * Reference</i>.</p><p><h3>See Also:</h3> <a
+ * Bucket versioning</a> in the <i>Amazon S3 API Reference</i>.</p><p><h3>See
+ * Also:</h3> <a
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/VersioningConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API VersioningConfiguration
+ class VersioningConfiguration
{
public:
- VersioningConfiguration();
- VersioningConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- VersioningConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API VersioningConfiguration();
+ AWS_S3_API VersioningConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API VersioningConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -124,10 +124,10 @@ namespace Model
private:
MFADelete m_mFADelete;
- bool m_mFADeleteHasBeenSet;
+ bool m_mFADeleteHasBeenSet = false;
BucketVersioningStatus m_status;
- bool m_statusHasBeenSet;
+ bool m_statusHasBeenSet = false;
};
} // namespace Model
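
Aside from the per-member AWS_S3_API export macros and the new "= false" default initializers, VersioningConfiguration is consumed exactly as before. A short sketch of enabling versioning on a bucket; the helper name and bucket parameter are placeholders.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/BucketVersioningStatus.h>
#include <aws/s3/model/PutBucketVersioningRequest.h>
#include <aws/s3/model/VersioningConfiguration.h>

// Enables versioning on the given bucket; returns true on success (sketch).
bool EnableVersioning(const Aws::S3::S3Client& client, const Aws::String& bucket)
{
    Aws::S3::Model::VersioningConfiguration config;
    config.SetStatus(Aws::S3::Model::BucketVersioningStatus::Enabled);

    Aws::S3::Model::PutBucketVersioningRequest request;
    request.SetBucket(bucket);
    request.SetVersioningConfiguration(config);

    return client.PutBucketVersioning(request).IsSuccess();
}
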
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WebsiteConfiguration.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WebsiteConfiguration.h
index d862c3e230..278a2d37ea 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WebsiteConfiguration.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WebsiteConfiguration.h
@@ -32,14 +32,14 @@ namespace Model
* href="http://docs.aws.amazon.com/goto/WebAPI/s3-2006-03-01/WebsiteConfiguration">AWS
* API Reference</a></p>
*/
- class AWS_S3_API WebsiteConfiguration
+ class WebsiteConfiguration
{
public:
- WebsiteConfiguration();
- WebsiteConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
- WebsiteConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API WebsiteConfiguration();
+ AWS_S3_API WebsiteConfiguration(const Aws::Utils::Xml::XmlNode& xmlNode);
+ AWS_S3_API WebsiteConfiguration& operator=(const Aws::Utils::Xml::XmlNode& xmlNode);
- void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
+ AWS_S3_API void AddToNode(Aws::Utils::Xml::XmlNode& parentNode) const;
/**
@@ -190,16 +190,16 @@ namespace Model
private:
ErrorDocument m_errorDocument;
- bool m_errorDocumentHasBeenSet;
+ bool m_errorDocumentHasBeenSet = false;
IndexDocument m_indexDocument;
- bool m_indexDocumentHasBeenSet;
+ bool m_indexDocumentHasBeenSet = false;
RedirectAllRequestsTo m_redirectAllRequestsTo;
- bool m_redirectAllRequestsToHasBeenSet;
+ bool m_redirectAllRequestsToHasBeenSet = false;
Aws::Vector<RoutingRule> m_routingRules;
- bool m_routingRulesHasBeenSet;
+ bool m_routingRulesHasBeenSet = false;
};
} // namespace Model
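
WebsiteConfiguration follows the same pattern: only the export macros and default initializers change, so existing call sites keep working. A hedged sketch of configuring static website hosting; the helper name and document names are placeholders.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ErrorDocument.h>
#include <aws/s3/model/IndexDocument.h>
#include <aws/s3/model/PutBucketWebsiteRequest.h>
#include <aws/s3/model/WebsiteConfiguration.h>

// Points the bucket's website endpoint at index.html / error.html (sketch).
bool ConfigureWebsite(const Aws::S3::S3Client& client, const Aws::String& bucket)
{
    Aws::S3::Model::WebsiteConfiguration config;
    config.SetIndexDocument(Aws::S3::Model::IndexDocument().WithSuffix("index.html"));
    config.SetErrorDocument(Aws::S3::Model::ErrorDocument().WithKey("error.html"));

    Aws::S3::Model::PutBucketWebsiteRequest request;
    request.SetBucket(bucket);
    request.SetWebsiteConfiguration(config);

    return client.PutBucketWebsite(request).IsSuccess();
}
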
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WriteGetObjectResponseRequest.h b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WriteGetObjectResponseRequest.h
index 59f0a57d80..58f1deab1c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WriteGetObjectResponseRequest.h
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/include/aws/s3/model/WriteGetObjectResponseRequest.h
@@ -31,10 +31,10 @@ namespace Model
/**
*/
- class AWS_S3_API WriteGetObjectResponseRequest : public StreamingS3Request
+ class WriteGetObjectResponseRequest : public StreamingS3Request
{
public:
- WriteGetObjectResponseRequest();
+ AWS_S3_API WriteGetObjectResponseRequest();
// Service request name is the Operation name which will send this request out,
    // each operation should have a unique request name, so that we can get the operation's name from this request.
@@ -42,14 +42,18 @@ namespace Model
// so we can not get operation's name from response.
inline virtual const char* GetServiceRequestName() const override { return "WriteGetObjectResponse"; }
- void AddQueryStringParameters(Aws::Http::URI& uri) const override;
+ AWS_S3_API void AddQueryStringParameters(Aws::Http::URI& uri) const override;
- Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
+ AWS_S3_API Aws::Http::HeaderValueCollection GetRequestSpecificHeaders() const override;
- bool SignBody() const override { return false; }
+ AWS_S3_API bool SignBody() const override { return false; }
- bool IsChunked() const override { return true; }
+ AWS_S3_API bool IsChunked() const override { return true; }
+ /**
+ * Helper function to collect parameters (configurable and static hardcoded) required for endpoint computation.
+ */
+ AWS_S3_API EndpointParameters GetEndpointContextParams() const override;
/**
* <p>Route prefix to the HTTP URL generated.</p>
@@ -144,60 +148,64 @@ namespace Model
/**
* <p>The integer status code for an HTTP response of a corresponding
* <code>GetObject</code> request.</p> <p class="title"> <b>Status Codes</b> </p>
- * <ul> <li> <p> <i>200 - OK</i> </p> </li> <li> <p> <i>206 - Partial Content</i>
- * </p> </li> <li> <p> <i>304 - Not Modified</i> </p> </li> <li> <p> <i>400 - Bad
- * Request</i> </p> </li> <li> <p> <i>401 - Unauthorized</i> </p> </li> <li> <p>
- * <i>403 - Forbidden</i> </p> </li> <li> <p> <i>404 - Not Found</i> </p> </li>
- * <li> <p> <i>405 - Method Not Allowed</i> </p> </li> <li> <p> <i>409 -
- * Conflict</i> </p> </li> <li> <p> <i>411 - Length Required</i> </p> </li> <li>
- * <p> <i>412 - Precondition Failed</i> </p> </li> <li> <p> <i>416 - Range Not
- * Satisfiable</i> </p> </li> <li> <p> <i>500 - Internal Server Error</i> </p>
- * </li> <li> <p> <i>503 - Service Unavailable</i> </p> </li> </ul>
+ * <ul> <li> <p> <code>200 - OK</code> </p> </li> <li> <p> <code>206 - Partial
+ * Content</code> </p> </li> <li> <p> <code>304 - Not Modified</code> </p> </li>
+ * <li> <p> <code>400 - Bad Request</code> </p> </li> <li> <p> <code>401 -
+ * Unauthorized</code> </p> </li> <li> <p> <code>403 - Forbidden</code> </p> </li>
+ * <li> <p> <code>404 - Not Found</code> </p> </li> <li> <p> <code>405 - Method Not
+ * Allowed</code> </p> </li> <li> <p> <code>409 - Conflict</code> </p> </li> <li>
+ * <p> <code>411 - Length Required</code> </p> </li> <li> <p> <code>412 -
+ * Precondition Failed</code> </p> </li> <li> <p> <code>416 - Range Not
+ * Satisfiable</code> </p> </li> <li> <p> <code>500 - Internal Server Error</code>
+ * </p> </li> <li> <p> <code>503 - Service Unavailable</code> </p> </li> </ul>
*/
inline int GetStatusCode() const{ return m_statusCode; }
/**
* <p>The integer status code for an HTTP response of a corresponding
* <code>GetObject</code> request.</p> <p class="title"> <b>Status Codes</b> </p>
- * <ul> <li> <p> <i>200 - OK</i> </p> </li> <li> <p> <i>206 - Partial Content</i>
- * </p> </li> <li> <p> <i>304 - Not Modified</i> </p> </li> <li> <p> <i>400 - Bad
- * Request</i> </p> </li> <li> <p> <i>401 - Unauthorized</i> </p> </li> <li> <p>
- * <i>403 - Forbidden</i> </p> </li> <li> <p> <i>404 - Not Found</i> </p> </li>
- * <li> <p> <i>405 - Method Not Allowed</i> </p> </li> <li> <p> <i>409 -
- * Conflict</i> </p> </li> <li> <p> <i>411 - Length Required</i> </p> </li> <li>
- * <p> <i>412 - Precondition Failed</i> </p> </li> <li> <p> <i>416 - Range Not
- * Satisfiable</i> </p> </li> <li> <p> <i>500 - Internal Server Error</i> </p>
- * </li> <li> <p> <i>503 - Service Unavailable</i> </p> </li> </ul>
+ * <ul> <li> <p> <code>200 - OK</code> </p> </li> <li> <p> <code>206 - Partial
+ * Content</code> </p> </li> <li> <p> <code>304 - Not Modified</code> </p> </li>
+ * <li> <p> <code>400 - Bad Request</code> </p> </li> <li> <p> <code>401 -
+ * Unauthorized</code> </p> </li> <li> <p> <code>403 - Forbidden</code> </p> </li>
+ * <li> <p> <code>404 - Not Found</code> </p> </li> <li> <p> <code>405 - Method Not
+ * Allowed</code> </p> </li> <li> <p> <code>409 - Conflict</code> </p> </li> <li>
+ * <p> <code>411 - Length Required</code> </p> </li> <li> <p> <code>412 -
+ * Precondition Failed</code> </p> </li> <li> <p> <code>416 - Range Not
+ * Satisfiable</code> </p> </li> <li> <p> <code>500 - Internal Server Error</code>
+ * </p> </li> <li> <p> <code>503 - Service Unavailable</code> </p> </li> </ul>
*/
inline bool StatusCodeHasBeenSet() const { return m_statusCodeHasBeenSet; }
/**
* <p>The integer status code for an HTTP response of a corresponding
* <code>GetObject</code> request.</p> <p class="title"> <b>Status Codes</b> </p>
- * <ul> <li> <p> <i>200 - OK</i> </p> </li> <li> <p> <i>206 - Partial Content</i>
- * </p> </li> <li> <p> <i>304 - Not Modified</i> </p> </li> <li> <p> <i>400 - Bad
- * Request</i> </p> </li> <li> <p> <i>401 - Unauthorized</i> </p> </li> <li> <p>
- * <i>403 - Forbidden</i> </p> </li> <li> <p> <i>404 - Not Found</i> </p> </li>
- * <li> <p> <i>405 - Method Not Allowed</i> </p> </li> <li> <p> <i>409 -
- * Conflict</i> </p> </li> <li> <p> <i>411 - Length Required</i> </p> </li> <li>
- * <p> <i>412 - Precondition Failed</i> </p> </li> <li> <p> <i>416 - Range Not
- * Satisfiable</i> </p> </li> <li> <p> <i>500 - Internal Server Error</i> </p>
- * </li> <li> <p> <i>503 - Service Unavailable</i> </p> </li> </ul>
+ * <ul> <li> <p> <code>200 - OK</code> </p> </li> <li> <p> <code>206 - Partial
+ * Content</code> </p> </li> <li> <p> <code>304 - Not Modified</code> </p> </li>
+ * <li> <p> <code>400 - Bad Request</code> </p> </li> <li> <p> <code>401 -
+ * Unauthorized</code> </p> </li> <li> <p> <code>403 - Forbidden</code> </p> </li>
+ * <li> <p> <code>404 - Not Found</code> </p> </li> <li> <p> <code>405 - Method Not
+ * Allowed</code> </p> </li> <li> <p> <code>409 - Conflict</code> </p> </li> <li>
+ * <p> <code>411 - Length Required</code> </p> </li> <li> <p> <code>412 -
+ * Precondition Failed</code> </p> </li> <li> <p> <code>416 - Range Not
+ * Satisfiable</code> </p> </li> <li> <p> <code>500 - Internal Server Error</code>
+ * </p> </li> <li> <p> <code>503 - Service Unavailable</code> </p> </li> </ul>
*/
inline void SetStatusCode(int value) { m_statusCodeHasBeenSet = true; m_statusCode = value; }
/**
* <p>The integer status code for an HTTP response of a corresponding
* <code>GetObject</code> request.</p> <p class="title"> <b>Status Codes</b> </p>
- * <ul> <li> <p> <i>200 - OK</i> </p> </li> <li> <p> <i>206 - Partial Content</i>
- * </p> </li> <li> <p> <i>304 - Not Modified</i> </p> </li> <li> <p> <i>400 - Bad
- * Request</i> </p> </li> <li> <p> <i>401 - Unauthorized</i> </p> </li> <li> <p>
- * <i>403 - Forbidden</i> </p> </li> <li> <p> <i>404 - Not Found</i> </p> </li>
- * <li> <p> <i>405 - Method Not Allowed</i> </p> </li> <li> <p> <i>409 -
- * Conflict</i> </p> </li> <li> <p> <i>411 - Length Required</i> </p> </li> <li>
- * <p> <i>412 - Precondition Failed</i> </p> </li> <li> <p> <i>416 - Range Not
- * Satisfiable</i> </p> </li> <li> <p> <i>500 - Internal Server Error</i> </p>
- * </li> <li> <p> <i>503 - Service Unavailable</i> </p> </li> </ul>
+ * <ul> <li> <p> <code>200 - OK</code> </p> </li> <li> <p> <code>206 - Partial
+ * Content</code> </p> </li> <li> <p> <code>304 - Not Modified</code> </p> </li>
+ * <li> <p> <code>400 - Bad Request</code> </p> </li> <li> <p> <code>401 -
+ * Unauthorized</code> </p> </li> <li> <p> <code>403 - Forbidden</code> </p> </li>
+ * <li> <p> <code>404 - Not Found</code> </p> </li> <li> <p> <code>405 - Method Not
+ * Allowed</code> </p> </li> <li> <p> <code>409 - Conflict</code> </p> </li> <li>
+ * <p> <code>411 - Length Required</code> </p> </li> <li> <p> <code>412 -
+ * Precondition Failed</code> </p> </li> <li> <p> <code>416 - Range Not
+ * Satisfiable</code> </p> </li> <li> <p> <code>500 - Internal Server Error</code>
+ * </p> </li> <li> <p> <code>503 - Service Unavailable</code> </p> </li> </ul>
*/
inline WriteGetObjectResponseRequest& WithStatusCode(int value) { SetStatusCode(value); return *this;}
@@ -207,8 +215,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline const Aws::String& GetErrorCode() const{ return m_errorCode; }
@@ -217,8 +225,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline bool ErrorCodeHasBeenSet() const { return m_errorCodeHasBeenSet; }
@@ -227,8 +235,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline void SetErrorCode(const Aws::String& value) { m_errorCodeHasBeenSet = true; m_errorCode = value; }
@@ -237,8 +245,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline void SetErrorCode(Aws::String&& value) { m_errorCodeHasBeenSet = true; m_errorCode = std::move(value); }
@@ -247,8 +255,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline void SetErrorCode(const char* value) { m_errorCodeHasBeenSet = true; m_errorCode.assign(value); }
@@ -257,8 +265,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline WriteGetObjectResponseRequest& WithErrorCode(const Aws::String& value) { SetErrorCode(value); return *this;}
@@ -267,8 +275,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline WriteGetObjectResponseRequest& WithErrorCode(Aws::String&& value) { SetErrorCode(std::move(value)); return *this;}
@@ -277,8 +285,8 @@ namespace Model
* &lt;Code&gt; tag of the error XML response for a corresponding
* <code>GetObject</code> call. Cannot be used with a successful
* <code>StatusCode</code> header or when the transformed object is provided in the
- * body. All error codes from S3 are sentence-cased. Regex value is
- * "^[A-Z][a-zA-Z]+$".</p>
+ * body. All error codes from S3 are sentence-cased. The regular expression (regex)
+ * value is <code>"^[A-Z][a-zA-Z]+$"</code>.</p>
*/
inline WriteGetObjectResponseRequest& WithErrorCode(const char* value) { SetErrorCode(value); return *this;}
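
The StatusCode and ErrorCode members documented above are how an S3 Object Lambda function reports a failure back through WriteGetObjectResponse. A minimal sketch, assuming the request route and token have already been taken from the Object Lambda event; the RequestToken and ErrorMessage setters are assumed from the generated request model rather than shown in this hunk.

#include <aws/s3/S3Client.h>
#include <aws/s3/model/WriteGetObjectResponseRequest.h>

// Relays a 403 to the caller of the original GetObject (sketch).
void DenyRequest(const Aws::S3::S3Client& client,
                 const Aws::String& requestRoute, const Aws::String& requestToken)
{
    Aws::S3::Model::WriteGetObjectResponseRequest request;
    request.SetRequestRoute(requestRoute);
    request.SetRequestToken(requestToken);
    request.SetStatusCode(403);
    request.SetErrorCode("AccessDenied"); // must match "^[A-Z][a-zA-Z]+$"
    request.SetErrorMessage("Denied by the Object Lambda function.");

    client.WriteGetObjectResponse(request);
}
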
@@ -640,6 +648,490 @@ namespace Model
/**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline const Aws::String& GetChecksumCRC32() const{ return m_checksumCRC32; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline bool ChecksumCRC32HasBeenSet() const { return m_checksumCRC32HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline void SetChecksumCRC32(const Aws::String& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline void SetChecksumCRC32(Aws::String&& value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline void SetChecksumCRC32(const char* value) { m_checksumCRC32HasBeenSet = true; m_checksumCRC32.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumCRC32(const Aws::String& value) { SetChecksumCRC32(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumCRC32(Aws::String&& value) { SetChecksumCRC32(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32 checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p> <p/>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumCRC32(const char* value) { SetChecksumCRC32(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline const Aws::String& GetChecksumCRC32C() const{ return m_checksumCRC32C; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline bool ChecksumCRC32CHasBeenSet() const { return m_checksumCRC32CHasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumCRC32C(const Aws::String& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumCRC32C(Aws::String&& value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumCRC32C(const char* value) { m_checksumCRC32CHasBeenSet = true; m_checksumCRC32C.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumCRC32C(const Aws::String& value) { SetChecksumCRC32C(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumCRC32C(Aws::String&& value) { SetChecksumCRC32C(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 32-bit CRC32C checksum of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumCRC32C(const char* value) { SetChecksumCRC32C(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline const Aws::String& GetChecksumSHA1() const{ return m_checksumSHA1; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline bool ChecksumSHA1HasBeenSet() const { return m_checksumSHA1HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumSHA1(const Aws::String& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumSHA1(Aws::String&& value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumSHA1(const char* value) { m_checksumSHA1HasBeenSet = true; m_checksumSHA1.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumSHA1(const Aws::String& value) { SetChecksumSHA1(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumSHA1(Aws::String&& value) { SetChecksumSHA1(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 160-bit SHA-1 digest of the object returned by the Object Lambda
+ * function. This may not match the checksum for the object stored in Amazon S3.
+ * Amazon S3 will perform validation of the checksum values only when the original
+ * <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumSHA1(const char* value) { SetChecksumSHA1(value); return *this;}
+
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline const Aws::String& GetChecksumSHA256() const{ return m_checksumSHA256; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline bool ChecksumSHA256HasBeenSet() const { return m_checksumSHA256HasBeenSet; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumSHA256(const Aws::String& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = value; }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumSHA256(Aws::String&& value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256 = std::move(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline void SetChecksumSHA256(const char* value) { m_checksumSHA256HasBeenSet = true; m_checksumSHA256.assign(value); }
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumSHA256(const Aws::String& value) { SetChecksumSHA256(value); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumSHA256(Aws::String&& value) { SetChecksumSHA256(std::move(value)); return *this;}
+
+ /**
+ * <p>This header can be used as a data integrity check to verify that the data
+ * received is the same data that was originally sent. This specifies the
+ * base64-encoded, 256-bit SHA-256 digest of the object returned by the Object
+ * Lambda function. This may not match the checksum for the object stored in Amazon
+ * S3. Amazon S3 will perform validation of the checksum values only when the
+ * original <code>GetObject</code> request required checksum validation. For more
+ * information about checksums, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/userguide/checking-object-integrity.html">Checking
+ * object integrity</a> in the <i>Amazon S3 User Guide</i>.</p> <p>Only one
+ * checksum header can be specified at a time. If you supply multiple checksum
+ * headers, this request will fail.</p>
+ */
+ inline WriteGetObjectResponseRequest& WithChecksumSHA256(const char* value) { SetChecksumSHA256(value); return *this;}
+
+
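A hedged, caller-side sketch of the checksum accessors added above (not part of this diff): it assumes the generated S3Client::WriteGetObjectResponse operation and the streaming SetBody accessor that ship with the SDK, and eventRequestRoute, eventRequestToken, bodyStream, base64Sha256OfBody and s3Client are placeholder names.

// Minimal sketch: replying from an S3 Object Lambda handler with a SHA-256 checksum header.
// All variables below are placeholders supplied by the surrounding handler code.
Aws::S3::Model::WriteGetObjectResponseRequest reply;
reply.SetRequestRoute(eventRequestRoute);      // taken from the Object Lambda event
reply.SetRequestToken(eventRequestToken);      // taken from the Object Lambda event
reply.SetStatusCode(200);
reply.SetBody(bodyStream);                     // std::shared_ptr<Aws::IOStream> with the transformed object
reply.SetChecksumSHA256(base64Sha256OfBody);   // base64-encoded SHA-256 digest of that body
auto outcome = s3Client.WriteGetObjectResponse(reply);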
+ /**
* <p>Specifies whether an object stored in Amazon S3 is (<code>true</code>) or is
* not (<code>false</code>) a delete marker. </p>
*/
@@ -745,58 +1237,66 @@ namespace Model
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline const Aws::String& GetExpiration() const{ return m_expiration; }
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline bool ExpirationHasBeenSet() const { return m_expirationHasBeenSet; }
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline void SetExpiration(const Aws::String& value) { m_expirationHasBeenSet = true; m_expiration = value; }
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline void SetExpiration(Aws::String&& value) { m_expirationHasBeenSet = true; m_expiration = std::move(value); }
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline void SetExpiration(const char* value) { m_expirationHasBeenSet = true; m_expiration.assign(value); }
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline WriteGetObjectResponseRequest& WithExpiration(const Aws::String& value) { SetExpiration(value); return *this;}
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline WriteGetObjectResponseRequest& WithExpiration(Aws::String&& value) { SetExpiration(std::move(value)); return *this;}
/**
- * <p>If object stored in Amazon S3 expiration is configured (see PUT Bucket
- * lifecycle) it includes expiry-date and rule-id key-value pairs providing object
- * expiration information. The value of the rule-id is URL encoded. </p>
+ * <p>If the object expiration is configured (see PUT Bucket lifecycle), the
+ * response includes this header. It includes the <code>expiry-date</code> and
+ * <code>rule-id</code> key-value pairs that provide the object expiration
+ * information. The value of the <code>rule-id</code> is URL-encoded. </p>
*/
inline WriteGetObjectResponseRequest& WithExpiration(const char* value) { SetExpiration(value); return *this;}
@@ -1261,58 +1761,58 @@ namespace Model
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline const Aws::String& GetSSEKMSKeyId() const{ return m_sSEKMSKeyId; }
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline bool SSEKMSKeyIdHasBeenSet() const { return m_sSEKMSKeyIdHasBeenSet; }
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline void SetSSEKMSKeyId(const Aws::String& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = value; }
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline void SetSSEKMSKeyId(Aws::String&& value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId = std::move(value); }
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline void SetSSEKMSKeyId(const char* value) { m_sSEKMSKeyIdHasBeenSet = true; m_sSEKMSKeyId.assign(value); }
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline WriteGetObjectResponseRequest& WithSSEKMSKeyId(const Aws::String& value) { SetSSEKMSKeyId(value); return *this;}
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline WriteGetObjectResponseRequest& WithSSEKMSKeyId(Aws::String&& value) { SetSSEKMSKeyId(std::move(value)); return *this;}
/**
- * <p> If present, specifies the ID of the AWS Key Management Service (AWS KMS)
- * symmetric customer managed customer master key (CMK) that was used for stored in
- * Amazon S3 object. </p>
+ * <p> If present, specifies the ID of the Amazon Web Services Key Management
+ * Service (Amazon Web Services KMS) symmetric customer managed key that was used
+ * for the object stored in Amazon S3. </p>
*/
inline WriteGetObjectResponseRequest& WithSSEKMSKeyId(const char* value) { SetSSEKMSKeyId(value); return *this;}
@@ -1391,32 +1891,56 @@ namespace Model
/**
- * <p> The class of storage used to store object in Amazon S3.</p>
+ * <p>Provides storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
*/
inline const StorageClass& GetStorageClass() const{ return m_storageClass; }
/**
- * <p> The class of storage used to store object in Amazon S3.</p>
+ * <p>Provides storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
*/
inline bool StorageClassHasBeenSet() const { return m_storageClassHasBeenSet; }
/**
- * <p> The class of storage used to store object in Amazon S3.</p>
+ * <p>Provides storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
*/
inline void SetStorageClass(const StorageClass& value) { m_storageClassHasBeenSet = true; m_storageClass = value; }
/**
- * <p> The class of storage used to store object in Amazon S3.</p>
+ * <p>Provides storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
*/
inline void SetStorageClass(StorageClass&& value) { m_storageClassHasBeenSet = true; m_storageClass = std::move(value); }
/**
- * <p> The class of storage used to store object in Amazon S3.</p>
+ * <p>Provides storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
*/
inline WriteGetObjectResponseRequest& WithStorageClass(const StorageClass& value) { SetStorageClass(value); return *this;}
/**
- * <p> The class of storage used to store object in Amazon S3.</p>
+ * <p>Provides storage class information of the object. Amazon S3 returns this
+ * header for all objects except for S3 Standard storage class objects.</p> <p>For
+ * more information, see <a
+ * href="https://docs.aws.amazon.com/AmazonS3/latest/dev/storage-class-intro.html">Storage
+ * Classes</a>.</p>
*/
inline WriteGetObjectResponseRequest& WithStorageClass(StorageClass&& value) { SetStorageClass(std::move(value)); return *this;}
@@ -1485,25 +2009,25 @@ namespace Model
/**
* <p> Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
- * server-side encryption with AWS KMS (SSE-KMS).</p>
+ * server-side encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool GetBucketKeyEnabled() const{ return m_bucketKeyEnabled; }
/**
* <p> Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
- * server-side encryption with AWS KMS (SSE-KMS).</p>
+ * server-side encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline bool BucketKeyEnabledHasBeenSet() const { return m_bucketKeyEnabledHasBeenSet; }
/**
* <p> Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
- * server-side encryption with AWS KMS (SSE-KMS).</p>
+ * server-side encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline void SetBucketKeyEnabled(bool value) { m_bucketKeyEnabledHasBeenSet = true; m_bucketKeyEnabled = value; }
/**
* <p> Indicates whether the object stored in Amazon S3 uses an S3 bucket key for
- * server-side encryption with AWS KMS (SSE-KMS).</p>
+ * server-side encryption with Amazon Web Services KMS (SSE-KMS).</p>
*/
inline WriteGetObjectResponseRequest& WithBucketKeyEnabled(bool value) { SetBucketKeyEnabled(value); return *this;}
@@ -1550,110 +2074,122 @@ namespace Model
private:
Aws::String m_requestRoute;
- bool m_requestRouteHasBeenSet;
+ bool m_requestRouteHasBeenSet = false;
Aws::String m_requestToken;
- bool m_requestTokenHasBeenSet;
+ bool m_requestTokenHasBeenSet = false;
int m_statusCode;
- bool m_statusCodeHasBeenSet;
+ bool m_statusCodeHasBeenSet = false;
Aws::String m_errorCode;
- bool m_errorCodeHasBeenSet;
+ bool m_errorCodeHasBeenSet = false;
Aws::String m_errorMessage;
- bool m_errorMessageHasBeenSet;
+ bool m_errorMessageHasBeenSet = false;
Aws::String m_acceptRanges;
- bool m_acceptRangesHasBeenSet;
+ bool m_acceptRangesHasBeenSet = false;
Aws::String m_cacheControl;
- bool m_cacheControlHasBeenSet;
+ bool m_cacheControlHasBeenSet = false;
Aws::String m_contentDisposition;
- bool m_contentDispositionHasBeenSet;
+ bool m_contentDispositionHasBeenSet = false;
Aws::String m_contentEncoding;
- bool m_contentEncodingHasBeenSet;
+ bool m_contentEncodingHasBeenSet = false;
Aws::String m_contentLanguage;
- bool m_contentLanguageHasBeenSet;
+ bool m_contentLanguageHasBeenSet = false;
long long m_contentLength;
- bool m_contentLengthHasBeenSet;
+ bool m_contentLengthHasBeenSet = false;
Aws::String m_contentRange;
- bool m_contentRangeHasBeenSet;
+ bool m_contentRangeHasBeenSet = false;
+
+ Aws::String m_checksumCRC32;
+ bool m_checksumCRC32HasBeenSet = false;
+
+ Aws::String m_checksumCRC32C;
+ bool m_checksumCRC32CHasBeenSet = false;
+
+ Aws::String m_checksumSHA1;
+ bool m_checksumSHA1HasBeenSet = false;
+
+ Aws::String m_checksumSHA256;
+ bool m_checksumSHA256HasBeenSet = false;
bool m_deleteMarker;
- bool m_deleteMarkerHasBeenSet;
+ bool m_deleteMarkerHasBeenSet = false;
Aws::String m_eTag;
- bool m_eTagHasBeenSet;
+ bool m_eTagHasBeenSet = false;
Aws::Utils::DateTime m_expires;
- bool m_expiresHasBeenSet;
+ bool m_expiresHasBeenSet = false;
Aws::String m_expiration;
- bool m_expirationHasBeenSet;
+ bool m_expirationHasBeenSet = false;
Aws::Utils::DateTime m_lastModified;
- bool m_lastModifiedHasBeenSet;
+ bool m_lastModifiedHasBeenSet = false;
int m_missingMeta;
- bool m_missingMetaHasBeenSet;
+ bool m_missingMetaHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_metadata;
- bool m_metadataHasBeenSet;
+ bool m_metadataHasBeenSet = false;
ObjectLockMode m_objectLockMode;
- bool m_objectLockModeHasBeenSet;
+ bool m_objectLockModeHasBeenSet = false;
ObjectLockLegalHoldStatus m_objectLockLegalHoldStatus;
- bool m_objectLockLegalHoldStatusHasBeenSet;
+ bool m_objectLockLegalHoldStatusHasBeenSet = false;
Aws::Utils::DateTime m_objectLockRetainUntilDate;
- bool m_objectLockRetainUntilDateHasBeenSet;
+ bool m_objectLockRetainUntilDateHasBeenSet = false;
int m_partsCount;
- bool m_partsCountHasBeenSet;
+ bool m_partsCountHasBeenSet = false;
ReplicationStatus m_replicationStatus;
- bool m_replicationStatusHasBeenSet;
+ bool m_replicationStatusHasBeenSet = false;
RequestCharged m_requestCharged;
- bool m_requestChargedHasBeenSet;
+ bool m_requestChargedHasBeenSet = false;
Aws::String m_restore;
- bool m_restoreHasBeenSet;
+ bool m_restoreHasBeenSet = false;
ServerSideEncryption m_serverSideEncryption;
- bool m_serverSideEncryptionHasBeenSet;
+ bool m_serverSideEncryptionHasBeenSet = false;
Aws::String m_sSECustomerAlgorithm;
- bool m_sSECustomerAlgorithmHasBeenSet;
+ bool m_sSECustomerAlgorithmHasBeenSet = false;
Aws::String m_sSEKMSKeyId;
- bool m_sSEKMSKeyIdHasBeenSet;
+ bool m_sSEKMSKeyIdHasBeenSet = false;
Aws::String m_sSECustomerKeyMD5;
- bool m_sSECustomerKeyMD5HasBeenSet;
+ bool m_sSECustomerKeyMD5HasBeenSet = false;
StorageClass m_storageClass;
- bool m_storageClassHasBeenSet;
+ bool m_storageClassHasBeenSet = false;
int m_tagCount;
- bool m_tagCountHasBeenSet;
+ bool m_tagCountHasBeenSet = false;
Aws::String m_versionId;
- bool m_versionIdHasBeenSet;
+ bool m_versionIdHasBeenSet = false;
bool m_bucketKeyEnabled;
- bool m_bucketKeyEnabledHasBeenSet;
+ bool m_bucketKeyEnabledHasBeenSet = false;
Aws::Map<Aws::String, Aws::String> m_customizedAccessLogTag;
- bool m_customizedAccessLogTagHasBeenSet;
+ bool m_customizedAccessLogTagHasBeenSet = false;
};
} // namespace Model
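Every HasBeenSet flag above now carries an in-class `= false` initializer instead of being left uninitialized. A minimal sketch of the difference, outside the SDK:

// Not SDK code: why the in-class initializers matter for the "has been set" flags.
struct Before { bool m_expirationHasBeenSet; };          // "Before b;" leaves the flag indeterminate
struct After  { bool m_expirationHasBeenSet = false; };  // flag is well-defined: starts as "not set"
// Serialization code that checks the flag on a default-constructed Before object reads an
// indeterminate value; with After it reliably reads false, i.e. "field not set".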
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
deleted file mode 100644
index 72bbbc8c7e..0000000000
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ARN.cpp
+++ /dev/null
@@ -1,210 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <cassert>
-#include <aws/core/Region.h>
-#include <aws/core/utils/DNS.h>
-#include <aws/core/utils/Outcome.h>
-#include <aws/core/utils/StringUtils.h>
-#include <aws/s3/S3ARN.h>
-
-namespace Aws
-{
- namespace S3
- {
- S3ARN::S3ARN(const Aws::String& arn) : Utils::ARN(arn)
- {
- ParseARNResource();
- }
-
- S3ARNOutcome S3ARN::Validate(const char* clientRegion) const
- {
- // Take pseudo region into consideration here.
- Aws::String region = clientRegion ? clientRegion : "";
- Aws::StringStream ss;
- if (this->GetResourceType() == ARNResourceType::OUTPOST && region.find("fips") != Aws::String::npos)
- {
- ss.str("");
- ss << "Outposts ARN do not support fips regions right now.";
- return S3ARNOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION", ss.str(), false));
- }
- else if (this->GetRegion() != Aws::Region::ComputeSignerRegion(clientRegion))
- {
- ss.str("");
- ss << "Region mismatch between \"" << this->GetRegion() << "\" defined in ARN and \""
- << clientRegion << "\" defined in client configuration. "
- << "You can specify AWS_S3_USE_ARN_REGION to ignore region defined in client configuration.";
- return S3ARNOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION", ss.str(), false));
- }
- else
- {
- return Validate();
- }
- }
-
- S3ARNOutcome S3ARN::Validate() const
- {
- Aws::String errorMessage;
- bool success = false;
- Aws::StringStream ss;
-
- if (!*this)
- {
- errorMessage = "Invalid ARN.";
- }
- // Validation on partition.
- else if (this->GetPartition().find("aws") != 0)
- {
- ss.str("");
- ss << "Invalid partition in ARN: " << this->GetPartition() << ". Valid options: aws, aws-cn, and etc.";
- }
- // Validation on service.
- else if (this->GetService() != ARNService::S3 && this->GetService() != ARNService::S3_OUTPOSTS && this->GetService() != ARNService::S3_OBJECT_LAMBDA)
- {
- ss.str("");
- ss << "Invalid service in ARN: " << this->GetService() << ". Valid options: " << ARNService::S3 << ", " << ARNService::S3_OUTPOSTS << ", " << ARNService::S3_OBJECT_LAMBDA << ".";
- errorMessage = ss.str();
- }
- // Validation on region.
- // TODO: Failure on different partitions.
- else if (this->GetRegion().empty())
- {
- errorMessage = "Invalid ARN with empty region.";
- }
- else if (!Utils::IsValidDnsLabel(this->GetRegion()))
- {
- ss.str("");
- ss << "Invalid region in ARN: " << this->GetRegion() << ". Region should be a RFC 3986 Host label.";
- errorMessage = ss.str();
- }
- // Validation on account ID
- else if (!Utils::IsValidDnsLabel(this->GetAccountId()))
- {
- ss.str("");
- ss << "Invalid account ID in ARN: " << this->GetAccountId() << ". Account ID should be a RFC 3986 Host label.";
- errorMessage = ss.str();
- }
- // Validation on Access Point ARN and Object Lambda Access Point ARN:
- else if (this->GetResourceType() == ARNResourceType::ACCESSPOINT)
- {
- if (!Utils::IsValidDnsLabel(this->GetResourceId()))
- {
- ss.str("");
- ss << "Invalid resource ID in accesspoint ARN: " << this->GetResourceId() << ". Resource ID should be a RFC 3986 Host label.";
- errorMessage = ss.str();
- }
- else if (!this->GetResourceQualifier().empty())
- {
- ss.str("");
- ss << "Invalid accesspoint ARN with non empty resource qualifier: " << this->GetResourceQualifier();
- errorMessage = ss.str();
- }
- else if (!this->GetSubResourceType().empty() || !this->GetSubResourceId().empty())
- {
- ss.str("");
- ss << "Invalid accesspoint ARN with non empty sub resource type: " << this->GetSubResourceType() << ", sub resource ID: " << this->GetSubResourceId();
- errorMessage = ss.str();
- }
- else
- {
- success = true;
- }
- }
- // Validation on Outposts ARN:
- else if (this->GetResourceType() == ARNResourceType::OUTPOST)
- {
- if (this->GetRegion().find("fips") != Aws::String::npos)
- {
- ss.str("");
- ss << "Outposts ARN do not support fips regions right now.";
- errorMessage = ss.str();
- }
- else if (!Utils::IsValidDnsLabel(this->GetResourceId()))
- {
- ss.str("");
- ss << "Invalid outpost ID in Outposts ARN: " << this->GetResourceId() << ". Outpost ID should be a RFC 3986 Host label.";
- errorMessage = ss.str();
- }
- else if (this->GetSubResourceType() != ARNResourceType::ACCESSPOINT)
- {
- ss.str("");
- ss << "Invalid sub resource type in Outposts ARN: " << this->GetSubResourceType() << ". Valid options: " << ARNResourceType::ACCESSPOINT;
- errorMessage = ss.str();
- }
- else if (!Utils::IsValidDnsLabel(this->GetSubResourceId()))
- {
- ss.str("");
- ss << "Invalid accesspoint name in Outposts ARN: " << this->GetSubResourceId() << ", accesspoint name should be a RFC 3986 Host label.";
- errorMessage = ss.str();
- }
- else
- {
- success = true;
- }
- }
- // ARN with unknown resource type.
- else
- {
- ss.str("");
- ss << "Invalid resource type in ARN: " << this->GetResourceType() << ". Valid options: " << ARNResourceType::ACCESSPOINT << ", " << ARNResourceType::OUTPOST << ".";
- errorMessage = ss.str();
- }
-
- if (success)
- {
- return S3ARNOutcome(success);
- }
- else
- {
- return S3ARNOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION", errorMessage, false));
- }
- }
-
- void S3ARN::ParseARNResource()
- {
- if (!*this) return;
-
- Aws::String resource = this->GetResource();
- Aws::Vector<Aws::String> resourceSegments;
- if (resource.find(':') != std::string::npos)
- {
- resourceSegments = Utils::StringUtils::Split(resource, ':', 4, Utils::StringUtils::SplitOptions::INCLUDE_EMPTY_ENTRIES);
- }
- else if (resource.find('/') != std::string::npos)
- {
- resourceSegments = Utils::StringUtils::Split(resource, '/', 4, Utils::StringUtils::SplitOptions::INCLUDE_EMPTY_ENTRIES);
- }
- else
- {
- resourceSegments.emplace_back(resource);
- }
-
- switch (resourceSegments.size())
- {
- case 1:
- m_resourceId = resourceSegments[0];
- break;
- case 2:
- m_resourceType = resourceSegments[0];
- m_resourceId = resourceSegments[1];
- break;
- case 3:
- m_resourceType = resourceSegments[0];
- m_resourceId = resourceSegments[1];
- m_resourceQualifier = resourceSegments[2];
- break;
- case 4:
- m_resourceType = resourceSegments[0];
- m_resourceId = resourceSegments[1];
- m_subResourceType = resourceSegments[2];
- m_subResourceId = resourceSegments[3];
- break;
- default:
- assert(false);
- break;
- }
- }
- }
-}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
index 7f9eb2b4c6..fa38c023e0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Client.cpp
@@ -16,12 +16,13 @@
#include <aws/core/utils/threading/Executor.h>
#include <aws/core/utils/DNS.h>
#include <aws/core/utils/logging/LogMacros.h>
-
+#include <aws/core/utils/logging/ErrorMacros.h>
#include <aws/core/utils/event/EventStream.h>
+#include <aws/core/platform/Environment.h>
+
#include <aws/s3/S3Client.h>
-#include <aws/s3/S3Endpoint.h>
#include <aws/s3/S3ErrorMarshaller.h>
-#include <aws/s3/S3ARN.h>
+#include <aws/s3/S3EndpointProvider.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>
#include <aws/s3/model/CompleteMultipartUploadRequest.h>
#include <aws/s3/model/CopyObjectRequest.h>
@@ -66,6 +67,7 @@
#include <aws/s3/model/GetBucketWebsiteRequest.h>
#include <aws/s3/model/GetObjectRequest.h>
#include <aws/s3/model/GetObjectAclRequest.h>
+#include <aws/s3/model/GetObjectAttributesRequest.h>
#include <aws/s3/model/GetObjectLegalHoldRequest.h>
#include <aws/s3/model/GetObjectLockConfigurationRequest.h>
#include <aws/s3/model/GetObjectRetentionRequest.h>
@@ -121,88 +123,151 @@ using namespace Aws::S3;
using namespace Aws::S3::Model;
using namespace Aws::Http;
using namespace Aws::Utils::Xml;
+using ResolveEndpointOutcome = Aws::Endpoint::ResolveEndpointOutcome;
-static const char* SERVICE_NAME = "s3";
-static const char* ALLOCATION_TAG = "S3Client";
-
+const char* S3Client::SERVICE_NAME = "s3";
+const char* S3Client::ALLOCATION_TAG = "S3Client";
-S3Client::S3Client(const Client::ClientConfiguration& clientConfiguration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads, bool useVirtualAddressing, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption) :
+S3Client::S3Client(const S3::S3ClientConfiguration& clientConfiguration,
+ std::shared_ptr<S3EndpointProviderBase> endpointProvider) :
BASECLASS(clientConfiguration,
- Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, Aws::MakeShared<DefaultAWSCredentialsProviderChain>(ALLOCATION_TAG),
- SERVICE_NAME, Aws::Region::ComputeSignerRegion(clientConfiguration.region), signPayloads, false),
- Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
- m_executor(clientConfiguration.executor), m_useVirtualAddressing(useVirtualAddressing), m_USEast1RegionalEndpointOption(USEast1RegionalEndPointOption)
+ Aws::MakeShared<Aws::Auth::DefaultAuthSignerProvider>(ALLOCATION_TAG,
+ Aws::MakeShared<DefaultAWSCredentialsProviderChain>(ALLOCATION_TAG),
+ SERVICE_NAME,
+ Aws::Region::ComputeSignerRegion(clientConfiguration.region),
+ clientConfiguration.payloadSigningPolicy,
+ /*doubleEncodeValue*/ false),
+ Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
+ m_clientConfiguration(clientConfiguration),
+ m_executor(clientConfiguration.executor),
+ m_endpointProvider(std::move(endpointProvider))
+{
+ init(m_clientConfiguration);
+}
+
+S3Client::S3Client(const AWSCredentials& credentials,
+ std::shared_ptr<S3EndpointProviderBase> endpointProvider,
+ const S3::S3ClientConfiguration& clientConfiguration) :
+ BASECLASS(clientConfiguration,
+ Aws::MakeShared<Aws::Auth::DefaultAuthSignerProvider>(ALLOCATION_TAG,
+ Aws::MakeShared<SimpleAWSCredentialsProvider>(ALLOCATION_TAG, credentials),
+ SERVICE_NAME,
+ Aws::Region::ComputeSignerRegion(clientConfiguration.region),
+ clientConfiguration.payloadSigningPolicy,
+ /*doubleEncodeValue*/ false),
+ Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
+ m_clientConfiguration(clientConfiguration),
+ m_executor(clientConfiguration.executor),
+ m_endpointProvider(std::move(endpointProvider))
{
- init(clientConfiguration);
+ init(m_clientConfiguration);
}
-S3Client::S3Client(const AWSCredentials& credentials, const Client::ClientConfiguration& clientConfiguration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads, bool useVirtualAddressing, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption) :
+S3Client::S3Client(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
+ std::shared_ptr<S3EndpointProviderBase> endpointProvider,
+ const S3::S3ClientConfiguration& clientConfiguration) :
+ BASECLASS(clientConfiguration,
+ Aws::MakeShared<Aws::Auth::DefaultAuthSignerProvider>(ALLOCATION_TAG,
+ credentialsProvider,
+ SERVICE_NAME,
+ Aws::Region::ComputeSignerRegion(clientConfiguration.region),
+ clientConfiguration.payloadSigningPolicy,
+ /*doubleEncodeValue*/ false),
+ Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
+ m_clientConfiguration(clientConfiguration),
+ m_executor(clientConfiguration.executor),
+ m_endpointProvider(std::move(endpointProvider))
+{
+ init(m_clientConfiguration);
+}
+
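For context, a hedged sketch of constructing the client through the configuration/endpoint-provider constructors added above; the allocation tag "s3-example" and the region value are arbitrary, the qualified name Aws::S3::S3EndpointProvider is assumed to be the default provider this diff wires in, and the usual Aws::InitAPI/ShutdownAPI lifecycle is assumed.

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/S3EndpointProvider.h>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3ClientConfiguration config;   // replaces the generic ClientConfiguration
        config.region = "us-east-1";
        auto endpointProvider = Aws::MakeShared<Aws::S3::S3EndpointProvider>("s3-example");
        Aws::S3::S3Client client(config, endpointProvider);
        // ... issue requests with `client` ...
    }
    Aws::ShutdownAPI(options);
    return 0;
}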
+ /* Legacy constructors due to deprecation */
+ S3Client::S3Client(const Client::ClientConfiguration& clientConfiguration,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads /*= Never*/,
+ bool useVirtualAddressing /*= true*/,
+ Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption) :
BASECLASS(clientConfiguration,
- Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, Aws::MakeShared<SimpleAWSCredentialsProvider>(ALLOCATION_TAG, credentials),
- SERVICE_NAME, Aws::Region::ComputeSignerRegion(clientConfiguration.region), signPayloads, false),
- Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
- m_executor(clientConfiguration.executor), m_useVirtualAddressing(useVirtualAddressing), m_USEast1RegionalEndpointOption(USEast1RegionalEndPointOption)
+ Aws::MakeShared<Aws::Auth::DefaultAuthSignerProvider>(ALLOCATION_TAG,
+ Aws::MakeShared<DefaultAWSCredentialsProviderChain>(ALLOCATION_TAG),
+ SERVICE_NAME,
+ Aws::Region::ComputeSignerRegion(clientConfiguration.region),
+ signPayloads,
+ /*doubleEncodeValue*/ false),
+ Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
+ m_clientConfiguration(clientConfiguration, signPayloads, useVirtualAddressing, USEast1RegionalEndPointOption),
+ m_executor(clientConfiguration.executor),
+ m_endpointProvider(Aws::MakeShared<S3EndpointProvider>(ALLOCATION_TAG))
+{
+ init(m_clientConfiguration);
+}
+
+S3Client::S3Client(const AWSCredentials& credentials,
+ const Client::ClientConfiguration& clientConfiguration,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads /*= Never*/,
+ bool useVirtualAddressing /*= true*/,
+ Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption) :
+ BASECLASS(clientConfiguration,
+ Aws::MakeShared<Aws::Auth::DefaultAuthSignerProvider>(ALLOCATION_TAG,
+ Aws::MakeShared<SimpleAWSCredentialsProvider>(ALLOCATION_TAG, credentials),
+ SERVICE_NAME,
+ Aws::Region::ComputeSignerRegion(clientConfiguration.region),
+ signPayloads,
+ /*doubleEncodeValue*/ false),
+ Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
+ m_clientConfiguration(clientConfiguration, signPayloads, useVirtualAddressing, USEast1RegionalEndPointOption),
+ m_executor(clientConfiguration.executor),
+ m_endpointProvider(Aws::MakeShared<S3EndpointProvider>(ALLOCATION_TAG))
{
- init(clientConfiguration);
+ init(m_clientConfiguration);
}
S3Client::S3Client(const std::shared_ptr<AWSCredentialsProvider>& credentialsProvider,
- const Client::ClientConfiguration& clientConfiguration, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads, bool useVirtualAddressing, Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption) :
+ const Client::ClientConfiguration& clientConfiguration,
+ Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy signPayloads /*= Never*/,
+ bool useVirtualAddressing /*= true*/,
+ Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION USEast1RegionalEndPointOption) :
BASECLASS(clientConfiguration,
- Aws::MakeShared<AWSAuthV4Signer>(ALLOCATION_TAG, credentialsProvider,
- SERVICE_NAME, Aws::Region::ComputeSignerRegion(clientConfiguration.region), signPayloads, false),
- Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
- m_executor(clientConfiguration.executor), m_useVirtualAddressing(useVirtualAddressing), m_USEast1RegionalEndpointOption(USEast1RegionalEndPointOption)
+ Aws::MakeShared<Aws::Auth::DefaultAuthSignerProvider>(ALLOCATION_TAG,
+ credentialsProvider,
+ SERVICE_NAME,
+ Aws::Region::ComputeSignerRegion(clientConfiguration.region),
+ signPayloads,
+ /*doubleEncodeValue*/ false),
+ Aws::MakeShared<S3ErrorMarshaller>(ALLOCATION_TAG)),
+ m_clientConfiguration(clientConfiguration, signPayloads, useVirtualAddressing, USEast1RegionalEndPointOption),
+ m_executor(clientConfiguration.executor),
+ m_endpointProvider(Aws::MakeShared<S3EndpointProvider>(ALLOCATION_TAG))
{
- init(clientConfiguration);
+ init(m_clientConfiguration);
}
+ /* End of legacy constructors due to deprecation */
S3Client::~S3Client()
{
}
-void S3Client::init(const ClientConfiguration& config)
+std::shared_ptr<S3EndpointProviderBase>& S3Client::accessEndpointProvider()
{
- SetServiceClientName("S3");
- LoadS3SpecificConfig(config.profileName);
- m_configScheme = SchemeMapper::ToString(config.scheme);
- m_scheme = m_configScheme;
- m_useDualStack = config.useDualStack;
- if (config.endpointOverride.empty())
- {
- m_useCustomEndpoint = false;
- m_baseUri = S3Endpoint::ForRegion(config.region, config.useDualStack, m_USEast1RegionalEndpointOption == Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::REGIONAL);
- }
- else
- {
- m_useCustomEndpoint = true;
- OverrideEndpoint(config.endpointOverride);
- }
- m_enableHostPrefixInjection = config.enableHostPrefixInjection;
+ return m_endpointProvider;
+}
+
+void S3Client::init(const S3::S3ClientConfiguration& config)
+{
+ AWSClient::SetServiceClientName("S3");
+ AWS_CHECK_PTR(SERVICE_NAME, m_endpointProvider);
+ m_endpointProvider->InitBuiltInParameters(config);
}
void S3Client::OverrideEndpoint(const Aws::String& endpoint)
{
- if (endpoint.compare(0, 7, "http://") == 0)
- {
- m_scheme = "http";
- m_baseUri = endpoint.substr(7);
- }
- else if (endpoint.compare(0, 8, "https://") == 0)
- {
- m_scheme = "https";
- m_baseUri = endpoint.substr(8);
- }
- else
- {
- m_scheme = m_configScheme;
- m_baseUri = endpoint;
- }
+ AWS_CHECK_PTR(SERVICE_NAME, m_endpointProvider);
+ m_endpointProvider->OverrideEndpoint(endpoint);
}
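With OverrideEndpoint now forwarding to the endpoint provider, pointing the client at a custom S3-compatible endpoint remains a one-liner. A hedged sketch: the address is an arbitrary example, `client` is a placeholder, and the endpointOverride route assumes the provider picks the value up via InitBuiltInParameters as wired in init() above.

// Option 1: set the override up front on the configuration (consumed when the client is built).
Aws::S3::S3ClientConfiguration config;
config.endpointOverride = "http://localhost:9000";   // arbitrary example address

// Option 2: override after construction; this now lands in S3EndpointProvider::OverrideEndpoint.
client.OverrideEndpoint("http://localhost:9000");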
AbortMultipartUploadOutcome S3Client::AbortMultipartUpload(const AbortMultipartUploadRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, AbortMultipartUpload, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("AbortMultipartUpload", "Required field: Bucket, is not set");
@@ -218,39 +283,15 @@ AbortMultipartUploadOutcome S3Client::AbortMultipartUpload(const AbortMultipartU
AWS_LOGSTREAM_ERROR("AbortMultipartUpload", "Required field: UploadId, is not set");
return AbortMultipartUploadOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UploadId]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return AbortMultipartUploadOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return AbortMultipartUploadOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-AbortMultipartUploadOutcomeCallable S3Client::AbortMultipartUploadCallable(const AbortMultipartUploadRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< AbortMultipartUploadOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->AbortMultipartUpload(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::AbortMultipartUploadAsync(const AbortMultipartUploadRequest& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->AbortMultipartUploadAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::AbortMultipartUploadAsyncHelper(const AbortMultipartUploadRequest& request, const AbortMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, AbortMultipartUpload(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, AbortMultipartUpload, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return AbortMultipartUploadOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
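The rewritten operations change only how the endpoint is resolved; the caller-facing request API is unchanged. A hedged caller-side sketch, with placeholder bucket, key and upload-id values and `client` standing for an Aws::S3::S3Client:

// Placeholder values throughout.
Aws::S3::Model::AbortMultipartUploadRequest request;
request.SetBucket("my-bucket");
request.SetKey("my/object/key");
request.SetUploadId(uploadId);                        // as returned by CreateMultipartUpload
auto outcome = client.AbortMultipartUpload(request);
if (!outcome.IsSuccess())
{
    AWS_LOGSTREAM_ERROR("example", "AbortMultipartUpload failed: " << outcome.GetError().GetMessage());
}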
CompleteMultipartUploadOutcome S3Client::CompleteMultipartUpload(const CompleteMultipartUploadRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, CompleteMultipartUpload, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("CompleteMultipartUpload", "Required field: Bucket, is not set");
@@ -266,39 +307,15 @@ CompleteMultipartUploadOutcome S3Client::CompleteMultipartUpload(const CompleteM
AWS_LOGSTREAM_ERROR("CompleteMultipartUpload", "Required field: UploadId, is not set");
return CompleteMultipartUploadOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UploadId]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return CompleteMultipartUploadOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return CompleteMultipartUploadOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-CompleteMultipartUploadOutcomeCallable S3Client::CompleteMultipartUploadCallable(const CompleteMultipartUploadRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< CompleteMultipartUploadOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->CompleteMultipartUpload(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::CompleteMultipartUploadAsync(const CompleteMultipartUploadRequest& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->CompleteMultipartUploadAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::CompleteMultipartUploadAsyncHelper(const CompleteMultipartUploadRequest& request, const CompleteMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, CompleteMultipartUpload(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, CompleteMultipartUpload, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return CompleteMultipartUploadOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST));
}
CopyObjectOutcome S3Client::CopyObject(const CopyObjectRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, CopyObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("CopyObject", "Required field: Bucket, is not set");
@@ -314,77 +331,28 @@ CopyObjectOutcome S3Client::CopyObject(const CopyObjectRequest& request) const
AWS_LOGSTREAM_ERROR("CopyObject", "Required field: Key, is not set");
return CopyObjectOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return CopyObjectOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return CopyObjectOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-CopyObjectOutcomeCallable S3Client::CopyObjectCallable(const CopyObjectRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< CopyObjectOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->CopyObject(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::CopyObjectAsync(const CopyObjectRequest& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->CopyObjectAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::CopyObjectAsyncHelper(const CopyObjectRequest& request, const CopyObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, CopyObject(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, CopyObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return CopyObjectOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
CreateBucketOutcome S3Client::CreateBucket(const CreateBucketRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, CreateBucket, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("CreateBucket", "Required field: Bucket, is not set");
return CreateBucketOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString();
- if (!computeEndpointOutcome.IsSuccess())
- {
- return CreateBucketOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetBucket();
- uri.SetPath(uri.GetPath() + ss.str());
- return CreateBucketOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-CreateBucketOutcomeCallable S3Client::CreateBucketCallable(const CreateBucketRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< CreateBucketOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->CreateBucket(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::CreateBucketAsync(const CreateBucketRequest& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->CreateBucketAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::CreateBucketAsyncHelper(const CreateBucketRequest& request, const CreateBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, CreateBucket(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, CreateBucket, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ return CreateBucketOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
CreateMultipartUploadOutcome S3Client::CreateMultipartUpload(const CreateMultipartUploadRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, CreateMultipartUpload, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("CreateMultipartUpload", "Required field: Bucket, is not set");
@@ -395,77 +363,31 @@ CreateMultipartUploadOutcome S3Client::CreateMultipartUpload(const CreateMultipa
AWS_LOGSTREAM_ERROR("CreateMultipartUpload", "Required field: Key, is not set");
return CreateMultipartUploadOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return CreateMultipartUploadOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, CreateMultipartUpload, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?uploads");
- uri.SetQueryString(ss.str());
- return CreateMultipartUploadOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-CreateMultipartUploadOutcomeCallable S3Client::CreateMultipartUploadCallable(const CreateMultipartUploadRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< CreateMultipartUploadOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->CreateMultipartUpload(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::CreateMultipartUploadAsync(const CreateMultipartUploadRequest& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->CreateMultipartUploadAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::CreateMultipartUploadAsyncHelper(const CreateMultipartUploadRequest& request, const CreateMultipartUploadResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, CreateMultipartUpload(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return CreateMultipartUploadOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST));
}
DeleteBucketOutcome S3Client::DeleteBucket(const DeleteBucketRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucket, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucket", "Required field: Bucket, is not set");
return DeleteBucketOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- uri.SetPath(uri.GetPath() + ss.str());
- return DeleteBucketOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketOutcomeCallable S3Client::DeleteBucketCallable(const DeleteBucketRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucket(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketAsync(const DeleteBucketRequest& request, const DeleteBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketAsyncHelper(const DeleteBucketRequest& request, const DeleteBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucket(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucket, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ return DeleteBucketOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketAnalyticsConfigurationOutcome S3Client::DeleteBucketAnalyticsConfiguration(const DeleteBucketAnalyticsConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketAnalyticsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketAnalyticsConfiguration", "Required field: Bucket, is not set");
@@ -476,112 +398,49 @@ DeleteBucketAnalyticsConfigurationOutcome S3Client::DeleteBucketAnalyticsConfigu
AWS_LOGSTREAM_ERROR("DeleteBucketAnalyticsConfiguration", "Required field: Id, is not set");
return DeleteBucketAnalyticsConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketAnalyticsConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketAnalyticsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?analytics");
- uri.SetQueryString(ss.str());
- return DeleteBucketAnalyticsConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketAnalyticsConfigurationOutcomeCallable S3Client::DeleteBucketAnalyticsConfigurationCallable(const DeleteBucketAnalyticsConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketAnalyticsConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketAnalyticsConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketAnalyticsConfigurationAsync(const DeleteBucketAnalyticsConfigurationRequest& request, const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketAnalyticsConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketAnalyticsConfigurationAsyncHelper(const DeleteBucketAnalyticsConfigurationRequest& request, const DeleteBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketAnalyticsConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketAnalyticsConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
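
The hunks above all apply one mechanical transformation: the per-operation ComputeEndpointString(bucket) / Aws::Http::URI flow and the hand-written Callable, Async and AsyncHelper definitions are removed, and each operation instead asks the injected m_endpointProvider to resolve its endpoint from the request's endpoint context parameters. As a reading aid only, the new body of a bucket sub-resource operation has the following shape; the operation name "DoSomething" and the "?cors" sub-resource are placeholders, not part of the patch, and the sketch merely mirrors the '+' lines above rather than being compilable on its own.

    // Sketch of the post-update operation body (placeholder names; mirrors the added lines).
    DoSomethingOutcome S3Client::DoSomething(const DoSomethingRequest& request) const
    {
      // Fail fast if the client was constructed without an endpoint provider.
      AWS_OPERATION_CHECK_PTR(m_endpointProvider, DoSomething, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
      if (!request.BucketHasBeenSet())
      {
        AWS_LOGSTREAM_ERROR("DoSomething", "Required field: Bucket, is not set");
        return DoSomethingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
      }
      // Endpoint resolution is driven by the request's endpoint context parameters
      // instead of ComputeEndpointString(request.GetBucket()).
      ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
      AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DoSomething, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
      // The sub-resource query string is set on the resolved endpoint; the explicit SIGV4
      // signer and the signer region/service overrides are no longer passed to MakeRequest.
      Aws::StringStream ss;
      ss.str("?cors");
      endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
      return DoSomethingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
    }
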
DeleteBucketCorsOutcome S3Client::DeleteBucketCors(const DeleteBucketCorsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketCors, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketCors", "Required field: Bucket, is not set");
return DeleteBucketCorsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketCorsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketCors, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?cors");
- uri.SetQueryString(ss.str());
- return DeleteBucketCorsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketCorsOutcomeCallable S3Client::DeleteBucketCorsCallable(const DeleteBucketCorsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketCorsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketCors(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketCorsAsync(const DeleteBucketCorsRequest& request, const DeleteBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketCorsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketCorsAsyncHelper(const DeleteBucketCorsRequest& request, const DeleteBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketCors(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketCorsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
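
Each removed block also drops the operation's Callable, Async and AsyncHelper definitions from this translation unit. The asynchronous entry points remain part of the public client surface in the 1.11-era SDK (they appear to move into templated helpers in the generated header, which is outside this hunk), so caller code keeps the same shape. A minimal usage sketch of the future-based wrapper, assuming default credential/region resolution and a placeholder bucket name:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteBucketCorsRequest.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;                          // default configuration
            Aws::S3::Model::DeleteBucketCorsRequest request;
            request.SetBucket("my-bucket");                    // placeholder bucket

            // The Callable wrapper returns a future; get() blocks until the operation
            // (now routed through the endpoint provider) completes.
            auto futureOutcome = client.DeleteBucketCorsCallable(request);
            auto outcome = futureOutcome.get();
            std::cout << (outcome.IsSuccess() ? "CORS configuration deleted"
                                              : outcome.GetError().GetMessage().c_str())
                      << std::endl;
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
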
DeleteBucketEncryptionOutcome S3Client::DeleteBucketEncryption(const DeleteBucketEncryptionRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketEncryption, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketEncryption", "Required field: Bucket, is not set");
return DeleteBucketEncryptionOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketEncryptionOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketEncryption, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?encryption");
- uri.SetQueryString(ss.str());
- return DeleteBucketEncryptionOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketEncryptionOutcomeCallable S3Client::DeleteBucketEncryptionCallable(const DeleteBucketEncryptionRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketEncryptionOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketEncryption(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketEncryptionAsync(const DeleteBucketEncryptionRequest& request, const DeleteBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketEncryptionAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketEncryptionAsyncHelper(const DeleteBucketEncryptionRequest& request, const DeleteBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketEncryption(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketEncryptionOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketIntelligentTieringConfigurationOutcome S3Client::DeleteBucketIntelligentTieringConfiguration(const DeleteBucketIntelligentTieringConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketIntelligentTieringConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketIntelligentTieringConfiguration", "Required field: Bucket, is not set");
@@ -592,38 +451,17 @@ DeleteBucketIntelligentTieringConfigurationOutcome S3Client::DeleteBucketIntelli
AWS_LOGSTREAM_ERROR("DeleteBucketIntelligentTieringConfiguration", "Required field: Id, is not set");
return DeleteBucketIntelligentTieringConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketIntelligentTieringConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketIntelligentTieringConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?intelligent-tiering");
- uri.SetQueryString(ss.str());
- return DeleteBucketIntelligentTieringConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketIntelligentTieringConfigurationOutcomeCallable S3Client::DeleteBucketIntelligentTieringConfigurationCallable(const DeleteBucketIntelligentTieringConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketIntelligentTieringConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketIntelligentTieringConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketIntelligentTieringConfigurationAsync(const DeleteBucketIntelligentTieringConfigurationRequest& request, const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketIntelligentTieringConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketIntelligentTieringConfigurationAsyncHelper(const DeleteBucketIntelligentTieringConfigurationRequest& request, const DeleteBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketIntelligentTieringConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketIntelligentTieringConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketInventoryConfigurationOutcome S3Client::DeleteBucketInventoryConfiguration(const DeleteBucketInventoryConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketInventoryConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketInventoryConfiguration", "Required field: Bucket, is not set");
@@ -634,75 +472,33 @@ DeleteBucketInventoryConfigurationOutcome S3Client::DeleteBucketInventoryConfigu
AWS_LOGSTREAM_ERROR("DeleteBucketInventoryConfiguration", "Required field: Id, is not set");
return DeleteBucketInventoryConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketInventoryConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketInventoryConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?inventory");
- uri.SetQueryString(ss.str());
- return DeleteBucketInventoryConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketInventoryConfigurationOutcomeCallable S3Client::DeleteBucketInventoryConfigurationCallable(const DeleteBucketInventoryConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketInventoryConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketInventoryConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketInventoryConfigurationAsync(const DeleteBucketInventoryConfigurationRequest& request, const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketInventoryConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketInventoryConfigurationAsyncHelper(const DeleteBucketInventoryConfigurationRequest& request, const DeleteBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketInventoryConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketInventoryConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketLifecycleOutcome S3Client::DeleteBucketLifecycle(const DeleteBucketLifecycleRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketLifecycle, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketLifecycle", "Required field: Bucket, is not set");
return DeleteBucketLifecycleOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketLifecycleOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketLifecycle, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?lifecycle");
- uri.SetQueryString(ss.str());
- return DeleteBucketLifecycleOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketLifecycleOutcomeCallable S3Client::DeleteBucketLifecycleCallable(const DeleteBucketLifecycleRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketLifecycleOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketLifecycle(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketLifecycleAsync(const DeleteBucketLifecycleRequest& request, const DeleteBucketLifecycleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketLifecycleAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketLifecycleAsyncHelper(const DeleteBucketLifecycleRequest& request, const DeleteBucketLifecycleResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketLifecycle(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketLifecycleOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketMetricsConfigurationOutcome S3Client::DeleteBucketMetricsConfiguration(const DeleteBucketMetricsConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketMetricsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketMetricsConfiguration", "Required field: Bucket, is not set");
@@ -713,223 +509,97 @@ DeleteBucketMetricsConfigurationOutcome S3Client::DeleteBucketMetricsConfigurati
AWS_LOGSTREAM_ERROR("DeleteBucketMetricsConfiguration", "Required field: Id, is not set");
return DeleteBucketMetricsConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketMetricsConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketMetricsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?metrics");
- uri.SetQueryString(ss.str());
- return DeleteBucketMetricsConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketMetricsConfigurationOutcomeCallable S3Client::DeleteBucketMetricsConfigurationCallable(const DeleteBucketMetricsConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketMetricsConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketMetricsConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketMetricsConfigurationAsync(const DeleteBucketMetricsConfigurationRequest& request, const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketMetricsConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketMetricsConfigurationAsyncHelper(const DeleteBucketMetricsConfigurationRequest& request, const DeleteBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketMetricsConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketMetricsConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketOwnershipControlsOutcome S3Client::DeleteBucketOwnershipControls(const DeleteBucketOwnershipControlsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketOwnershipControls, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketOwnershipControls", "Required field: Bucket, is not set");
return DeleteBucketOwnershipControlsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketOwnershipControlsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketOwnershipControls, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?ownershipControls");
- uri.SetQueryString(ss.str());
- return DeleteBucketOwnershipControlsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketOwnershipControlsOutcomeCallable S3Client::DeleteBucketOwnershipControlsCallable(const DeleteBucketOwnershipControlsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketOwnershipControlsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketOwnershipControls(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketOwnershipControlsAsync(const DeleteBucketOwnershipControlsRequest& request, const DeleteBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketOwnershipControlsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketOwnershipControlsAsyncHelper(const DeleteBucketOwnershipControlsRequest& request, const DeleteBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketOwnershipControls(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketOwnershipControlsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketPolicyOutcome S3Client::DeleteBucketPolicy(const DeleteBucketPolicyRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketPolicy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketPolicy", "Required field: Bucket, is not set");
return DeleteBucketPolicyOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketPolicyOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketPolicy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?policy");
- uri.SetQueryString(ss.str());
- return DeleteBucketPolicyOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketPolicyOutcomeCallable S3Client::DeleteBucketPolicyCallable(const DeleteBucketPolicyRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketPolicyOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketPolicy(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketPolicyAsync(const DeleteBucketPolicyRequest& request, const DeleteBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketPolicyAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketPolicyAsyncHelper(const DeleteBucketPolicyRequest& request, const DeleteBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketPolicy(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketPolicyOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketReplicationOutcome S3Client::DeleteBucketReplication(const DeleteBucketReplicationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketReplication, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketReplication", "Required field: Bucket, is not set");
return DeleteBucketReplicationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketReplicationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketReplication, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?replication");
- uri.SetQueryString(ss.str());
- return DeleteBucketReplicationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketReplicationOutcomeCallable S3Client::DeleteBucketReplicationCallable(const DeleteBucketReplicationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketReplicationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketReplication(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketReplicationAsync(const DeleteBucketReplicationRequest& request, const DeleteBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketReplicationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketReplicationAsyncHelper(const DeleteBucketReplicationRequest& request, const DeleteBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketReplication(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketReplicationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketTaggingOutcome S3Client::DeleteBucketTagging(const DeleteBucketTaggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketTagging", "Required field: Bucket, is not set");
return DeleteBucketTaggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketTaggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?tagging");
- uri.SetQueryString(ss.str());
- return DeleteBucketTaggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketTaggingOutcomeCallable S3Client::DeleteBucketTaggingCallable(const DeleteBucketTaggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketTaggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketTagging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketTaggingAsync(const DeleteBucketTaggingRequest& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketTaggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketTaggingAsyncHelper(const DeleteBucketTaggingRequest& request, const DeleteBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketTagging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketTaggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteBucketWebsiteOutcome S3Client::DeleteBucketWebsite(const DeleteBucketWebsiteRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteBucketWebsite, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteBucketWebsite", "Required field: Bucket, is not set");
return DeleteBucketWebsiteOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteBucketWebsiteOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteBucketWebsite, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?website");
- uri.SetQueryString(ss.str());
- return DeleteBucketWebsiteOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteBucketWebsiteOutcomeCallable S3Client::DeleteBucketWebsiteCallable(const DeleteBucketWebsiteRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteBucketWebsiteOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteBucketWebsite(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteBucketWebsiteAsync(const DeleteBucketWebsiteRequest& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteBucketWebsiteAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteBucketWebsiteAsyncHelper(const DeleteBucketWebsiteRequest& request, const DeleteBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteBucketWebsite(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteBucketWebsiteOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteObjectOutcome S3Client::DeleteObject(const DeleteObjectRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteObject", "Required field: Bucket, is not set");
@@ -940,39 +610,15 @@ DeleteObjectOutcome S3Client::DeleteObject(const DeleteObjectRequest& request) c
AWS_LOGSTREAM_ERROR("DeleteObject", "Required field: Key, is not set");
return DeleteObjectOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteObjectOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return DeleteObjectOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteObjectOutcomeCallable S3Client::DeleteObjectCallable(const DeleteObjectRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteObjectOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteObject(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteObjectAsync(const DeleteObjectRequest& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteObjectAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteObjectAsyncHelper(const DeleteObjectRequest& request, const DeleteObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteObject(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return DeleteObjectOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
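
Object-keyed operations such as DeleteObject follow the same pattern, except that the key is appended to the resolved endpoint with AddPathSegments(request.GetKey()) instead of the old manual concatenation of "/" plus the key into uri.SetPath(); DeleteObjectTagging below combines both steps, adding the path segments first and then setting the "?tagging" query string. The public synchronous call is unchanged by this internal refactor; a minimal caller-side sketch, assuming default client configuration and placeholder bucket/key names:

    #include <aws/core/Aws.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/DeleteObjectRequest.h>
    #include <iostream>

    int main()
    {
        Aws::SDKOptions options;
        Aws::InitAPI(options);
        {
            Aws::S3::S3Client client;                        // default credential/region chain
            Aws::S3::Model::DeleteObjectRequest request;
            request.SetBucket("my-bucket");                  // placeholder bucket
            request.SetKey("path/to/object.txt");            // placeholder key

            // Same public API as before the update; only the internal endpoint
            // resolution and path handling shown in the hunk above changed.
            auto outcome = client.DeleteObject(request);
            if (!outcome.IsSuccess())
            {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        }
        Aws::ShutdownAPI(options);
        return 0;
    }
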
DeleteObjectTaggingOutcome S3Client::DeleteObjectTagging(const DeleteObjectTaggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteObjectTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteObjectTagging", "Required field: Bucket, is not set");
@@ -983,189 +629,82 @@ DeleteObjectTaggingOutcome S3Client::DeleteObjectTagging(const DeleteObjectTaggi
AWS_LOGSTREAM_ERROR("DeleteObjectTagging", "Required field: Key, is not set");
return DeleteObjectTaggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteObjectTaggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteObjectTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?tagging");
- uri.SetQueryString(ss.str());
- return DeleteObjectTaggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteObjectTaggingOutcomeCallable S3Client::DeleteObjectTaggingCallable(const DeleteObjectTaggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteObjectTaggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteObjectTagging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteObjectTaggingAsync(const DeleteObjectTaggingRequest& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteObjectTaggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteObjectTaggingAsyncHelper(const DeleteObjectTaggingRequest& request, const DeleteObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteObjectTagging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteObjectTaggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
DeleteObjectsOutcome S3Client::DeleteObjects(const DeleteObjectsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeleteObjects, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeleteObjects", "Required field: Bucket, is not set");
return DeleteObjectsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeleteObjectsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeleteObjects, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?delete");
- uri.SetQueryString(ss.str());
- return DeleteObjectsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeleteObjectsOutcomeCallable S3Client::DeleteObjectsCallable(const DeleteObjectsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeleteObjectsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeleteObjects(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeleteObjectsAsync(const DeleteObjectsRequest& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeleteObjectsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeleteObjectsAsyncHelper(const DeleteObjectsRequest& request, const DeleteObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeleteObjects(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeleteObjectsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST));
}
DeletePublicAccessBlockOutcome S3Client::DeletePublicAccessBlock(const DeletePublicAccessBlockRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, DeletePublicAccessBlock, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("DeletePublicAccessBlock", "Required field: Bucket, is not set");
return DeletePublicAccessBlockOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return DeletePublicAccessBlockOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, DeletePublicAccessBlock, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?publicAccessBlock");
- uri.SetQueryString(ss.str());
- return DeletePublicAccessBlockOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_DELETE, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-DeletePublicAccessBlockOutcomeCallable S3Client::DeletePublicAccessBlockCallable(const DeletePublicAccessBlockRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< DeletePublicAccessBlockOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->DeletePublicAccessBlock(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::DeletePublicAccessBlockAsync(const DeletePublicAccessBlockRequest& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->DeletePublicAccessBlockAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::DeletePublicAccessBlockAsyncHelper(const DeletePublicAccessBlockRequest& request, const DeletePublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, DeletePublicAccessBlock(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return DeletePublicAccessBlockOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_DELETE));
}
GetBucketAccelerateConfigurationOutcome S3Client::GetBucketAccelerateConfiguration(const GetBucketAccelerateConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketAccelerateConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketAccelerateConfiguration", "Required field: Bucket, is not set");
return GetBucketAccelerateConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketAccelerateConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketAccelerateConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?accelerate");
- uri.SetQueryString(ss.str());
- return GetBucketAccelerateConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketAccelerateConfigurationOutcomeCallable S3Client::GetBucketAccelerateConfigurationCallable(const GetBucketAccelerateConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketAccelerateConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketAccelerateConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketAccelerateConfigurationAsync(const GetBucketAccelerateConfigurationRequest& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketAccelerateConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketAccelerateConfigurationAsyncHelper(const GetBucketAccelerateConfigurationRequest& request, const GetBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketAccelerateConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketAccelerateConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketAclOutcome S3Client::GetBucketAcl(const GetBucketAclRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketAcl", "Required field: Bucket, is not set");
return GetBucketAclOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketAclOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?acl");
- uri.SetQueryString(ss.str());
- return GetBucketAclOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketAclOutcomeCallable S3Client::GetBucketAclCallable(const GetBucketAclRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketAclOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketAcl(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketAclAsync(const GetBucketAclRequest& request, const GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketAclAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketAclAsyncHelper(const GetBucketAclRequest& request, const GetBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketAcl(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketAclOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketAnalyticsConfigurationOutcome S3Client::GetBucketAnalyticsConfiguration(const GetBucketAnalyticsConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketAnalyticsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketAnalyticsConfiguration", "Required field: Bucket, is not set");
@@ -1176,112 +715,49 @@ GetBucketAnalyticsConfigurationOutcome S3Client::GetBucketAnalyticsConfiguration
AWS_LOGSTREAM_ERROR("GetBucketAnalyticsConfiguration", "Required field: Id, is not set");
return GetBucketAnalyticsConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketAnalyticsConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketAnalyticsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?analytics");
- uri.SetQueryString(ss.str());
- return GetBucketAnalyticsConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketAnalyticsConfigurationOutcomeCallable S3Client::GetBucketAnalyticsConfigurationCallable(const GetBucketAnalyticsConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketAnalyticsConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketAnalyticsConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketAnalyticsConfigurationAsync(const GetBucketAnalyticsConfigurationRequest& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketAnalyticsConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketAnalyticsConfigurationAsyncHelper(const GetBucketAnalyticsConfigurationRequest& request, const GetBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketAnalyticsConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketAnalyticsConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketCorsOutcome S3Client::GetBucketCors(const GetBucketCorsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketCors, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketCors", "Required field: Bucket, is not set");
return GetBucketCorsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketCorsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketCors, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?cors");
- uri.SetQueryString(ss.str());
- return GetBucketCorsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketCorsOutcomeCallable S3Client::GetBucketCorsCallable(const GetBucketCorsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketCorsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketCors(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketCorsAsync(const GetBucketCorsRequest& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketCorsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketCorsAsyncHelper(const GetBucketCorsRequest& request, const GetBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketCors(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketCorsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketEncryptionOutcome S3Client::GetBucketEncryption(const GetBucketEncryptionRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketEncryption, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketEncryption", "Required field: Bucket, is not set");
return GetBucketEncryptionOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketEncryptionOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketEncryption, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?encryption");
- uri.SetQueryString(ss.str());
- return GetBucketEncryptionOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketEncryptionOutcomeCallable S3Client::GetBucketEncryptionCallable(const GetBucketEncryptionRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketEncryptionOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketEncryption(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketEncryptionAsync(const GetBucketEncryptionRequest& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketEncryptionAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketEncryptionAsyncHelper(const GetBucketEncryptionRequest& request, const GetBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketEncryption(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketEncryptionOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketIntelligentTieringConfigurationOutcome S3Client::GetBucketIntelligentTieringConfiguration(const GetBucketIntelligentTieringConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketIntelligentTieringConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketIntelligentTieringConfiguration", "Required field: Bucket, is not set");
@@ -1292,38 +768,17 @@ GetBucketIntelligentTieringConfigurationOutcome S3Client::GetBucketIntelligentTi
AWS_LOGSTREAM_ERROR("GetBucketIntelligentTieringConfiguration", "Required field: Id, is not set");
return GetBucketIntelligentTieringConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketIntelligentTieringConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketIntelligentTieringConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?intelligent-tiering");
- uri.SetQueryString(ss.str());
- return GetBucketIntelligentTieringConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketIntelligentTieringConfigurationOutcomeCallable S3Client::GetBucketIntelligentTieringConfigurationCallable(const GetBucketIntelligentTieringConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketIntelligentTieringConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketIntelligentTieringConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketIntelligentTieringConfigurationAsync(const GetBucketIntelligentTieringConfigurationRequest& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketIntelligentTieringConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketIntelligentTieringConfigurationAsyncHelper(const GetBucketIntelligentTieringConfigurationRequest& request, const GetBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketIntelligentTieringConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketIntelligentTieringConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketInventoryConfigurationOutcome S3Client::GetBucketInventoryConfiguration(const GetBucketInventoryConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketInventoryConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketInventoryConfiguration", "Required field: Bucket, is not set");
@@ -1334,149 +789,65 @@ GetBucketInventoryConfigurationOutcome S3Client::GetBucketInventoryConfiguration
AWS_LOGSTREAM_ERROR("GetBucketInventoryConfiguration", "Required field: Id, is not set");
return GetBucketInventoryConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketInventoryConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketInventoryConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?inventory");
- uri.SetQueryString(ss.str());
- return GetBucketInventoryConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketInventoryConfigurationOutcomeCallable S3Client::GetBucketInventoryConfigurationCallable(const GetBucketInventoryConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketInventoryConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketInventoryConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketInventoryConfigurationAsync(const GetBucketInventoryConfigurationRequest& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketInventoryConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketInventoryConfigurationAsyncHelper(const GetBucketInventoryConfigurationRequest& request, const GetBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketInventoryConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketInventoryConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketLifecycleConfigurationOutcome S3Client::GetBucketLifecycleConfiguration(const GetBucketLifecycleConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketLifecycleConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketLifecycleConfiguration", "Required field: Bucket, is not set");
return GetBucketLifecycleConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketLifecycleConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketLifecycleConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?lifecycle");
- uri.SetQueryString(ss.str());
- return GetBucketLifecycleConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketLifecycleConfigurationOutcomeCallable S3Client::GetBucketLifecycleConfigurationCallable(const GetBucketLifecycleConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketLifecycleConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketLifecycleConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketLifecycleConfigurationAsync(const GetBucketLifecycleConfigurationRequest& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketLifecycleConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketLifecycleConfigurationAsyncHelper(const GetBucketLifecycleConfigurationRequest& request, const GetBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketLifecycleConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketLifecycleConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketLocationOutcome S3Client::GetBucketLocation(const GetBucketLocationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketLocation, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketLocation", "Required field: Bucket, is not set");
return GetBucketLocationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketLocationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketLocation, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?location");
- uri.SetQueryString(ss.str());
- return GetBucketLocationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketLocationOutcomeCallable S3Client::GetBucketLocationCallable(const GetBucketLocationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketLocationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketLocation(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketLocationAsync(const GetBucketLocationRequest& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketLocationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketLocationAsyncHelper(const GetBucketLocationRequest& request, const GetBucketLocationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketLocation(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketLocationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketLoggingOutcome S3Client::GetBucketLogging(const GetBucketLoggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketLogging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketLogging", "Required field: Bucket, is not set");
return GetBucketLoggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketLoggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketLogging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?logging");
- uri.SetQueryString(ss.str());
- return GetBucketLoggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketLoggingOutcomeCallable S3Client::GetBucketLoggingCallable(const GetBucketLoggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketLoggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketLogging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketLoggingAsync(const GetBucketLoggingRequest& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketLoggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketLoggingAsyncHelper(const GetBucketLoggingRequest& request, const GetBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketLogging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketLoggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketMetricsConfigurationOutcome S3Client::GetBucketMetricsConfiguration(const GetBucketMetricsConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketMetricsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketMetricsConfiguration", "Required field: Bucket, is not set");
@@ -1487,371 +858,161 @@ GetBucketMetricsConfigurationOutcome S3Client::GetBucketMetricsConfiguration(con
AWS_LOGSTREAM_ERROR("GetBucketMetricsConfiguration", "Required field: Id, is not set");
return GetBucketMetricsConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketMetricsConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketMetricsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?metrics");
- uri.SetQueryString(ss.str());
- return GetBucketMetricsConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketMetricsConfigurationOutcomeCallable S3Client::GetBucketMetricsConfigurationCallable(const GetBucketMetricsConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketMetricsConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketMetricsConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketMetricsConfigurationAsync(const GetBucketMetricsConfigurationRequest& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketMetricsConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketMetricsConfigurationAsyncHelper(const GetBucketMetricsConfigurationRequest& request, const GetBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketMetricsConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketMetricsConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketNotificationConfigurationOutcome S3Client::GetBucketNotificationConfiguration(const GetBucketNotificationConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketNotificationConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketNotificationConfiguration", "Required field: Bucket, is not set");
return GetBucketNotificationConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketNotificationConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketNotificationConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?notification");
- uri.SetQueryString(ss.str());
- return GetBucketNotificationConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketNotificationConfigurationOutcomeCallable S3Client::GetBucketNotificationConfigurationCallable(const GetBucketNotificationConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketNotificationConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketNotificationConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketNotificationConfigurationAsync(const GetBucketNotificationConfigurationRequest& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketNotificationConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketNotificationConfigurationAsyncHelper(const GetBucketNotificationConfigurationRequest& request, const GetBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketNotificationConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketNotificationConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketOwnershipControlsOutcome S3Client::GetBucketOwnershipControls(const GetBucketOwnershipControlsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketOwnershipControls, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketOwnershipControls", "Required field: Bucket, is not set");
return GetBucketOwnershipControlsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketOwnershipControlsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketOwnershipControls, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?ownershipControls");
- uri.SetQueryString(ss.str());
- return GetBucketOwnershipControlsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketOwnershipControlsOutcomeCallable S3Client::GetBucketOwnershipControlsCallable(const GetBucketOwnershipControlsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketOwnershipControlsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketOwnershipControls(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketOwnershipControlsAsync(const GetBucketOwnershipControlsRequest& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketOwnershipControlsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketOwnershipControlsAsyncHelper(const GetBucketOwnershipControlsRequest& request, const GetBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketOwnershipControls(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketOwnershipControlsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketPolicyOutcome S3Client::GetBucketPolicy(const GetBucketPolicyRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketPolicy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketPolicy", "Required field: Bucket, is not set");
return GetBucketPolicyOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketPolicyOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketPolicy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?policy");
- uri.SetQueryString(ss.str());
- return GetBucketPolicyOutcome(MakeRequestWithUnparsedResponse(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketPolicyOutcomeCallable S3Client::GetBucketPolicyCallable(const GetBucketPolicyRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketPolicyOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketPolicy(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketPolicyAsync(const GetBucketPolicyRequest& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketPolicyAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketPolicyAsyncHelper(const GetBucketPolicyRequest& request, const GetBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketPolicy(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketPolicyOutcome(MakeRequestWithUnparsedResponse(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
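
GetBucketPolicy (like GetObject further down) goes through MakeRequestWithUnparsedResponse because its payload is returned verbatim rather than parsed from XML. A caller-side sketch; the stream accessor name on the result is an assumption about the generated model, not taken from this patch:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetBucketPolicyRequest.h>
    #include <sstream>

    // Reads the raw policy JSON for a bucket; returns an empty string on failure.
    Aws::String ReadBucketPolicy(const Aws::S3::S3Client& client, const Aws::String& bucket)
    {
        Aws::S3::Model::GetBucketPolicyRequest request;
        request.SetBucket(bucket);

        auto outcome = client.GetBucketPolicy(request);
        if (!outcome.IsSuccess())
        {
            return {};
        }
        std::stringstream policyJson;
        policyJson << outcome.GetResult().GetPolicy().rdbuf(); // GetPolicy(): assumed stream accessor
        return Aws::String(policyJson.str().c_str());
    }
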
GetBucketPolicyStatusOutcome S3Client::GetBucketPolicyStatus(const GetBucketPolicyStatusRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketPolicyStatus, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketPolicyStatus", "Required field: Bucket, is not set");
return GetBucketPolicyStatusOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketPolicyStatusOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketPolicyStatus, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?policyStatus");
- uri.SetQueryString(ss.str());
- return GetBucketPolicyStatusOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketPolicyStatusOutcomeCallable S3Client::GetBucketPolicyStatusCallable(const GetBucketPolicyStatusRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketPolicyStatusOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketPolicyStatus(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketPolicyStatusAsync(const GetBucketPolicyStatusRequest& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketPolicyStatusAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketPolicyStatusAsyncHelper(const GetBucketPolicyStatusRequest& request, const GetBucketPolicyStatusResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketPolicyStatus(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketPolicyStatusOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketReplicationOutcome S3Client::GetBucketReplication(const GetBucketReplicationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketReplication, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketReplication", "Required field: Bucket, is not set");
return GetBucketReplicationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketReplicationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketReplication, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?replication");
- uri.SetQueryString(ss.str());
- return GetBucketReplicationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketReplicationOutcomeCallable S3Client::GetBucketReplicationCallable(const GetBucketReplicationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketReplicationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketReplication(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketReplicationAsync(const GetBucketReplicationRequest& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketReplicationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketReplicationAsyncHelper(const GetBucketReplicationRequest& request, const GetBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketReplication(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketReplicationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketRequestPaymentOutcome S3Client::GetBucketRequestPayment(const GetBucketRequestPaymentRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketRequestPayment, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketRequestPayment", "Required field: Bucket, is not set");
return GetBucketRequestPaymentOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketRequestPaymentOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketRequestPayment, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?requestPayment");
- uri.SetQueryString(ss.str());
- return GetBucketRequestPaymentOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketRequestPaymentOutcomeCallable S3Client::GetBucketRequestPaymentCallable(const GetBucketRequestPaymentRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketRequestPaymentOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketRequestPayment(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketRequestPaymentAsync(const GetBucketRequestPaymentRequest& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketRequestPaymentAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketRequestPaymentAsyncHelper(const GetBucketRequestPaymentRequest& request, const GetBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketRequestPayment(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketRequestPaymentOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketTaggingOutcome S3Client::GetBucketTagging(const GetBucketTaggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketTagging", "Required field: Bucket, is not set");
return GetBucketTaggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketTaggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?tagging");
- uri.SetQueryString(ss.str());
- return GetBucketTaggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketTaggingOutcomeCallable S3Client::GetBucketTaggingCallable(const GetBucketTaggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketTaggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketTagging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketTaggingAsync(const GetBucketTaggingRequest& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketTaggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketTaggingAsyncHelper(const GetBucketTaggingRequest& request, const GetBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketTagging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketTaggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketVersioningOutcome S3Client::GetBucketVersioning(const GetBucketVersioningRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketVersioning, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketVersioning", "Required field: Bucket, is not set");
return GetBucketVersioningOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketVersioningOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketVersioning, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?versioning");
- uri.SetQueryString(ss.str());
- return GetBucketVersioningOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketVersioningOutcomeCallable S3Client::GetBucketVersioningCallable(const GetBucketVersioningRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketVersioningOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketVersioning(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketVersioningAsync(const GetBucketVersioningRequest& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketVersioningAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketVersioningAsyncHelper(const GetBucketVersioningRequest& request, const GetBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketVersioning(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketVersioningOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetBucketWebsiteOutcome S3Client::GetBucketWebsite(const GetBucketWebsiteRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetBucketWebsite, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetBucketWebsite", "Required field: Bucket, is not set");
return GetBucketWebsiteOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetBucketWebsiteOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetBucketWebsite, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?website");
- uri.SetQueryString(ss.str());
- return GetBucketWebsiteOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetBucketWebsiteOutcomeCallable S3Client::GetBucketWebsiteCallable(const GetBucketWebsiteRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetBucketWebsiteOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetBucketWebsite(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetBucketWebsiteAsync(const GetBucketWebsiteRequest& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetBucketWebsiteAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetBucketWebsiteAsyncHelper(const GetBucketWebsiteRequest& request, const GetBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetBucketWebsite(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetBucketWebsiteOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetObjectOutcome S3Client::GetObject(const GetObjectRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObject", "Required field: Bucket, is not set");
@@ -1862,17 +1023,10 @@ GetObjectOutcome S3Client::GetObject(const GetObjectRequest& request) const
AWS_LOGSTREAM_ERROR("GetObject", "Required field: Key, is not set");
return GetObjectOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return GetObjectOutcome(MakeRequestWithUnparsedResponse(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return GetObjectOutcome(MakeRequestWithUnparsedResponse(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetObjectOutcomeCallable S3Client::GetObjectCallable(const GetObjectRequest& request) const
@@ -1885,16 +1039,15 @@ GetObjectOutcomeCallable S3Client::GetObjectCallable(const GetObjectRequest& req
void S3Client::GetObjectAsync(const GetObjectRequest& request, const GetObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetObjectAsyncHelper(const GetObjectRequest& request, const GetObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetObject(request), context);
+ m_executor->Submit( [this, request, handler, context]()
+ {
+ handler(this, request, GetObject(request), context);
+ } );
}
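
The Async entry points now submit a lambda that invokes the handler directly instead of routing through a per-operation *AsyncHelper. From the caller's side the shape is unchanged; a sketch using a generic lambda so the handler's exact outcome parameter type is left to the typedef (bucket and key are hypothetical):

    #include <aws/core/client/AsyncCallerContext.h>
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetObjectRequest.h>
    #include <memory>

    void StartDownload(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::GetObjectRequest request;
        request.SetBucket("example-bucket"); // hypothetical
        request.SetKey("example-key");       // hypothetical

        client.GetObjectAsync(request,
            [](const Aws::S3::S3Client* /*client*/,
               const Aws::S3::Model::GetObjectRequest& /*req*/,
               auto&& outcome,               // GetObjectOutcome, per the handler typedef
               const std::shared_ptr<const Aws::Client::AsyncCallerContext>& /*ctx*/)
            {
                if (outcome.IsSuccess())
                {
                    // The response body is streamed; consume outcome.GetResult() here.
                }
            });
    }
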
GetObjectAclOutcome S3Client::GetObjectAcl(const GetObjectAclRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObjectAcl", "Required field: Bucket, is not set");
@@ -1905,41 +1058,45 @@ GetObjectAclOutcome S3Client::GetObjectAcl(const GetObjectAclRequest& request) c
AWS_LOGSTREAM_ERROR("GetObjectAcl", "Required field: Key, is not set");
return GetObjectAclOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectAclOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?acl");
- uri.SetQueryString(ss.str());
- return GetObjectAclOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetObjectAclOutcomeCallable S3Client::GetObjectAclCallable(const GetObjectAclRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetObjectAclOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetObjectAcl(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetObjectAclAsync(const GetObjectAclRequest& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectAclAsyncHelper( request, handler, context ); } );
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectAclOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
-void S3Client::GetObjectAclAsyncHelper(const GetObjectAclRequest& request, const GetObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
+GetObjectAttributesOutcome S3Client::GetObjectAttributes(const GetObjectAttributesRequest& request) const
{
- handler(this, request, GetObjectAcl(request), context);
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectAttributes, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
+ if (!request.BucketHasBeenSet())
+ {
+ AWS_LOGSTREAM_ERROR("GetObjectAttributes", "Required field: Bucket, is not set");
+ return GetObjectAttributesOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
+ }
+ if (!request.KeyHasBeenSet())
+ {
+ AWS_LOGSTREAM_ERROR("GetObjectAttributes", "Required field: Key, is not set");
+ return GetObjectAttributesOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
+ }
+ if (!request.ObjectAttributesHasBeenSet())
+ {
+ AWS_LOGSTREAM_ERROR("GetObjectAttributes", "Required field: ObjectAttributes, is not set");
+ return GetObjectAttributesOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [ObjectAttributes]", false));
+ }
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectAttributes, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ Aws::StringStream ss;
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ ss.str("?attributes");
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectAttributesOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
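
GetObjectAttributes is one of the operations newly present in this SDK drop; it follows the same resolve-endpoint / AddPathSegments / "?attributes" pattern as the surrounding code. A caller sketch; the attribute enum values, the AddObjectAttributes mutator, and the result accessor mirror the S3 GetObjectAttributes API and should be treated as assumptions about the generated model:

    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/GetObjectAttributesRequest.h>
    #include <aws/s3/model/ObjectAttributes.h>

    // Fetches selected attributes without downloading the object body.
    long long GetObjectSizeViaAttributes(const Aws::S3::S3Client& client,
                                         const Aws::String& bucket, const Aws::String& key)
    {
        Aws::S3::Model::GetObjectAttributesRequest request;
        request.SetBucket(bucket);
        request.SetKey(key);
        // ObjectAttributes is required, as enforced by the check above.
        request.AddObjectAttributes(Aws::S3::Model::ObjectAttributes::ObjectSize); // assumed enum value
        request.AddObjectAttributes(Aws::S3::Model::ObjectAttributes::ETag);       // assumed enum value

        auto outcome = client.GetObjectAttributes(request);
        return outcome.IsSuccess() ? outcome.GetResult().GetObjectSize() : -1;     // assumed accessor
    }
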
GetObjectLegalHoldOutcome S3Client::GetObjectLegalHold(const GetObjectLegalHoldRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectLegalHold, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObjectLegalHold", "Required field: Bucket, is not set");
@@ -1950,78 +1107,34 @@ GetObjectLegalHoldOutcome S3Client::GetObjectLegalHold(const GetObjectLegalHoldR
AWS_LOGSTREAM_ERROR("GetObjectLegalHold", "Required field: Key, is not set");
return GetObjectLegalHoldOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectLegalHoldOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectLegalHold, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?legal-hold");
- uri.SetQueryString(ss.str());
- return GetObjectLegalHoldOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetObjectLegalHoldOutcomeCallable S3Client::GetObjectLegalHoldCallable(const GetObjectLegalHoldRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetObjectLegalHoldOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetObjectLegalHold(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetObjectLegalHoldAsync(const GetObjectLegalHoldRequest& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectLegalHoldAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetObjectLegalHoldAsyncHelper(const GetObjectLegalHoldRequest& request, const GetObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetObjectLegalHold(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectLegalHoldOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetObjectLockConfigurationOutcome S3Client::GetObjectLockConfiguration(const GetObjectLockConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectLockConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObjectLockConfiguration", "Required field: Bucket, is not set");
return GetObjectLockConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectLockConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectLockConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?object-lock");
- uri.SetQueryString(ss.str());
- return GetObjectLockConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetObjectLockConfigurationOutcomeCallable S3Client::GetObjectLockConfigurationCallable(const GetObjectLockConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetObjectLockConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetObjectLockConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetObjectLockConfigurationAsync(const GetObjectLockConfigurationRequest& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectLockConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetObjectLockConfigurationAsyncHelper(const GetObjectLockConfigurationRequest& request, const GetObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetObjectLockConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectLockConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
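The pattern above repeats for every operation in this diff: the body now resolves the endpoint through m_endpointProvider and hands the resolved endpoint to MakeRequest, instead of ComputeEndpointString plus explicit SIGV4 signer/region overrides. The caller-facing model API is unchanged; a minimal caller-side sketch (bucket name is a placeholder, error handling kept to a message print):

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectLockConfigurationRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // Endpoint resolution and signing are internal to the client; callers
        // still only populate the model request and inspect the outcome.
        Aws::S3::S3Client s3;
        Aws::S3::Model::GetObjectLockConfigurationRequest request;
        request.SetBucket("my-bucket");  // placeholder
        auto outcome = s3.GetObjectLockConfiguration(request);
        if (!outcome.IsSuccess())
        {
            // Endpoint-resolution failures now surface as core errors on the outcome.
            std::cerr << outcome.GetError().GetMessage() << std::endl;
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}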
GetObjectRetentionOutcome S3Client::GetObjectRetention(const GetObjectRetentionRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectRetention, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObjectRetention", "Required field: Bucket, is not set");
@@ -2032,41 +1145,18 @@ GetObjectRetentionOutcome S3Client::GetObjectRetention(const GetObjectRetentionR
AWS_LOGSTREAM_ERROR("GetObjectRetention", "Required field: Key, is not set");
return GetObjectRetentionOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectRetentionOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectRetention, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?retention");
- uri.SetQueryString(ss.str());
- return GetObjectRetentionOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetObjectRetentionOutcomeCallable S3Client::GetObjectRetentionCallable(const GetObjectRetentionRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetObjectRetentionOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetObjectRetention(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetObjectRetentionAsync(const GetObjectRetentionRequest& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectRetentionAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetObjectRetentionAsyncHelper(const GetObjectRetentionRequest& request, const GetObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetObjectRetention(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectRetentionOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetObjectTaggingOutcome S3Client::GetObjectTagging(const GetObjectTaggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObjectTagging", "Required field: Bucket, is not set");
@@ -2077,41 +1167,18 @@ GetObjectTaggingOutcome S3Client::GetObjectTagging(const GetObjectTaggingRequest
AWS_LOGSTREAM_ERROR("GetObjectTagging", "Required field: Key, is not set");
return GetObjectTaggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectTaggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?tagging");
- uri.SetQueryString(ss.str());
- return GetObjectTaggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetObjectTaggingOutcomeCallable S3Client::GetObjectTaggingCallable(const GetObjectTaggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetObjectTaggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetObjectTagging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetObjectTaggingAsync(const GetObjectTaggingRequest& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectTaggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetObjectTaggingAsyncHelper(const GetObjectTaggingRequest& request, const GetObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetObjectTagging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectTaggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
GetObjectTorrentOutcome S3Client::GetObjectTorrent(const GetObjectTorrentRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetObjectTorrent, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetObjectTorrent", "Required field: Bucket, is not set");
@@ -2122,114 +1189,47 @@ GetObjectTorrentOutcome S3Client::GetObjectTorrent(const GetObjectTorrentRequest
AWS_LOGSTREAM_ERROR("GetObjectTorrent", "Required field: Key, is not set");
return GetObjectTorrentOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetObjectTorrentOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetObjectTorrent, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?torrent");
- uri.SetQueryString(ss.str());
- return GetObjectTorrentOutcome(MakeRequestWithUnparsedResponse(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetObjectTorrentOutcomeCallable S3Client::GetObjectTorrentCallable(const GetObjectTorrentRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetObjectTorrentOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetObjectTorrent(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetObjectTorrentAsync(const GetObjectTorrentRequest& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetObjectTorrentAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetObjectTorrentAsyncHelper(const GetObjectTorrentRequest& request, const GetObjectTorrentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetObjectTorrent(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetObjectTorrentOutcome(MakeRequestWithUnparsedResponse(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
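GetObjectTorrent still goes through MakeRequestWithUnparsedResponse, i.e. the payload is returned as a raw stream rather than parsed XML. A hedged caller sketch, assuming the streaming result exposes GetBody() like other streaming S3 results; bucket, key and output file name are placeholders:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/GetObjectTorrentRequest.h>
#include <fstream>

void SaveTorrent(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::GetObjectTorrentRequest request;
    request.SetBucket("my-bucket");   // placeholder
    request.SetKey("my-object");      // placeholder
    auto outcome = s3.GetObjectTorrent(request);
    if (outcome.IsSuccess())
    {
        // The unparsed response body holds the .torrent bytes.
        std::ofstream out("my-object.torrent", std::ios::binary);
        out << outcome.GetResult().GetBody().rdbuf();
    }
}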
GetPublicAccessBlockOutcome S3Client::GetPublicAccessBlock(const GetPublicAccessBlockRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, GetPublicAccessBlock, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("GetPublicAccessBlock", "Required field: Bucket, is not set");
return GetPublicAccessBlockOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return GetPublicAccessBlockOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, GetPublicAccessBlock, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?publicAccessBlock");
- uri.SetQueryString(ss.str());
- return GetPublicAccessBlockOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-GetPublicAccessBlockOutcomeCallable S3Client::GetPublicAccessBlockCallable(const GetPublicAccessBlockRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< GetPublicAccessBlockOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->GetPublicAccessBlock(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::GetPublicAccessBlockAsync(const GetPublicAccessBlockRequest& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->GetPublicAccessBlockAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::GetPublicAccessBlockAsyncHelper(const GetPublicAccessBlockRequest& request, const GetPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, GetPublicAccessBlock(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return GetPublicAccessBlockOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
HeadBucketOutcome S3Client::HeadBucket(const HeadBucketRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, HeadBucket, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("HeadBucket", "Required field: Bucket, is not set");
return HeadBucketOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return HeadBucketOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- uri.SetPath(uri.GetPath() + ss.str());
- return HeadBucketOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_HEAD, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-HeadBucketOutcomeCallable S3Client::HeadBucketCallable(const HeadBucketRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< HeadBucketOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->HeadBucket(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::HeadBucketAsync(const HeadBucketRequest& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->HeadBucketAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::HeadBucketAsyncHelper(const HeadBucketRequest& request, const HeadBucketResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, HeadBucket(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, HeadBucket, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ return HeadBucketOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_HEAD));
}
HeadObjectOutcome S3Client::HeadObject(const HeadObjectRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, HeadObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("HeadObject", "Required field: Bucket, is not set");
@@ -2240,364 +1240,149 @@ HeadObjectOutcome S3Client::HeadObject(const HeadObjectRequest& request) const
AWS_LOGSTREAM_ERROR("HeadObject", "Required field: Key, is not set");
return HeadObjectOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return HeadObjectOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return HeadObjectOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_HEAD, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-HeadObjectOutcomeCallable S3Client::HeadObjectCallable(const HeadObjectRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< HeadObjectOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->HeadObject(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::HeadObjectAsync(const HeadObjectRequest& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->HeadObjectAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::HeadObjectAsyncHelper(const HeadObjectRequest& request, const HeadObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, HeadObject(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, HeadObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return HeadObjectOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_HEAD));
}
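For key-addressed operations such as HeadObject, the manual `"/" + key` path concatenation is replaced by AddPathSegments on the resolved endpoint. From the caller's side nothing changes; a small usage sketch (probing existence and size without fetching the body):

#include <aws/s3/S3Client.h>
#include <aws/s3/model/HeadObjectRequest.h>
#include <iostream>

bool ObjectExists(const Aws::S3::S3Client& s3, const Aws::String& bucket, const Aws::String& key)
{
    Aws::S3::Model::HeadObjectRequest request;
    request.SetBucket(bucket);
    request.SetKey(key);
    auto outcome = s3.HeadObject(request);
    if (outcome.IsSuccess())
    {
        // HEAD returns only metadata, e.g. the object size.
        std::cout << key << " size: " << outcome.GetResult().GetContentLength() << std::endl;
        return true;
    }
    return false;
}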
ListBucketAnalyticsConfigurationsOutcome S3Client::ListBucketAnalyticsConfigurations(const ListBucketAnalyticsConfigurationsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListBucketAnalyticsConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListBucketAnalyticsConfigurations", "Required field: Bucket, is not set");
return ListBucketAnalyticsConfigurationsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListBucketAnalyticsConfigurationsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListBucketAnalyticsConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?analytics");
- uri.SetQueryString(ss.str());
- return ListBucketAnalyticsConfigurationsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListBucketAnalyticsConfigurationsOutcomeCallable S3Client::ListBucketAnalyticsConfigurationsCallable(const ListBucketAnalyticsConfigurationsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListBucketAnalyticsConfigurationsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListBucketAnalyticsConfigurations(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListBucketAnalyticsConfigurationsAsync(const ListBucketAnalyticsConfigurationsRequest& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListBucketAnalyticsConfigurationsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListBucketAnalyticsConfigurationsAsyncHelper(const ListBucketAnalyticsConfigurationsRequest& request, const ListBucketAnalyticsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListBucketAnalyticsConfigurations(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListBucketAnalyticsConfigurationsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListBucketIntelligentTieringConfigurationsOutcome S3Client::ListBucketIntelligentTieringConfigurations(const ListBucketIntelligentTieringConfigurationsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListBucketIntelligentTieringConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListBucketIntelligentTieringConfigurations", "Required field: Bucket, is not set");
return ListBucketIntelligentTieringConfigurationsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListBucketIntelligentTieringConfigurationsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListBucketIntelligentTieringConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?intelligent-tiering");
- uri.SetQueryString(ss.str());
- return ListBucketIntelligentTieringConfigurationsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListBucketIntelligentTieringConfigurationsOutcomeCallable S3Client::ListBucketIntelligentTieringConfigurationsCallable(const ListBucketIntelligentTieringConfigurationsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListBucketIntelligentTieringConfigurationsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListBucketIntelligentTieringConfigurations(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListBucketIntelligentTieringConfigurationsAsync(const ListBucketIntelligentTieringConfigurationsRequest& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListBucketIntelligentTieringConfigurationsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListBucketIntelligentTieringConfigurationsAsyncHelper(const ListBucketIntelligentTieringConfigurationsRequest& request, const ListBucketIntelligentTieringConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListBucketIntelligentTieringConfigurations(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListBucketIntelligentTieringConfigurationsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListBucketInventoryConfigurationsOutcome S3Client::ListBucketInventoryConfigurations(const ListBucketInventoryConfigurationsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListBucketInventoryConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListBucketInventoryConfigurations", "Required field: Bucket, is not set");
return ListBucketInventoryConfigurationsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListBucketInventoryConfigurationsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListBucketInventoryConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?inventory");
- uri.SetQueryString(ss.str());
- return ListBucketInventoryConfigurationsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListBucketInventoryConfigurationsOutcomeCallable S3Client::ListBucketInventoryConfigurationsCallable(const ListBucketInventoryConfigurationsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListBucketInventoryConfigurationsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListBucketInventoryConfigurations(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListBucketInventoryConfigurationsAsync(const ListBucketInventoryConfigurationsRequest& request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListBucketInventoryConfigurationsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListBucketInventoryConfigurationsAsyncHelper(const ListBucketInventoryConfigurationsRequest& request, const ListBucketInventoryConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListBucketInventoryConfigurations(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListBucketInventoryConfigurationsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListBucketMetricsConfigurationsOutcome S3Client::ListBucketMetricsConfigurations(const ListBucketMetricsConfigurationsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListBucketMetricsConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListBucketMetricsConfigurations", "Required field: Bucket, is not set");
return ListBucketMetricsConfigurationsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListBucketMetricsConfigurationsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListBucketMetricsConfigurations, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?metrics");
- uri.SetQueryString(ss.str());
- return ListBucketMetricsConfigurationsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListBucketMetricsConfigurationsOutcomeCallable S3Client::ListBucketMetricsConfigurationsCallable(const ListBucketMetricsConfigurationsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListBucketMetricsConfigurationsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListBucketMetricsConfigurations(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListBucketMetricsConfigurationsAsync(const ListBucketMetricsConfigurationsRequest& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListBucketMetricsConfigurationsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListBucketMetricsConfigurationsAsyncHelper(const ListBucketMetricsConfigurationsRequest& request, const ListBucketMetricsConfigurationsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListBucketMetricsConfigurations(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListBucketMetricsConfigurationsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListBucketsOutcome S3Client::ListBuckets() const
{
- Aws::StringStream ss;
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString();
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListBucketsOutcome(computeEndpointOutcome.GetError());
- }
- ss << computeEndpointOutcome.GetResult().endpoint;
- return ListBucketsOutcome(MakeRequest(ss.str(), Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, "ListBuckets", computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListBucketsOutcomeCallable S3Client::ListBucketsCallable() const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListBucketsOutcome() > >(ALLOCATION_TAG, [this](){ return this->ListBuckets(); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListBucketsAsync(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, handler, context](){ this->ListBucketsAsyncHelper( handler, context ); } );
-}
-
-void S3Client::ListBucketsAsyncHelper(const ListBucketsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, ListBuckets(), context);
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListBuckets, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
+ const Aws::Vector<Aws::Endpoint::EndpointParameter> staticEndpointParameters;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(staticEndpointParameters);
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListBuckets, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ return ListBucketsOutcome(MakeRequest(endpointResolutionOutcome.GetResult(), "ListBuckets", Aws::Http::HttpMethod::HTTP_GET));
}
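Note that the generated Callable/Async/AsyncHelper bodies are removed from this translation unit; in the 1.11 SDK these variants appear to be generated as templates in the client header (via the SubmitCallable/SubmitAsync helpers), which is an assumption about the headers rather than something shown in this diff. Code that relied on the removed per-operation helpers can also be adapted at the call site; a caller-side stand-in mirroring the removed packaged-task wrapper:

#include <aws/s3/S3Client.h>
#include <future>

// Hypothetical stand-in, not an SDK API: wraps the synchronous call in std::async.
// The client must outlive the returned future, since it is captured by reference.
std::future<Aws::S3::Model::ListBucketsOutcome> ListBucketsCallableStandIn(const Aws::S3::S3Client& s3)
{
    return std::async(std::launch::async, [&s3]() { return s3.ListBuckets(); });
}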
ListMultipartUploadsOutcome S3Client::ListMultipartUploads(const ListMultipartUploadsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListMultipartUploads, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListMultipartUploads", "Required field: Bucket, is not set");
return ListMultipartUploadsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListMultipartUploadsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListMultipartUploads, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?uploads");
- uri.SetQueryString(ss.str());
- return ListMultipartUploadsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListMultipartUploadsOutcomeCallable S3Client::ListMultipartUploadsCallable(const ListMultipartUploadsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListMultipartUploadsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListMultipartUploads(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListMultipartUploadsAsync(const ListMultipartUploadsRequest& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListMultipartUploadsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListMultipartUploadsAsyncHelper(const ListMultipartUploadsRequest& request, const ListMultipartUploadsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListMultipartUploads(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListMultipartUploadsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListObjectVersionsOutcome S3Client::ListObjectVersions(const ListObjectVersionsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListObjectVersions, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListObjectVersions", "Required field: Bucket, is not set");
return ListObjectVersionsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListObjectVersionsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListObjectVersions, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?versions");
- uri.SetQueryString(ss.str());
- return ListObjectVersionsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListObjectVersionsOutcomeCallable S3Client::ListObjectVersionsCallable(const ListObjectVersionsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListObjectVersionsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListObjectVersions(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListObjectVersionsAsync(const ListObjectVersionsRequest& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListObjectVersionsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListObjectVersionsAsyncHelper(const ListObjectVersionsRequest& request, const ListObjectVersionsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListObjectVersions(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListObjectVersionsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListObjectsOutcome S3Client::ListObjects(const ListObjectsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListObjects, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListObjects", "Required field: Bucket, is not set");
return ListObjectsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListObjectsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- uri.SetPath(uri.GetPath() + ss.str());
- return ListObjectsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListObjectsOutcomeCallable S3Client::ListObjectsCallable(const ListObjectsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListObjectsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListObjects(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListObjectsAsync(const ListObjectsRequest& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListObjectsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListObjectsAsyncHelper(const ListObjectsRequest& request, const ListObjectsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListObjects(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListObjects, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ return ListObjectsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
ListObjectsV2Outcome S3Client::ListObjectsV2(const ListObjectsV2Request& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListObjectsV2, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListObjectsV2", "Required field: Bucket, is not set");
return ListObjectsV2Outcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListObjectsV2Outcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListObjectsV2, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?list-type=2");
- uri.SetQueryString(ss.str());
- return ListObjectsV2Outcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListObjectsV2OutcomeCallable S3Client::ListObjectsV2Callable(const ListObjectsV2Request& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListObjectsV2Outcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListObjectsV2(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListObjectsV2Async(const ListObjectsV2Request& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListObjectsV2AsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListObjectsV2AsyncHelper(const ListObjectsV2Request& request, const ListObjectsV2ResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListObjectsV2(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return ListObjectsV2Outcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
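In ListObjectsV2 the `?list-type=2` marker remains the only hand-built part of the URI; host and base path now come entirely from the endpoint provider. A usage sketch (bucket and prefix are placeholders, continuation-token paging omitted for brevity):

#include <aws/s3/S3Client.h>
#include <aws/s3/model/ListObjectsV2Request.h>
#include <iostream>

void PrintKeys(const Aws::S3::S3Client& s3)
{
    Aws::S3::Model::ListObjectsV2Request request;
    request.SetBucket("my-bucket");  // placeholder
    request.SetPrefix("logs/");      // placeholder
    auto outcome = s3.ListObjectsV2(request);
    if (outcome.IsSuccess())
    {
        for (const auto& object : outcome.GetResult().GetContents())
        {
            std::cout << object.GetKey() << std::endl;
        }
    }
}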
ListPartsOutcome S3Client::ListParts(const ListPartsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, ListParts, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("ListParts", "Required field: Bucket, is not set");
@@ -2613,113 +1398,47 @@ ListPartsOutcome S3Client::ListParts(const ListPartsRequest& request) const
AWS_LOGSTREAM_ERROR("ListParts", "Required field: UploadId, is not set");
return ListPartsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UploadId]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return ListPartsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return ListPartsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_GET, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-ListPartsOutcomeCallable S3Client::ListPartsCallable(const ListPartsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< ListPartsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->ListParts(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::ListPartsAsync(const ListPartsRequest& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->ListPartsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::ListPartsAsyncHelper(const ListPartsRequest& request, const ListPartsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, ListParts(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, ListParts, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return ListPartsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_GET));
}
PutBucketAccelerateConfigurationOutcome S3Client::PutBucketAccelerateConfiguration(const PutBucketAccelerateConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketAccelerateConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketAccelerateConfiguration", "Required field: Bucket, is not set");
return PutBucketAccelerateConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketAccelerateConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketAccelerateConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?accelerate");
- uri.SetQueryString(ss.str());
- return PutBucketAccelerateConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketAccelerateConfigurationOutcomeCallable S3Client::PutBucketAccelerateConfigurationCallable(const PutBucketAccelerateConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketAccelerateConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketAccelerateConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketAccelerateConfigurationAsync(const PutBucketAccelerateConfigurationRequest& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketAccelerateConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketAccelerateConfigurationAsyncHelper(const PutBucketAccelerateConfigurationRequest& request, const PutBucketAccelerateConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketAccelerateConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketAccelerateConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketAclOutcome S3Client::PutBucketAcl(const PutBucketAclRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketAcl", "Required field: Bucket, is not set");
return PutBucketAclOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketAclOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?acl");
- uri.SetQueryString(ss.str());
- return PutBucketAclOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketAclOutcomeCallable S3Client::PutBucketAclCallable(const PutBucketAclRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketAclOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketAcl(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketAclAsync(const PutBucketAclRequest& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketAclAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketAclAsyncHelper(const PutBucketAclRequest& request, const PutBucketAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketAcl(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketAclOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
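The PUT-style bucket operations follow the same shape, with the request payload coming from the model object. A hedged sketch using a canned ACL so no AccessControlPolicy payload has to be built; the enum spelling `private_` follows the SDK's convention for the reserved word "private" and is an assumption here:

#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketAclRequest.h>

bool MakeBucketPrivate(const Aws::S3::S3Client& s3, const Aws::String& bucket)
{
    Aws::S3::Model::PutBucketAclRequest request;
    request.SetBucket(bucket);
    // Canned ACL instead of an explicit grant list.
    request.SetACL(Aws::S3::Model::BucketCannedACL::private_);
    return s3.PutBucketAcl(request).IsSuccess();
}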
PutBucketAnalyticsConfigurationOutcome S3Client::PutBucketAnalyticsConfiguration(const PutBucketAnalyticsConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketAnalyticsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketAnalyticsConfiguration", "Required field: Bucket, is not set");
@@ -2730,112 +1449,49 @@ PutBucketAnalyticsConfigurationOutcome S3Client::PutBucketAnalyticsConfiguration
AWS_LOGSTREAM_ERROR("PutBucketAnalyticsConfiguration", "Required field: Id, is not set");
return PutBucketAnalyticsConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketAnalyticsConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketAnalyticsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?analytics");
- uri.SetQueryString(ss.str());
- return PutBucketAnalyticsConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketAnalyticsConfigurationOutcomeCallable S3Client::PutBucketAnalyticsConfigurationCallable(const PutBucketAnalyticsConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketAnalyticsConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketAnalyticsConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketAnalyticsConfigurationAsync(const PutBucketAnalyticsConfigurationRequest& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketAnalyticsConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketAnalyticsConfigurationAsyncHelper(const PutBucketAnalyticsConfigurationRequest& request, const PutBucketAnalyticsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketAnalyticsConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketAnalyticsConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketCorsOutcome S3Client::PutBucketCors(const PutBucketCorsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketCors, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketCors", "Required field: Bucket, is not set");
return PutBucketCorsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketCorsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketCors, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?cors");
- uri.SetQueryString(ss.str());
- return PutBucketCorsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketCorsOutcomeCallable S3Client::PutBucketCorsCallable(const PutBucketCorsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketCorsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketCors(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketCorsAsync(const PutBucketCorsRequest& request, const PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketCorsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketCorsAsyncHelper(const PutBucketCorsRequest& request, const PutBucketCorsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketCors(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketCorsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
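
The bucket-scoped PUT operations in the hunks above all reduce to the same caller-facing pattern: populate the request, call the synchronous method, inspect the outcome. A minimal sketch follows, illustrative only and not part of the diff; it assumes an already-constructed S3Client named client, a placeholder bucket name, and the SDK's generated model setters (SetBucket, SetCORSConfiguration, AddCORSRules, AddAllowedMethods, AddAllowedOrigins are assumptions drawn from the generated accessors, not from this change).

// Illustrative sketch only; not part of the diff.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutBucketCorsRequest.h>
#include <aws/s3/model/CORSConfiguration.h>
#include <aws/s3/model/CORSRule.h>
#include <iostream>

void PutBucketCorsExample(const Aws::S3::S3Client& client)
{
    // Placeholder CORS rule: allow GET from any origin.
    Aws::S3::Model::CORSRule rule;
    rule.AddAllowedMethods("GET");
    rule.AddAllowedOrigins("*");

    Aws::S3::Model::CORSConfiguration config;
    config.AddCORSRules(rule);

    Aws::S3::Model::PutBucketCorsRequest request;
    // If the bucket is left unset, the client returns the MISSING_PARAMETER
    // outcome shown in the diff instead of sending a request.
    request.SetBucket("my-bucket");
    request.SetCORSConfiguration(config);

    auto outcome = client.PutBucketCors(request);
    if (!outcome.IsSuccess())
    {
        std::cerr << outcome.GetError().GetMessage() << std::endl;
    }
}
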
PutBucketEncryptionOutcome S3Client::PutBucketEncryption(const PutBucketEncryptionRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketEncryption, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketEncryption", "Required field: Bucket, is not set");
return PutBucketEncryptionOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketEncryptionOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketEncryption, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?encryption");
- uri.SetQueryString(ss.str());
- return PutBucketEncryptionOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketEncryptionOutcomeCallable S3Client::PutBucketEncryptionCallable(const PutBucketEncryptionRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketEncryptionOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketEncryption(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketEncryptionAsync(const PutBucketEncryptionRequest& request, const PutBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketEncryptionAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketEncryptionAsyncHelper(const PutBucketEncryptionRequest& request, const PutBucketEncryptionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketEncryption(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketEncryptionOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketIntelligentTieringConfigurationOutcome S3Client::PutBucketIntelligentTieringConfiguration(const PutBucketIntelligentTieringConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketIntelligentTieringConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketIntelligentTieringConfiguration", "Required field: Bucket, is not set");
@@ -2846,38 +1502,17 @@ PutBucketIntelligentTieringConfigurationOutcome S3Client::PutBucketIntelligentTi
AWS_LOGSTREAM_ERROR("PutBucketIntelligentTieringConfiguration", "Required field: Id, is not set");
return PutBucketIntelligentTieringConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketIntelligentTieringConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketIntelligentTieringConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?intelligent-tiering");
- uri.SetQueryString(ss.str());
- return PutBucketIntelligentTieringConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketIntelligentTieringConfigurationOutcomeCallable S3Client::PutBucketIntelligentTieringConfigurationCallable(const PutBucketIntelligentTieringConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketIntelligentTieringConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketIntelligentTieringConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketIntelligentTieringConfigurationAsync(const PutBucketIntelligentTieringConfigurationRequest& request, const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketIntelligentTieringConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketIntelligentTieringConfigurationAsyncHelper(const PutBucketIntelligentTieringConfigurationRequest& request, const PutBucketIntelligentTieringConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketIntelligentTieringConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketIntelligentTieringConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketInventoryConfigurationOutcome S3Client::PutBucketInventoryConfiguration(const PutBucketInventoryConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketInventoryConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketInventoryConfiguration", "Required field: Bucket, is not set");
@@ -2888,112 +1523,49 @@ PutBucketInventoryConfigurationOutcome S3Client::PutBucketInventoryConfiguration
AWS_LOGSTREAM_ERROR("PutBucketInventoryConfiguration", "Required field: Id, is not set");
return PutBucketInventoryConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketInventoryConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketInventoryConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?inventory");
- uri.SetQueryString(ss.str());
- return PutBucketInventoryConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketInventoryConfigurationOutcomeCallable S3Client::PutBucketInventoryConfigurationCallable(const PutBucketInventoryConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketInventoryConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketInventoryConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketInventoryConfigurationAsync(const PutBucketInventoryConfigurationRequest& request, const PutBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketInventoryConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketInventoryConfigurationAsyncHelper(const PutBucketInventoryConfigurationRequest& request, const PutBucketInventoryConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketInventoryConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketInventoryConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketLifecycleConfigurationOutcome S3Client::PutBucketLifecycleConfiguration(const PutBucketLifecycleConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketLifecycleConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketLifecycleConfiguration", "Required field: Bucket, is not set");
return PutBucketLifecycleConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketLifecycleConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketLifecycleConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?lifecycle");
- uri.SetQueryString(ss.str());
- return PutBucketLifecycleConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketLifecycleConfigurationOutcomeCallable S3Client::PutBucketLifecycleConfigurationCallable(const PutBucketLifecycleConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketLifecycleConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketLifecycleConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketLifecycleConfigurationAsync(const PutBucketLifecycleConfigurationRequest& request, const PutBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketLifecycleConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketLifecycleConfigurationAsyncHelper(const PutBucketLifecycleConfigurationRequest& request, const PutBucketLifecycleConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketLifecycleConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketLifecycleConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketLoggingOutcome S3Client::PutBucketLogging(const PutBucketLoggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketLogging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketLogging", "Required field: Bucket, is not set");
return PutBucketLoggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketLoggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketLogging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?logging");
- uri.SetQueryString(ss.str());
- return PutBucketLoggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketLoggingOutcomeCallable S3Client::PutBucketLoggingCallable(const PutBucketLoggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketLoggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketLogging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketLoggingAsync(const PutBucketLoggingRequest& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketLoggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketLoggingAsyncHelper(const PutBucketLoggingRequest& request, const PutBucketLoggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketLogging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketLoggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketMetricsConfigurationOutcome S3Client::PutBucketMetricsConfiguration(const PutBucketMetricsConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketMetricsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketMetricsConfiguration", "Required field: Bucket, is not set");
@@ -3004,334 +1576,145 @@ PutBucketMetricsConfigurationOutcome S3Client::PutBucketMetricsConfiguration(con
AWS_LOGSTREAM_ERROR("PutBucketMetricsConfiguration", "Required field: Id, is not set");
return PutBucketMetricsConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Id]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketMetricsConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketMetricsConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?metrics");
- uri.SetQueryString(ss.str());
- return PutBucketMetricsConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketMetricsConfigurationOutcomeCallable S3Client::PutBucketMetricsConfigurationCallable(const PutBucketMetricsConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketMetricsConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketMetricsConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketMetricsConfigurationAsync(const PutBucketMetricsConfigurationRequest& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketMetricsConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketMetricsConfigurationAsyncHelper(const PutBucketMetricsConfigurationRequest& request, const PutBucketMetricsConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketMetricsConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketMetricsConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketNotificationConfigurationOutcome S3Client::PutBucketNotificationConfiguration(const PutBucketNotificationConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketNotificationConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketNotificationConfiguration", "Required field: Bucket, is not set");
return PutBucketNotificationConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketNotificationConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketNotificationConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?notification");
- uri.SetQueryString(ss.str());
- return PutBucketNotificationConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketNotificationConfigurationOutcomeCallable S3Client::PutBucketNotificationConfigurationCallable(const PutBucketNotificationConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketNotificationConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketNotificationConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketNotificationConfigurationAsync(const PutBucketNotificationConfigurationRequest& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketNotificationConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketNotificationConfigurationAsyncHelper(const PutBucketNotificationConfigurationRequest& request, const PutBucketNotificationConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketNotificationConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketNotificationConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketOwnershipControlsOutcome S3Client::PutBucketOwnershipControls(const PutBucketOwnershipControlsRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketOwnershipControls, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketOwnershipControls", "Required field: Bucket, is not set");
return PutBucketOwnershipControlsOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketOwnershipControlsOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketOwnershipControls, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?ownershipControls");
- uri.SetQueryString(ss.str());
- return PutBucketOwnershipControlsOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketOwnershipControlsOutcomeCallable S3Client::PutBucketOwnershipControlsCallable(const PutBucketOwnershipControlsRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketOwnershipControlsOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketOwnershipControls(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketOwnershipControlsAsync(const PutBucketOwnershipControlsRequest& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketOwnershipControlsAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketOwnershipControlsAsyncHelper(const PutBucketOwnershipControlsRequest& request, const PutBucketOwnershipControlsResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketOwnershipControls(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketOwnershipControlsOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketPolicyOutcome S3Client::PutBucketPolicy(const PutBucketPolicyRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketPolicy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketPolicy", "Required field: Bucket, is not set");
return PutBucketPolicyOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketPolicyOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketPolicy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?policy");
- uri.SetQueryString(ss.str());
- return PutBucketPolicyOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketPolicyOutcomeCallable S3Client::PutBucketPolicyCallable(const PutBucketPolicyRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketPolicyOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketPolicy(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketPolicyAsync(const PutBucketPolicyRequest& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketPolicyAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketPolicyAsyncHelper(const PutBucketPolicyRequest& request, const PutBucketPolicyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketPolicy(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketPolicyOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketReplicationOutcome S3Client::PutBucketReplication(const PutBucketReplicationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketReplication, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketReplication", "Required field: Bucket, is not set");
return PutBucketReplicationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketReplicationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketReplication, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?replication");
- uri.SetQueryString(ss.str());
- return PutBucketReplicationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketReplicationOutcomeCallable S3Client::PutBucketReplicationCallable(const PutBucketReplicationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketReplicationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketReplication(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketReplicationAsync(const PutBucketReplicationRequest& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketReplicationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketReplicationAsyncHelper(const PutBucketReplicationRequest& request, const PutBucketReplicationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketReplication(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketReplicationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketRequestPaymentOutcome S3Client::PutBucketRequestPayment(const PutBucketRequestPaymentRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketRequestPayment, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketRequestPayment", "Required field: Bucket, is not set");
return PutBucketRequestPaymentOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketRequestPaymentOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketRequestPayment, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?requestPayment");
- uri.SetQueryString(ss.str());
- return PutBucketRequestPaymentOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketRequestPaymentOutcomeCallable S3Client::PutBucketRequestPaymentCallable(const PutBucketRequestPaymentRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketRequestPaymentOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketRequestPayment(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketRequestPaymentAsync(const PutBucketRequestPaymentRequest& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketRequestPaymentAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketRequestPaymentAsyncHelper(const PutBucketRequestPaymentRequest& request, const PutBucketRequestPaymentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketRequestPayment(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketRequestPaymentOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketTaggingOutcome S3Client::PutBucketTagging(const PutBucketTaggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketTagging", "Required field: Bucket, is not set");
return PutBucketTaggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketTaggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?tagging");
- uri.SetQueryString(ss.str());
- return PutBucketTaggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketTaggingOutcomeCallable S3Client::PutBucketTaggingCallable(const PutBucketTaggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketTaggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketTagging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketTaggingAsync(const PutBucketTaggingRequest& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketTaggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketTaggingAsyncHelper(const PutBucketTaggingRequest& request, const PutBucketTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketTagging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketTaggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketVersioningOutcome S3Client::PutBucketVersioning(const PutBucketVersioningRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketVersioning, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketVersioning", "Required field: Bucket, is not set");
return PutBucketVersioningOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketVersioningOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketVersioning, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?versioning");
- uri.SetQueryString(ss.str());
- return PutBucketVersioningOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketVersioningOutcomeCallable S3Client::PutBucketVersioningCallable(const PutBucketVersioningRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketVersioningOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketVersioning(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketVersioningAsync(const PutBucketVersioningRequest& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketVersioningAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketVersioningAsyncHelper(const PutBucketVersioningRequest& request, const PutBucketVersioningResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketVersioning(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketVersioningOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutBucketWebsiteOutcome S3Client::PutBucketWebsite(const PutBucketWebsiteRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutBucketWebsite, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutBucketWebsite", "Required field: Bucket, is not set");
return PutBucketWebsiteOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutBucketWebsiteOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutBucketWebsite, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?website");
- uri.SetQueryString(ss.str());
- return PutBucketWebsiteOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutBucketWebsiteOutcomeCallable S3Client::PutBucketWebsiteCallable(const PutBucketWebsiteRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutBucketWebsiteOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutBucketWebsite(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutBucketWebsiteAsync(const PutBucketWebsiteRequest& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutBucketWebsiteAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutBucketWebsiteAsyncHelper(const PutBucketWebsiteRequest& request, const PutBucketWebsiteResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutBucketWebsite(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutBucketWebsiteOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutObjectOutcome S3Client::PutObject(const PutObjectRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutObject", "Required field: Bucket, is not set");
@@ -3342,17 +1725,10 @@ PutObjectOutcome S3Client::PutObject(const PutObjectRequest& request) const
AWS_LOGSTREAM_ERROR("PutObject", "Required field: Key, is not set");
return PutObjectOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutObjectOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return PutObjectOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return PutObjectOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutObjectOutcomeCallable S3Client::PutObjectCallable(const PutObjectRequest& request) const
@@ -3365,16 +1741,15 @@ PutObjectOutcomeCallable S3Client::PutObjectCallable(const PutObjectRequest& req
void S3Client::PutObjectAsync(const PutObjectRequest& request, const PutObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
{
- m_executor->Submit( [this, request, handler, context](){ this->PutObjectAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutObjectAsyncHelper(const PutObjectRequest& request, const PutObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutObject(request), context);
+ m_executor->Submit( [this, request, handler, context]()
+ {
+ handler(this, request, PutObject(request), context);
+ } );
}
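
The rewritten PutObjectAsync keeps the caller-facing signature (request, handler, context) and simply inlines the old PutObjectAsyncHelper body into the submitted lambda, so callers are unaffected. A minimal caller sketch follows, illustrative only and not part of the diff; it assumes the SDK was initialized elsewhere with Aws::InitAPI, a configured S3Client named client, and placeholder bucket, key, and body values.

// Illustrative caller sketch only; not part of the diff.
#include <aws/s3/S3Client.h>
#include <aws/s3/model/PutObjectRequest.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <iostream>
#include <memory>

void PutObjectAsyncExample(const Aws::S3::S3Client& client)
{
    Aws::S3::Model::PutObjectRequest request;
    request.SetBucket("my-bucket");   // placeholder bucket
    request.SetKey("hello.txt");      // placeholder key

    auto body = Aws::MakeShared<Aws::StringStream>("PutObjectAsyncExample");
    *body << "hello";                 // placeholder object body
    request.SetBody(body);

    // The handler parameters mirror how the diff invokes it:
    // handler(this, request, PutObject(request), context).
    client.PutObjectAsync(
        request,
        [](const Aws::S3::S3Client*,
           const Aws::S3::Model::PutObjectRequest&,
           const Aws::S3::Model::PutObjectOutcome& outcome,
           const std::shared_ptr<const Aws::Client::AsyncCallerContext>&)
        {
            if (!outcome.IsSuccess())
            {
                std::cerr << outcome.GetError().GetMessage() << std::endl;
            }
        },
        nullptr /* caller context */);
}
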
PutObjectAclOutcome S3Client::PutObjectAcl(const PutObjectAclRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutObjectAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutObjectAcl", "Required field: Bucket, is not set");
@@ -3385,41 +1760,18 @@ PutObjectAclOutcome S3Client::PutObjectAcl(const PutObjectAclRequest& request) c
AWS_LOGSTREAM_ERROR("PutObjectAcl", "Required field: Key, is not set");
return PutObjectAclOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutObjectAclOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutObjectAcl, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?acl");
- uri.SetQueryString(ss.str());
- return PutObjectAclOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutObjectAclOutcomeCallable S3Client::PutObjectAclCallable(const PutObjectAclRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutObjectAclOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutObjectAcl(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutObjectAclAsync(const PutObjectAclRequest& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutObjectAclAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutObjectAclAsyncHelper(const PutObjectAclRequest& request, const PutObjectAclResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutObjectAcl(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutObjectAclOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutObjectLegalHoldOutcome S3Client::PutObjectLegalHold(const PutObjectLegalHoldRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutObjectLegalHold, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutObjectLegalHold", "Required field: Bucket, is not set");
@@ -3430,78 +1782,34 @@ PutObjectLegalHoldOutcome S3Client::PutObjectLegalHold(const PutObjectLegalHoldR
AWS_LOGSTREAM_ERROR("PutObjectLegalHold", "Required field: Key, is not set");
return PutObjectLegalHoldOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutObjectLegalHoldOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutObjectLegalHold, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?legal-hold");
- uri.SetQueryString(ss.str());
- return PutObjectLegalHoldOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutObjectLegalHoldOutcomeCallable S3Client::PutObjectLegalHoldCallable(const PutObjectLegalHoldRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutObjectLegalHoldOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutObjectLegalHold(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutObjectLegalHoldAsync(const PutObjectLegalHoldRequest& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutObjectLegalHoldAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutObjectLegalHoldAsyncHelper(const PutObjectLegalHoldRequest& request, const PutObjectLegalHoldResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutObjectLegalHold(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutObjectLegalHoldOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutObjectLockConfigurationOutcome S3Client::PutObjectLockConfiguration(const PutObjectLockConfigurationRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutObjectLockConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutObjectLockConfiguration", "Required field: Bucket, is not set");
return PutObjectLockConfigurationOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutObjectLockConfigurationOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutObjectLockConfiguration, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?object-lock");
- uri.SetQueryString(ss.str());
- return PutObjectLockConfigurationOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutObjectLockConfigurationOutcomeCallable S3Client::PutObjectLockConfigurationCallable(const PutObjectLockConfigurationRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutObjectLockConfigurationOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutObjectLockConfiguration(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutObjectLockConfigurationAsync(const PutObjectLockConfigurationRequest& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutObjectLockConfigurationAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutObjectLockConfigurationAsyncHelper(const PutObjectLockConfigurationRequest& request, const PutObjectLockConfigurationResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutObjectLockConfiguration(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutObjectLockConfigurationOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutObjectRetentionOutcome S3Client::PutObjectRetention(const PutObjectRetentionRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutObjectRetention, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutObjectRetention", "Required field: Bucket, is not set");
@@ -3512,41 +1820,18 @@ PutObjectRetentionOutcome S3Client::PutObjectRetention(const PutObjectRetentionR
AWS_LOGSTREAM_ERROR("PutObjectRetention", "Required field: Key, is not set");
return PutObjectRetentionOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutObjectRetentionOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutObjectRetention, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?retention");
- uri.SetQueryString(ss.str());
- return PutObjectRetentionOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutObjectRetentionOutcomeCallable S3Client::PutObjectRetentionCallable(const PutObjectRetentionRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutObjectRetentionOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutObjectRetention(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutObjectRetentionAsync(const PutObjectRetentionRequest& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutObjectRetentionAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutObjectRetentionAsyncHelper(const PutObjectRetentionRequest& request, const PutObjectRetentionResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutObjectRetention(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutObjectRetentionOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutObjectTaggingOutcome S3Client::PutObjectTagging(const PutObjectTaggingRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutObjectTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutObjectTagging", "Required field: Bucket, is not set");
@@ -3557,78 +1842,34 @@ PutObjectTaggingOutcome S3Client::PutObjectTagging(const PutObjectTaggingRequest
AWS_LOGSTREAM_ERROR("PutObjectTagging", "Required field: Key, is not set");
return PutObjectTaggingOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutObjectTaggingOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutObjectTagging, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?tagging");
- uri.SetQueryString(ss.str());
- return PutObjectTaggingOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutObjectTaggingOutcomeCallable S3Client::PutObjectTaggingCallable(const PutObjectTaggingRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutObjectTaggingOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutObjectTagging(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutObjectTaggingAsync(const PutObjectTaggingRequest& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutObjectTaggingAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutObjectTaggingAsyncHelper(const PutObjectTaggingRequest& request, const PutObjectTaggingResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutObjectTagging(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutObjectTaggingOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
PutPublicAccessBlockOutcome S3Client::PutPublicAccessBlock(const PutPublicAccessBlockRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, PutPublicAccessBlock, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("PutPublicAccessBlock", "Required field: Bucket, is not set");
return PutPublicAccessBlockOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Bucket]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return PutPublicAccessBlockOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, PutPublicAccessBlock, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
ss.str("?publicAccessBlock");
- uri.SetQueryString(ss.str());
- return PutPublicAccessBlockOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-PutPublicAccessBlockOutcomeCallable S3Client::PutPublicAccessBlockCallable(const PutPublicAccessBlockRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< PutPublicAccessBlockOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->PutPublicAccessBlock(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::PutPublicAccessBlockAsync(const PutPublicAccessBlockRequest& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->PutPublicAccessBlockAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::PutPublicAccessBlockAsyncHelper(const PutPublicAccessBlockRequest& request, const PutPublicAccessBlockResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, PutPublicAccessBlock(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return PutPublicAccessBlockOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
RestoreObjectOutcome S3Client::RestoreObject(const RestoreObjectRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, RestoreObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("RestoreObject", "Required field: Bucket, is not set");
@@ -3639,41 +1880,18 @@ RestoreObjectOutcome S3Client::RestoreObject(const RestoreObjectRequest& request
AWS_LOGSTREAM_ERROR("RestoreObject", "Required field: Key, is not set");
return RestoreObjectOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return RestoreObjectOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, RestoreObject, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?restore");
- uri.SetQueryString(ss.str());
- return RestoreObjectOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-RestoreObjectOutcomeCallable S3Client::RestoreObjectCallable(const RestoreObjectRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< RestoreObjectOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->RestoreObject(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::RestoreObjectAsync(const RestoreObjectRequest& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->RestoreObjectAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::RestoreObjectAsyncHelper(const RestoreObjectRequest& request, const RestoreObjectResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, RestoreObject(request), context);
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
+ return RestoreObjectOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST));
}
SelectObjectContentOutcome S3Client::SelectObjectContent(SelectObjectContentRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, SelectObjectContent, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("SelectObjectContent", "Required field: Bucket, is not set");
@@ -3684,44 +1902,21 @@ SelectObjectContentOutcome S3Client::SelectObjectContent(SelectObjectContentRequ
AWS_LOGSTREAM_ERROR("SelectObjectContent", "Required field: Key, is not set");
return SelectObjectContentOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [Key]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return SelectObjectContentOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, SelectObjectContent, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
ss.str("?select&select-type=2");
- uri.SetQueryString(ss.str());
+ endpointResolutionOutcome.GetResult().SetQueryString(ss.str());
request.SetResponseStreamFactory(
[&] { request.GetEventStreamDecoder().Reset(); return Aws::New<Aws::Utils::Event::EventDecoderStream>(ALLOCATION_TAG, request.GetEventStreamDecoder()); }
);
- return SelectObjectContentOutcome(MakeRequestWithEventStream(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-SelectObjectContentOutcomeCallable S3Client::SelectObjectContentCallable(SelectObjectContentRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< SelectObjectContentOutcome() > >(ALLOCATION_TAG, [this, &request](){ return this->SelectObjectContent(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::SelectObjectContentAsync(SelectObjectContentRequest& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, &request, handler, context](){ this->SelectObjectContentAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::SelectObjectContentAsyncHelper(SelectObjectContentRequest& request, const SelectObjectContentResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, SelectObjectContent(request), context);
+ return SelectObjectContentOutcome(MakeRequestWithEventStream(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST));
}
UploadPartOutcome S3Client::UploadPart(const UploadPartRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, UploadPart, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UploadPart", "Required field: Bucket, is not set");
@@ -3742,39 +1937,15 @@ UploadPartOutcome S3Client::UploadPart(const UploadPartRequest& request) const
AWS_LOGSTREAM_ERROR("UploadPart", "Required field: UploadId, is not set");
return UploadPartOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UploadId]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return UploadPartOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return UploadPartOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-UploadPartOutcomeCallable S3Client::UploadPartCallable(const UploadPartRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< UploadPartOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UploadPart(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::UploadPartAsync(const UploadPartRequest& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->UploadPartAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::UploadPartAsyncHelper(const UploadPartRequest& request, const UploadPartResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, UploadPart(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, UploadPart, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return UploadPartOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
UploadPartCopyOutcome S3Client::UploadPartCopy(const UploadPartCopyRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, UploadPartCopy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.BucketHasBeenSet())
{
AWS_LOGSTREAM_ERROR("UploadPartCopy", "Required field: Bucket, is not set");
@@ -3800,39 +1971,15 @@ UploadPartCopyOutcome S3Client::UploadPartCopy(const UploadPartCopyRequest& requ
AWS_LOGSTREAM_ERROR("UploadPartCopy", "Required field: UploadId, is not set");
return UploadPartCopyOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [UploadId]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(request.GetBucket());
- if (!computeEndpointOutcome.IsSuccess())
- {
- return UploadPartCopyOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- Aws::StringStream ss;
- ss << "/";
- ss << request.GetKey();
- uri.SetPath(uri.GetPath() + ss.str());
- return UploadPartCopyOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_PUT, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
-}
-
-UploadPartCopyOutcomeCallable S3Client::UploadPartCopyCallable(const UploadPartCopyRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< UploadPartCopyOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->UploadPartCopy(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-
-void S3Client::UploadPartCopyAsync(const UploadPartCopyRequest& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->UploadPartCopyAsyncHelper( request, handler, context ); } );
-}
-
-void S3Client::UploadPartCopyAsyncHelper(const UploadPartCopyRequest& request, const UploadPartCopyResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, UploadPartCopy(request), context);
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, UploadPartCopy, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ endpointResolutionOutcome.GetResult().AddPathSegments(request.GetKey());
+ return UploadPartCopyOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_PUT));
}
WriteGetObjectResponseOutcome S3Client::WriteGetObjectResponse(const WriteGetObjectResponseRequest& request) const
{
+ AWS_OPERATION_CHECK_PTR(m_endpointProvider, WriteGetObjectResponse, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE);
if (!request.RequestRouteHasBeenSet())
{
AWS_LOGSTREAM_ERROR("WriteGetObjectResponse", "Required field: RequestRoute, is not set");
@@ -3843,349 +1990,123 @@ WriteGetObjectResponseOutcome S3Client::WriteGetObjectResponse(const WriteGetObj
AWS_LOGSTREAM_ERROR("WriteGetObjectResponse", "Required field: RequestToken, is not set");
return WriteGetObjectResponseOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::MISSING_PARAMETER, "MISSING_PARAMETER", "Missing required field [RequestToken]", false));
}
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointStringWithServiceName("s3-object-lambda");
- if (!computeEndpointOutcome.IsSuccess())
- {
- return WriteGetObjectResponseOutcome(computeEndpointOutcome.GetError());
- }
- Aws::Http::URI uri = computeEndpointOutcome.GetResult().endpoint;
- if (m_enableHostPrefixInjection)
- {
- if (request.GetRequestRoute().empty())
- {
- AWS_LOGSTREAM_ERROR("WriteGetObjectResponse", "HostPrefix required field: RequestRoute, is empty");
- return WriteGetObjectResponseOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::INVALID_PARAMETER_VALUE, "INVALID_PARAMETER", "Host prefix field is empty", false));
- }
- uri.SetAuthority("" + request.GetRequestRoute() + "." + uri.GetAuthority());
- if (!Aws::Utils::IsValidHost(uri.GetAuthority()))
- {
- AWS_LOGSTREAM_ERROR("WriteGetObjectResponse", "Invalid DNS host: " << uri.GetAuthority());
- return WriteGetObjectResponseOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::INVALID_PARAMETER_VALUE, "INVALID_PARAMETER", "Host is invalid", false));
- }
- }
- Aws::StringStream ss;
- ss << "/WriteGetObjectResponse";
- uri.SetPath(uri.GetPath() + ss.str());
- return WriteGetObjectResponseOutcome(MakeRequest(uri, request, Aws::Http::HttpMethod::HTTP_POST, Aws::Auth::SIGV4_SIGNER, computeEndpointOutcome.GetResult().signerRegion.c_str() /*signerRegionOverride*/, computeEndpointOutcome.GetResult().signerServiceName.c_str() /*signerServiceNameOverride*/));
+ ResolveEndpointOutcome endpointResolutionOutcome = m_endpointProvider->ResolveEndpoint(request.GetEndpointContextParams());
+ AWS_OPERATION_CHECK_SUCCESS(endpointResolutionOutcome, WriteGetObjectResponse, CoreErrors, CoreErrors::ENDPOINT_RESOLUTION_FAILURE, endpointResolutionOutcome.GetError().GetMessage());
+ auto addPrefixErr = endpointResolutionOutcome.GetResult().AddPrefixIfMissing("" + request.GetRequestRoute() + ".");
+ AWS_CHECK(SERVICE_NAME, !addPrefixErr, addPrefixErr->GetMessage(), WriteGetObjectResponseOutcome(addPrefixErr.value()));
+ endpointResolutionOutcome.GetResult().AddPathSegments("/WriteGetObjectResponse");
+ return WriteGetObjectResponseOutcome(MakeRequest(request, endpointResolutionOutcome.GetResult(), Aws::Http::HttpMethod::HTTP_POST));
}
-WriteGetObjectResponseOutcomeCallable S3Client::WriteGetObjectResponseCallable(const WriteGetObjectResponseRequest& request) const
-{
- auto task = Aws::MakeShared< std::packaged_task< WriteGetObjectResponseOutcome() > >(ALLOCATION_TAG, [this, request](){ return this->WriteGetObjectResponse(request); } );
- auto packagedFunction = [task]() { (*task)(); };
- m_executor->Submit(packagedFunction);
- return task->get_future();
-}
-void S3Client::WriteGetObjectResponseAsync(const WriteGetObjectResponseRequest& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- m_executor->Submit( [this, request, handler, context](){ this->WriteGetObjectResponseAsyncHelper( request, handler, context ); } );
-}
-void S3Client::WriteGetObjectResponseAsyncHelper(const WriteGetObjectResponseRequest& request, const WriteGetObjectResponseResponseReceivedHandler& handler, const std::shared_ptr<const Aws::Client::AsyncCallerContext>& context) const
-{
- handler(this, request, WriteGetObjectResponse(request), context);
-}
-
-
-
-#include<aws/core/platform/Environment.h>
-
-static const char US_EAST_1_REGIONAL_ENDPOINT_ENV_VAR[] = "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT";
-static const char US_EAST_1_REGIONAL_ENDPOINT_CONFIG_VAR[] = "s3_us_east_1_regional_endpoint";
-static const char S3_USE_ARN_REGION_ENVIRONMENT_VARIABLE[] = "AWS_S3_USE_ARN_REGION";
-static const char S3_USE_ARN_REGION_CONFIG_FILE_OPTION[] = "s3_use_arn_region";
-
-void S3Client::LoadS3SpecificConfig(const Aws::String& profile)
+#include<aws/core/utils/HashingUtils.h>
+Aws::String S3Client::GeneratePresignedUrl(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ uint64_t expirationInSeconds)
{
- if (m_USEast1RegionalEndpointOption == Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET)
- {
- Aws::String option = Aws::Environment::GetEnv(US_EAST_1_REGIONAL_ENDPOINT_ENV_VAR);
- if (option.empty())
- {
- option = Aws::Config::GetCachedConfigValue(profile, US_EAST_1_REGIONAL_ENDPOINT_CONFIG_VAR);
- }
-
- if (Aws::Utils::StringUtils::ToLower(option.c_str()) == "legacy")
- {
- m_USEast1RegionalEndpointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::LEGACY;
- }
- else // default is regional
- {
- m_USEast1RegionalEndpointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::REGIONAL;
- }
- }
-
- Aws::String s3UseArnRegion = Aws::Environment::GetEnv(S3_USE_ARN_REGION_ENVIRONMENT_VARIABLE);
- if (s3UseArnRegion.empty())
- {
- s3UseArnRegion = Aws::Config::GetCachedConfigValue(profile, S3_USE_ARN_REGION_CONFIG_FILE_OPTION);
- }
-
- if (s3UseArnRegion == "true")
- {
- m_useArnRegion = true;
- }
- else
- {
- if (!s3UseArnRegion.empty() && s3UseArnRegion != "false")
- {
- AWS_LOGSTREAM_WARN("S3Client", "AWS_S3_USE_ARN_REGION in environment variables or s3_use_arn_region in config file "
- << "should either be true or false if specified; otherwise the flag is turned off by default.");
- }
- m_useArnRegion = false;
- }
+ return GeneratePresignedUrl(bucket, key, method, {}, expirationInSeconds);
}
-#include<aws/core/utils/HashingUtils.h>
-Aws::String S3Client::GeneratePresignedUrl(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrl(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ const Http::HeaderValueCollection& customizedHeaders,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
+ if (!m_endpointProvider)
{
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
+ AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Endpoint provider is not initialized.");
return {};
}
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), expirationInSeconds);
-}
-
-Aws::String S3Client::GeneratePresignedUrl(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Http::HeaderValueCollection& customizedHeaders, long long expirationInSeconds)
-{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
+ ResolveEndpointOutcome computeEndpointOutcome = m_endpointProvider->ResolveEndpoint({{Aws::String("Bucket"), bucket}});
if (!computeEndpointOutcome.IsSuccess())
{
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
+ AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError().GetMessage());
return {};
}
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), customizedHeaders, expirationInSeconds);
+ Aws::Endpoint::AWSEndpoint& endpoint = computeEndpointOutcome.GetResult();
+ URI uri(endpoint.GetURL());
+ uri.SetPath(uri.GetPath() + "/" + key);
+ endpoint.SetURL(uri.GetURIString());
+ return AWSClient::GeneratePresignedUrl(endpoint, method, customizedHeaders, expirationInSeconds);
}
-Aws::String S3Client::GeneratePresignedUrlWithSSES3(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrlWithSSES3(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
- return {};
- }
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
Aws::Http::HeaderValueCollection headers;
headers.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION, Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::AES256));
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), headers, expirationInSeconds);
+ return GeneratePresignedUrl(bucket, key, method, headers, expirationInSeconds);
}
-Aws::String S3Client::GeneratePresignedUrlWithSSES3(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrlWithSSES3(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ Http::HeaderValueCollection customizedHeaders,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
- return {};
- }
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
customizedHeaders.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION, Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::AES256));
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), customizedHeaders, expirationInSeconds);
+ return GeneratePresignedUrl(bucket, key, method, customizedHeaders, expirationInSeconds);
}
-Aws::String S3Client::GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Aws::String& kmsMasterKeyId, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ const Aws::String& kmsMasterKeyId,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
- return {};
- }
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
Aws::Http::HeaderValueCollection headers;
headers.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION, Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms));
headers.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID, kmsMasterKeyId);
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), headers, expirationInSeconds);
+ return GeneratePresignedUrl(bucket, key, method, headers, expirationInSeconds);
}
-Aws::String S3Client::GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, const Aws::String& kmsMasterKeyId, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrlWithSSEKMS(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ Http::HeaderValueCollection customizedHeaders,
+ const Aws::String& kmsMasterKeyId,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
- return {};
- }
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
customizedHeaders.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION, Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::aws_kms));
customizedHeaders.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_AWS_KMS_KEY_ID, kmsMasterKeyId);
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), customizedHeaders, expirationInSeconds);
+ return GeneratePresignedUrl(bucket, key, method, customizedHeaders, expirationInSeconds);
}
-Aws::String S3Client::GeneratePresignedUrlWithSSEC(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, const Aws::String& base64EncodedAES256Key, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrlWithSSEC(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ const Aws::String& base64EncodedAES256Key,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
- return {};
- }
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
Aws::Http::HeaderValueCollection headers;
headers.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::AES256));
headers.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, base64EncodedAES256Key);
Aws::Utils::ByteBuffer buffer = Aws::Utils::HashingUtils::Base64Decode(base64EncodedAES256Key);
Aws::String strBuffer(reinterpret_cast<char*>(buffer.GetUnderlyingData()), buffer.GetLength());
headers.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, Aws::Utils::HashingUtils::Base64Encode(Aws::Utils::HashingUtils::CalculateMD5(strBuffer)));
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), headers, expirationInSeconds);
+ return GeneratePresignedUrl(bucket, key, method, headers, expirationInSeconds);
}
-Aws::String S3Client::GeneratePresignedUrlWithSSEC(const Aws::String& bucket, const Aws::String& key, Aws::Http::HttpMethod method, Http::HeaderValueCollection customizedHeaders, const Aws::String& base64EncodedAES256Key, long long expirationInSeconds)
+Aws::String S3Client::GeneratePresignedUrlWithSSEC(const Aws::String& bucket,
+ const Aws::String& key,
+ Aws::Http::HttpMethod method,
+ Http::HeaderValueCollection customizedHeaders,
+ const Aws::String& base64EncodedAES256Key,
+ uint64_t expirationInSeconds)
{
- ComputeEndpointOutcome computeEndpointOutcome = ComputeEndpointString(bucket);
- if (!computeEndpointOutcome.IsSuccess())
- {
- AWS_LOGSTREAM_ERROR(ALLOCATION_TAG, "Presigned URL generating failed. Encountered error: " << computeEndpointOutcome.GetError());
- return {};
- }
- Aws::StringStream ss;
- ss << computeEndpointOutcome.GetResult().endpoint << "/" << key;
- URI uri(ss.str());
customizedHeaders.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, Aws::S3::Model::ServerSideEncryptionMapper::GetNameForServerSideEncryption(Aws::S3::Model::ServerSideEncryption::AES256));
customizedHeaders.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY, base64EncodedAES256Key);
Aws::Utils::ByteBuffer buffer = Aws::Utils::HashingUtils::Base64Decode(base64EncodedAES256Key);
Aws::String strBuffer(reinterpret_cast<char*>(buffer.GetUnderlyingData()), buffer.GetLength());
customizedHeaders.emplace(Aws::S3::SSEHeaders::SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, Aws::Utils::HashingUtils::Base64Encode(Aws::Utils::HashingUtils::CalculateMD5(strBuffer)));
- return AWSClient::GeneratePresignedUrl(uri, method, computeEndpointOutcome.GetResult().signerRegion.c_str(), computeEndpointOutcome.GetResult().signerServiceName.c_str(), customizedHeaders, expirationInSeconds);
+ return GeneratePresignedUrl(bucket, key, method, customizedHeaders, expirationInSeconds);
}
-ComputeEndpointOutcome S3Client::ComputeEndpointString(const Aws::String& bucketOrArn) const
-{
- if (m_useDualStack && m_useCustomEndpoint)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "Dual-stack endpoint is incompatible with a custom endpoint override.", false));
- }
-
- Aws::StringStream ss;
- ss << m_scheme << "://";
- Aws::String bucket = bucketOrArn;
- Aws::String signerRegion = Aws::Region::ComputeSignerRegion(m_region);
- S3ARN arn(bucketOrArn);
-
- if (arn)
- {
- if (!m_useVirtualAddressing)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "Path style addressing is not compatible with Access Point ARN or Outposts ARN in Bucket field, please consider using virtual addressing for this client instead.", false));
- }
-
- S3ARNOutcome s3ArnOutcome = m_useArnRegion ? arn.Validate() : arn.Validate(m_region.c_str());
- if (!s3ArnOutcome.IsSuccess())
- {
- return ComputeEndpointOutcome(s3ArnOutcome.GetError());
- }
- signerRegion = m_useArnRegion ? arn.GetRegion() : signerRegion;
- if (arn.GetService() == ARNService::S3_OBJECT_LAMBDA)
- {
- if (m_useDualStack)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "S3 Object Lambda Access Point ARNs do not support dualstack right now.", false));
- }
- ss << S3Endpoint::ForObjectLambdaAccessPointArn(arn, m_useArnRegion ? "" : m_region, m_useDualStack, m_useCustomEndpoint ? m_baseUri : "");
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), signerRegion, ARNService::S3_OBJECT_LAMBDA));
- }
- else if (arn.GetResourceType() == ARNResourceType::ACCESSPOINT)
- {
- ss << S3Endpoint::ForAccessPointArn(arn, m_useArnRegion ? "" : m_region, m_useDualStack, m_useCustomEndpoint ? m_baseUri : "");
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), signerRegion, SERVICE_NAME));
- }
- else if (arn.GetResourceType() == ARNResourceType::OUTPOST)
- {
- if (m_useDualStack)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "Outposts Access Points do not support dualstack right now.", false));
- }
- ss << S3Endpoint::ForOutpostsArn(arn, m_useArnRegion ? "" : m_region, m_useDualStack, m_useCustomEndpoint ? m_baseUri : "");
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), signerRegion, "s3-outposts"));
- }
- }
-
- // when using virtual hosting of buckets, the bucket name has to follow some rules.
- // Mainly, it has to be a valid DNS label, and it must be lowercase.
- // For more information see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html#VirtualHostingSpecifyBucket
- if(m_useVirtualAddressing && Aws::Utils::IsValidDnsLabel(bucket) &&
- bucket == Aws::Utils::StringUtils::ToLower(bucket.c_str()))
- {
- ss << bucket << "." << m_baseUri;
- }
- else
- {
- ss << m_baseUri << "/" << bucket;
- }
-
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), signerRegion, SERVICE_NAME));
-}
-
-ComputeEndpointOutcome S3Client::ComputeEndpointString() const
-{
- if (m_useDualStack && m_useCustomEndpoint)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "Dual-stack endpoint is incompatible with a custom endpoint override.", false));
- }
- Aws::StringStream ss;
- ss << m_scheme << "://" << m_baseUri;
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), Aws::Region::ComputeSignerRegion(m_region), SERVICE_NAME));
-}
-
-ComputeEndpointOutcome S3Client::ComputeEndpointStringWithServiceName(const Aws::String& serviceNameOverride) const
-{
- if (serviceNameOverride.empty())
- {
- return ComputeEndpointString();
- }
-
- if (m_useDualStack && m_useCustomEndpoint)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "Dual-stack endpoint is incompatible with a custom endpoint override.", false));
- }
-
- Aws::StringStream ss;
- ss << m_scheme << "://";
- if (m_useCustomEndpoint)
- {
- ss << m_baseUri;
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), Aws::Region::ComputeSignerRegion(m_region), serviceNameOverride));
- }
- else
- {
- if (m_useDualStack)
- {
- return ComputeEndpointOutcome(Aws::Client::AWSError<S3Errors>(S3Errors::VALIDATION, "VALIDATION",
- "S3 Object Lambda endpoints do not support dualstack right now.", false));
- }
- else
- {
- ss << S3Endpoint::ForRegion(m_region, m_useDualStack, true, serviceNameOverride);
- return ComputeEndpointOutcome(ComputeEndpointResult(ss.str(), Aws::Region::ComputeSignerRegion(m_region), serviceNameOverride));
- }
- }
-}
bool S3Client::MultipartUploadSupported() const
{
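The presigned-URL helpers above now take a uint64_t expiration and all funnel through the header-based GeneratePresignedUrl overload, with the endpoint resolved by m_endpointProvider instead of the removed ComputeEndpointString. A minimal usage sketch, assuming a default-constructed client and placeholder bucket/key names:

#include <aws/core/Aws.h>
#include <aws/core/http/HttpTypes.h>
#include <aws/s3/S3Client.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3Client client;  // default constructor sets up the endpoint provider
        // Placeholder bucket/key; the expiration is the new uint64_t parameter (seconds).
        const Aws::String url = client.GeneratePresignedUrl(
            "example-bucket", "example-key",
            Aws::Http::HttpMethod::HTTP_GET,
            3600);
        std::cout << url << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}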
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
new file mode 100644
index 0000000000..0e379b3dad
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3ClientConfiguration.cpp
@@ -0,0 +1,89 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/S3ClientConfiguration.h>
+
+namespace Aws
+{
+namespace S3
+{
+
+static const char US_EAST_1_REGIONAL_ENDPOINT_ENV_VAR[] = "AWS_S3_US_EAST_1_REGIONAL_ENDPOINT";
+static const char US_EAST_1_REGIONAL_ENDPOINT_CONFIG_VAR[] = "s3_us_east_1_regional_endpoint";
+static const char S3_DISABLE_MULTIREGION_ACCESS_POINTS_ENV_VAR[] = "AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS";
+static const char S3_DISABLE_MULTIREGION_ACCESS_POINTS_CONFIG_VAR[] = "s3_disable_multiregion_access_points";
+static const char S3_USE_ARN_REGION_ENVIRONMENT_VARIABLE[] = "AWS_S3_USE_ARN_REGION";
+static const char S3_USE_ARN_REGION_CONFIG_FILE_OPTION[] = "s3_use_arn_region";
+
+void S3ClientConfiguration::LoadS3SpecificConfig(const Aws::String& inputProfileName)
+{
+ if (Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::NOT_SET == this->useUSEast1RegionalEndPointOption)
+ {
+ const Aws::String& useUSEastOption =
+ BaseClientConfigClass::LoadConfigFromEnvOrProfile(US_EAST_1_REGIONAL_ENDPOINT_ENV_VAR,
+ inputProfileName,
+ US_EAST_1_REGIONAL_ENDPOINT_CONFIG_VAR,
+ {"legacy", "regional"},
+ "regional");
+ if (useUSEastOption == "legacy") {
+ this->useUSEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::LEGACY;
+ } else {
+ this->useUSEast1RegionalEndPointOption = Aws::S3::US_EAST_1_REGIONAL_ENDPOINT_OPTION::REGIONAL;
+ }
+ }
+
+ Aws::String s3DisableMultiRegionAccessPoints = ClientConfiguration::LoadConfigFromEnvOrProfile(S3_DISABLE_MULTIREGION_ACCESS_POINTS_ENV_VAR,
+ inputProfileName,
+ S3_DISABLE_MULTIREGION_ACCESS_POINTS_CONFIG_VAR,
+ {"true", "false"},
+ "false");
+ if (s3DisableMultiRegionAccessPoints == "true")
+ {
+ disableMultiRegionAccessPoints = true;
+ }
+ Aws::String useArnRegionCfg = ClientConfiguration::LoadConfigFromEnvOrProfile(S3_USE_ARN_REGION_ENVIRONMENT_VARIABLE,
+ inputProfileName,
+ S3_USE_ARN_REGION_CONFIG_FILE_OPTION,
+ {"true", "false"},
+ "false");
+ if (useArnRegionCfg == "true")
+ {
+ useArnRegion = true;
+ }
+}
+
+S3ClientConfiguration::S3ClientConfiguration()
+: BaseClientConfigClass()
+{
+ LoadS3SpecificConfig(this->profileName);
+}
+
+S3ClientConfiguration::S3ClientConfiguration(const char* inputProfileName, bool shouldDisableIMDS)
+: BaseClientConfigClass(inputProfileName, shouldDisableIMDS)
+{
+ LoadS3SpecificConfig(Aws::String(inputProfileName));
+}
+
+S3ClientConfiguration::S3ClientConfiguration(bool useSmartDefaults, const char* defaultMode, bool shouldDisableIMDS)
+: BaseClientConfigClass(useSmartDefaults, defaultMode, shouldDisableIMDS)
+{
+ LoadS3SpecificConfig(this->profileName);
+}
+
+S3ClientConfiguration::S3ClientConfiguration(const Client::ClientConfiguration& config,
+ Client::AWSAuthV4Signer::PayloadSigningPolicy iPayloadSigningPolicy,
+ bool iUseVirtualAddressing,
+ US_EAST_1_REGIONAL_ENDPOINT_OPTION iUseUSEast1RegionalEndPointOption)
+ : BaseClientConfigClass(config),
+ useVirtualAddressing(iUseVirtualAddressing),
+ useUSEast1RegionalEndPointOption(iUseUSEast1RegionalEndPointOption),
+ payloadSigningPolicy(iPayloadSigningPolicy)
+{
+ LoadS3SpecificConfig(this->profileName);
+}
+
+
+} // namespace S3
+} // namespace Aws
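The new S3ClientConfiguration folds the former S3Client::LoadS3SpecificConfig logic into the configuration object itself. A sketch of wiring it to a client, assuming placeholder values; the members shown are the ones set in the file above:

#include <aws/core/Aws.h>
#include <aws/s3/S3Client.h>
#include <aws/s3/S3ClientConfiguration.h>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3ClientConfiguration config;  // constructor runs LoadS3SpecificConfig(profileName)
        config.region = "us-east-1";            // placeholder region
        config.useArnRegion = true;             // same switch AWS_S3_USE_ARN_REGION / s3_use_arn_region controls
        config.useVirtualAddressing = false;    // request path-style addressing
        Aws::S3::S3Client client(config);       // the client builds its endpoint provider from this config
    }
    Aws::ShutdownAPI(options);
    return 0;
}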
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
deleted file mode 100644
index d684a11a44..0000000000
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Endpoint.cpp
+++ /dev/null
@@ -1,221 +0,0 @@
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/s3/S3Endpoint.h>
-#include <aws/core/utils/memory/stl/AWSStringStream.h>
-#include <aws/core/utils/HashingUtils.h>
-
-using namespace Aws;
-using namespace Aws::S3;
-
-namespace Aws
-{
-namespace S3
-{
-namespace S3Endpoint
-{
- static const int CN_NORTH_1_HASH = Aws::Utils::HashingUtils::HashString("cn-north-1");
- static const int CN_NORTHWEST_1_HASH = Aws::Utils::HashingUtils::HashString("cn-northwest-1");
- static const int US_ISO_EAST_1_HASH = Aws::Utils::HashingUtils::HashString("us-iso-east-1");
- static const int US_ISOB_EAST_1_HASH = Aws::Utils::HashingUtils::HashString("us-isob-east-1");
-
- static const int FIPS_US_GOV_WEST_1_HASH = Aws::Utils::HashingUtils::HashString("fips-us-gov-west-1");
- static const int US_GOV_WEST_1_HASH = Aws::Utils::HashingUtils::HashString("us-gov-west-1");
- static const int US_GOV_EAST_1_HASH = Aws::Utils::HashingUtils::HashString("us-gov-east-1");
- static const int S3_EXTERNAL_1_HASH = Aws::Utils::HashingUtils::HashString("s3-external-1");
- static const int US_EAST_1_HASH = Aws::Utils::HashingUtils::HashString("us-east-1");
- static const int AWS_GLOBAL_HASH = Aws::Utils::HashingUtils::HashString("aws-global");
-
- Aws::String ForAccessPointArn(const S3ARN& arn, const Aws::String& regionNameOverride, bool useDualStack, const Aws::String& endpointOverride)
- {
- Aws::StringStream ss;
-
- if (!endpointOverride.empty())
- {
- ss << arn.GetResourceId() << "-" << arn.GetAccountId() << "." << endpointOverride;
- return ss.str();
- }
-
- const Aws::String& region = regionNameOverride.empty() ? arn.GetRegion() : regionNameOverride;
- auto hash = Aws::Utils::HashingUtils::HashString(region.c_str());
-
- ss << arn.GetResourceId() << "-" << arn.GetAccountId() << ".s3-accesspoint.";
- if (useDualStack)
- {
- ss << "dualstack.";
- }
- ss << region << "." << "amazonaws.com";
-
- if (hash == CN_NORTH_1_HASH || hash == CN_NORTHWEST_1_HASH)
- {
- ss << ".cn";
- }
-
- return ss.str();
- }
-
- Aws::String ForOutpostsArn(const S3ARN& arn, const Aws::String& regionNameOverride, bool useDualStack, const Aws::String& endpointOverride)
- {
- AWS_UNREFERENCED_PARAM(useDualStack);
- assert(!useDualStack);
- Aws::StringStream ss;
-
- if (!endpointOverride.empty())
- {
- ss << arn.GetSubResourceId() << "-" << arn.GetAccountId() << "." << arn.GetResourceId() << "." << endpointOverride;
- return ss.str();
- }
-
- const Aws::String& region = regionNameOverride.empty() ? arn.GetRegion() : regionNameOverride;
- auto hash = Aws::Utils::HashingUtils::HashString(region.c_str());
-
- ss << arn.GetSubResourceId() << "-" << arn.GetAccountId() << "." << arn.GetResourceId() << "." << ARNService::S3_OUTPOSTS << "." << region << "." << "amazonaws.com";
-
- if (hash == CN_NORTH_1_HASH || hash == CN_NORTHWEST_1_HASH)
- {
- ss << ".cn";
- }
-
- return ss.str();
- }
-
- Aws::String ForObjectLambdaAccessPointArn(const S3ARN& arn, const Aws::String& regionNameOverride, bool useDualStack, const Aws::String& endpointOverride)
- {
- AWS_UNREFERENCED_PARAM(useDualStack);
- assert(!useDualStack);
- Aws::StringStream ss;
-
- if (!endpointOverride.empty())
- {
- ss << arn.GetResourceId() << "-" << arn.GetAccountId() << "." << endpointOverride;
- return ss.str();
- }
-
- Aws::String region = regionNameOverride.empty() ? arn.GetRegion() : regionNameOverride;
- Aws::String fipsSuffix = "";
- if (region.size() >= 5 && region.compare(0, 5, "fips-") == 0)
- {
- region = region.substr(5);
- fipsSuffix = "-fips";
- }
- else if (region.size() >= 5 && region.compare(region.size() - 5, 5, "-fips") == 0)
- {
- region = region.substr(0, region.size() - 5);
- fipsSuffix = "-fips";
- }
-
- ss << arn.GetResourceId() << "-" << arn.GetAccountId() << "." << ARNService::S3_OBJECT_LAMBDA << fipsSuffix << "." << region << "." << "amazonaws.com";
-
- auto hash = Aws::Utils::HashingUtils::HashString(region.c_str());
- if (hash == CN_NORTH_1_HASH || hash == CN_NORTHWEST_1_HASH)
- {
- ss << ".cn";
- }
-
- return ss.str();
- }
-
- Aws::String ForRegion(const Aws::String& regionName, bool useDualStack, bool USEast1UseRegionalEndpoint, const Aws::String& serviceName)
- {
- auto hash = Aws::Utils::HashingUtils::HashString(regionName.c_str());
-
- if (!serviceName.empty())
- {
- assert(!useDualStack);
-
- Aws::StringStream ss;
- ss << serviceName;
-
- if (regionName.size() >= 5 && regionName.compare(0, 5, "fips-") == 0)
- {
- ss << "-fips." << regionName.substr(5);
- }
- else if (regionName.size() >= 5 && regionName.compare(regionName.size() - 5, 5, "-fips") == 0)
- {
- ss << "-fips." << regionName.substr(0, regionName.size() - 5);
- }
- else if (hash == AWS_GLOBAL_HASH || hash == S3_EXTERNAL_1_HASH)
- {
- ss << "." << Aws::Region::US_EAST_1;
- }
- else
- {
- ss << "." << regionName;
- }
- ss << ".amazonaws.com";
- if (hash == CN_NORTH_1_HASH || hash == CN_NORTHWEST_1_HASH)
- {
- ss << ".cn";
- }
- return ss.str();
- }
-
- if(!useDualStack)
- {
- if(hash == FIPS_US_GOV_WEST_1_HASH)
- {
- return "s3-fips-us-gov-west-1.amazonaws.com";
- }
- if(hash == US_GOV_WEST_1_HASH)
- {
- return "s3.us-gov-west-1.amazonaws.com";
- }
- if(hash == US_GOV_EAST_1_HASH)
- {
- return "s3.us-gov-east-1.amazonaws.com";
- }
- if (hash == AWS_GLOBAL_HASH)
- {
- return "s3.amazonaws.com";
- }
- if (hash == S3_EXTERNAL_1_HASH)
- {
- return "s3-external-1.amazonaws.com";
- }
- if(hash == US_EAST_1_HASH)
- {
- if (USEast1UseRegionalEndpoint)
- {
- return "s3.us-east-1.amazonaws.com";
- }
- else
- {
- return "s3.amazonaws.com";
- }
- }
- }
- Aws::StringStream ss;
- ss << "s3" << ".";
-
- if(useDualStack)
- {
- ss << "dualstack.";
- }
-
- ss << regionName;
-
- if (hash == CN_NORTH_1_HASH || hash == CN_NORTHWEST_1_HASH)
- {
- ss << ".amazonaws.com.cn";
- }
- else if (hash == US_ISO_EAST_1_HASH)
- {
- ss << ".c2s.ic.gov";
- }
- else if (hash == US_ISOB_EAST_1_HASH)
- {
- ss << ".sc2s.sgov.gov";
- }
- else
- {
- ss << ".amazonaws.com";
- }
-
- return ss.str();
- }
-
-} // namespace S3Endpoint
-} // namespace S3
-} // namespace Aws
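The deleted S3Endpoint.cpp above hard-coded the endpoint table that the rules-based provider now computes. An illustrative restatement of its ForRegion() fallback path (FIPS, GovCloud, ISO and aws-global special cases omitted), kept only to show what the old mapping produced; this is not SDK code:

#include <string>

// Simplified restatement of the removed ForRegion() logic for ordinary regions.
std::string LegacyS3Host(const std::string& region, bool useDualStack, bool usEast1UseRegionalEndpoint)
{
    if (!useDualStack && region == "us-east-1" && !usEast1UseRegionalEndpoint)
        return "s3.amazonaws.com";            // legacy global endpoint

    std::string host = "s3.";
    if (useDualStack)
        host += "dualstack.";                 // dual-stack endpoints insert this label
    host += region;
    if (region == "cn-north-1" || region == "cn-northwest-1")
        host += ".amazonaws.com.cn";          // China partition suffix
    else
        host += ".amazonaws.com";
    return host;
}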
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
new file mode 100644
index 0000000000..32803417d4
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointProvider.cpp
@@ -0,0 +1,72 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/S3EndpointProvider.h>
+
+namespace Aws
+{
+namespace S3
+{
+namespace Endpoint
+{
+ void S3ClientContextParameters::SetForcePathStyle(bool value)
+ {
+ return SetBooleanParameter(Aws::String("ForcePathStyle"), value);
+ }
+ const S3ClientContextParameters::ClientContextParameters::EndpointParameter& S3ClientContextParameters::GetForcePathStyle() const
+ {
+ return GetParameter("ForcePathStyle");
+ }
+ void S3ClientContextParameters::SetDisableMultiRegionAccessPoints(bool value)
+ {
+ return SetBooleanParameter(Aws::String("DisableMultiRegionAccessPoints"), value);
+ }
+ const S3ClientContextParameters::ClientContextParameters::EndpointParameter& S3ClientContextParameters::GetDisableMultiRegionAccessPoints() const
+ {
+ return GetParameter("DisableMultiRegionAccessPoints");
+ }
+ void S3ClientContextParameters::SetUseArnRegion(bool value)
+ {
+ return SetBooleanParameter(Aws::String("UseArnRegion"), value);
+ }
+ const S3ClientContextParameters::ClientContextParameters::EndpointParameter& S3ClientContextParameters::GetUseArnRegion() const
+ {
+ return GetParameter("UseArnRegion");
+ }
+ void S3ClientContextParameters::SetAccelerate(bool value)
+ {
+ return SetBooleanParameter(Aws::String("Accelerate"), value);
+ }
+ const S3ClientContextParameters::ClientContextParameters::EndpointParameter& S3ClientContextParameters::GetAccelerate() const
+ {
+ return GetParameter("Accelerate");
+ }
+ void S3BuiltInParameters::SetFromClientConfiguration(const S3ClientConfiguration& config)
+ {
+ SetFromClientConfiguration(static_cast<const S3ClientConfiguration::BaseClientConfigClass&>(config));
+
+ static const char* AWS_S3_USE_GLOBAL_ENDPOINT = "UseGlobalEndpoint";
+ if (config.useUSEast1RegionalEndPointOption == US_EAST_1_REGIONAL_ENDPOINT_OPTION::LEGACY) {
+ SetBooleanParameter(AWS_S3_USE_GLOBAL_ENDPOINT, true);
+ }
+
+ // Not supported by this SDK:
+ // static const char* AWS_S3_ACCELERATE = "Accelerate";
+ // static const char* AWS_S3_FORCE_PATH_STYLE = "ForcePathStyle";
+
+ static const char* AWS_S3_USE_ARN_REGION = "UseArnRegion";
+ SetBooleanParameter(AWS_S3_USE_ARN_REGION, config.useArnRegion);
+
+ static const char* AWS_S3_DISABLE_MRAP = "DisableMultiRegionAccessPoints";
+ SetBooleanParameter(AWS_S3_DISABLE_MRAP, config.disableMultiRegionAccessPoints);
+
+ static const char* AWS_S3_FORCE_PATH_STYLE = "ForcePathStyle";
+ if (!config.useVirtualAddressing) {
+ SetBooleanParameter(AWS_S3_FORCE_PATH_STYLE, true);
+ }
+ }
+} // namespace Endpoint
+} // namespace S3
+} // namespace Aws
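SetFromClientConfiguration above is where the S3-specific switches become built-in endpoint parameters: the LEGACY regional option maps to "UseGlobalEndpoint", useArnRegion to "UseArnRegion", disableMultiRegionAccessPoints to "DisableMultiRegionAccessPoints", and a disabled useVirtualAddressing to "ForcePathStyle". A sketch of feeding those switches through the environment variables read by S3ClientConfiguration; setenv is POSIX-only and used here purely for illustration:

#include <aws/core/Aws.h>
#include <aws/s3/S3ClientConfiguration.h>
#include <cstdlib>
#include <iostream>

int main()
{
    // Variable names come from S3ClientConfiguration.cpp above (POSIX setenv, illustrative only).
    setenv("AWS_S3_USE_ARN_REGION", "true", 1);
    setenv("AWS_S3_DISABLE_MULTIREGION_ACCESS_POINTS", "true", 1);
    setenv("AWS_S3_US_EAST_1_REGIONAL_ENDPOINT", "legacy", 1);  // later surfaces as UseGlobalEndpoint=true

    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::S3ClientConfiguration config;  // LoadS3SpecificConfig picks the values up
        std::cout << std::boolalpha
                  << "useArnRegion=" << config.useArnRegion << '\n'
                  << "disableMultiRegionAccessPoints=" << config.disableMultiRegionAccessPoints << '\n';
    }
    Aws::ShutdownAPI(options);
    return 0;
}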
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
new file mode 100644
index 0000000000..d855b8910f
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3EndpointRules.cpp
@@ -0,0 +1,4191 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/S3EndpointRules.h>
+#include <aws/core/utils/memory/stl/AWSArray.h>
+
+namespace Aws
+{
+namespace S3
+{
+const size_t S3EndpointRules::RulesBlobStrLen = 104103;
+const size_t S3EndpointRules::RulesBlobSize = 104104;
+
+using RulesBlobT = Aws::Array<const char, S3EndpointRules::RulesBlobSize>;
+static constexpr RulesBlobT RulesBlob = {{
+'{','"','v','e','r','s','i','o','n','"',':','"','1','.','0','"',',','"','p','a','r','a','m','e','t',
+'e','r','s','"',':','{','"','B','u','c','k','e','t','"',':','{','"','r','e','q','u','i','r','e','d',
+'"',':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',':','"',
+'T','h','e',' ','S','3',' ','b','u','c','k','e','t',' ','u','s','e','d',' ','t','o',' ','s','e','n',
+'d',' ','t','h','e',' ','r','e','q','u','e','s','t','.',' ','T','h','i','s',' ','i','s',' ','a','n',
+' ','o','p','t','i','o','n','a','l',' ','p','a','r','a','m','e','t','e','r',' ','t','h','a','t',' ',
+'w','i','l','l',' ','b','e',' ','s','e','t',' ','a','u','t','o','m','a','t','i','c','a','l','l','y',
+' ','f','o','r',' ','o','p','e','r','a','t','i','o','n','s',' ','t','h','a','t',' ','a','r','e',' ',
+'s','c','o','p','e','d',' ','t','o',' ','a','n',' ','S','3',' ','b','u','c','k','e','t','.','"',',',
+'"','t','y','p','e','"',':','"','S','t','r','i','n','g','"','}',',','"','R','e','g','i','o','n','"',
+':','{','"','b','u','i','l','t','I','n','"',':','"','A','W','S',':',':','R','e','g','i','o','n','"',
+',','"','r','e','q','u','i','r','e','d','"',':','f','a','l','s','e',',','"','d','o','c','u','m','e',
+'n','t','a','t','i','o','n','"',':','"','T','h','e',' ','A','W','S',' ','r','e','g','i','o','n',' ',
+'u','s','e','d',' ','t','o',' ','d','i','s','p','a','t','c','h',' ','t','h','e',' ','r','e','q','u',
+'e','s','t','.','"',',','"','t','y','p','e','"',':','"','S','t','r','i','n','g','"','}',',','"','U',
+'s','e','F','I','P','S','"',':','{','"','b','u','i','l','t','I','n','"',':','"','A','W','S',':',':',
+'U','s','e','F','I','P','S','"',',','"','r','e','q','u','i','r','e','d','"',':','t','r','u','e',',',
+'"','d','e','f','a','u','l','t','"',':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t',
+'a','t','i','o','n','"',':','"','W','h','e','n',' ','t','r','u','e',',',' ','s','e','n','d',' ','t',
+'h','i','s',' ','r','e','q','u','e','s','t',' ','t','o',' ','t','h','e',' ','F','I','P','S','-','c',
+'o','m','p','l','i','a','n','t',' ','r','e','g','i','o','n','a','l',' ','e','n','d','p','o','i','n',
+'t','.',' ','I','f',' ','t','h','e',' ','c','o','n','f','i','g','u','r','e','d',' ','e','n','d','p',
+'o','i','n','t',' ','d','o','e','s',' ','n','o','t',' ','h','a','v','e',' ','a',' ','F','I','P','S',
+' ','c','o','m','p','l','i','a','n','t',' ','e','n','d','p','o','i','n','t',',',' ','d','i','s','p',
+'a','t','c','h','i','n','g',' ','t','h','e',' ','r','e','q','u','e','s','t',' ','w','i','l','l',' ',
+'r','e','t','u','r','n',' ','a','n',' ','e','r','r','o','r','.','"',',','"','t','y','p','e','"',':',
+'"','B','o','o','l','e','a','n','"','}',',','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+':','{','"','b','u','i','l','t','I','n','"',':','"','A','W','S',':',':','U','s','e','D','u','a','l',
+'S','t','a','c','k','"',',','"','r','e','q','u','i','r','e','d','"',':','t','r','u','e',',','"','d',
+'e','f','a','u','l','t','"',':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t','a','t',
+'i','o','n','"',':','"','W','h','e','n',' ','t','r','u','e',',',' ','u','s','e',' ','t','h','e',' ',
+'d','u','a','l','-','s','t','a','c','k',' ','e','n','d','p','o','i','n','t','.',' ','I','f',' ','t',
+'h','e',' ','c','o','n','f','i','g','u','r','e','d',' ','e','n','d','p','o','i','n','t',' ','d','o',
+'e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','d','u','a','l','-','s','t','a','c','k',
+',',' ','d','i','s','p','a','t','c','h','i','n','g',' ','t','h','e',' ','r','e','q','u','e','s','t',
+' ','M','A','Y',' ','r','e','t','u','r','n',' ','a','n',' ','e','r','r','o','r','.','"',',','"','t',
+'y','p','e','"',':','"','B','o','o','l','e','a','n','"','}',',','"','E','n','d','p','o','i','n','t',
+'"',':','{','"','b','u','i','l','t','I','n','"',':','"','S','D','K',':',':','E','n','d','p','o','i',
+'n','t','"',',','"','r','e','q','u','i','r','e','d','"',':','f','a','l','s','e',',','"','d','o','c',
+'u','m','e','n','t','a','t','i','o','n','"',':','"','O','v','e','r','r','i','d','e',' ','t','h','e',
+' ','e','n','d','p','o','i','n','t',' ','u','s','e','d',' ','t','o',' ','s','e','n','d',' ','t','h',
+'i','s',' ','r','e','q','u','e','s','t','"',',','"','t','y','p','e','"',':','"','S','t','r','i','n',
+'g','"','}',',','"','F','o','r','c','e','P','a','t','h','S','t','y','l','e','"',':','{','"','b','u',
+'i','l','t','I','n','"',':','"','A','W','S',':',':','S','3',':',':','F','o','r','c','e','P','a','t',
+'h','S','t','y','l','e','"',',','"','r','e','q','u','i','r','e','d','"',':','f','a','l','s','e',',',
+'"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',':','"','W','h','e','n',' ','t','r','u',
+'e',',',' ','f','o','r','c','e',' ','a',' ','p','a','t','h','-','s','t','y','l','e',' ','e','n','d',
+'p','o','i','n','t',' ','t','o',' ','b','e',' ','u','s','e','d',' ','w','h','e','r','e',' ','t','h',
+'e',' ','b','u','c','k','e','t',' ','n','a','m','e',' ','i','s',' ','p','a','r','t',' ','o','f',' ',
+'t','h','e',' ','p','a','t','h','.','"',',','"','t','y','p','e','"',':','"','B','o','o','l','e','a',
+'n','"','}',',','"','A','c','c','e','l','e','r','a','t','e','"',':','{','"','b','u','i','l','t','I',
+'n','"',':','"','A','W','S',':',':','S','3',':',':','A','c','c','e','l','e','r','a','t','e','"',',',
+'"','r','e','q','u','i','r','e','d','"',':','t','r','u','e',',','"','d','e','f','a','u','l','t','"',
+':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',':','"','W',
+'h','e','n',' ','t','r','u','e',',',' ','u','s','e',' ','S','3',' ','A','c','c','e','l','e','r','a',
+'t','e','.',' ','N','O','T','E',':',' ','N','o','t',' ','a','l','l',' ','r','e','g','i','o','n','s',
+' ','s','u','p','p','o','r','t',' ','S','3',' ','a','c','c','e','l','e','r','a','t','e','.','"',',',
+'"','t','y','p','e','"',':','"','B','o','o','l','e','a','n','"','}',',','"','U','s','e','G','l','o',
+'b','a','l','E','n','d','p','o','i','n','t','"',':','{','"','b','u','i','l','t','I','n','"',':','"',
+'A','W','S',':',':','S','3',':',':','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n',
+'t','"',',','"','r','e','q','u','i','r','e','d','"',':','t','r','u','e',',','"','d','e','f','a','u',
+'l','t','"',':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',
+':','"','W','h','e','t','h','e','r',' ','t','h','e',' ','g','l','o','b','a','l',' ','e','n','d','p',
+'o','i','n','t',' ','s','h','o','u','l','d',' ','b','e',' ','u','s','e','d',',',' ','r','a','t','h',
+'e','r',' ','t','h','e','n',' ','t','h','e',' ','r','e','g','i','o','n','a','l',' ','e','n','d','p',
+'o','i','n','t',' ','f','o','r',' ','u','s','-','e','a','s','t','-','1','.','"',',','"','t','y','p',
+'e','"',':','"','B','o','o','l','e','a','n','"','}',',','"','U','s','e','O','b','j','e','c','t','L',
+'a','m','b','d','a','E','n','d','p','o','i','n','t','"',':','{','"','r','e','q','u','i','r','e','d',
+'"',':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',':','"',
+'I','n','t','e','r','n','a','l',' ','p','a','r','a','m','e','t','e','r',' ','t','o',' ','u','s','e',
+' ','o','b','j','e','c','t',' ','l','a','m','b','d','a',' ','e','n','d','p','o','i','n','t',' ','f',
+'o','r',' ','a','n',' ','o','p','e','r','a','t','i','o','n',' ','(','e','g',':',' ','W','r','i','t',
+'e','G','e','t','O','b','j','e','c','t','R','e','s','p','o','n','s','e',')','"',',','"','t','y','p',
+'e','"',':','"','B','o','o','l','e','a','n','"','}',',','"','D','i','s','a','b','l','e','A','c','c',
+'e','s','s','P','o','i','n','t','s','"',':','{','"','r','e','q','u','i','r','e','d','"',':','f','a',
+'l','s','e',',','"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',':','"','I','n','t','e',
+'r','n','a','l',' ','p','a','r','a','m','e','t','e','r',' ','t','o',' ','d','i','s','a','b','l','e',
+' ','A','c','c','e','s','s',' ','P','o','i','n','t',' ','B','u','c','k','e','t','s','"',',','"','t',
+'y','p','e','"',':','"','B','o','o','l','e','a','n','"','}',',','"','D','i','s','a','b','l','e','M',
+'u','l','t','i','R','e','g','i','o','n','A','c','c','e','s','s','P','o','i','n','t','s','"',':','{',
+'"','b','u','i','l','t','I','n','"',':','"','A','W','S',':',':','S','3',':',':','D','i','s','a','b',
+'l','e','M','u','l','t','i','R','e','g','i','o','n','A','c','c','e','s','s','P','o','i','n','t','s',
+'"',',','"','r','e','q','u','i','r','e','d','"',':','t','r','u','e',',','"','d','e','f','a','u','l',
+'t','"',':','f','a','l','s','e',',','"','d','o','c','u','m','e','n','t','a','t','i','o','n','"',':',
+'"','W','h','e','t','h','e','r',' ','m','u','l','t','i','-','r','e','g','i','o','n',' ','a','c','c',
+'e','s','s',' ','p','o','i','n','t','s',' ','(','M','R','A','P',')',' ','s','h','o','u','l','d',' ',
+'b','e',' ','d','i','s','a','b','l','e','d','.','"',',','"','t','y','p','e','"',':','"','B','o','o',
+'l','e','a','n','"','}',',','"','U','s','e','A','r','n','R','e','g','i','o','n','"',':','{','"','b',
+'u','i','l','t','I','n','"',':','"','A','W','S',':',':','S','3',':',':','U','s','e','A','r','n','R',
+'e','g','i','o','n','"',',','"','r','e','q','u','i','r','e','d','"',':','f','a','l','s','e',',','"',
+'d','o','c','u','m','e','n','t','a','t','i','o','n','"',':','"','W','h','e','n',' ','a','n',' ','A',
+'c','c','e','s','s',' ','P','o','i','n','t',' ','A','R','N',' ','i','s',' ','p','r','o','v','i','d',
+'e','d',' ','a','n','d',' ','t','h','i','s',' ','f','l','a','g',' ','i','s',' ','e','n','a','b','l',
+'e','d',',',' ','t','h','e',' ','S','D','K',' ','M','U','S','T',' ','u','s','e',' ','t','h','e',' ',
+'A','R','N','\'','s',' ','r','e','g','i','o','n',' ','w','h','e','n',' ','c','o','n','s','t','r','u',
+'c','t','i','n','g',' ','t','h','e',' ','e','n','d','p','o','i','n','t',' ','i','n','s','t','e','a',
+'d',' ','o','f',' ','t','h','e',' ','c','l','i','e','n','t','\'','s',' ','c','o','n','f','i','g','u',
+'r','e','d',' ','r','e','g','i','o','n','.','"',',','"','t','y','p','e','"',':','"','B','o','o','l',
+'e','a','n','"','}','}',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s',
+'S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k',
+'e','t','"','}',']','}',',','{','"','f','n','"',':','"','s','u','b','s','t','r','i','n','g','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e','t','"','}',',',
+'4','9',',','5','0',',','t','r','u','e',']',',','"','a','s','s','i','g','n','"',':','"','h','a','r',
+'d','w','a','r','e','T','y','p','e','"','}',',','{','"','f','n','"',':','"','s','u','b','s','t','r',
+'i','n','g','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k',
+'e','t','"','}',',','8',',','1','2',',','t','r','u','e',']',',','"','a','s','s','i','g','n','"',':',
+'"','r','e','g','i','o','n','P','r','e','f','i','x','"','}',',','{','"','f','n','"',':','"','s','u',
+'b','s','t','r','i','n','g','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'B','u','c','k','e','t','"','}',',','0',',','7',',','t','r','u','e',']',',','"','a','s','s','i','g',
+'n','"',':','"','a','b','b','a','S','u','f','f','i','x','"','}',',','{','"','f','n','"',':','"','s',
+'u','b','s','t','r','i','n','g','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','B','u','c','k','e','t','"','}',',','3','2',',','4','9',',','t','r','u','e',']',',','"','a','s',
+'s','i','g','n','"',':','"','o','u','t','p','o','s','t','I','d','"','}',',','{','"','f','n','"',':',
+'"','a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',
+':','"','r','e','g','i','o','n','P','a','r','t','i','t','i','o','n','"','}',',','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','a','b','b','a','S','u','f','f','i','x','"','}',',','"','-','-','o','p',
+'-','s','3','"',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','o','u','t','p','o','s','t','I','d','"','}',',','f',
+'a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','h','a',
+'r','d','w','a','r','e','T','y','p','e','"','}',',','"','e','"',']','}',']',',','"','t','y','p','e',
+'"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','r','e','g','i','o',
+'n','P','r','e','f','i','x','"','}',',','"','b','e','t','a','"',']','}',']',',','"','t','y','p','e',
+'"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',']',',','"',
+'e','r','r','o','r','"',':','"','E','x','p','e','c','t','e','d',' ','a',' ','e','n','d','p','o','i',
+'n','t',' ','t','o',' ','b','e',' ','s','p','e','c','i','f','i','e','d',' ','b','u','t',' ','n','o',
+' ','e','n','d','p','o','i','n','t',' ','w','a','s',' ','f','o','u','n','d','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',
+':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"',
+'u','r','l','"','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','e','c','2','.','{','u','r',
+'l','#','a','u','t','h','o','r','i','t','y','}','"',',','"','p','r','o','p','e','r','t','i','e','s',
+'"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',
+':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':',
+'"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':',
+'"','s','3','-','o','u','t','p','o','s','t','s','"',',','"','d','i','s','a','b','l','e','D','o','u',
+'b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a',
+'d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n',
+'t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/',
+'{','B','u','c','k','e','t','}','.','e','c','2','.','s','3','-','o','u','t','p','o','s','t','s','.',
+'{','R','e','g','i','o','n','}','.','{','r','e','g','i','o','n','P','a','r','t','i','t','i','o','n',
+'#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',
+':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':',
+'"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"',
+'{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','-','o','u','t','p','o','s','t','s','"',',','"','d','i','s','a','b','l','e','D','o','u','b',
+'l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d',
+'e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t',
+'"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','h','a','r','d','w','a','r','e','T','y','p','e','"','}',',','"','o','"',
+']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s',
+'t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','r','e','g','i','o','n','P','r','e','f','i','x','"','}',',','"','b','e','t','a','"',
+']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','n',
+'o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t',
+'"','}',']','}',']','}',']',',','"','e','r','r','o','r','"',':','"','E','x','p','e','c','t','e','d',
+' ','a',' ','e','n','d','p','o','i','n','t',' ','t','o',' ','b','e',' ','s','p','e','c','i','f','i',
+'e','d',' ','b','u','t',' ','n','o',' ','e','n','d','p','o','i','n','t',' ','w','a','s',' ','f','o',
+'u','n','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"',
+'}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"',
+'a','s','s','i','g','n','"',':','"','u','r','l','"','}',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t',
+'}','.','o','p','-','{','o','u','t','p','o','s','t','I','d','}','.','{','u','r','l','#','a','u','t',
+'h','o','r','i','t','y','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','-','o',
+'u','t','p','o','s','t','s','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k',
+'e','t','}','.','o','p','-','{','o','u','t','p','o','s','t','I','d','}','.','s','3','-','o','u','t',
+'p','o','s','t','s','.','{','R','e','g','i','o','n','}','.','{','r','e','g','i','o','n','P','a','r',
+'t','i','t','i','o','n','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','-','o','u','t','p','o','s','t','s','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','e','r','r','o','r','"',':','"','U','n','r','e','c','o','g','n','i','z','e','d',' ',
+'h','a','r','d','w','a','r','e',' ','t','y','p','e',':',' ','\\','"','E','x','p','e','c','t','e','d',
+' ','h','a','r','d','w','a','r','e',' ','t','y','p','e',' ','o',' ','o','r',' ','e',' ','b','u','t',
+' ','g','o','t',' ','{','h','a','r','d','w','a','r','e','T','y','p','e','}','\\','"','"',',','"','t',
+'y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i',
+'d',' ','A','R','N',':',' ','T','h','e',' ','o','u','t','p','o','s','t',' ','I','d',' ','m','u','s',
+'t',' ','o','n','l','y',' ','c','o','n','t','a','i','n',' ','a','-','z',',',' ','A','-','Z',',',' ',
+'0','-','9',' ','a','n','d',' ','`','-','`','.','"',',','"','t','y','p','e','"',':','"','e','r','r',
+'o','r','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','B','u','c','k','e','t','"','}',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',
+':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','p','a','r','s','e','U',
+'R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']','}',']','}',']','}',']',',','"','e','r','r','o','r','"',':','"','C','u','s',
+'t','o','m',' ','e','n','d','p','o','i','n','t',' ','`','{','E','n','d','p','o','i','n','t','}','`',
+' ','w','a','s',' ','n','o','t',' ','a',' ','v','a','l','i','d',' ','U','R','I','"',',','"','t','y',
+'p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','F','o',
+'r','c','e','P','a','t','h','S','t','y','l','e','"','}',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','F','o','r','c','e','P','a','t','h','S','t','y','l','e','"','}',',','t','r','u',
+'e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e',
+'"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w','s','.','p','a','r','s','e','A',
+'r','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e',
+'t','"','}',']','}',']',',','"','e','r','r','o','r','"',':','"','P','a','t','h','-','s','t','y','l',
+'e',' ','a','d','d','r','e','s','s','i','n','g',' ','c','a','n','n','o','t',' ','b','e',' ','u','s',
+'e','d',' ','w','i','t','h',' ','A','R','N',' ','b','u','c','k','e','t','s','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','u','r','i','E','n','c','o','d','e','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e','t','"','}',']',',','"','a','s','s',
+'i','g','n','"',':','"','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','"',
+'}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',
+',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']',',','"','e','r','r','o',
+'r','"',':','"','C','a','n','n','o','t',' ','s','e','t',' ','d','u','a','l','-','s','t','a','c','k',
+' ','i','n',' ','c','o','m','b','i','n','a','t','i','o','n',' ','w','i','t','h',' ','a',' ','c','u',
+'s','t','o','m',' ','e','n','d','p','o','i','n','t','.','"',',','"','t','y','p','e','"',':','"','e',
+'r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"',
+'t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w','s','.','p','a',
+'r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':','"','p','a','r','t','i',
+'t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s',
+'3','-','f','i','p','s','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-',
+'1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f',
+'f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',',
+'"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+'}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u',
+'e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',',
+'"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s',
+'.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r',
+'t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{',
+'u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i',
+'n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b',
+'l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d',
+'e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t',
+'"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o',
+'b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y',
+'p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','d','u','a',
+'l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o',
+'n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e',
+'n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o',
+'d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{',
+'}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E',
+'n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o',
+'i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f',
+'i','p','s','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p',
+'a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}',
+'/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p',
+'r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',
+':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n',
+'g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n',
+'i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u',
+'b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a',
+'d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n',
+'t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"',
+'f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n',
+'"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#',
+'s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}',
+'{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_',
+'e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t',
+'i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a',
+'m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o',
+'n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"',
+'p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r',
+'l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"',
+'}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i',
+'o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d',
+'p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m',
+'e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#',
+'n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c','o','d',
+'e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':',
+'{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"',
+'s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u',
+'s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e',
+'U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p',
+'o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u',
+'e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-',
+'g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']',
+'}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':',
+'/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','n','o','r',
+'m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c','o','d','e','d','_',
+'b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R',
+'L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']',
+'}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l',
+'o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l',
+'#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y',
+'}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i',
+'_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','u','s','-','e','a','s','t','-',
+'1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f',
+'f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',',
+'"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r',
+'u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',
+',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n',
+'t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p',
+'s','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s',
+'u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d',
+'e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':',
+'{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"',
+'s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u',
+'s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o',
+'i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','s','3','-','f','i','p','s','.','{','R','e','g','i','o','n','}','.','{','p',
+'a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}',
+'/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p',
+'r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',
+':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n',
+'g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n',
+'i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u',
+'b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a',
+'d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n',
+'t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']',
+'}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l',
+'o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p',
+'s',':','/','/','s','3','-','f','i','p','s','.','{','R','e','g','i','o','n','}','.','{','p','a','r',
+'t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{',
+'u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n',
+'g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l',
+'e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e',
+'r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']',
+'}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g',
+'l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','d','u','a','l','s','t','a','c','k','.',
+'u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l',
+'t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d',
+'_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-',
+'e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3',
+'"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',
+':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t',
+'y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n',
+'o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t',
+'"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I',
+'P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',',
+'"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':',
+'/','/','s','3','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1','.',
+'{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i',
+'x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',',
+'t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g',
+'l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n',
+'t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','d','u','a',
+'l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o',
+'n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e',
+'n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o',
+'d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{',
+'}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r',
+'i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l',
+'E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p',
+'o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.',
+'d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i',
+'t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r',
+'i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',
+',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':',
+'"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u',
+'r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e',
+'g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h',
+'e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r',
+'l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c',
+'o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s',
+'"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',
+':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':',
+'"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',
+':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d',
+'i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}',
+'}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r',
+'s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n',
+'"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o',
+'i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',
+':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','n','o',
+'r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c','o','d','e','d',
+'_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-',
+'e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3',
+'"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',
+':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t',
+'y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R',
+'L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g',
+'l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r',
+'i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','R','e','g','i','o','n','"','}',',','"','u','s','-','e','a','s','t','-','1','"',']','}',']',
+',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#',
+'s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}',
+'{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_',
+'e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t',
+'i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a',
+'m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o',
+'n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',
+':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{',
+'u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','n','o','r','m','a','l',
+'i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c',
+'k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h',
+'S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',
+',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n',
+'}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i',
+'s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e',
+'}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':',
+'"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S',
+'t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"',
+'}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G',
+'l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',',
+'"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s',
+'c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{',
+'u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e',
+'n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o',
+'d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{',
+'}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'h','t','t','p','s',':','/','/','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u',
+'l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e',
+'d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{',
+'"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s',
+'i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s',
+'-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s',
+'3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g',
+'"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"',
+'t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D',
+'u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r',
+'i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p',
+'s',':','/','/','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d',
+'n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u',
+'c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t',
+'h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4',
+'"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s',
+'t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"',
+'d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r',
+'u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e',
+'"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S',
+'t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"',
+'}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"',
+'}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"',
+'r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','u','s','-','e','a',
+'s','t','-','1','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l',
+'"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','p','a','r','t','i','t','i','o','n','R',
+'e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c',
+'o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s',
+'"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',
+':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':',
+'"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':',
+'"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i',
+'n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',
+',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','R','e','g','i','o','n','}',
+'.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f',
+'i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',
+',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m',
+'e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g',
+'n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a',
+'l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w',
+'s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l',
+'s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'h','t','t','p','s',':','/','/','s','3','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t',
+'i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u',
+'r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r',
+'r','o','r','"',':','"','P','a','t','h','-','s','t','y','l','e',' ','a','d','d','r','e','s','s','i',
+'n','g',' ','c','a','n','n','o','t',' ','b','e',' ','u','s','e','d',' ','w','i','t','h',' ','S','3',
+' ','A','c','c','e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o',
+'r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',',
+'"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i','d',' ','p','a','r','t','i','t','i','o',
+'n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ','d','e','t','e','r','m','i','n','e','d',
+'"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',']','}',']','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w',
+'s','.','i','s','V','i','r','t','u','a','l','H','o','s','t','a','b','l','e','S','3','B','u','c','k',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e',
+'t','"','}',',','f','a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',']',',','"',
+'a','s','s','i','g','n','"',':','"','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','"',
+'}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','V','a','l','i','d','H','o','s','t','L',
+'a','b','e','l','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','f','a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','"',
+'}',',','"','n','a','m','e','"',']','}',',','"','a','w','s','-','c','n','"',']','}',']',',','"','e',
+'r','r','o','r','"',':','"','P','a','r','t','i','t','i','o','n',' ','d','o','e','s',' ','n','o','t',
+' ','s','u','p','p','o','r','t',' ','F','I','P','S','"',',','"','t','y','p','e','"',':','"','e','r',
+'r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A',
+'c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',']',',',
+'"','e','r','r','o','r','"',':','"','A','c','c','e','l','e','r','a','t','e',' ','c','a','n','n','o',
+'t',' ','b','e',' ','u','s','e','d',' ','w','i','t','h',' ','F','I','P','S','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',',',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','p','a','r','t','i','t','i','o','n','R','e','s','u','l',
+'t','"','}',',','"','n','a','m','e','"',']','}',',','"','a','w','s','-','c','n','"',']','}',']',',',
+'"','e','r','r','o','r','"',':','"','S','3',' ','A','c','c','e','l','e','r','a','t','e',' ','c','a',
+'n','n','o','t',' ','b','e',' ','u','s','e','d',' ','i','n',' ','t','h','i','s',' ','r','e','g','i',
+'o','n','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',
+',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',
+']','}',']',',','"','e','r','r','o','r','"',':','"','H','o','s','t',' ','o','v','e','r','r','i','d',
+'e',' ','c','a','n','n','o','t',' ','b','e',' ','c','o','m','b','i','n','e','d',' ','w','i','t','h',
+' ','D','u','a','l','s','t','a','c','k',',',' ','F','I','P','S',',',' ','o','r',' ','S','3',' ','A',
+'c','c','e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',
+':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r',
+'u','e',']','}',']',',','"','e','r','r','o','r','"',':','"','H','o','s','t',' ','o','v','e','r','r',
+'i','d','e',' ','c','a','n','n','o','t',' ','b','e',' ','c','o','m','b','i','n','e','d',' ','w','i',
+'t','h',' ','D','u','a','l','s','t','a','c','k',',',' ','F','I','P','S',',',' ','o','r',' ','S','3',
+' ','A','c','c','e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o',
+'r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e',
+'"','}',',','t','r','u','e',']','}',']',',','"','e','r','r','o','r','"',':','"','H','o','s','t',' ',
+'o','v','e','r','r','i','d','e',' ','c','a','n','n','o','t',' ','b','e',' ','c','o','m','b','i','n',
+'e','d',' ','w','i','t','h',' ','D','u','a','l','s','t','a','c','k',',',' ','F','I','P','S',',',' ',
+'o','r',' ','S','3',' ','A','c','c','e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',':',
+'"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u',
+'e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a',
+'t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',
+']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"',
+'a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',
+':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}',
+'.','s','3','-','f','i','p','s','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s',
+'t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S',
+'u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u',
+'t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v',
+'4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a',
+'s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s',
+'e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','f','i','p','s','.',
+'d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t',
+'i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"',
+'p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s',
+'"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i',
+'n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i',
+'g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D',
+'o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h',
+'e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o',
+'i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P',
+'S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A',
+'c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',
+',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',
+',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r',
+'u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"',
+'e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/',
+'/','{','B','u','c','k','e','t','}','.','s','3','-','f','i','p','s','.','d','u','a','l','s','t','a',
+'c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s',
+'u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o',
+'d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{',
+'}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u',
+'e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a',
+'t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g',
+'l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']',
+'}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t',
+'p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','f','i','p','s','.','d','u','a',
+'l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o',
+'n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',',
+'t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l',
+'e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"',
+'}',']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"',
+'}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k',
+'e','t','}','.','s','3','-','f','i','p','s','.','u','s','-','e','a','s','t','-','1','.','{','p','a',
+'r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',
+',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m',
+'e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g',
+'n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"',
+'s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l',
+'e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',',
+'"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d',
+'p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',
+',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',',
+'"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':',
+'/','/','{','B','u','c','k','e','t','}','.','s','3','-','f','i','p','s','.','u','s','-','e','a','s',
+'t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S',
+'u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u',
+'t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v',
+'4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a',
+'s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',
+':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r',
+'i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l',
+'E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',
+':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l',
+'"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','f','i',
+'p','s','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s',
+'u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o',
+'d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{',
+'}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r',
+'u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r',
+'a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',
+',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-',
+'g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',
+']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','f','i','p','s','.','{','R',
+'e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d',
+'n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{',
+'"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s',
+'i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R',
+'e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3',
+'"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',
+':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t',
+'y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n',
+'"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o',
+'b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',
+':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c',
+'e','l','e','r','a','t','e','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t',
+'-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u',
+'f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t',
+'h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4',
+'"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s',
+'t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"',
+'d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r',
+'u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e',
+'"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S',
+'t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h',
+'t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c','e','l','e',
+'r','a','t','e','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1','.',
+'{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i',
+'x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c',
+'h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"',
+'s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c',
+'k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',',',
+'{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o',
+'i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c','e','l','e','r',
+'a','t','e','.','d','u','a','l','s','t','a','c','k','.','{','p','a','r','t','i','t','i','o','n','R',
+'e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l',
+'e','r','a','t','e','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']','}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s',
+'-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s',
+'e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h',
+'t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c','e','l','e',
+'r','a','t','e','.','d','u','a','l','s','t','a','c','k','.','{','p','a','r','t','i','t','i','o','n',
+'R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a',
+'l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e',
+'r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',
+',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n',
+'t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e',
+'t','}','.','s','3','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1',
+'.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f',
+'i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-',
+'1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i',
+'s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e',
+'}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':',
+'"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a',
+'c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']',
+'}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','s',
+'t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',
+']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','.','d','u','a','l','s','t','a',
+'c','k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e',
+'s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t',
+'i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a',
+'m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o',
+'n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r',
+'a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',
+',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-',
+'g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']',
+'}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k',
+'e','t','}','.','s','3','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}',
+'.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f',
+'i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n',
+'d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k',
+'e','t','}','.','s','3','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}',
+'.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f',
+'i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c',
+'k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']',
+'}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',
+':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"',
+'u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',',','"',
+'i','s','I','p','"',']','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r',
+'i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l',
+'#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y',
+'}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','B','u','c',
+'k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h',
+'S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',
+',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t',
+'-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d',
+'i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u',
+'e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',
+':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s',
+'e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f',
+'n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',
+':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t',
+'t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',
+',','"','i','s','I','p','"',']','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{',
+'u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','B','u','c','k','e','t','}','.','{','u',
+'r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',',
+'f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F',
+'I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a',
+'r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',',','"','i','s','I','p',
+'"',']','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e',
+'g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h',
+'e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r',
+'l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','B','u','c','k','e','t','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',',
+'"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"',
+'p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r',
+'l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',',','"','i','s',
+'I','p','"',']','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#',
+'s','c','h','e','m','e','}',':','/','/','{','B','u','c','k','e','t','}','.','{','u','r','l','#','a',
+'u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i',
+'n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b',
+'l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d',
+'e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t',
+'"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s',
+'e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c',
+'c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U',
+'R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',',','"','i','s','I','p','"',']','}',',',
+'t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a',
+'w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r',
+'u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','u','s','-','e','a','s','t','-','1',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{',
+'u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r',
+'i','t','y','}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{',
+'B','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h',
+'o','r','i','t','y','}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h',
+'}','{','B','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{',
+'"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s',
+'i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R',
+'e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3',
+'"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',
+':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t',
+'y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a',
+'s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','u','r','l','"','}',',','"','i','s','I','p','"',']','}',',','f','a','l','s','e',']','}',',','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l',
+'o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e',
+'g','i','o','n','"','}',',','"','u','s','-','e','a','s','t','-','1','"',']','}',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e',
+'m','e','}',':','/','/','{','B','u','c','k','e','t','}','.','{','u','r','l','#','a','u','t','h','o',
+'r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t',
+'i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a',
+'m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o',
+'n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',
+':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{',
+'B','u','c','k','e','t','}','.','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u',
+'r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e',
+'g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',
+',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':',
+'t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y',
+'p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D',
+'u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',
+',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s',
+'s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'u','r','l','"','}',',','"','i','s','I','p','"',']','}',',','t','r','u','e',']','}',',','{','"','f',
+'n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s',
+'t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',
+']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b',
+'a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e',
+'m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l',
+'#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','B','u','c','k','e','t','}','"',
+',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m',
+'e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g',
+'n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',',
+'f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F',
+'I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a',
+'r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',',','"','i','s','I','p',
+'"',']','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n',
+'"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t',
+'"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','B','u','c',
+'k','e','t','}','.','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#',
+'p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t',
+'h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4',
+'"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o',
+'n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d',
+'i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u',
+'e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',
+':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h',
+'t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c','e','l','e',
+'r','a','t','e','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s',
+'S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e',
+'a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',
+',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':',
+'t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y',
+'p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a',
+'l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n',
+'"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o',
+'b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',
+':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c',
+'e','l','e','r','a','t','e','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#',
+'d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':',
+'{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"',
+'s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u',
+'s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"',
+'}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l',
+'o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e',
+'g','i','o','n','"','}',',','"','u','s','-','e','a','s','t','-','1','"',']','}',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{',
+'B','u','c','k','e','t','}','.','s','3','-','a','c','c','e','l','e','r','a','t','e','.','{','p','a',
+'r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',
+',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m',
+'e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g',
+'n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"',
+'e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/',
+'/','{','B','u','c','k','e','t','}','.','s','3','-','a','c','c','e','l','e','r','a','t','e','.','{',
+'p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x',
+'}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h',
+'e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s',
+'i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',',
+'"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c',
+'k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',
+',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n',
+'"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e',
+'g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p',
+'o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t',
+'}','.','s','3','-','a','c','c','e','l','e','r','a','t','e','.','{','p','a','r','t','i','t','i','o',
+'n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',',
+'f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e',
+'l','e','r','a','t','e','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n',
+'o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t',
+'"','}',']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n',
+'"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o',
+'i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c',
+'k','e','t','}','.','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#',
+'d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':',
+'{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"',
+'s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u',
+'s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s',
+'-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3',
+'.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f',
+'i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-',
+'1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i',
+'s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e',
+'}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':',
+'"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a',
+'c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l','s','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n',
+'d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',
+',','"','u','s','-','e','a','s','t','-','1','"',']','}',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t',
+'}','.','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s',
+'S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','h','t','t','p','s',':','/','/','{','B','u','c','k','e','t','}','.','s','3','.','{','R','e','g',
+'i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s',
+'S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',',
+'f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o',
+'b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/',
+'{','B','u','c','k','e','t','}','.','s','3','.','{','R','e','g','i','o','n','}','.','{','p','a','r',
+'t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i',
+'g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D',
+'o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h',
+'e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o',
+'i','n','t','"','}',']','}',']','}',']','}',']','}',']','}',']','}',']','}',']','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n',
+'v','a','l','i','d',' ','r','e','g','i','o','n',':',' ','r','e','g','i','o','n',' ','w','a','s',' ',
+'n','o','t',' ','a',' ','v','a','l','i','d',' ','D','N','S',' ','n','a','m','e','.','"',',','"','t',
+'y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i',
+'d',' ','p','a','r','t','i','t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ',
+'d','e','t','e','r','m','i','n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r',
+'"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e',
+'U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p',
+'o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','u','r','l','"','}',',','"','s','c','h','e','m','e','"',']',
+'}',',','"','h','t','t','p','"',']','}',',','{','"','f','n','"',':','"','a','w','s','.','i','s','V',
+'i','r','t','u','a','l','H','o','s','t','a','b','l','e','S','3','B','u','c','k','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e','t','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P',
+'S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f',
+'a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g',
+'n','"',':','"','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',
+',','f','a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"',
+'r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',',
+'"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s',
+'c','h','e','m','e','}',':','/','/','{','B','u','c','k','e','t','}','.','{','u','r','l','#','a','u',
+'t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o',
+'r','"',':','"','I','n','v','a','l','i','d',' ','r','e','g','i','o','n',':',' ','r','e','g','i','o',
+'n',' ','w','a','s',' ','n','o','t',' ','a',' ','v','a','l','i','d',' ','D','N','S',' ','n','a','m',
+'e','.','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"',
+'A',' ','v','a','l','i','d',' ','p','a','r','t','i','t','i','o','n',' ','c','o','u','l','d',' ','n',
+'o','t',' ','b','e',' ','d','e','t','e','r','m','i','n','e','d','"',',','"','t','y','p','e','"',':',
+'"','e','r','r','o','r','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','a','w','s','.','p','a','r','s','e','A','r','n','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e','t','"','}',']',',','"','a',
+'s','s','i','g','n','"',':','"','b','u','c','k','e','t','A','r','n','"','}',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n',
+'"','}',',','"','r','e','s','o','u','r','c','e','I','d','[','0',']','"',']',',','"','a','s','s','i',
+'g','n','"',':','"','a','r','n','T','y','p','e','"','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','a','r','n',
+'T','y','p','e','"','}',',','"','"',']','}',']','}',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','s','e','r','v','i','c','e','"',
+']','}',',','"','s','3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','"',']','}',']',',',
+'"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'a','r','n','T','y','p','e','"','}',',','"','a','c','c','e','s','s','p','o','i','n','t','"',']','}',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"',
+'r','e','s','o','u','r','c','e','I','d','[','1',']','"',']',',','"','a','s','s','i','g','n','"',':',
+'"','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',',','"','"',']','}',']','}',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','t','r','u','e',']','}',']',',','"','e','r','r','o','r','"',':','"',
+'S','3',' ','O','b','j','e','c','t',' ','L','a','m','b','d','a',' ','d','o','e','s',' ','n','o','t',
+' ','s','u','p','p','o','r','t',' ','D','u','a','l','-','s','t','a','c','k','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',']',
+',','"','e','r','r','o','r','"',':','"','S','3',' ','O','b','j','e','c','t',' ','L','a','m','b','d',
+'a',' ','d','o','e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','S','3',' ','A','c','c',
+'e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r',
+'e','g','i','o','n','"',']','}',',','"','"',']','}',']','}',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l',
+'e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'D','i','s','a','b','l','e','A','c','c','e','s','s','P','o','i','n','t','s','"','}',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','D','i','s','a','b','l','e','A','c','c','e','s','s',
+'P','o','i','n','t','s','"','}',',','t','r','u','e',']','}',']',',','"','e','r','r','o','r','"',':',
+'"','A','c','c','e','s','s',' ','p','o','i','n','t','s',' ','a','r','e',' ','n','o','t',' ','s','u',
+'p','p','o','r','t','e','d',' ','f','o','r',' ','t','h','i','s',' ','o','p','e','r','a','t','i','o',
+'n','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e',
+'t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u',
+'c','k','e','t','A','r','n','"','}',',','"','r','e','s','o','u','r','c','e','I','d','[','2',']','"',
+']','}',']','}',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','A','r','n','R','e','g',
+'i','o','n','"','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','A',
+'r','n','R','e','g','i','o','n','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','g','i','o','n','"',']','}',',','"','{',
+'R','e','g','i','o','n','}','"',']','}',']','}',']',',','"','e','r','r','o','r','"',':','"','I','n',
+'v','a','l','i','d',' ','c','o','n','f','i','g','u','r','a','t','i','o','n',':',' ','r','e','g','i',
+'o','n',' ','f','r','o','m',' ','A','R','N',' ','`','{','b','u','c','k','e','t','A','r','n','#','r',
+'e','g','i','o','n','}','`',' ','d','o','e','s',' ','n','o','t',' ','m','a','t','c','h',' ','c','l',
+'i','e','n','t',' ','r','e','g','i','o','n',' ','`','{','R','e','g','i','o','n','}','`',' ','a','n',
+'d',' ','U','s','e','A','r','n','R','e','g','i','o','n',' ','i','s',' ','`','f','a','l','s','e','`',
+'"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"',
+'r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e',
+'g','i','o','n','"',']','}',']',',','"','a','s','s','i','g','n','"',':','"','b','u','c','k','e','t',
+'P','a','r','t','i','t','i','o','n','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w',
+'s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':','"','p',
+'a','r','t','i','t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','b','u','c','k','e','t','P','a','r','t','i','t','i','o','n','"','}',',',
+'"','n','a','m','e','"',']','}',',','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','p','a','r','t','i','t','i','o','n',
+'R','e','s','u','l','t','"','}',',','"','n','a','m','e','"',']','}',']','}',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',
+',','"','r','e','g','i','o','n','"',']','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e',
+'"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"',
+'r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','a','c','c',
+'o','u','n','t','I','d','"',']','}',',','"','"',']','}',']',',','"','e','r','r','o','r','"',':','"',
+'I','n','v','a','l','i','d',' ','A','R','N',':',' ','M','i','s','s','i','n','g',' ','a','c','c','o',
+'u','n','t',' ','i','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t',
+'r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','i','s','V','a','l','i','d','H','o','s','t','L','a','b',
+'e','l','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t',
+'r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t',
+'A','r','n','"','}',',','"','a','c','c','o','u','n','t','I','d','"',']','}',',','f','a','l','s','e',
+']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',
+':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','V','a','l','i','d','H','o','s','t',
+'L','a','b','e','l','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','a','c',
+'c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',',','f','a','l','s','e',']','}',']',',',
+'"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',
+',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t',
+'r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t',
+'P','a','r','t','i','t','i','o','n','"','}',',','"','n','a','m','e','"',']','}',',','"','a','w','s',
+'-','c','n','"',']','}',']',',','"','e','r','r','o','r','"',':','"','P','a','r','t','i','t','i','o',
+'n',' ','d','o','e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','F','I','P','S','"',',',
+'"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s',
+'e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d',
+'p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',']',
+',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#',
+'s','c','h','e','m','e','}',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m',
+'e','}','-','{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','.',
+'{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n','#',
+'r','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s',
+'3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']',
+'}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t',
+'p','s',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-','{','b',
+'u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','.','s','3','-','o','b',
+'j','e','c','t','-','l','a','m','b','d','a','-','f','i','p','s','.','{','b','u','c','k','e','t','A',
+'r','n','#','r','e','g','i','o','n','}','.','{','b','u','c','k','e','t','P','a','r','t','i','t','i',
+'o','n','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e',
+'s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e',
+'"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',
+':','"','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','"',',','"','s','i',
+'g','n','i','n','g','N','a','m','e','"',':','"','s','3','-','o','b','j','e','c','t','-','l','a','m',
+'b','d','a','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i',
+'n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',
+',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','a','c','c','e','s','s','P','o','i','n',
+'t','N','a','m','e','}','-','{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t',
+'I','d','}','.','s','3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','.','{','b','u','c',
+'k','e','t','A','r','n','#','r','e','g','i','o','n','}','.','{','b','u','c','k','e','t','P','a','r',
+'t','i','t','i','o','n','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','"',
+',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','-','o','b','j','e','c','t',
+'-','l','a','m','b','d','a','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',
+']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r',
+'r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','a','c',
+'c','e','s','s',' ','p','o','i','n','t',' ','n','a','m','e',' ','m','a','y',' ','o','n','l','y',' ',
+'c','o','n','t','a','i','n',' ','a','-','z',',',' ','A','-','Z',',',' ','0','-','9',' ','a','n','d',
+' ','`','-','`','.',' ','F','o','u','n','d',':',' ','`','{','a','c','c','e','s','s','P','o','i','n',
+'t','N','a','m','e','}','`','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']',
+'}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r',
+'o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','a','c','c',
+'o','u','n','t',' ','i','d',' ','m','a','y',' ','o','n','l','y',' ','c','o','n','t','a','i','n',' ',
+'a','-','z',',',' ','A','-','Z',',',' ','0','-','9',' ','a','n','d',' ','`','-','`','.',' ','F','o',
+'u','n','d',':',' ','`','{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I',
+'d','}','`','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',']',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r',
+'"',':','"','I','n','v','a','l','i','d',' ','r','e','g','i','o','n',' ','i','n',' ','A','R','N',':',
+' ','`','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','`',' ','(','i','n',
+'v','a','l','i','d',' ','D','N','S',' ','n','a','m','e',')','"',',','"','t','y','p','e','"',':','"',
+'e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','e','r','r','o','r','"',':','"','C','l','i','e','n','t',' ','w','a','s',' ','c',
+'o','n','f','i','g','u','r','e','d',' ','f','o','r',' ','p','a','r','t','i','t','i','o','n',' ','`',
+'{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','n','a','m','e','}','`',' ','b',
+'u','t',' ','A','R','N',' ','(','`','{','B','u','c','k','e','t','}','`',')',' ','h','a','s',' ','`',
+'{','b','u','c','k','e','t','P','a','r','t','i','t','i','o','n','#','n','a','m','e','}','`','"',',',
+'"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a',
+'l','i','d',' ','p','a','r','t','i','t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b',
+'e',' ','d','e','t','e','r','m','i','n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r',
+'o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','e','r','r','o','r','"',':','"','C','o','u','l','d',' ','n','o','t',' ','l','o','a','d',' ',
+'p','a','r','t','i','t','i','o','n',' ','f','o','r',' ','A','R','N',' ','r','e','g','i','o','n',' ',
+'`','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','`','"',',','"','t','y',
+'p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',']','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l',
+'i','d',' ','A','R','N',':',' ','T','h','e',' ','A','R','N',' ','m','a','y',' ','o','n','l','y',' ',
+'c','o','n','t','a','i','n',' ','a',' ','s','i','n','g','l','e',' ','r','e','s','o','u','r','c','e',
+' ','c','o','m','p','o','n','e','n','t',' ','a','f','t','e','r',' ','`','a','c','c','e','s','s','p',
+'o','i','n','t','`','.','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',
+']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r',
+'r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','b','u','c','k','e','t',
+' ','A','R','N',' ','i','s',' ','m','i','s','s','i','n','g',' ','a',' ','r','e','g','i','o','n','"',
+',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',']','}',']','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':',
+'"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','E','x','p','e','c','t','e','d',' ','a',' ',
+'r','e','s','o','u','r','c','e',' ','o','f',' ','t','h','e',' ','f','o','r','m','a','t',' ','`','a',
+'c','c','e','s','s','p','o','i','n','t',':','<','a','c','c','e','s','s','p','o','i','n','t',' ','n',
+'a','m','e','>','`',' ','b','u','t',' ','n','o',' ','n','a','m','e',' ','w','a','s',' ','p','r','o',
+'v','i','d','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r',
+'"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','O','b','j','e','c','t',' ','L','a',
+'m','b','d','a',' ','A','R','N','s',' ','o','n','l','y',' ','s','u','p','p','o','r','t',' ','`','a',
+'c','c','e','s','s','p','o','i','n','t','`',' ','a','r','n',' ','t','y','p','e','s',',',' ','b','u',
+'t',' ','f','o','u','n','d',':',' ','`','{','a','r','n','T','y','p','e','}','`','"',',','"','t','y',
+'p','e','"',':','"','e','r','r','o','r','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','a','r','n','T','y','p','e','"',
+'}',',','"','a','c','c','e','s','s','p','o','i','n','t','"',']','}',']',',','"','t','y','p','e','"',
+':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',
+',','"','r','e','s','o','u','r','c','e','I','d','[','1',']','"',']',',','"','a','s','s','i','g','n',
+'"',':','"','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',',','{','"','f','n',
+'"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t',
+'r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',',','"','"',']','}',
+']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',
+':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',',
+'"','r','e','g','i','o','n','"',']','}',',','"','"',']','}',']','}',']',',','"','t','y','p','e','"',
+':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','a','r','n','T','y','p',
+'e','"','}',',','"','a','c','c','e','s','s','p','o','i','n','t','"',']','}',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"',
+'}',',','"','r','e','g','i','o','n','"',']','}',',','"','"',']','}',']','}',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','D','i','s','a','b','l','e','A','c','c','e','s','s','P','o','i','n','t','s','"','}',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','D','i','s','a','b','l','e','A','c',
+'c','e','s','s','P','o','i','n','t','s','"','}',',','t','r','u','e',']','}',']',',','"','e','r','r',
+'o','r','"',':','"','A','c','c','e','s','s',' ','p','o','i','n','t','s',' ','a','r','e',' ','n','o',
+'t',' ','s','u','p','p','o','r','t','e','d',' ','f','o','r',' ','t','h','i','s',' ','o','p','e','r',
+'a','t','i','o','n','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','s','o','u','r','c','e','I','d',
+'[','2',']','"',']','}',']','}',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',
+',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','S',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','A','r',
+'n','R','e','g','i','o','n','"','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','A','r','n','R','e','g','i','o','n','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','g','i','o','n','"',']',
+'}',',','"','{','R','e','g','i','o','n','}','"',']','}',']','}',']',',','"','e','r','r','o','r','"',
+':','"','I','n','v','a','l','i','d',' ','c','o','n','f','i','g','u','r','a','t','i','o','n',':',' ',
+'r','e','g','i','o','n',' ','f','r','o','m',' ','A','R','N',' ','`','{','b','u','c','k','e','t','A',
+'r','n','#','r','e','g','i','o','n','}','`',' ','d','o','e','s',' ','n','o','t',' ','m','a','t','c',
+'h',' ','c','l','i','e','n','t',' ','r','e','g','i','o','n',' ','`','{','R','e','g','i','o','n','}',
+'`',' ','a','n','d',' ','U','s','e','A','r','n','R','e','g','i','o','n',' ','i','s',' ','`','f','a',
+'l','s','e','`','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',
+',','"','r','e','g','i','o','n','"',']','}',']',',','"','a','s','s','i','g','n','"',':','"','b','u',
+'c','k','e','t','P','a','r','t','i','t','i','o','n','"','}',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l',
+'e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n',
+'"',':','"','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y',
+'p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',
+',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','P','a','r','t','i','t','i','o',
+'n','"','}',',','"','n','a','m','e','"',']','}',',','"','{','p','a','r','t','i','t','i','o','n','R',
+'e','s','u','l','t','#','n','a','m','e','}','"',']','}',']',',','"','t','y','p','e','"',':','"','t',
+'r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','g',
+'i','o','n','"',']','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','s','e','r','v','i','c','e','"',
+']','}',',','"','s','3','"',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','V','a',
+'l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','a','c','c','o','u','n','t','I',
+'d','"',']','}',',','f','a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i',
+'s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',
+',','f','a','l','s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"',
+'r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',',
+'"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',']',',','"','e','r',
+'r','o','r','"',':','"','A','c','c','e','s','s',' ','P','o','i','n','t','s',' ','d','o',' ','n','o',
+'t',' ','s','u','p','p','o','r','t',' ','S','3',' ','A','c','c','e','l','e','r','a','t','e','"',',',
+'"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',
+',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','P','a','r','t','i','t','i',
+'o','n','"','}',',','"','n','a','m','e','"',']','}',',','"','a','w','s','-','c','n','"',']','}',']',
+',','"','e','r','r','o','r','"',':','"','P','a','r','t','i','t','i','o','n',' ','d','o','e','s',' ',
+'n','o','t',' ','s','u','p','p','o','r','t',' ','F','I','P','S','"',',','"','t','y','p','e','"',':',
+'"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']',',','"','e','r','r','o','r','"',
+':','"','D','u','a','l','S','t','a','c','k',' ','c','a','n','n','o','t',' ','b','e',' ','c','o','m',
+'b','i','n','e','d',' ','w','i','t','h',' ','a',' ','H','o','s','t',' ','o','v','e','r','r','i','d',
+'e',' ','(','P','r','i','v','a','t','e','L','i','n','k',')','"',',','"','t','y','p','e','"',':','"',
+'e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',',
+'"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',
+']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-','{',
+'b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','.','s','3','-','a',
+'c','c','e','s','s','p','o','i','n','t','-','f','i','p','s','.','d','u','a','l','s','t','a','c','k',
+'.','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','.','{','b','u','c','k',
+'e','t','P','a','r','t','i','t','i','o','n','#','d','n','s','S','u','f','f','i','x','}','"',',','"',
+'p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s',
+'"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i',
+'n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n','#','r','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{',
+'a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-','{','b','u','c','k','e','t','A',
+'r','n','#','a','c','c','o','u','n','t','I','d','}','.','s','3','-','a','c','c','e','s','s','p','o',
+'i','n','t','-','f','i','p','s','.','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o',
+'n','}','.','{','b','u','c','k','e','t','P','a','r','t','i','t','i','o','n','#','d','n','s','S','u',
+'f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t',
+'h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4',
+'"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e',
+'t','A','r','n','#','r','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u',
+'e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h',
+'t','t','p','s',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-',
+'{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','.','s','3','-',
+'a','c','c','e','s','s','p','o','i','n','t','.','d','u','a','l','s','t','a','c','k','.','{','b','u',
+'c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','.','{','b','u','c','k','e','t','P','a',
+'r','t','i','t','i','o','n','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R',
+'L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h',
+'e','m','e','}',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-',
+'{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','.','{','u','r',
+'l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"',
+'p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s',
+'"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i',
+'n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n','#','r','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/',
+'{','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-','{','b','u','c','k','e','t',
+'A','r','n','#','a','c','c','o','u','n','t','I','d','}','.','s','3','-','a','c','c','e','s','s','p',
+'o','i','n','t','.','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','.','{',
+'b','u','c','k','e','t','P','a','r','t','i','t','i','o','n','#','d','n','s','S','u','f','f','i','x',
+'}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h',
+'e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s',
+'i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n',
+'#','r','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',']','}',']','}',']',
+'}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r',
+'o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','a','c','c',
+'e','s','s',' ','p','o','i','n','t',' ','n','a','m','e',' ','m','a','y',' ','o','n','l','y',' ','c',
+'o','n','t','a','i','n',' ','a','-','z',',',' ','A','-','Z',',',' ','0','-','9',' ','a','n','d',' ',
+'`','-','`','.',' ','F','o','u','n','d',':',' ','`','{','a','c','c','e','s','s','P','o','i','n','t',
+'N','a','m','e','}','`','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',
+']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o',
+'r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','a','c','c','o',
+'u','n','t',' ','i','d',' ','m','a','y',' ','o','n','l','y',' ','c','o','n','t','a','i','n',' ','a',
+'-','z',',',' ','A','-','Z',',',' ','0','-','9',' ','a','n','d',' ','`','-','`','.',' ','F','o','u',
+'n','d',':',' ','`','{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d',
+'}','`','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"',
+'I','n','v','a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','A','R','N',' ','w','a','s',' ',
+'n','o','t',' ','f','o','r',' ','t','h','e',' ','S','3',' ','s','e','r','v','i','c','e',',',' ','f',
+'o','u','n','d',':',' ','{','b','u','c','k','e','t','A','r','n','#','s','e','r','v','i','c','e','}',
+'"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n',
+'v','a','l','i','d',' ','r','e','g','i','o','n',' ','i','n',' ','A','R','N',':',' ','`','{','b','u',
+'c','k','e','t','A','r','n','#','r','e','g','i','o','n','}','`',' ','(','i','n','v','a','l','i','d',
+' ','D','N','S',' ','n','a','m','e',')','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r',
+'"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"',
+'e','r','r','o','r','"',':','"','C','l','i','e','n','t',' ','w','a','s',' ','c','o','n','f','i','g',
+'u','r','e','d',' ','f','o','r',' ','p','a','r','t','i','t','i','o','n',' ','`','{','p','a','r','t',
+'i','t','i','o','n','R','e','s','u','l','t','#','n','a','m','e','}','`',' ','b','u','t',' ','A','R',
+'N',' ','(','`','{','B','u','c','k','e','t','}','`',')',' ','h','a','s',' ','`','{','b','u','c','k',
+'e','t','P','a','r','t','i','t','i','o','n','#','n','a','m','e','}','`','"',',','"','t','y','p','e',
+'"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i','d',' ','p',
+'a','r','t','i','t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ','d','e','t',
+'e','r','m','i','n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']',
+'}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r',
+'o','r','"',':','"','C','o','u','l','d',' ','n','o','t',' ','l','o','a','d',' ','p','a','r','t','i',
+'t','i','o','n',' ','f','o','r',' ','A','R','N',' ','r','e','g','i','o','n',' ','`','{','b','u','c',
+'k','e','t','A','r','n','#','r','e','g','i','o','n','}','`','"',',','"','t','y','p','e','"',':','"',
+'e','r','r','o','r','"','}',']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R',
+'N',':',' ','T','h','e',' ','A','R','N',' ','m','a','y',' ','o','n','l','y',' ','c','o','n','t','a',
+'i','n',' ','a',' ','s','i','n','g','l','e',' ','r','e','s','o','u','r','c','e',' ','c','o','m','p',
+'o','n','e','n','t',' ','a','f','t','e','r',' ','`','a','c','c','e','s','s','p','o','i','n','t','`',
+'.','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',']','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':',
+'"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','b','u','c','k','e','t',' ','A','R','N',' ',
+'i','s',' ','m','i','s','s','i','n','g',' ','a',' ','r','e','g','i','o','n','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','a','c','c','e','s','s','P','o','i','n','t','N','a',
+'m','e','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',
+']',',','"','e','r','r','o','r','"',':','"','S','3',' ','M','R','A','P',' ','d','o','e','s',' ','n',
+'o','t',' ','s','u','p','p','o','r','t',' ','d','u','a','l','-','s','t','a','c','k','"',',','"','t',
+'y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',']',',',
+'"','e','r','r','o','r','"',':','"','S','3',' ','M','R','A','P',' ','d','o','e','s',' ','n','o','t',
+' ','s','u','p','p','o','r','t',' ','F','I','P','S','"',',','"','t','y','p','e','"',':','"','e','r',
+'r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','A',
+'c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',']',',','"','e','r','r','o',
+'r','"',':','"','S','3',' ','M','R','A','P',' ','d','o','e','s',' ','n','o','t',' ','s','u','p','p',
+'o','r','t',' ','S','3',' ','A','c','c','e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',
+':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','D','i','s','a','b','l','e','M','u','l','t','i','R','e','g','i','o','n','A','c','c','e',
+'s','s','P','o','i','n','t','s','"','}',',','t','r','u','e',']','}',']',',','"','e','r','r','o','r',
+'"',':','"','I','n','v','a','l','i','d',' ','c','o','n','f','i','g','u','r','a','t','i','o','n',':',
+' ','M','u','l','t','i','-','R','e','g','i','o','n',' ','A','c','c','e','s','s',' ','P','o','i','n',
+'t',' ','A','R','N','s',' ','a','r','e',' ','d','i','s','a','b','l','e','d','.','"',',','"','t','y',
+'p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':',
+'"','m','r','a','p','P','a','r','t','i','t','i','o','n','"','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','m','r','a','p','P','a','r','t','i','t','i','o','n','"','}',',','"','n',
+'a','m','e','"',']','}',',','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',
+',','"','p','a','r','t','i','t','i','o','n','"',']','}',']','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',
+':','"','h','t','t','p','s',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m',
+'e','}','.','a','c','c','e','s','s','p','o','i','n','t','.','s','3','-','g','l','o','b','a','l','.',
+'{','m','r','a','p','P','a','r','t','i','t','i','o','n','#','d','n','s','S','u','f','f','i','x','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','a','"',',','"','s',
+'i','g','n','i','n','g','R','e','g','i','o','n','S','e','t','"',':','[','"','*','"',']',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','e','r','r','o','r','"',':','"','C','l','i','e','n','t',' ','w','a','s',' ','c','o','n','f',
+'i','g','u','r','e','d',' ','f','o','r',' ','p','a','r','t','i','t','i','o','n',' ','`','{','m','r',
+'a','p','P','a','r','t','i','t','i','o','n','#','n','a','m','e','}','`',' ','b','u','t',' ','b','u',
+'c','k','e','t',' ','r','e','f','e','r','r','e','d',' ','t','o',' ','p','a','r','t','i','t','i','o',
+'n',' ','`','{','b','u','c','k','e','t','A','r','n','#','p','a','r','t','i','t','i','o','n','}','`',
+'"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','{','R',
+'e','g','i','o','n','}',' ','w','a','s',' ','n','o','t',' ','a',' ','v','a','l','i','d',' ','r','e',
+'g','i','o','n','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',
+']','}',']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','c','c','e','s','s',' ',
+'P','o','i','n','t',' ','N','a','m','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r',
+'"','}',']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','E','x',
+'p','e','c','t','e','d',' ','a',' ','r','e','s','o','u','r','c','e',' ','o','f',' ','t','h','e',' ',
+'f','o','r','m','a','t',' ','`','a','c','c','e','s','s','p','o','i','n','t',':','<','a','c','c','e',
+'s','s','p','o','i','n','t',' ','n','a','m','e','>','`',' ','b','u','t',' ','n','o',' ','n','a','m',
+'e',' ','w','a','s',' ','p','r','o','v','i','d','e','d','"',',','"','t','y','p','e','"',':','"','e',
+'r','r','o','r','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','s','e',
+'r','v','i','c','e','"',']','}',',','"','s','3','-','o','u','t','p','o','s','t','s','"',']','}',']',
+',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',']',',',
+'"','e','r','r','o','r','"',':','"','S','3',' ','O','u','t','p','o','s','t','s',' ','d','o','e','s',
+' ','n','o','t',' ','s','u','p','p','o','r','t',' ','D','u','a','l','-','s','t','a','c','k','"',',',
+'"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',
+']',',','"','e','r','r','o','r','"',':','"','S','3',' ','O','u','t','p','o','s','t','s',' ','d','o',
+'e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','F','I','P','S','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',
+':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',']',
+',','"','e','r','r','o','r','"',':','"','S','3',' ','O','u','t','p','o','s','t','s',' ','d','o','e',
+'s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','S','3',' ','A','c','c','e','l','e','r','a',
+'t','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',
+',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','s','o','u','r','c','e',
+'I','d','[','4',']','"',']','}',']','}',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a',
+'l','i','d',' ','A','r','n',':',' ','O','u','t','p','o','s','t',' ','A','c','c','e','s','s',' ','P',
+'o','i','n','t',' ','A','R','N',' ','c','o','n','t','a','i','n','s',' ','s','u','b',' ','r','e','s',
+'o','u','r','c','e','s','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t',
+'r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r',
+'e','s','o','u','r','c','e','I','d','[','1',']','"',']',',','"','a','s','s','i','g','n','"',':','"',
+'o','u','t','p','o','s','t','I','d','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','i','s',
+'V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','o','u','t','p','o','s','t','I','d','"','}',',','f','a','l','s','e',']',
+'}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','A','r','n','R','e','g','i','o','n','"','}',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','A','r','n','R','e','g',
+'i','o','n','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',
+',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t',
+'t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e',
+'t','A','r','n','"','}',',','"','r','e','g','i','o','n','"',']','}',',','"','{','R','e','g','i','o',
+'n','}','"',']','}',']','}',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',
+' ','c','o','n','f','i','g','u','r','a','t','i','o','n',':',' ','r','e','g','i','o','n',' ','f','r',
+'o','m',' ','A','R','N',' ','`','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n',
+'}','`',' ','d','o','e','s',' ','n','o','t',' ','m','a','t','c','h',' ','c','l','i','e','n','t',' ',
+'r','e','g','i','o','n',' ','`','{','R','e','g','i','o','n','}','`',' ','a','n','d',' ','U','s','e',
+'A','r','n','R','e','g','i','o','n',' ','i','s',' ','`','f','a','l','s','e','`','"',',','"','t','y',
+'p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'a','w','s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','g','i','o','n','"',
+']','}',']',',','"','a','s','s','i','g','n','"',':','"','b','u','c','k','e','t','P','a','r','t','i',
+'t','i','o','n','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t',
+'y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w','s','.','p','a','r',
+'t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R',
+'e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':','"','p','a','r','t','i','t',
+'i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e',
+'"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s','t',
+'r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','b','u','c','k','e','t','P','a','r','t','i','t','i','o','n','"','}',',','"','n','a','m','e',
+'"',']','}',',','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','p','a','r','t','i','t','i','o','n','R','e','s','u','l',
+'t','"','}',',','"','n','a','m','e','"',']','}',']','}',']',',','"','t','y','p','e','"',':','"','t',
+'r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','g',
+'i','o','n','"',']','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','a','c','c','o',
+'u','n','t','I','d','"',']','}',',','f','a','l','s','e',']','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','g','e','t','A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','b','u','c','k','e','t','A','r','n','"','}',',','"','r','e','s','o','u','r','c','e','I',
+'d','[','2',']','"',']',',','"','a','s','s','i','g','n','"',':','"','o','u','t','p','o','s','t','T',
+'y','p','e','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l',
+'e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y',
+'p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t','t','r','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','b','u','c','k','e','t','A','r',
+'n','"','}',',','"','r','e','s','o','u','r','c','e','I','d','[','3',']','"',']',',','"','a','s','s',
+'i','g','n','"',':','"','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','"','}',']',',',
+'"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','o','u','t','p','o','s','t','T','y','p',
+'e','"','}',',','"','a','c','c','e','s','s','p','o','i','n','t','"',']','}',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p',
+'a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l',
+'"','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','{','a','c','c','e','s','s','P','o','i','n','t','N','a','m','e','}','-','{',
+'b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','.','{','o','u','t',
+'p','o','s','t','I','d','}','.','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e','t','A','r','n','#','r','e',
+'g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','-',
+'o','u','t','p','o','s','t','s','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n',
+'t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','{','a','c','c','e','s',
+'s','P','o','i','n','t','N','a','m','e','}','-','{','b','u','c','k','e','t','A','r','n','#','a','c',
+'c','o','u','n','t','I','d','}','.','{','o','u','t','p','o','s','t','I','d','}','.','s','3','-','o',
+'u','t','p','o','s','t','s','.','{','b','u','c','k','e','t','A','r','n','#','r','e','g','i','o','n',
+'}','.','{','b','u','c','k','e','t','P','a','r','t','i','t','i','o','n','#','d','n','s','S','u','f',
+'f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h',
+'S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',
+',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','b','u','c','k','e','t',
+'A','r','n','#','r','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','-','o','u','t','p','o','s','t','s','"',',','"','d','i','s','a','b','l','e','D',
+'o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h',
+'e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o',
+'i','n','t','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+']',',','"','e','r','r','o','r','"',':','"','E','x','p','e','c','t','e','d',' ','a','n',' ','o','u',
+'t','p','o','s','t',' ','t','y','p','e',' ','`','a','c','c','e','s','s','p','o','i','n','t','`',',',
+' ','f','o','u','n','d',' ','{','o','u','t','p','o','s','t','T','y','p','e','}','"',',','"','t','y',
+'p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',
+' ','A','R','N',':',' ','e','x','p','e','c','t','e','d',' ','a','n',' ','a','c','c','e','s','s',' ',
+'p','o','i','n','t',' ','n','a','m','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r',
+'"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"',
+'e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','E','x','p','e',
+'c','t','e','d',' ','a',' ','4','-','c','o','m','p','o','n','e','n','t',' ','r','e','s','o','u','r',
+'c','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"',
+'I','n','v','a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','a','c','c','o','u','n','t',' ',
+'i','d',' ','m','a','y',' ','o','n','l','y',' ','c','o','n','t','a','i','n',' ','a','-','z',',',' ',
+'A','-','Z',',',' ','0','-','9',' ','a','n','d',' ','`','-','`','.',' ','F','o','u','n','d',':',' ',
+'`','{','b','u','c','k','e','t','A','r','n','#','a','c','c','o','u','n','t','I','d','}','`','"',',',
+'"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a',
+'l','i','d',' ','r','e','g','i','o','n',' ','i','n',' ','A','R','N',':',' ','`','{','b','u','c','k',
+'e','t','A','r','n','#','r','e','g','i','o','n','}','`',' ','(','i','n','v','a','l','i','d',' ','D',
+'N','S',' ','n','a','m','e',')','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',
+']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r',
+'r','o','r','"',':','"','C','l','i','e','n','t',' ','w','a','s',' ','c','o','n','f','i','g','u','r',
+'e','d',' ','f','o','r',' ','p','a','r','t','i','t','i','o','n',' ','`','{','p','a','r','t','i','t',
+'i','o','n','R','e','s','u','l','t','#','n','a','m','e','}','`',' ','b','u','t',' ','A','R','N',' ',
+'(','`','{','B','u','c','k','e','t','}','`',')',' ','h','a','s',' ','`','{','b','u','c','k','e','t',
+'P','a','r','t','i','t','i','o','n','#','n','a','m','e','}','`','"',',','"','t','y','p','e','"',':',
+'"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i','d',' ','p','a','r',
+'t','i','t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ','d','e','t','e','r',
+'m','i','n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r',
+'"',':','"','C','o','u','l','d',' ','n','o','t',' ','l','o','a','d',' ','p','a','r','t','i','t','i',
+'o','n',' ','f','o','r',' ','A','R','N',' ','r','e','g','i','o','n',' ','{','b','u','c','k','e','t',
+'A','r','n','#','r','e','g','i','o','n','}','"',',','"','t','y','p','e','"',':','"','e','r','r','o',
+'r','"','}',']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','T',
+'h','e',' ','o','u','t','p','o','s','t',' ','I','d',' ','m','a','y',' ','o','n','l','y',' ','c','o',
+'n','t','a','i','n',' ','a','-','z',',',' ','A','-','Z',',',' ','0','-','9',' ','a','n','d',' ','`',
+'-','`','.',' ','F','o','u','n','d',':',' ','`','{','o','u','t','p','o','s','t','I','d','}','`','"',
+',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v',
+'a','l','i','d',' ','A','R','N',':',' ','T','h','e',' ','O','u','t','p','o','s','t',' ','I','d',' ',
+'w','a','s',' ','n','o','t',' ','s','e','t','"',',','"','t','y','p','e','"',':','"','e','r','r','o',
+'r','"','}',']','}',']','}',']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R',
+'N',':',' ','U','n','r','e','c','o','g','n','i','z','e','d',' ','f','o','r','m','a','t',':',' ','{',
+'B','u','c','k','e','t','}',' ','(','t','y','p','e',':',' ','{','a','r','n','T','y','p','e','}',')',
+'"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n',
+'v','a','l','i','d',' ','A','R','N',':',' ','N','o',' ','A','R','N',' ','t','y','p','e',' ','s','p',
+'e','c','i','f','i','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','s',
+'u','b','s','t','r','i','n','g','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','B','u','c','k','e','t','"','}',',','0',',','4',',','f','a','l','s','e',']',',','"','a','s','s',
+'i','g','n','"',':','"','a','r','n','P','r','e','f','i','x','"','}',',','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','a','r','n','P','r','e','f','i','x','"','}',',','"','a','r','n',':','"',']','}',
+',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n',
+'"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'a','w','s','.','p','a','r','s','e','A','r','n','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','B','u','c','k','e','t','"','}',']','}',']','}',']','}',']',',','"','e','r','r',
+'o','r','"',':','"','I','n','v','a','l','i','d',' ','A','R','N',':',' ','`','{','B','u','c','k','e',
+'t','}','`',' ','w','a','s',' ','n','o','t',' ','a',' ','v','a','l','i','d',' ','A','R','N','"',',',
+'"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[','{','"','f','n','"',':','"','u','r','i','E','n','c','o','d','e','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e','t','"','}',']',',',
+'"','a','s','s','i','g','n','"',':','"','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c',
+'k','e','t','"','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l',
+'e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r',
+'u','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']',',','"',
+'e','r','r','o','r','"',':','"','C','a','n','n','o','t',' ','s','e','t',' ','d','u','a','l','-','s',
+'t','a','c','k',' ','i','n',' ','c','o','m','b','i','n','a','t','i','o','n',' ','w','i','t','h',' ',
+'a',' ','c','u','s','t','o','m',' ','e','n','d','p','o','i','n','t','.','"',',','"','t','y','p','e',
+'"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w',
+'s','.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':','"','p',
+'a','r','t','i','t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','f','a','l',
+'s','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p',
+'e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',
+':','/','/','s','3','-','f','i','p','s','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e',
+'a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n',
+'s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c',
+'k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h',
+'S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',
+',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t',
+'-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d',
+'i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u',
+'e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',
+':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',
+',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o',
+'n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p',
+'o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-',
+'f','i','p','s','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1','.',
+'{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i',
+'x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s',
+'i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e',
+'D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"',
+'h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p',
+'o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',',
+'t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f',
+'n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']',
+'}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l',
+'o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',
+',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s',
+'.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t',
+'i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u',
+'r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o',
+'b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/',
+'s','3','-','f','i','p','s','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n',
+'}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f',
+'f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"',
+'s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l',
+'e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',',
+'"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d',
+'p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',
+',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s',
+'s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{',
+'u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r',
+'i','t','y','}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{',
+'u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i',
+'n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b',
+'l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d',
+'e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t',
+'"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s',
+'e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f',
+'n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',
+':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F',
+'I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',',
+'"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s',
+'c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{',
+'u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e',
+'n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p',
+'a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l',
+'"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',
+',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"',
+'a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t',
+'r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l',
+'e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e',
+'m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l',
+'#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c','o',
+'d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',
+':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':',
+'"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"',
+'{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r',
+'s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w',
+'s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l',
+'s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o',
+'r','i','t','y','}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}',
+'{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r',
+'o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':',
+'[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g',
+'R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i',
+'n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b',
+'l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d',
+'e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t',
+'"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s',
+'e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-',
+'g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u',
+'r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','u','s','-','e',
+'a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n',
+'s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c',
+'k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h',
+'S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',
+',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t',
+'-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d',
+'i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u',
+'e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',
+':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',
+',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"',
+'}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i',
+'o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d',
+'p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3',
+'-','f','i','p','s','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o',
+'n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e',
+'n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E',
+'n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',
+':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','{','R','e','g','i','o','n',
+'}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f',
+'f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"',
+'s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l',
+'e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',',
+'"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d',
+'p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w',
+'s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l',
+'s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','{','R','e','g','i','o','n','}','.',
+'{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i',
+'x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i',
+'g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D',
+'o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h',
+'e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o',
+'i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']',
+'}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a',
+'w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':',
+'{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','d','u','a','l','s','t',
+'a','c','k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R',
+'e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c',
+'o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s',
+'"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',
+':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':',
+'"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',
+':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d',
+'i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}',
+'}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n',
+'"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s',
+'S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p',
+'o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s',
+'t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',
+']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','s','3','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e','a','s',
+'t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S',
+'u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e',
+'t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c',
+'h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"',
+'s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c',
+'k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a',
+'w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r',
+'u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d',
+'p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3',
+'.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t',
+'i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u',
+'r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{',
+'"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l',
+'o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"',
+'e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/',
+'/','s','3','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p',
+'a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}',
+'/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p',
+'r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',
+':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n',
+'g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n',
+'i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u',
+'b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a',
+'d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n',
+'t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"',
+'f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n',
+'"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r',
+'i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l',
+'#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y',
+'}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i',
+'_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',
+',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':',
+'"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u',
+'r','l','"','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e',
+'g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h',
+'e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r',
+'l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c',
+'o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s',
+'"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',
+':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':',
+'"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',
+':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d',
+'i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}',
+'}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o',
+'n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r',
+'s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',
+':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a',
+'w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r',
+'u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e',
+'s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','u','s','-','e','a','s','t','-','1',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{',
+'u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r',
+'i','t','y','}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{',
+'u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n',
+'g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l',
+'e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e',
+'r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o',
+'i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',
+':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','n','o',
+'r','m','a','l','i','z','e','d','P','a','t','h','}','{','u','r','i','_','e','n','c','o','d','e','d',
+'_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e',
+'g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',
+',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':',
+'t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y',
+'p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D',
+'u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U',
+'R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s',
+'e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-',
+'g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',
+']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u',
+'r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i',
+'t','y','}','{','u','r','l','#','n','o','r','m','a','l','i','z','e','d','P','a','t','h','}','{','u',
+'r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']',
+'}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g',
+'l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','p','a','r','t','i','t','i','o','n',
+'R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n',
+'c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e',
+'s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e',
+'"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',
+':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o',
+'d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{',
+'}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a',
+'n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'h','t','t','p','s',':','/','/','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u',
+'l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e',
+'d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{',
+'"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s',
+'i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s',
+'-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s',
+'3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g',
+'"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"',
+'t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D',
+'u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o',
+'i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e',
+'e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','u',
+'s','-','e','a','s','t','-','1','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{',
+'"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','p','a','r','t','i','t',
+'i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','/','{','u','r','i',
+'_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','R','e','g',
+'i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s',
+'S','u','f','f','i','x','}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k',
+'e','t','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',
+',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',
+',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',
+',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','R','e','g','i','o','n','}','.','{',
+'p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x',
+'}','/','{','u','r','i','_','e','n','c','o','d','e','d','_','b','u','c','k','e','t','}','"',',','"',
+'p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s',
+'"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i',
+'n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g',
+'n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o',
+'u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e',
+'a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',
+',','"','e','r','r','o','r','"',':','"','P','a','t','h','-','s','t','y','l','e',' ','a','d','d','r',
+'e','s','s','i','n','g',' ','c','a','n','n','o','t',' ','b','e',' ','u','s','e','d',' ','w','i','t',
+'h',' ','S','3',' ','A','c','c','e','l','e','r','a','t','e','"',',','"','t','y','p','e','"',':','"',
+'e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i','d',' ','p','a','r','t',
+'i','t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ','d','e','t','e','r','m',
+'i','n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',
+']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'U','s','e','O','b','j','e','c','t','L','a','m','b','d','a','E','n','d','p','o','i','n','t','"','}',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','O','b','j','e','c','t',
+'L','a','m','b','d','a','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',',
+'"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w','s','.','p',
+'a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':','"','p','a','r','t',
+'i','t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y','p','e','"',':','"','t','r',
+'e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s',
+'"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','t','r','u','e',']','}',']',
+',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t',
+'r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S',
+'t','a','c','k','"','}',',','t','r','u','e',']','}',']',',','"','e','r','r','o','r','"',':','"','S',
+'3',' ','O','b','j','e','c','t',' ','L','a','m','b','d','a',' ','d','o','e','s',' ','n','o','t',' ',
+'s','u','p','p','o','r','t',' ','D','u','a','l','-','s','t','a','c','k','"',',','"','t','y','p','e',
+'"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','A','c','c','e','l','e','r','a','t','e','"','}',',','t','r','u','e',']','}',']',',',
+'"','e','r','r','o','r','"',':','"','S','3',' ','O','b','j','e','c','t',' ','L','a','m','b','d','a',
+' ','d','o','e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','S','3',' ','A','c','c','e',
+'l','e','r','a','t','e','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t',
+'r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n',
+'s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"',
+'}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t','A','t',
+'t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','p','a','r','t','i',
+'t','i','o','n','R','e','s','u','l','t','"','}',',','"','n','a','m','e','"',']','}',',','"','a','w',
+'s','-','c','n','"',']','}',']',',','"','e','r','r','o','r','"',':','"','P','a','r','t','i','t','i',
+'o','n',' ','d','o','e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','F','I','P','S','"',
+',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r',
+'u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f',
+'n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r',
+'s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l',
+'#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y',
+'}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',
+':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':',
+'"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"',
+'{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','"',',','"','d','i','s','a','b','l',
+'e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',',
+'"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d',
+'p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',
+']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t',
+'t','p','s',':','/','/','s','3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','-','f','i',
+'p','s','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s',
+'u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e',
+'"',':','"','s','3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t',
+'p','s',':','/','/','s','3','-','o','b','j','e','c','t','-','l','a','m','b','d','a','.','{','R','e',
+'g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n',
+'s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e',
+'g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','-',
+'o','b','j','e','c','t','-','l','a','m','b','d','a','"',',','"','d','i','s','a','b','l','e','D','o',
+'u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e',
+'a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a','l','i','d',' ','r',
+'e','g','i','o','n',':',' ','r','e','g','i','o','n',' ','w','a','s',' ','n','o','t',' ','a',' ','v',
+'a','l','i','d',' ','D','N','S',' ','n','a','m','e','.','"',',','"','t','y','p','e','"',':','"','e',
+'r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':',
+'[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i','d',' ','p','a','r','t','i',
+'t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ','d','e','t','e','r','m','i',
+'n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','n','o','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','B','u','c','k','e','t','"','}',']','}',']','}',
+']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','a','w','s',
+'.','p','a','r','t','i','t','i','o','n','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','R','e','g','i','o','n','"','}',']',',','"','a','s','s','i','g','n','"',':','"','p','a',
+'r','t','i','t','i','o','n','R','e','s','u','l','t','"','}',']',',','"','t','y','p','e','"',':','"',
+'t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l',
+'e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','i','s','V','a','l','i','d','H','o','s','t','L','a','b','e','l','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','t','r','u','e',']',
+'}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':',
+'[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P',
+'S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','g','e','t',
+'A','t','t','r','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','p','a','r',
+'t','i','t','i','o','n','R','e','s','u','l','t','"','}',',','"','n','a','m','e','"',']','}',',','"',
+'a','w','s','-','c','n','"',']','}',']',',','"','e','r','r','o','r','"',':','"','P','a','r','t','i',
+'t','i','o','n',' ','d','o','e','s',' ','n','o','t',' ','s','u','p','p','o','r','t',' ','F','I','P',
+'S','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u',
+'e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S',
+'t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{',
+'u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r',
+'i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n',
+'"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':',
+'"','u','r','l','"','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n',
+'"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o',
+'i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',
+':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a',
+'t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-',
+'1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i',
+'s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e',
+'}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':',
+'"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',',
+'t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u',
+'a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','i',
+'s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d',
+'p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n',
+'t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n',
+'"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t',
+'r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']',
+'}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a',
+'l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e',
+'"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a',
+'u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n',
+'g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l',
+'e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e',
+'r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"',
+'}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+'}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',
+'}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s',
+'s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','n','o','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o',
+'n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n',
+'t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{',
+'"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r',
+'l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"',
+'p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s',
+'"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i',
+'n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g',
+'n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o',
+'u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e',
+'a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i',
+'n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k',
+'"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s',
+'-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','d','u','a',
+'l','s','t','a','c','k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i',
+'o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i',
+'n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b',
+'l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d',
+'e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t',
+'"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"',
+'b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',
+',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g',
+'l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','d','u','a','l','s',
+'t','a','c','k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n',
+'R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n',
+'"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t',
+'r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']',
+'}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a',
+'l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e',
+'"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','d','u','a','l','s',
+'t','a','c','k','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R',
+'e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o',
+'o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t',
+'r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':',
+'[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n',
+'"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t',
+'r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']',
+'}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a',
+'l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d',
+'p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3',
+'-','f','i','p','s','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}','.',
+'{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i',
+'x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c',
+'h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"',
+'s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',
+',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a',
+'b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']',
+'}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e',
+'n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r',
+'u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s',
+'S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p',
+'o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t',
+'"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h',
+'o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a',
+'l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{',
+'"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g',
+'n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n',
+'d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e',
+'m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l',
+'#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u',
+'t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v',
+'4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a',
+'s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E',
+'q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s',
+'e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n',
+'"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s',
+'e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d',
+'p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',',
+'{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G',
+'l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"',
+'t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c',
+'o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':',
+'{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u',
+'r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',',
+'"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e',
+'s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n',
+'i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i',
+'g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D',
+'o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h',
+'e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o',
+'i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',
+']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t',
+'a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"',
+'n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n',
+'g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',
+',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a',
+'r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n',
+'d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':',
+'/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t',
+'h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c',
+'h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"',
+'s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',
+',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a',
+'b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']',
+'}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e',
+'n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r',
+'u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"',
+'}',']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"',
+'}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','-','f','i',
+'p','s','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e',
+'s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t',
+'i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a',
+'m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o',
+'n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a',
+'m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n',
+'c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',
+':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{',
+'"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','h','t','t','p','s',':','/','/','s','3','-','f','i','p','s','.','u','s','-','e','a','s','t','-',
+'1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f',
+'f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h',
+'S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',
+',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t',
+'-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d',
+'i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u',
+'e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',
+':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',
+',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D',
+'u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',',
+'"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',',
+'t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"','r','u',
+'l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/',
+'s','3','-','f','i','p','s','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i',
+'o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o',
+'p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[',
+'{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R',
+'e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n',
+'g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l',
+'e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e',
+'r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"',
+'}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','t','r','u','e',']','}',',','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"',
+'}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',',
+'{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G',
+'l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',',
+'"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':',
+'/','/','s','3','-','f','i','p','s','.','{','R','e','g','i','o','n','}','.','{','p','a','r','t','i',
+'t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p',
+'r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',
+':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n',
+'g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n',
+'i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u',
+'b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a',
+'d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n',
+'t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k',
+'"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',
+']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a',
+'s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#',
+'s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}',
+'{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':',
+'{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"',
+'s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u',
+'s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"',
+'s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n',
+'g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',',
+'"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d',
+'i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"',
+'p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r',
+'l','"','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',',
+'"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/',
+'{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',',
+'"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','i','s','S',
+'e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"',
+'}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i',
+'n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',
+'}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E',
+'n','d','p','o','i','n','t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':',
+'"','t','r','e','e','"',',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',
+':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t',
+'h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']',
+'}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',
+',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',
+',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s',
+'i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n',
+'"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t',
+'"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l',
+'#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p',
+'r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',
+':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n',
+'g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n',
+'i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u',
+'b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a',
+'d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n',
+'t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r',
+'g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k',
+'"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r',
+'g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',',
+'{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s',
+'-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"',
+'u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','d','u','a','l','s','t','a','c',
+'k','.','u','s','-','e','a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s',
+'u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i',
+'e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m',
+'e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n',
+'"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m',
+'e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c',
+'o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':',
+'{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"',
+'c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u',
+'e',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{',
+'"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e',
+'f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':',
+'"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"',
+'r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a',
+'l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"',
+'h','t','t','p','s',':','/','/','s','3','.','d','u','a','l','s','t','a','c','k','.','u','s','-','e',
+'a','s','t','-','1','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n',
+'s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-',
+'e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3',
+'"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',
+':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t',
+'y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t',
+'i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I',
+'P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e',
+'a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n',
+'d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',',
+'"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a',
+'l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o',
+'n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n',
+'t','"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',
+',','"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',
+']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p',
+'s',':','/','/','s','3','.','d','u','a','l','s','t','a','c','k','.','{','R','e','g','i','o','n','}',
+'.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S','u','f','f',
+'i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S',
+'c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',',
+'"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}',
+'"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s',
+'a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',
+']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"',
+'e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s',
+'"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',
+',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e',
+'D','u','a','l','S','t','a','c','k','"','}',',','t','r','u','e',']','}',',','{','"','f','n','"',':',
+'"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i','s','S','e',
+'t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i',
+'n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',',
+'"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':','"','b',
+'o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"','}',',',
+'f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l',
+'"',':','"','h','t','t','p','s',':','/','/','s','3','.','d','u','a','l','s','t','a','c','k','.','{',
+'R','e','g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#',
+'d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':',
+'{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"',
+'s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{',
+'R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s',
+'3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g',
+'"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"',
+'t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F',
+'I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"',
+'p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':',
+'"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r',
+'l','"','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',',
+'"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',',
+'"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t',
+'"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/',
+'{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}',
+'"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e',
+'m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i',
+'g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',',
+'"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b',
+'l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',
+',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n',
+'d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l',
+'s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l',
+'S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s',
+'S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p',
+'o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',
+',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t',
+'"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',
+':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b',
+'a','l','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':',
+'"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h',
+'o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r',
+'t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n',
+'a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i',
+'o','n','"',':','"','u','s','-','e','a','s','t','-','1','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o',
+'l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f',
+'"',':','"','U','s','e','F','I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n',
+'"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':',
+'[','{','"','r','e','f','"',':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f',
+'a','l','s','e',']','}',',','{','"','f','n','"',':','"','i','s','S','e','t','"',',','"','a','r','g',
+'v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']','}',',',
+'{','"','f','n','"',':','"','p','a','r','s','e','U','R','L','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','E','n','d','p','o','i','n','t','"','}',']',',','"','a','s','s','i',
+'g','n','"',':','"','u','r','l','"','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"','a',
+'r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"',
+'}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',':',
+'"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{',
+'"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t','"',
+'}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',','"',
+'r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{','"',
+'f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',
+':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','u','s','-','e','a',
+'s','t','-','1','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r','l',
+'"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':','/','/','{','u','r','l','#','a','u',
+'t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t','h','}','"',',','"','p','r','o','p',
+'e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{',
+'"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e',
+'g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g',
+'N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e',
+'E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r',
+'s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',
+',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i',
+'n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h','e','m','e','}',':',
+'/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r','l','#','p','a','t',
+'h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u','t','h','S','c',
+'h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v','4','"',',','"',
+'s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',
+',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"','d','i','s','a',
+'b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r','u','e','}',']',
+'}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e','"',':','"','e',
+'n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',
+':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"',
+'a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S','"','}',',',
+'f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','D',
+'u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':',
+'"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E',
+'n','d','p','o','i','n','t','"','}',']','}',',','{','"','f','n','"',':','"','p','a','r','s','e','U',
+'R','L','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d','p','o',
+'i','n','t','"','}',']',',','"','a','s','s','i','g','n','"',':','"','u','r','l','"','}',',','{','"',
+'f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"',
+'s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r',
+'e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l',
+'"',']','}',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o',
+'b','a','l','E','n','d','p','o','i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e',
+'n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','{','u','r','l','#','s','c','h',
+'e','m','e','}',':','/','/','{','u','r','l','#','a','u','t','h','o','r','i','t','y','}','{','u','r',
+'l','#','p','a','t','h','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a',
+'u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g',
+'v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g',
+'i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f',
+'n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i',
+'s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d',
+'p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R',
+'e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"',
+'e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/',
+'/','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S',
+'u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u',
+'t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v',
+'4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a',
+'s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f',
+'n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i',
+'s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d',
+'p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','s','t','r','i','n','g',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R',
+'e','g','i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']',',','"',
+'e','n','d','p','o','i','n','t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/',
+'/','s','3','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S',
+'u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u',
+'t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v',
+'4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','u','s','-','e','a',
+'s','t','-','1','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',',
+'"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t',
+'r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p',
+'e','"',':','"','e','n','d','p','o','i','n','t','"','}',',','{','"','c','o','n','d','i','t','i','o',
+'n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s',
+'"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F','I','P','S',
+'"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l','e','a','n',
+'E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U',
+'s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',','{','"','f',
+'n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','i',
+'s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','E','n','d',
+'p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t','"',',','"',
+'a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l',
+'s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n',
+'"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"','f','n','"',
+':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[',
+'{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o','i','n','t',
+'"','}',',','t','r','u','e',']','}',']',',','"','t','y','p','e','"',':','"','t','r','e','e','"',',',
+'"','r','u','l','e','s','"',':','[','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[','{',
+'"','f','n','"',':','"','s','t','r','i','n','g','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','R','e','g','i','o','n','"','}',',','"','u','s','-','e',
+'a','s','t','-','1','"',']','}',']',',','"','e','n','d','p','o','i','n','t','"',':','{','"','u','r',
+'l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','p','a','r','t','i','t','i','o','n',
+'R','e','s','u','l','t','#','d','n','s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e',
+'r','t','i','e','s','"',':','{','"','a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"',
+'n','a','m','e','"',':','"','s','i','g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g',
+'i','o','n','"',':','"','{','R','e','g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N',
+'a','m','e','"',':','"','s','3','"',',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E',
+'n','c','o','d','i','n','g','"',':','t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s',
+'"',':','{','}','}',',','"','t','y','p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',',',
+'{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"','e','n','d','p','o','i','n',
+'t','"',':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','R','e',
+'g','i','o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n',
+'s','S','u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"',
+'a','u','t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i',
+'g','v','4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e',
+'g','i','o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',
+',','"','d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':',
+'t','r','u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y',
+'p','e','"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',',','{','"','c','o','n','d','i',
+'t','i','o','n','s','"',':','[','{','"','f','n','"',':','"','b','o','o','l','e','a','n','E','q','u',
+'a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','U','s','e','F',
+'I','P','S','"','}',',','f','a','l','s','e',']','}',',','{','"','f','n','"',':','"','b','o','o','l',
+'e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',
+':','"','U','s','e','D','u','a','l','S','t','a','c','k','"','}',',','f','a','l','s','e',']','}',',',
+'{','"','f','n','"',':','"','n','o','t','"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',
+':','"','i','s','S','e','t','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"',
+'E','n','d','p','o','i','n','t','"','}',']','}',']','}',',','{','"','f','n','"',':','"','n','o','t',
+'"',',','"','a','r','g','v','"',':','[','{','"','f','n','"',':','"','s','t','r','i','n','g','E','q',
+'u','a','l','s','"',',','"','a','r','g','v','"',':','[','{','"','r','e','f','"',':','"','R','e','g',
+'i','o','n','"','}',',','"','a','w','s','-','g','l','o','b','a','l','"',']','}',']','}',',','{','"',
+'f','n','"',':','"','b','o','o','l','e','a','n','E','q','u','a','l','s','"',',','"','a','r','g','v',
+'"',':','[','{','"','r','e','f','"',':','"','U','s','e','G','l','o','b','a','l','E','n','d','p','o',
+'i','n','t','"','}',',','f','a','l','s','e',']','}',']',',','"','e','n','d','p','o','i','n','t','"',
+':','{','"','u','r','l','"',':','"','h','t','t','p','s',':','/','/','s','3','.','{','R','e','g','i',
+'o','n','}','.','{','p','a','r','t','i','t','i','o','n','R','e','s','u','l','t','#','d','n','s','S',
+'u','f','f','i','x','}','"',',','"','p','r','o','p','e','r','t','i','e','s','"',':','{','"','a','u',
+'t','h','S','c','h','e','m','e','s','"',':','[','{','"','n','a','m','e','"',':','"','s','i','g','v',
+'4','"',',','"','s','i','g','n','i','n','g','R','e','g','i','o','n','"',':','"','{','R','e','g','i',
+'o','n','}','"',',','"','s','i','g','n','i','n','g','N','a','m','e','"',':','"','s','3','"',',','"',
+'d','i','s','a','b','l','e','D','o','u','b','l','e','E','n','c','o','d','i','n','g','"',':','t','r',
+'u','e','}',']','}',',','"','h','e','a','d','e','r','s','"',':','{','}','}',',','"','t','y','p','e',
+'"',':','"','e','n','d','p','o','i','n','t','"','}',']','}',']','}',']','}',',','{','"','c','o','n',
+'d','i','t','i','o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','I','n','v','a',
+'l','i','d',' ','r','e','g','i','o','n',':',' ','r','e','g','i','o','n',' ','w','a','s',' ','n','o',
+'t',' ','a',' ','v','a','l','i','d',' ','D','N','S',' ','n','a','m','e','.','"',',','"','t','y','p',
+'e','"',':','"','e','r','r','o','r','"','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i',
+'o','n','s','"',':','[',']',',','"','e','r','r','o','r','"',':','"','A',' ','v','a','l','i','d',' ',
+'p','a','r','t','i','t','i','o','n',' ','c','o','u','l','d',' ','n','o','t',' ','b','e',' ','d','e',
+'t','e','r','m','i','n','e','d','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',
+']','}',']','}',']','}',',','{','"','c','o','n','d','i','t','i','o','n','s','"',':','[',']',',','"',
+'e','r','r','o','r','"',':','"','A',' ','r','e','g','i','o','n',' ','m','u','s','t',' ','b','e',' ',
+'s','e','t',' ','w','h','e','n',' ','s','e','n','d','i','n','g',' ','r','e','q','u','e','s','t','s',
+' ','t','o',' ','S','3','.','"',',','"','t','y','p','e','"',':','"','e','r','r','o','r','"','}',']',
+'}',']','}','\0'
+}};
+
+const char* S3EndpointRules::GetRulesBlob()
+{
+ return RulesBlob.data();
+}
+
+} // namespace S3
+} // namespace Aws
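
The new S3EndpointRules.cpp embeds the S3 endpoint ruleset as a NUL-terminated character blob and exposes it through GetRulesBlob(). A minimal consumption sketch, assuming the usual <aws/s3/S3EndpointRules.h> header and a static GetRulesBlob() declaration (neither is shown in this patch):

#include <aws/core/Aws.h>
#include <aws/core/utils/json/JsonSerializer.h>
#include <aws/s3/S3EndpointRules.h> // header path assumed
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        // The blob is the JSON ruleset used by the rules-based endpoint provider.
        const char* blob = Aws::S3::S3EndpointRules::GetRulesBlob();
        Aws::Utils::Json::JsonValue rules{Aws::String(blob)};
        std::cout << "rules blob parsed: " << std::boolalpha
                  << rules.WasParseSuccessful() << std::endl;
    }
    Aws::ShutdownAPI(options);
    return 0;
}
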
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
new file mode 100644
index 0000000000..1b93c81846
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/S3Request.cpp
@@ -0,0 +1,14 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+
+#include <aws/s3/S3Request.h>
+
+namespace Aws
+{
+namespace S3
+{
+} // namespace S3
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
index 4d8645caab..299825faff 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/AbortMultipartUploadRequest.cpp
@@ -79,3 +79,13 @@ Aws::Http::HeaderValueCollection AbortMultipartUploadRequest::GetRequestSpecific
return headers;
}
+
+AbortMultipartUploadRequest::EndpointParameters AbortMultipartUploadRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
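
Each request now reports its endpoint context parameters (here the bucket name) so the rules-based endpoint provider can resolve a bucket-specific endpoint. A small inspection sketch; the SetBucket accessor and EndpointParameter::GetName() are assumed from the SDK's usual generated API and are not part of this hunk:

#include <aws/core/Aws.h>
#include <aws/s3/model/AbortMultipartUploadRequest.h>
#include <iostream>

int main()
{
    Aws::SDKOptions options;
    Aws::InitAPI(options);
    {
        Aws::S3::Model::AbortMultipartUploadRequest request;
        request.SetBucket("example-bucket"); // accessor assumed

        // With the bucket set, one OPERATION_CONTEXT parameter ("Bucket") is expected.
        auto params = request.GetEndpointContextParams();
        std::cout << "endpoint context parameters: " << params.size() << std::endl;
        for (const auto& p : params)
        {
            std::cout << "  name: " << p.GetName() << std::endl; // GetName() assumed
        }
    }
    Aws::ShutdownAPI(options);
    return 0;
}
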
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Bucket.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Bucket.cpp
index 1ad9b5f310..cf66979ae2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Bucket.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Bucket.cpp
@@ -48,7 +48,7 @@ Bucket& Bucket::operator =(const XmlNode& xmlNode)
XmlNode creationDateNode = resultNode.FirstChild("CreationDate");
if(!creationDateNode.IsNull())
{
- m_creationDate = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(creationDateNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_creationDate = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(creationDateNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_creationDateHasBeenSet = true;
}
}
@@ -68,7 +68,7 @@ void Bucket::AddToNode(XmlNode& parentNode) const
if(m_creationDateHasBeenSet)
{
XmlNode creationDateNode = parentNode.CreateChildElement("CreationDate");
- creationDateNode.SetText(m_creationDate.ToGmtString(DateFormat::ISO_8601));
+ creationDateNode.SetText(m_creationDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
}
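
The DateFormat enum is now referenced through its full Aws::Utils:: qualification; the behaviour is unchanged. A quick round-trip sketch of the same call pattern used in Bucket::operator= and Bucket::AddToNode:

#include <aws/core/utils/DateTime.h>
#include <iostream>

int main()
{
    // Parse an ISO-8601 timestamp, then format it back for XML serialization.
    Aws::Utils::DateTime creationDate("2023-12-03T13:33:55Z", Aws::Utils::DateFormat::ISO_8601);
    if (creationDate.WasParseSuccessful())
    {
        std::cout << creationDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601) << std::endl;
    }
    return 0;
}
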
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/BucketLocationConstraint.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/BucketLocationConstraint.cpp
index 1bcdd2b471..4b83e3bad1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/BucketLocationConstraint.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/BucketLocationConstraint.cpp
@@ -28,6 +28,7 @@ namespace Aws
static const int ap_south_1_HASH = HashingUtils::HashString("ap-south-1");
static const int ap_southeast_1_HASH = HashingUtils::HashString("ap-southeast-1");
static const int ap_southeast_2_HASH = HashingUtils::HashString("ap-southeast-2");
+ static const int ap_southeast_3_HASH = HashingUtils::HashString("ap-southeast-3");
static const int ca_central_1_HASH = HashingUtils::HashString("ca-central-1");
static const int cn_north_1_HASH = HashingUtils::HashString("cn-north-1");
static const int cn_northwest_1_HASH = HashingUtils::HashString("cn-northwest-1");
@@ -45,6 +46,7 @@ namespace Aws
static const int us_gov_west_1_HASH = HashingUtils::HashString("us-gov-west-1");
static const int us_west_1_HASH = HashingUtils::HashString("us-west-1");
static const int us_west_2_HASH = HashingUtils::HashString("us-west-2");
+ static const int us_iso_west_1_HASH = HashingUtils::HashString("us-iso-west-1");
static const int us_east_1_HASH = HashingUtils::HashString("us-east-1");
@@ -83,6 +85,10 @@ namespace Aws
{
return BucketLocationConstraint::ap_southeast_2;
}
+ else if (hashCode == ap_southeast_3_HASH)
+ {
+ return BucketLocationConstraint::ap_southeast_3;
+ }
else if (hashCode == ca_central_1_HASH)
{
return BucketLocationConstraint::ca_central_1;
@@ -151,6 +157,10 @@ namespace Aws
{
return BucketLocationConstraint::us_west_2;
}
+ else if (hashCode == us_iso_west_1_HASH)
+ {
+ return BucketLocationConstraint::us_iso_west_1;
+ }
else if (hashCode == us_east_1_HASH)
{
return BucketLocationConstraint::us_east_1;
@@ -185,6 +195,8 @@ namespace Aws
return "ap-southeast-1";
case BucketLocationConstraint::ap_southeast_2:
return "ap-southeast-2";
+ case BucketLocationConstraint::ap_southeast_3:
+ return "ap-southeast-3";
case BucketLocationConstraint::ca_central_1:
return "ca-central-1";
case BucketLocationConstraint::cn_north_1:
@@ -219,6 +231,8 @@ namespace Aws
return "us-west-1";
case BucketLocationConstraint::us_west_2:
return "us-west-2";
+ case BucketLocationConstraint::us_iso_west_1:
+ return "us-iso-west-1";
case BucketLocationConstraint::us_east_1:
return "us-east-1";
default:
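
ap-southeast-3 and us-iso-west-1 are added to the bucket location-constraint mapping. A round-trip sketch; the mapper function names follow the SDK's usual *Mapper convention and are assumed, since the enclosing declarations sit outside this hunk:

#include <aws/s3/model/BucketLocationConstraint.h>
#include <iostream>

int main()
{
    using namespace Aws::S3::Model;

    // Assumed mapper functions: GetBucketLocationConstraintForName / GetNameForBucketLocationConstraint.
    BucketLocationConstraint constraint =
        BucketLocationConstraintMapper::GetBucketLocationConstraintForName("ap-southeast-3");

    if (constraint == BucketLocationConstraint::ap_southeast_3)
    {
        // Prints "ap-southeast-3".
        std::cout << BucketLocationConstraintMapper::GetNameForBucketLocationConstraint(constraint)
                  << std::endl;
    }
    return 0;
}
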
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
new file mode 100644
index 0000000000..bd4e0f4dc4
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Checksum.cpp
@@ -0,0 +1,106 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/Checksum.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+
+#include <utility>
+
+using namespace Aws::Utils::Xml;
+using namespace Aws::Utils;
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+
+Checksum::Checksum() :
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
+{
+}
+
+Checksum::Checksum(const XmlNode& xmlNode) :
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
+{
+ *this = xmlNode;
+}
+
+Checksum& Checksum::operator =(const XmlNode& xmlNode)
+{
+ XmlNode resultNode = xmlNode;
+
+ if(!resultNode.IsNull())
+ {
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ m_checksumCRC32HasBeenSet = true;
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ m_checksumCRC32CHasBeenSet = true;
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ m_checksumSHA1HasBeenSet = true;
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ m_checksumSHA256HasBeenSet = true;
+ }
+ }
+
+ return *this;
+}
+
+void Checksum::AddToNode(XmlNode& parentNode) const
+{
+ Aws::StringStream ss;
+ if(m_checksumCRC32HasBeenSet)
+ {
+ XmlNode checksumCRC32Node = parentNode.CreateChildElement("ChecksumCRC32");
+ checksumCRC32Node.SetText(m_checksumCRC32);
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ XmlNode checksumCRC32CNode = parentNode.CreateChildElement("ChecksumCRC32C");
+ checksumCRC32CNode.SetText(m_checksumCRC32C);
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ XmlNode checksumSHA1Node = parentNode.CreateChildElement("ChecksumSHA1");
+ checksumSHA1Node.SetText(m_checksumSHA1);
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ XmlNode checksumSHA256Node = parentNode.CreateChildElement("ChecksumSHA256");
+ checksumSHA256Node.SetText(m_checksumSHA256);
+ }
+
+}
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
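
Checksum is a new XML-backed model carrying the four checksum values. A sketch of deserializing one from an XML fragment; the per-field getters (GetChecksumSHA256 and so on) follow the generated accessor convention and are assumed, as the header is not part of this hunk:

#include <aws/core/utils/xml/XmlSerializer.h>
#include <aws/s3/model/Checksum.h>
#include <iostream>

int main()
{
    const Aws::String xml =
        "<Checksum>"
        "<ChecksumCRC32>AAAAAA==</ChecksumCRC32>"
        "<ChecksumSHA256>47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=</ChecksumSHA256>"
        "</Checksum>";

    Aws::Utils::Xml::XmlDocument doc = Aws::Utils::Xml::XmlDocument::CreateFromXmlString(xml);
    if (!doc.WasParseSuccessful())
    {
        return 1;
    }

    // Checksum::operator=(const XmlNode&) reads the child elements shown above.
    Aws::S3::Model::Checksum checksum(doc.GetRootElement());
    std::cout << "SHA256: " << checksum.GetChecksumSHA256() << std::endl; // getter assumed
    return 0;
}
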
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
new file mode 100644
index 0000000000..674e726dd7
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumAlgorithm.cpp
@@ -0,0 +1,84 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/ChecksumAlgorithm.h>
+#include <aws/core/utils/HashingUtils.h>
+#include <aws/core/Globals.h>
+#include <aws/core/utils/EnumParseOverflowContainer.h>
+
+using namespace Aws::Utils;
+
+
+namespace Aws
+{
+ namespace S3
+ {
+ namespace Model
+ {
+ namespace ChecksumAlgorithmMapper
+ {
+
+ static const int CRC32_HASH = HashingUtils::HashString("CRC32");
+ static const int CRC32C_HASH = HashingUtils::HashString("CRC32C");
+ static const int SHA1_HASH = HashingUtils::HashString("SHA1");
+ static const int SHA256_HASH = HashingUtils::HashString("SHA256");
+
+
+ ChecksumAlgorithm GetChecksumAlgorithmForName(const Aws::String& name)
+ {
+ int hashCode = HashingUtils::HashString(name.c_str());
+ if (hashCode == CRC32_HASH)
+ {
+ return ChecksumAlgorithm::CRC32;
+ }
+ else if (hashCode == CRC32C_HASH)
+ {
+ return ChecksumAlgorithm::CRC32C;
+ }
+ else if (hashCode == SHA1_HASH)
+ {
+ return ChecksumAlgorithm::SHA1;
+ }
+ else if (hashCode == SHA256_HASH)
+ {
+ return ChecksumAlgorithm::SHA256;
+ }
+ EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
+ if(overflowContainer)
+ {
+ overflowContainer->StoreOverflow(hashCode, name);
+ return static_cast<ChecksumAlgorithm>(hashCode);
+ }
+
+ return ChecksumAlgorithm::NOT_SET;
+ }
+
+ Aws::String GetNameForChecksumAlgorithm(ChecksumAlgorithm enumValue)
+ {
+ switch(enumValue)
+ {
+ case ChecksumAlgorithm::CRC32:
+ return "CRC32";
+ case ChecksumAlgorithm::CRC32C:
+ return "CRC32C";
+ case ChecksumAlgorithm::SHA1:
+ return "SHA1";
+ case ChecksumAlgorithm::SHA256:
+ return "SHA256";
+ default:
+ EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
+ if(overflowContainer)
+ {
+ return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
+ }
+
+ return {};
+ }
+ }
+
+ } // namespace ChecksumAlgorithmMapper
+ } // namespace Model
+ } // namespace S3
+} // namespace Aws
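
The new ChecksumAlgorithmMapper converts between the wire value used in x-amz-checksum-algorithm and the ChecksumAlgorithm enum; both directions appear in the hunk above. A minimal usage sketch:

#include <aws/s3/model/ChecksumAlgorithm.h>
#include <iostream>

int main()
{
    using namespace Aws::S3::Model;

    ChecksumAlgorithm algorithm = ChecksumAlgorithmMapper::GetChecksumAlgorithmForName("SHA256");
    if (algorithm == ChecksumAlgorithm::SHA256)
    {
        // Round-trips back to the header value.
        std::cout << ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(algorithm) << std::endl;
    }
    return 0;
}
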
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
new file mode 100644
index 0000000000..d3085bfb8a
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ChecksumMode.cpp
@@ -0,0 +1,63 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/ChecksumMode.h>
+#include <aws/core/utils/HashingUtils.h>
+#include <aws/core/Globals.h>
+#include <aws/core/utils/EnumParseOverflowContainer.h>
+
+using namespace Aws::Utils;
+
+
+namespace Aws
+{
+ namespace S3
+ {
+ namespace Model
+ {
+ namespace ChecksumModeMapper
+ {
+
+ static const int ENABLED_HASH = HashingUtils::HashString("ENABLED");
+
+
+ ChecksumMode GetChecksumModeForName(const Aws::String& name)
+ {
+ int hashCode = HashingUtils::HashString(name.c_str());
+ if (hashCode == ENABLED_HASH)
+ {
+ return ChecksumMode::ENABLED;
+ }
+ EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
+ if(overflowContainer)
+ {
+ overflowContainer->StoreOverflow(hashCode, name);
+ return static_cast<ChecksumMode>(hashCode);
+ }
+
+ return ChecksumMode::NOT_SET;
+ }
+
+ Aws::String GetNameForChecksumMode(ChecksumMode enumValue)
+ {
+ switch(enumValue)
+ {
+ case ChecksumMode::ENABLED:
+ return "ENABLED";
+ default:
+ EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
+ if(overflowContainer)
+ {
+ return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
+ }
+
+ return {};
+ }
+ }
+
+ } // namespace ChecksumModeMapper
+ } // namespace Model
+ } // namespace S3
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
index 8c058ba507..03a8a23f96 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadRequest.cpp
@@ -21,13 +21,42 @@ CompleteMultipartUploadRequest::CompleteMultipartUploadRequest() :
m_keyHasBeenSet(false),
m_multipartUploadHasBeenSet(false),
m_uploadIdHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false),
m_requestPayer(RequestPayer::NOT_SET),
m_requestPayerHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_sSECustomerAlgorithmHasBeenSet(false),
+ m_sSECustomerKeyHasBeenSet(false),
+ m_sSECustomerKeyMD5HasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
+bool CompleteMultipartUploadRequest::HasEmbeddedError(Aws::IOStream &body,
+ const Aws::Http::HeaderValueCollection &header) const
+{
+ // Header is unused
+ (void) header;
+
+ auto readPointer = body.tellg();
+ XmlDocument doc = XmlDocument::CreateFromXmlStream(body);
+
+ if (!doc.WasParseSuccessful()) {
+ body.seekg(readPointer);
+ return false;
+ }
+
+ if (doc.GetRootElement().GetName() == "Error") {
+ body.seekg(readPointer);
+ return true;
+ }
+ body.seekg(readPointer);
+ return false;
+}
+
Aws::String CompleteMultipartUploadRequest::SerializePayload() const
{
XmlDocument payloadDoc = XmlDocument::CreateWithRootNode("CompleteMultipartUpload");
@@ -77,6 +106,34 @@ Aws::Http::HeaderValueCollection CompleteMultipartUploadRequest::GetRequestSpeci
{
Aws::Http::HeaderValueCollection headers;
Aws::StringStream ss;
+ if(m_checksumCRC32HasBeenSet)
+ {
+ ss << m_checksumCRC32;
+ headers.emplace("x-amz-checksum-crc32", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ ss << m_checksumCRC32C;
+ headers.emplace("x-amz-checksum-crc32c", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ ss << m_checksumSHA1;
+ headers.emplace("x-amz-checksum-sha1", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ ss << m_checksumSHA256;
+ headers.emplace("x-amz-checksum-sha256", ss.str());
+ ss.str("");
+ }
+
if(m_requestPayerHasBeenSet)
{
headers.emplace("x-amz-request-payer", RequestPayerMapper::GetNameForRequestPayer(m_requestPayer));
@@ -89,5 +146,36 @@ Aws::Http::HeaderValueCollection CompleteMultipartUploadRequest::GetRequestSpeci
ss.str("");
}
+ if(m_sSECustomerAlgorithmHasBeenSet)
+ {
+ ss << m_sSECustomerAlgorithm;
+ headers.emplace("x-amz-server-side-encryption-customer-algorithm", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerKeyHasBeenSet)
+ {
+ ss << m_sSECustomerKey;
+ headers.emplace("x-amz-server-side-encryption-customer-key", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerKeyMD5HasBeenSet)
+ {
+ ss << m_sSECustomerKeyMD5;
+ headers.emplace("x-amz-server-side-encryption-customer-key-md5", ss.str());
+ ss.str("");
+ }
+
return headers;
}
+
+CompleteMultipartUploadRequest::EndpointParameters CompleteMultipartUploadRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
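
CompleteMultipartUpload can return HTTP 200 with an <Error> document in the body; HasEmbeddedError lets the client detect that case. A standalone sketch of the same check applied to an in-memory stream, mirroring the logic above:

#include <aws/core/utils/memory/stl/AWSStreamFwd.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/core/utils/xml/XmlSerializer.h>
#include <iostream>

// Mirrors the HasEmbeddedError pattern: parse the body, look for a top-level <Error>,
// and always restore the stream position so the normal result parser can still run.
static bool HasEmbeddedError(Aws::IOStream& body)
{
    auto readPointer = body.tellg();
    Aws::Utils::Xml::XmlDocument doc = Aws::Utils::Xml::XmlDocument::CreateFromXmlStream(body);
    bool isError = doc.WasParseSuccessful() && doc.GetRootElement().GetName() == "Error";
    body.seekg(readPointer);
    return isError;
}

int main()
{
    Aws::StringStream okBody("<CompleteMultipartUploadResult></CompleteMultipartUploadResult>");
    Aws::StringStream errBody("<Error><Code>InternalError</Code></Error>");

    std::cout << std::boolalpha
              << HasEmbeddedError(okBody) << " "        // false
              << HasEmbeddedError(errBody) << std::endl; // true
    return 0;
}
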
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadResult.cpp
index da3f191993..ad87777a2c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompleteMultipartUploadResult.cpp
@@ -58,6 +58,26 @@ CompleteMultipartUploadResult& CompleteMultipartUploadResult::operator =(const A
{
m_eTag = Aws::Utils::Xml::DecodeEscapedXmlText(eTagNode.GetText());
}
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ }
}
const auto& headers = result.GetHeaderValueCollection();
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompletedPart.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompletedPart.cpp
index 8fb63ee1f3..a6bce6a9f4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompletedPart.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CompletedPart.cpp
@@ -22,6 +22,10 @@ namespace Model
CompletedPart::CompletedPart() :
m_eTagHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false),
m_partNumber(0),
m_partNumberHasBeenSet(false)
{
@@ -29,6 +33,10 @@ CompletedPart::CompletedPart() :
CompletedPart::CompletedPart(const XmlNode& xmlNode) :
m_eTagHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false),
m_partNumber(0),
m_partNumberHasBeenSet(false)
{
@@ -47,6 +55,30 @@ CompletedPart& CompletedPart::operator =(const XmlNode& xmlNode)
m_eTag = Aws::Utils::Xml::DecodeEscapedXmlText(eTagNode.GetText());
m_eTagHasBeenSet = true;
}
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ m_checksumCRC32HasBeenSet = true;
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ m_checksumCRC32CHasBeenSet = true;
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ m_checksumSHA1HasBeenSet = true;
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ m_checksumSHA256HasBeenSet = true;
+ }
XmlNode partNumberNode = resultNode.FirstChild("PartNumber");
if(!partNumberNode.IsNull())
{
@@ -67,6 +99,30 @@ void CompletedPart::AddToNode(XmlNode& parentNode) const
eTagNode.SetText(m_eTag);
}
+ if(m_checksumCRC32HasBeenSet)
+ {
+ XmlNode checksumCRC32Node = parentNode.CreateChildElement("ChecksumCRC32");
+ checksumCRC32Node.SetText(m_checksumCRC32);
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ XmlNode checksumCRC32CNode = parentNode.CreateChildElement("ChecksumCRC32C");
+ checksumCRC32CNode.SetText(m_checksumCRC32C);
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ XmlNode checksumSHA1Node = parentNode.CreateChildElement("ChecksumSHA1");
+ checksumSHA1Node.SetText(m_checksumSHA1);
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ XmlNode checksumSHA256Node = parentNode.CreateChildElement("ChecksumSHA256");
+ checksumSHA256Node.SetText(m_checksumSHA256);
+ }
+
if(m_partNumberHasBeenSet)
{
XmlNode partNumberNode = parentNode.CreateChildElement("PartNumber");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectRequest.cpp
index 94493cfdcf..e4e1b52819 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectRequest.cpp
@@ -21,6 +21,8 @@ CopyObjectRequest::CopyObjectRequest() :
m_aCLHasBeenSet(false),
m_bucketHasBeenSet(false),
m_cacheControlHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_contentDispositionHasBeenSet(false),
m_contentEncodingHasBeenSet(false),
m_contentLanguageHasBeenSet(false),
@@ -70,6 +72,28 @@ CopyObjectRequest::CopyObjectRequest() :
{
}
+bool CopyObjectRequest::HasEmbeddedError(Aws::IOStream &body,
+ const Aws::Http::HeaderValueCollection &header) const
+{
+ // Header is unused
+ (void) header;
+
+ auto readPointer = body.tellg();
+ XmlDocument doc = XmlDocument::CreateFromXmlStream(body);
+
+ if (!doc.WasParseSuccessful()) {
+ body.seekg(readPointer);
+ return false;
+ }
+
+ if (doc.GetRootElement().GetName() == "Error") {
+ body.seekg(readPointer);
+ return true;
+ }
+ body.seekg(readPointer);
+ return false;
+}
+
Aws::String CopyObjectRequest::SerializePayload() const
{
return {};
@@ -113,6 +137,11 @@ Aws::Http::HeaderValueCollection CopyObjectRequest::GetRequestSpecificHeaders()
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_contentDispositionHasBeenSet)
{
ss << m_contentDisposition;
@@ -157,7 +186,7 @@ Aws::Http::HeaderValueCollection CopyObjectRequest::GetRequestSpecificHeaders()
if(m_copySourceIfModifiedSinceHasBeenSet)
{
- headers.emplace("x-amz-copy-source-if-modified-since", m_copySourceIfModifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("x-amz-copy-source-if-modified-since", m_copySourceIfModifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_copySourceIfNoneMatchHasBeenSet)
@@ -169,12 +198,12 @@ Aws::Http::HeaderValueCollection CopyObjectRequest::GetRequestSpecificHeaders()
if(m_copySourceIfUnmodifiedSinceHasBeenSet)
{
- headers.emplace("x-amz-copy-source-if-unmodified-since", m_copySourceIfUnmodifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("x-amz-copy-source-if-unmodified-since", m_copySourceIfUnmodifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_expiresHasBeenSet)
{
- headers.emplace("expires", m_expires.ToGmtString(DateFormat::RFC822));
+ headers.emplace("expires", m_expires.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_grantFullControlHasBeenSet)
@@ -324,7 +353,7 @@ Aws::Http::HeaderValueCollection CopyObjectRequest::GetRequestSpecificHeaders()
if(m_objectLockRetainUntilDateHasBeenSet)
{
- headers.emplace("x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(DateFormat::ISO_8601));
+ headers.emplace("x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_objectLockLegalHoldStatusHasBeenSet)
@@ -348,3 +377,13 @@ Aws::Http::HeaderValueCollection CopyObjectRequest::GetRequestSpecificHeaders()
return headers;
}
+
+CopyObjectRequest::EndpointParameters CopyObjectRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectResultDetails.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectResultDetails.cpp
index 6e03159046..313d2cde14 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectResultDetails.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyObjectResultDetails.cpp
@@ -22,13 +22,21 @@ namespace Model
CopyObjectResultDetails::CopyObjectResultDetails() :
m_eTagHasBeenSet(false),
- m_lastModifiedHasBeenSet(false)
+ m_lastModifiedHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
{
}
CopyObjectResultDetails::CopyObjectResultDetails(const XmlNode& xmlNode) :
m_eTagHasBeenSet(false),
- m_lastModifiedHasBeenSet(false)
+ m_lastModifiedHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
{
*this = xmlNode;
}
@@ -48,9 +56,33 @@ CopyObjectResultDetails& CopyObjectResultDetails::operator =(const XmlNode& xmlN
XmlNode lastModifiedNode = resultNode.FirstChild("LastModified");
if(!lastModifiedNode.IsNull())
{
- m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_lastModifiedHasBeenSet = true;
}
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ m_checksumCRC32HasBeenSet = true;
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ m_checksumCRC32CHasBeenSet = true;
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ m_checksumSHA1HasBeenSet = true;
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ m_checksumSHA256HasBeenSet = true;
+ }
}
return *this;
@@ -68,7 +100,31 @@ void CopyObjectResultDetails::AddToNode(XmlNode& parentNode) const
if(m_lastModifiedHasBeenSet)
{
XmlNode lastModifiedNode = parentNode.CreateChildElement("LastModified");
- lastModifiedNode.SetText(m_lastModified.ToGmtString(DateFormat::ISO_8601));
+ lastModifiedNode.SetText(m_lastModified.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
+ }
+
+ if(m_checksumCRC32HasBeenSet)
+ {
+ XmlNode checksumCRC32Node = parentNode.CreateChildElement("ChecksumCRC32");
+ checksumCRC32Node.SetText(m_checksumCRC32);
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ XmlNode checksumCRC32CNode = parentNode.CreateChildElement("ChecksumCRC32C");
+ checksumCRC32CNode.SetText(m_checksumCRC32C);
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ XmlNode checksumSHA1Node = parentNode.CreateChildElement("ChecksumSHA1");
+ checksumSHA1Node.SetText(m_checksumSHA1);
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ XmlNode checksumSHA256Node = parentNode.CreateChildElement("ChecksumSHA256");
+ checksumSHA256Node.SetText(m_checksumSHA256);
}
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyPartResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyPartResult.cpp
index f8f5c9809c..6c8f942caf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyPartResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CopyPartResult.cpp
@@ -22,13 +22,21 @@ namespace Model
CopyPartResult::CopyPartResult() :
m_eTagHasBeenSet(false),
- m_lastModifiedHasBeenSet(false)
+ m_lastModifiedHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
{
}
CopyPartResult::CopyPartResult(const XmlNode& xmlNode) :
m_eTagHasBeenSet(false),
- m_lastModifiedHasBeenSet(false)
+ m_lastModifiedHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
{
*this = xmlNode;
}
@@ -48,9 +56,33 @@ CopyPartResult& CopyPartResult::operator =(const XmlNode& xmlNode)
XmlNode lastModifiedNode = resultNode.FirstChild("LastModified");
if(!lastModifiedNode.IsNull())
{
- m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_lastModifiedHasBeenSet = true;
}
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ m_checksumCRC32HasBeenSet = true;
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ m_checksumCRC32CHasBeenSet = true;
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ m_checksumSHA1HasBeenSet = true;
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ m_checksumSHA256HasBeenSet = true;
+ }
}
return *this;
@@ -68,7 +100,31 @@ void CopyPartResult::AddToNode(XmlNode& parentNode) const
if(m_lastModifiedHasBeenSet)
{
XmlNode lastModifiedNode = parentNode.CreateChildElement("LastModified");
- lastModifiedNode.SetText(m_lastModified.ToGmtString(DateFormat::ISO_8601));
+ lastModifiedNode.SetText(m_lastModified.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
+ }
+
+ if(m_checksumCRC32HasBeenSet)
+ {
+ XmlNode checksumCRC32Node = parentNode.CreateChildElement("ChecksumCRC32");
+ checksumCRC32Node.SetText(m_checksumCRC32);
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ XmlNode checksumCRC32CNode = parentNode.CreateChildElement("ChecksumCRC32C");
+ checksumCRC32CNode.SetText(m_checksumCRC32C);
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ XmlNode checksumSHA1Node = parentNode.CreateChildElement("ChecksumSHA1");
+ checksumSHA1Node.SetText(m_checksumSHA1);
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ XmlNode checksumSHA256Node = parentNode.CreateChildElement("ChecksumSHA256");
+ checksumSHA256Node.SetText(m_checksumSHA256);
}
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateBucketRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateBucketRequest.cpp
index 0bc3641a33..5e8ade4bdf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateBucketRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateBucketRequest.cpp
@@ -28,6 +28,8 @@ CreateBucketRequest::CreateBucketRequest() :
m_grantWriteACPHasBeenSet(false),
m_objectLockEnabledForBucket(false),
m_objectLockEnabledForBucketHasBeenSet(false),
+ m_objectOwnership(ObjectOwnership::NOT_SET),
+ m_objectOwnershipHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -121,5 +123,22 @@ Aws::Http::HeaderValueCollection CreateBucketRequest::GetRequestSpecificHeaders(
ss.str("");
}
+ if(m_objectOwnershipHasBeenSet)
+ {
+ headers.emplace("x-amz-object-ownership", ObjectOwnershipMapper::GetNameForObjectOwnership(m_objectOwnership));
+ }
+
return headers;
}
+
+CreateBucketRequest::EndpointParameters CreateBucketRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Static context parameters
+ parameters.emplace_back(Aws::String("DisableAccessPoints"), true, Aws::Endpoint::EndpointParameter::ParameterOrigin::STATIC_CONTEXT);
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
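
CreateBucketRequest gains an ObjectOwnership setting (sent as x-amz-object-ownership) and now reports a static DisableAccessPoints endpoint parameter alongside the bucket. A usage sketch; SetBucket/SetObjectOwnership and the ObjectOwnership::BucketOwnerEnforced value are assumed from the usual generated model and are not shown in this patch:

#include <aws/s3/model/CreateBucketRequest.h>
#include <aws/s3/model/ObjectOwnership.h>

int main()
{
    Aws::S3::Model::CreateBucketRequest request;
    request.SetBucket("example-bucket");                       // accessor assumed
    request.SetObjectOwnership(                                // accessor assumed
        Aws::S3::Model::ObjectOwnership::BucketOwnerEnforced); // enum value assumed

    // GetEndpointContextParams() (shown above) now yields two parameters:
    // the static DisableAccessPoints flag and the Bucket operation parameter.
    return request.GetEndpointContextParams().size() == 2 ? 0 : 1;
}
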
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadRequest.cpp
index 73a88bf2dc..2db712e113 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadRequest.cpp
@@ -53,6 +53,8 @@ CreateMultipartUploadRequest::CreateMultipartUploadRequest() :
m_objectLockLegalHoldStatus(ObjectLockLegalHoldStatus::NOT_SET),
m_objectLockLegalHoldStatusHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -130,7 +132,7 @@ Aws::Http::HeaderValueCollection CreateMultipartUploadRequest::GetRequestSpecifi
if(m_expiresHasBeenSet)
{
- headers.emplace("expires", m_expires.ToGmtString(DateFormat::RFC822));
+ headers.emplace("expires", m_expires.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_grantFullControlHasBeenSet)
@@ -249,7 +251,7 @@ Aws::Http::HeaderValueCollection CreateMultipartUploadRequest::GetRequestSpecifi
if(m_objectLockRetainUntilDateHasBeenSet)
{
- headers.emplace("x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(DateFormat::ISO_8601));
+ headers.emplace("x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_objectLockLegalHoldStatusHasBeenSet)
@@ -264,5 +266,20 @@ Aws::Http::HeaderValueCollection CreateMultipartUploadRequest::GetRequestSpecifi
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
return headers;
}
+
+CreateMultipartUploadRequest::EndpointParameters CreateMultipartUploadRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
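
CreateMultipartUploadRequest now carries a ChecksumAlgorithm, emitted as the x-amz-checksum-algorithm header, and CreateMultipartUploadResult reads the same header back. A sketch of opting a multipart upload into CRC32C checksums; the SetBucket/SetKey/SetChecksumAlgorithm setters are the usual generated accessors and are assumed here:

#include <aws/s3/model/ChecksumAlgorithm.h>
#include <aws/s3/model/CreateMultipartUploadRequest.h>

int main()
{
    Aws::S3::Model::CreateMultipartUploadRequest request;
    request.SetBucket("example-bucket");  // accessor assumed
    request.SetKey("example/object");     // accessor assumed
    request.SetChecksumAlgorithm(         // accessor assumed
        Aws::S3::Model::ChecksumAlgorithm::CRC32C);

    // The request headers now include x-amz-checksum-algorithm: CRC32C,
    // produced by GetRequestSpecificHeaders() as shown above.
    auto headers = request.GetHeaders();
    return headers.count("x-amz-checksum-algorithm") == 1 ? 0 : 1;
}
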
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadResult.cpp
index 4a2772c9ef..d77d44d362 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/CreateMultipartUploadResult.cpp
@@ -19,14 +19,16 @@ using namespace Aws;
CreateMultipartUploadResult::CreateMultipartUploadResult() :
m_serverSideEncryption(ServerSideEncryption::NOT_SET),
m_bucketKeyEnabled(false),
- m_requestCharged(RequestCharged::NOT_SET)
+ m_requestCharged(RequestCharged::NOT_SET),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET)
{
}
CreateMultipartUploadResult::CreateMultipartUploadResult(const Aws::AmazonWebServiceResult<XmlDocument>& result) :
m_serverSideEncryption(ServerSideEncryption::NOT_SET),
m_bucketKeyEnabled(false),
- m_requestCharged(RequestCharged::NOT_SET)
+ m_requestCharged(RequestCharged::NOT_SET),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET)
{
*this = result;
}
@@ -59,7 +61,7 @@ CreateMultipartUploadResult& CreateMultipartUploadResult::operator =(const Aws::
const auto& abortDateIter = headers.find("x-amz-abort-date");
if(abortDateIter != headers.end())
{
- m_abortDate = DateTime(abortDateIter->second, DateFormat::RFC822);
+ m_abortDate = DateTime(abortDateIter->second, Aws::Utils::DateFormat::RFC822);
}
const auto& abortRuleIdIter = headers.find("x-amz-abort-rule-id");
@@ -110,5 +112,11 @@ CreateMultipartUploadResult& CreateMultipartUploadResult::operator =(const Aws::
m_requestCharged = RequestChargedMapper::GetRequestChargedForName(requestChargedIter->second);
}
+ const auto& checksumAlgorithmIter = headers.find("x-amz-checksum-algorithm");
+ if(checksumAlgorithmIter != headers.end())
+ {
+ m_checksumAlgorithm = ChecksumAlgorithmMapper::GetChecksumAlgorithmForName(checksumAlgorithmIter->second);
+ }
+
return *this;
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketAnalyticsConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketAnalyticsConfigurationRequest.cpp
index bcd1e8f320..721721bf25 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketAnalyticsConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketAnalyticsConfigurationRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection DeleteBucketAnalyticsConfigurationRequest::GetR
return headers;
}
+
+DeleteBucketAnalyticsConfigurationRequest::EndpointParameters DeleteBucketAnalyticsConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketCorsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketCorsRequest.cpp
index b52d99571a..9eb64d50c1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketCorsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketCorsRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketCorsRequest::GetRequestSpecificHead
return headers;
}
+
+DeleteBucketCorsRequest::EndpointParameters DeleteBucketCorsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketEncryptionRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketEncryptionRequest.cpp
index 3e01a0b9dc..cbd8f93536 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketEncryptionRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketEncryptionRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketEncryptionRequest::GetRequestSpecif
return headers;
}
+
+DeleteBucketEncryptionRequest::EndpointParameters DeleteBucketEncryptionRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketIntelligentTieringConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketIntelligentTieringConfigurationRequest.cpp
index b48d573ee6..52855aeaa7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketIntelligentTieringConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketIntelligentTieringConfigurationRequest.cpp
@@ -57,3 +57,13 @@ void DeleteBucketIntelligentTieringConfigurationRequest::AddQueryStringParameter
}
}
+
+DeleteBucketIntelligentTieringConfigurationRequest::EndpointParameters DeleteBucketIntelligentTieringConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketInventoryConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketInventoryConfigurationRequest.cpp
index 7bc2d179d6..2f421dac17 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketInventoryConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketInventoryConfigurationRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection DeleteBucketInventoryConfigurationRequest::GetR
return headers;
}
+
+DeleteBucketInventoryConfigurationRequest::EndpointParameters DeleteBucketInventoryConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketLifecycleRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketLifecycleRequest.cpp
index 54311cff3e..b8afa094ff 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketLifecycleRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketLifecycleRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketLifecycleRequest::GetRequestSpecifi
return headers;
}
+
+DeleteBucketLifecycleRequest::EndpointParameters DeleteBucketLifecycleRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketMetricsConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketMetricsConfigurationRequest.cpp
index 631dee94a7..29f50338ab 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketMetricsConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketMetricsConfigurationRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection DeleteBucketMetricsConfigurationRequest::GetReq
return headers;
}
+
+DeleteBucketMetricsConfigurationRequest::EndpointParameters DeleteBucketMetricsConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketOwnershipControlsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketOwnershipControlsRequest.cpp
index 5b18b12424..4ed933cda9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketOwnershipControlsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketOwnershipControlsRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketOwnershipControlsRequest::GetReques
return headers;
}
+
+DeleteBucketOwnershipControlsRequest::EndpointParameters DeleteBucketOwnershipControlsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketPolicyRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketPolicyRequest.cpp
index 33bb2cef34..b9bda713ca 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketPolicyRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketPolicyRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketPolicyRequest::GetRequestSpecificHe
return headers;
}
+
+DeleteBucketPolicyRequest::EndpointParameters DeleteBucketPolicyRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketReplicationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketReplicationRequest.cpp
index af6092ee6c..0bcdf577cf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketReplicationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketReplicationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketReplicationRequest::GetRequestSpeci
return headers;
}
+
+DeleteBucketReplicationRequest::EndpointParameters DeleteBucketReplicationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketRequest.cpp
index 3cfa02d882..fe34195378 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketRequest::GetRequestSpecificHeaders(
return headers;
}
+
+DeleteBucketRequest::EndpointParameters DeleteBucketRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketTaggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketTaggingRequest.cpp
index 7d61b959f5..5df949880a 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketTaggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketTaggingRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketTaggingRequest::GetRequestSpecificH
return headers;
}
+
+DeleteBucketTaggingRequest::EndpointParameters DeleteBucketTaggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketWebsiteRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketWebsiteRequest.cpp
index 59bc58a8d7..2a7b23cfb6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketWebsiteRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteBucketWebsiteRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeleteBucketWebsiteRequest::GetRequestSpecificH
return headers;
}
+
+DeleteBucketWebsiteRequest::EndpointParameters DeleteBucketWebsiteRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteMarkerEntry.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteMarkerEntry.cpp
index 05766bb960..25ea77fa9b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteMarkerEntry.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteMarkerEntry.cpp
@@ -74,7 +74,7 @@ DeleteMarkerEntry& DeleteMarkerEntry::operator =(const XmlNode& xmlNode)
XmlNode lastModifiedNode = resultNode.FirstChild("LastModified");
if(!lastModifiedNode.IsNull())
{
- m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_lastModifiedHasBeenSet = true;
}
}
@@ -114,7 +114,7 @@ void DeleteMarkerEntry::AddToNode(XmlNode& parentNode) const
if(m_lastModifiedHasBeenSet)
{
XmlNode lastModifiedNode = parentNode.CreateChildElement("LastModified");
- lastModifiedNode.SetText(m_lastModified.ToGmtString(DateFormat::ISO_8601));
+ lastModifiedNode.SetText(m_lastModified.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectRequest.cpp
index 6bd83e7cbe..6a27dbf097 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectRequest.cpp
@@ -96,3 +96,13 @@ Aws::Http::HeaderValueCollection DeleteObjectRequest::GetRequestSpecificHeaders(
return headers;
}
+
+DeleteObjectRequest::EndpointParameters DeleteObjectRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectTaggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectTaggingRequest.cpp
index 580a7fa70b..5d19b31c49 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectTaggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectTaggingRequest.cpp
@@ -72,3 +72,13 @@ Aws::Http::HeaderValueCollection DeleteObjectTaggingRequest::GetRequestSpecificH
return headers;
}
+
+DeleteObjectTaggingRequest::EndpointParameters DeleteObjectTaggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectsRequest.cpp
index 3f7759ca0a..1b1841fe8b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeleteObjectsRequest.cpp
@@ -25,6 +25,8 @@ DeleteObjectsRequest::DeleteObjectsRequest() :
m_bypassGovernanceRetention(false),
m_bypassGovernanceRetentionHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -97,5 +99,33 @@ Aws::Http::HeaderValueCollection DeleteObjectsRequest::GetRequestSpecificHeaders
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
return headers;
}
+
+DeleteObjectsRequest::EndpointParameters DeleteObjectsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String DeleteObjectsRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeletePublicAccessBlockRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeletePublicAccessBlockRequest.cpp
index 750457f594..0f087377a7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeletePublicAccessBlockRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/DeletePublicAccessBlockRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection DeletePublicAccessBlockRequest::GetRequestSpeci
return headers;
}
+
+DeletePublicAccessBlockRequest::EndpointParameters DeletePublicAccessBlockRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
index c6e779ab06..e6aa390f59 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Event.cpp
@@ -37,6 +37,16 @@ namespace Aws
static const int s3_Replication_OperationNotTracked_HASH = HashingUtils::HashString("s3:Replication:OperationNotTracked");
static const int s3_Replication_OperationMissedThreshold_HASH = HashingUtils::HashString("s3:Replication:OperationMissedThreshold");
static const int s3_Replication_OperationReplicatedAfterThreshold_HASH = HashingUtils::HashString("s3:Replication:OperationReplicatedAfterThreshold");
+ static const int s3_ObjectRestore_Delete_HASH = HashingUtils::HashString("s3:ObjectRestore:Delete");
+ static const int s3_LifecycleTransition_HASH = HashingUtils::HashString("s3:LifecycleTransition");
+ static const int s3_IntelligentTiering_HASH = HashingUtils::HashString("s3:IntelligentTiering");
+ static const int s3_ObjectAcl_Put_HASH = HashingUtils::HashString("s3:ObjectAcl:Put");
+ static const int s3_LifecycleExpiration_HASH = HashingUtils::HashString("s3:LifecycleExpiration:*");
+ static const int s3_LifecycleExpiration_Delete_HASH = HashingUtils::HashString("s3:LifecycleExpiration:Delete");
+ static const int s3_LifecycleExpiration_DeleteMarkerCreated_HASH = HashingUtils::HashString("s3:LifecycleExpiration:DeleteMarkerCreated");
+ static const int s3_ObjectTagging_HASH = HashingUtils::HashString("s3:ObjectTagging:*");
+ static const int s3_ObjectTagging_Put_HASH = HashingUtils::HashString("s3:ObjectTagging:Put");
+ static const int s3_ObjectTagging_Delete_HASH = HashingUtils::HashString("s3:ObjectTagging:Delete");
Event GetEventForName(const Aws::String& name)
@@ -110,6 +120,46 @@ namespace Aws
{
return Event::s3_Replication_OperationReplicatedAfterThreshold;
}
+ else if (hashCode == s3_ObjectRestore_Delete_HASH)
+ {
+ return Event::s3_ObjectRestore_Delete;
+ }
+ else if (hashCode == s3_LifecycleTransition_HASH)
+ {
+ return Event::s3_LifecycleTransition;
+ }
+ else if (hashCode == s3_IntelligentTiering_HASH)
+ {
+ return Event::s3_IntelligentTiering;
+ }
+ else if (hashCode == s3_ObjectAcl_Put_HASH)
+ {
+ return Event::s3_ObjectAcl_Put;
+ }
+ else if (hashCode == s3_LifecycleExpiration_HASH)
+ {
+ return Event::s3_LifecycleExpiration;
+ }
+ else if (hashCode == s3_LifecycleExpiration_Delete_HASH)
+ {
+ return Event::s3_LifecycleExpiration_Delete;
+ }
+ else if (hashCode == s3_LifecycleExpiration_DeleteMarkerCreated_HASH)
+ {
+ return Event::s3_LifecycleExpiration_DeleteMarkerCreated;
+ }
+ else if (hashCode == s3_ObjectTagging_HASH)
+ {
+ return Event::s3_ObjectTagging;
+ }
+ else if (hashCode == s3_ObjectTagging_Put_HASH)
+ {
+ return Event::s3_ObjectTagging_Put;
+ }
+ else if (hashCode == s3_ObjectTagging_Delete_HASH)
+ {
+ return Event::s3_ObjectTagging_Delete;
+ }
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
@@ -158,6 +208,26 @@ namespace Aws
return "s3:Replication:OperationMissedThreshold";
case Event::s3_Replication_OperationReplicatedAfterThreshold:
return "s3:Replication:OperationReplicatedAfterThreshold";
+ case Event::s3_ObjectRestore_Delete:
+ return "s3:ObjectRestore:Delete";
+ case Event::s3_LifecycleTransition:
+ return "s3:LifecycleTransition";
+ case Event::s3_IntelligentTiering:
+ return "s3:IntelligentTiering";
+ case Event::s3_ObjectAcl_Put:
+ return "s3:ObjectAcl:Put";
+ case Event::s3_LifecycleExpiration:
+ return "s3:LifecycleExpiration:*";
+ case Event::s3_LifecycleExpiration_Delete:
+ return "s3:LifecycleExpiration:Delete";
+ case Event::s3_LifecycleExpiration_DeleteMarkerCreated:
+ return "s3:LifecycleExpiration:DeleteMarkerCreated";
+ case Event::s3_ObjectTagging:
+ return "s3:ObjectTagging:*";
+ case Event::s3_ObjectTagging_Put:
+ return "s3:ObjectTagging:Put";
+ case Event::s3_ObjectTagging_Delete:
+ return "s3:ObjectTagging:Delete";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
new file mode 100644
index 0000000000..ebf1ca13d9
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/EventBridgeConfiguration.cpp
@@ -0,0 +1,51 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/EventBridgeConfiguration.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+
+#include <utility>
+
+using namespace Aws::Utils::Xml;
+using namespace Aws::Utils;
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+
+EventBridgeConfiguration::EventBridgeConfiguration()
+{
+}
+
+EventBridgeConfiguration::EventBridgeConfiguration(const XmlNode& xmlNode)
+{
+ *this = xmlNode;
+}
+
+EventBridgeConfiguration& EventBridgeConfiguration::operator =(const XmlNode& xmlNode)
+{
+ XmlNode resultNode = xmlNode;
+
+ if(!resultNode.IsNull())
+ {
+ }
+
+ return *this;
+}
+
+void EventBridgeConfiguration::AddToNode(XmlNode& parentNode) const
+{
+ Aws::StringStream ss;
+ AWS_UNREFERENCED_PARAM(parentNode);
+}
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAccelerateConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAccelerateConfigurationRequest.cpp
index 3bc67a43cc..f144bc0c22 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAccelerateConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAccelerateConfigurationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketAccelerateConfigurationRequest::GetReq
return headers;
}
+
+GetBucketAccelerateConfigurationRequest::EndpointParameters GetBucketAccelerateConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAclRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAclRequest.cpp
index df77a9e7f6..962a4eb555 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAclRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAclRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketAclRequest::GetRequestSpecificHeaders(
return headers;
}
+
+GetBucketAclRequest::EndpointParameters GetBucketAclRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAnalyticsConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAnalyticsConfigurationRequest.cpp
index 5e3d792e2c..8dde64a2b2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAnalyticsConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketAnalyticsConfigurationRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection GetBucketAnalyticsConfigurationRequest::GetRequ
return headers;
}
+
+GetBucketAnalyticsConfigurationRequest::EndpointParameters GetBucketAnalyticsConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketCorsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketCorsRequest.cpp
index 92cc762080..2f3e70a8eb 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketCorsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketCorsRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketCorsRequest::GetRequestSpecificHeaders
return headers;
}
+
+GetBucketCorsRequest::EndpointParameters GetBucketCorsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketEncryptionRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketEncryptionRequest.cpp
index ad69f94035..1d679807c7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketEncryptionRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketEncryptionRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketEncryptionRequest::GetRequestSpecificH
return headers;
}
+
+GetBucketEncryptionRequest::EndpointParameters GetBucketEncryptionRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketIntelligentTieringConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketIntelligentTieringConfigurationRequest.cpp
index ac5e664e69..1aeb94e440 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketIntelligentTieringConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketIntelligentTieringConfigurationRequest.cpp
@@ -57,3 +57,13 @@ void GetBucketIntelligentTieringConfigurationRequest::AddQueryStringParameters(U
}
}
+
+GetBucketIntelligentTieringConfigurationRequest::EndpointParameters GetBucketIntelligentTieringConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketInventoryConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketInventoryConfigurationRequest.cpp
index f3ec3acdc5..cf462b3fa9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketInventoryConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketInventoryConfigurationRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection GetBucketInventoryConfigurationRequest::GetRequ
return headers;
}
+
+GetBucketInventoryConfigurationRequest::EndpointParameters GetBucketInventoryConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLifecycleConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLifecycleConfigurationRequest.cpp
index 5abc090ac4..019d5457a9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLifecycleConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLifecycleConfigurationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketLifecycleConfigurationRequest::GetRequ
return headers;
}
+
+GetBucketLifecycleConfigurationRequest::EndpointParameters GetBucketLifecycleConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationRequest.cpp
index 68e7f366f6..6958be9895 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketLocationRequest::GetRequestSpecificHea
return headers;
}
+
+GetBucketLocationRequest::EndpointParameters GetBucketLocationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationResult.cpp
index 24fc4e62b0..f748617711 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLocationResult.cpp
@@ -36,6 +36,5 @@ GetBucketLocationResult& GetBucketLocationResult::operator =(const AmazonWebServ
m_locationConstraint = BucketLocationConstraintMapper::GetBucketLocationConstraintForName(StringUtils::Trim(resultNode.GetText().c_str()).c_str());
}
- return *this;
+ return *this;
}
-
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLoggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLoggingRequest.cpp
index ee141ec35c..38a1cbd48b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLoggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketLoggingRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketLoggingRequest::GetRequestSpecificHead
return headers;
}
+
+GetBucketLoggingRequest::EndpointParameters GetBucketLoggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketMetricsConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketMetricsConfigurationRequest.cpp
index c384ad6f1e..11f5eacbb6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketMetricsConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketMetricsConfigurationRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection GetBucketMetricsConfigurationRequest::GetReques
return headers;
}
+
+GetBucketMetricsConfigurationRequest::EndpointParameters GetBucketMetricsConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationRequest.cpp
index 6e892ead3b..dd672659eb 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketNotificationConfigurationRequest::GetR
return headers;
}
+
+GetBucketNotificationConfigurationRequest::EndpointParameters GetBucketNotificationConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationResult.cpp
index de68f4316a..9e7ff9481f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketNotificationConfigurationResult.cpp
@@ -64,6 +64,11 @@ GetBucketNotificationConfigurationResult& GetBucketNotificationConfigurationResu
}
}
+ XmlNode eventBridgeConfigurationNode = resultNode.FirstChild("EventBridgeConfiguration");
+ if(!eventBridgeConfigurationNode.IsNull())
+ {
+ m_eventBridgeConfiguration = eventBridgeConfigurationNode;
+ }
}
return *this;
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketOwnershipControlsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketOwnershipControlsRequest.cpp
index efaa529d2e..b3f153a9b5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketOwnershipControlsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketOwnershipControlsRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketOwnershipControlsRequest::GetRequestSp
return headers;
}
+
+GetBucketOwnershipControlsRequest::EndpointParameters GetBucketOwnershipControlsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyRequest.cpp
index e41b1fe0c5..e6571c8a6f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketPolicyRequest::GetRequestSpecificHeade
return headers;
}
+
+GetBucketPolicyRequest::EndpointParameters GetBucketPolicyRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyStatusRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyStatusRequest.cpp
index 3c487f9634..c3af674bbd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyStatusRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketPolicyStatusRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketPolicyStatusRequest::GetRequestSpecifi
return headers;
}
+
+GetBucketPolicyStatusRequest::EndpointParameters GetBucketPolicyStatusRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketReplicationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketReplicationRequest.cpp
index 3dcadfa2be..7ef6b8a1b2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketReplicationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketReplicationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketReplicationRequest::GetRequestSpecific
return headers;
}
+
+GetBucketReplicationRequest::EndpointParameters GetBucketReplicationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketRequestPaymentRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketRequestPaymentRequest.cpp
index 7f3e734b70..e369ccaab7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketRequestPaymentRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketRequestPaymentRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketRequestPaymentRequest::GetRequestSpeci
return headers;
}
+
+GetBucketRequestPaymentRequest::EndpointParameters GetBucketRequestPaymentRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketTaggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketTaggingRequest.cpp
index 72373deaf1..f3e5ea4b74 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketTaggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketTaggingRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketTaggingRequest::GetRequestSpecificHead
return headers;
}
+
+GetBucketTaggingRequest::EndpointParameters GetBucketTaggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketVersioningRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketVersioningRequest.cpp
index 7583f79117..830ab5425d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketVersioningRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketVersioningRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketVersioningRequest::GetRequestSpecificH
return headers;
}
+
+GetBucketVersioningRequest::EndpointParameters GetBucketVersioningRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteRequest.cpp
index 067a281fa9..73510697c2 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetBucketWebsiteRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetBucketWebsiteRequest::GetRequestSpecificHead
return headers;
}
+
+GetBucketWebsiteRequest::EndpointParameters GetBucketWebsiteRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
index 9019eea1c7..f0ee229d60 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAclRequest.cpp
@@ -79,3 +79,13 @@ Aws::Http::HeaderValueCollection GetObjectAclRequest::GetRequestSpecificHeaders(
return headers;
}
+
+GetObjectAclRequest::EndpointParameters GetObjectAclRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
new file mode 100644
index 0000000000..14b9a8ac28
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesParts.cpp
@@ -0,0 +1,163 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/GetObjectAttributesParts.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+
+#include <utility>
+
+using namespace Aws::Utils::Xml;
+using namespace Aws::Utils;
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+
+GetObjectAttributesParts::GetObjectAttributesParts() :
+ m_totalPartsCount(0),
+ m_totalPartsCountHasBeenSet(false),
+ m_partNumberMarker(0),
+ m_partNumberMarkerHasBeenSet(false),
+ m_nextPartNumberMarker(0),
+ m_nextPartNumberMarkerHasBeenSet(false),
+ m_maxParts(0),
+ m_maxPartsHasBeenSet(false),
+ m_isTruncated(false),
+ m_isTruncatedHasBeenSet(false),
+ m_partsHasBeenSet(false)
+{
+}
+
+GetObjectAttributesParts::GetObjectAttributesParts(const XmlNode& xmlNode) :
+ m_totalPartsCount(0),
+ m_totalPartsCountHasBeenSet(false),
+ m_partNumberMarker(0),
+ m_partNumberMarkerHasBeenSet(false),
+ m_nextPartNumberMarker(0),
+ m_nextPartNumberMarkerHasBeenSet(false),
+ m_maxParts(0),
+ m_maxPartsHasBeenSet(false),
+ m_isTruncated(false),
+ m_isTruncatedHasBeenSet(false),
+ m_partsHasBeenSet(false)
+{
+ *this = xmlNode;
+}
+
+GetObjectAttributesParts& GetObjectAttributesParts::operator =(const XmlNode& xmlNode)
+{
+ XmlNode resultNode = xmlNode;
+
+ if(!resultNode.IsNull())
+ {
+ XmlNode totalPartsCountNode = resultNode.FirstChild("PartsCount");
+ if(!totalPartsCountNode.IsNull())
+ {
+ m_totalPartsCount = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(totalPartsCountNode.GetText()).c_str()).c_str());
+ m_totalPartsCountHasBeenSet = true;
+ }
+ XmlNode partNumberMarkerNode = resultNode.FirstChild("PartNumberMarker");
+ if(!partNumberMarkerNode.IsNull())
+ {
+ m_partNumberMarker = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(partNumberMarkerNode.GetText()).c_str()).c_str());
+ m_partNumberMarkerHasBeenSet = true;
+ }
+ XmlNode nextPartNumberMarkerNode = resultNode.FirstChild("NextPartNumberMarker");
+ if(!nextPartNumberMarkerNode.IsNull())
+ {
+ m_nextPartNumberMarker = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(nextPartNumberMarkerNode.GetText()).c_str()).c_str());
+ m_nextPartNumberMarkerHasBeenSet = true;
+ }
+ XmlNode maxPartsNode = resultNode.FirstChild("MaxParts");
+ if(!maxPartsNode.IsNull())
+ {
+ m_maxParts = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(maxPartsNode.GetText()).c_str()).c_str());
+ m_maxPartsHasBeenSet = true;
+ }
+ XmlNode isTruncatedNode = resultNode.FirstChild("IsTruncated");
+ if(!isTruncatedNode.IsNull())
+ {
+ m_isTruncated = StringUtils::ConvertToBool(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(isTruncatedNode.GetText()).c_str()).c_str());
+ m_isTruncatedHasBeenSet = true;
+ }
+ XmlNode partsNode = resultNode.FirstChild("Part");
+ if(!partsNode.IsNull())
+ {
+ XmlNode partMember = partsNode;
+ while(!partMember.IsNull())
+ {
+ m_parts.push_back(partMember);
+ partMember = partMember.NextNode("Part");
+ }
+
+ m_partsHasBeenSet = true;
+ }
+ }
+
+ return *this;
+}
+
+void GetObjectAttributesParts::AddToNode(XmlNode& parentNode) const
+{
+ Aws::StringStream ss;
+ if(m_totalPartsCountHasBeenSet)
+ {
+ XmlNode totalPartsCountNode = parentNode.CreateChildElement("PartsCount");
+ ss << m_totalPartsCount;
+ totalPartsCountNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_partNumberMarkerHasBeenSet)
+ {
+ XmlNode partNumberMarkerNode = parentNode.CreateChildElement("PartNumberMarker");
+ ss << m_partNumberMarker;
+ partNumberMarkerNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_nextPartNumberMarkerHasBeenSet)
+ {
+ XmlNode nextPartNumberMarkerNode = parentNode.CreateChildElement("NextPartNumberMarker");
+ ss << m_nextPartNumberMarker;
+ nextPartNumberMarkerNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_maxPartsHasBeenSet)
+ {
+ XmlNode maxPartsNode = parentNode.CreateChildElement("MaxParts");
+ ss << m_maxParts;
+ maxPartsNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_isTruncatedHasBeenSet)
+ {
+ XmlNode isTruncatedNode = parentNode.CreateChildElement("IsTruncated");
+ ss << std::boolalpha << m_isTruncated;
+ isTruncatedNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_partsHasBeenSet)
+ {
+ for(const auto& item : m_parts)
+ {
+ XmlNode partsNode = parentNode.CreateChildElement("Part");
+ item.AddToNode(partsNode);
+ }
+ }
+
+}
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
new file mode 100644
index 0000000000..87e567b7d5
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesRequest.cpp
@@ -0,0 +1,144 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/GetObjectAttributesRequest.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+#include <aws/core/http/URI.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+
+#include <utility>
+
+using namespace Aws::S3::Model;
+using namespace Aws::Utils::Xml;
+using namespace Aws::Utils;
+using namespace Aws::Http;
+
+GetObjectAttributesRequest::GetObjectAttributesRequest() :
+ m_bucketHasBeenSet(false),
+ m_keyHasBeenSet(false),
+ m_versionIdHasBeenSet(false),
+ m_maxParts(0),
+ m_maxPartsHasBeenSet(false),
+ m_partNumberMarker(0),
+ m_partNumberMarkerHasBeenSet(false),
+ m_sSECustomerAlgorithmHasBeenSet(false),
+ m_sSECustomerKeyHasBeenSet(false),
+ m_sSECustomerKeyMD5HasBeenSet(false),
+ m_requestPayer(RequestPayer::NOT_SET),
+ m_requestPayerHasBeenSet(false),
+ m_expectedBucketOwnerHasBeenSet(false),
+ m_objectAttributesHasBeenSet(false),
+ m_customizedAccessLogTagHasBeenSet(false)
+{
+}
+
+Aws::String GetObjectAttributesRequest::SerializePayload() const
+{
+ return {};
+}
+
+void GetObjectAttributesRequest::AddQueryStringParameters(URI& uri) const
+{
+ Aws::StringStream ss;
+ if(m_versionIdHasBeenSet)
+ {
+ ss << m_versionId;
+ uri.AddQueryStringParameter("versionId", ss.str());
+ ss.str("");
+ }
+
+ if(!m_customizedAccessLogTag.empty())
+ {
+ // only accept customized LogTag which starts with "x-"
+ Aws::Map<Aws::String, Aws::String> collectedLogTags;
+ for(const auto& entry: m_customizedAccessLogTag)
+ {
+ if (!entry.first.empty() && !entry.second.empty() && entry.first.substr(0, 2) == "x-")
+ {
+ collectedLogTags.emplace(entry.first, entry.second);
+ }
+ }
+
+ if (!collectedLogTags.empty())
+ {
+ uri.AddQueryStringParameter(collectedLogTags);
+ }
+ }
+}
+
+Aws::Http::HeaderValueCollection GetObjectAttributesRequest::GetRequestSpecificHeaders() const
+{
+ Aws::Http::HeaderValueCollection headers;
+ Aws::StringStream ss;
+ if(m_maxPartsHasBeenSet)
+ {
+ ss << m_maxParts;
+ headers.emplace("x-amz-max-parts", ss.str());
+ ss.str("");
+ }
+
+ if(m_partNumberMarkerHasBeenSet)
+ {
+ ss << m_partNumberMarker;
+ headers.emplace("x-amz-part-number-marker", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerAlgorithmHasBeenSet)
+ {
+ ss << m_sSECustomerAlgorithm;
+ headers.emplace("x-amz-server-side-encryption-customer-algorithm", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerKeyHasBeenSet)
+ {
+ ss << m_sSECustomerKey;
+ headers.emplace("x-amz-server-side-encryption-customer-key", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerKeyMD5HasBeenSet)
+ {
+ ss << m_sSECustomerKeyMD5;
+ headers.emplace("x-amz-server-side-encryption-customer-key-md5", ss.str());
+ ss.str("");
+ }
+
+ if(m_requestPayerHasBeenSet)
+ {
+ headers.emplace("x-amz-request-payer", RequestPayerMapper::GetNameForRequestPayer(m_requestPayer));
+ }
+
+ if(m_expectedBucketOwnerHasBeenSet)
+ {
+ ss << m_expectedBucketOwner;
+ headers.emplace("x-amz-expected-bucket-owner", ss.str());
+ ss.str("");
+ }
+
+ if(m_objectAttributesHasBeenSet)
+ {
+ for(const auto& item : m_objectAttributes)
+ {
+ ss << ObjectAttributesMapper::GetNameForObjectAttributes(item);
+ headers.emplace("x-amz-object-attributes", ss.str());
+ ss.str("");
+ }
+ }
+
+ return headers;
+}
+
+GetObjectAttributesRequest::EndpointParameters GetObjectAttributesRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
new file mode 100644
index 0000000000..2cb06ee5fe
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectAttributesResult.cpp
@@ -0,0 +1,96 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/GetObjectAttributesResult.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/AmazonWebServiceResult.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+
+#include <utility>
+
+using namespace Aws::S3::Model;
+using namespace Aws::Utils::Xml;
+using namespace Aws::Utils;
+using namespace Aws;
+
+GetObjectAttributesResult::GetObjectAttributesResult() :
+ m_deleteMarker(false),
+ m_requestCharged(RequestCharged::NOT_SET),
+ m_storageClass(StorageClass::NOT_SET),
+ m_objectSize(0)
+{
+}
+
+GetObjectAttributesResult::GetObjectAttributesResult(const Aws::AmazonWebServiceResult<XmlDocument>& result) :
+ m_deleteMarker(false),
+ m_requestCharged(RequestCharged::NOT_SET),
+ m_storageClass(StorageClass::NOT_SET),
+ m_objectSize(0)
+{
+ *this = result;
+}
+
+GetObjectAttributesResult& GetObjectAttributesResult::operator =(const Aws::AmazonWebServiceResult<XmlDocument>& result)
+{
+ const XmlDocument& xmlDocument = result.GetPayload();
+ XmlNode resultNode = xmlDocument.GetRootElement();
+
+ if(!resultNode.IsNull())
+ {
+ XmlNode eTagNode = resultNode.FirstChild("ETag");
+ if(!eTagNode.IsNull())
+ {
+ m_eTag = Aws::Utils::Xml::DecodeEscapedXmlText(eTagNode.GetText());
+ }
+ XmlNode checksumNode = resultNode.FirstChild("Checksum");
+ if(!checksumNode.IsNull())
+ {
+ m_checksum = checksumNode;
+ }
+ XmlNode objectPartsNode = resultNode.FirstChild("ObjectParts");
+ if(!objectPartsNode.IsNull())
+ {
+ m_objectParts = objectPartsNode;
+ }
+ XmlNode storageClassNode = resultNode.FirstChild("StorageClass");
+ if(!storageClassNode.IsNull())
+ {
+ m_storageClass = StorageClassMapper::GetStorageClassForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(storageClassNode.GetText()).c_str()).c_str());
+ }
+ XmlNode objectSizeNode = resultNode.FirstChild("ObjectSize");
+ if(!objectSizeNode.IsNull())
+ {
+ m_objectSize = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(objectSizeNode.GetText()).c_str()).c_str());
+ }
+ }
+
+ const auto& headers = result.GetHeaderValueCollection();
+ const auto& deleteMarkerIter = headers.find("x-amz-delete-marker");
+ if(deleteMarkerIter != headers.end())
+ {
+ m_deleteMarker = StringUtils::ConvertToBool(deleteMarkerIter->second.c_str());
+ }
+
+ const auto& lastModifiedIter = headers.find("last-modified");
+ if(lastModifiedIter != headers.end())
+ {
+ m_lastModified = DateTime(lastModifiedIter->second, Aws::Utils::DateFormat::RFC822);
+ }
+
+ const auto& versionIdIter = headers.find("x-amz-version-id");
+ if(versionIdIter != headers.end())
+ {
+ m_versionId = versionIdIter->second;
+ }
+
+ const auto& requestChargedIter = headers.find("x-amz-request-charged");
+ if(requestChargedIter != headers.end())
+ {
+ m_requestCharged = RequestChargedMapper::GetRequestChargedForName(requestChargedIter->second);
+ }
+
+ return *this;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
index 6a3ba662dc..f6b41c0e8f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLegalHoldRequest.cpp
@@ -79,3 +79,13 @@ Aws::Http::HeaderValueCollection GetObjectLegalHoldRequest::GetRequestSpecificHe
return headers;
}
+
+GetObjectLegalHoldRequest::EndpointParameters GetObjectLegalHoldRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
index d1cf37a030..59ebdb725e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectLockConfigurationRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetObjectLockConfigurationRequest::GetRequestSp
return headers;
}
+
+GetObjectLockConfigurationRequest::EndpointParameters GetObjectLockConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRequest.cpp
index a1939e2471..94dcf96673 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRequest.cpp
@@ -39,6 +39,8 @@ GetObjectRequest::GetObjectRequest() :
m_partNumber(0),
m_partNumberHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_checksumMode(ChecksumMode::NOT_SET),
+ m_checksumModeHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -88,7 +90,7 @@ void GetObjectRequest::AddQueryStringParameters(URI& uri) const
if(m_responseExpiresHasBeenSet)
{
- ss << m_responseExpires.ToGmtString(DateFormat::RFC822);
+ ss << m_responseExpires.ToGmtString(Aws::Utils::DateFormat::RFC822);
uri.AddQueryStringParameter("response-expires", ss.str());
ss.str("");
}
@@ -139,7 +141,7 @@ Aws::Http::HeaderValueCollection GetObjectRequest::GetRequestSpecificHeaders() c
if(m_ifModifiedSinceHasBeenSet)
{
- headers.emplace("if-modified-since", m_ifModifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("if-modified-since", m_ifModifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_ifNoneMatchHasBeenSet)
@@ -151,7 +153,7 @@ Aws::Http::HeaderValueCollection GetObjectRequest::GetRequestSpecificHeaders() c
if(m_ifUnmodifiedSinceHasBeenSet)
{
- headers.emplace("if-unmodified-since", m_ifUnmodifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("if-unmodified-since", m_ifUnmodifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_rangeHasBeenSet)
@@ -194,5 +196,35 @@ Aws::Http::HeaderValueCollection GetObjectRequest::GetRequestSpecificHeaders() c
ss.str("");
}
+ if(m_checksumModeHasBeenSet)
+ {
+ headers.emplace("x-amz-checksum-mode", ChecksumModeMapper::GetNameForChecksumMode(m_checksumMode));
+ }
+
return headers;
}
+
+GetObjectRequest::EndpointParameters GetObjectRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+bool GetObjectRequest::ShouldValidateResponseChecksum() const
+{
+ return m_checksumMode == ChecksumMode::ENABLED;
+}
+
+Aws::Vector<Aws::String> GetObjectRequest::GetResponseChecksumAlgorithmNames() const
+{
+ Aws::Vector<Aws::String> responseChecksumAlgorithmNames;
+ responseChecksumAlgorithmNames.push_back("CRC32");
+ responseChecksumAlgorithmNames.push_back("CRC32C");
+ responseChecksumAlgorithmNames.push_back("SHA256");
+ responseChecksumAlgorithmNames.push_back("SHA1");
+ return responseChecksumAlgorithmNames;
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectResult.cpp
index f0959a570a..50be648cd6 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectResult.cpp
@@ -41,6 +41,10 @@ GetObjectResult::GetObjectResult(GetObjectResult&& toMove) :
m_lastModified(std::move(toMove.m_lastModified)),
m_contentLength(toMove.m_contentLength),
m_eTag(std::move(toMove.m_eTag)),
+ m_checksumCRC32(std::move(toMove.m_checksumCRC32)),
+ m_checksumCRC32C(std::move(toMove.m_checksumCRC32C)),
+ m_checksumSHA1(std::move(toMove.m_checksumSHA1)),
+ m_checksumSHA256(std::move(toMove.m_checksumSHA256)),
m_missingMeta(toMove.m_missingMeta),
m_versionId(std::move(toMove.m_versionId)),
m_cacheControl(std::move(toMove.m_cacheControl)),
@@ -85,6 +89,10 @@ GetObjectResult& GetObjectResult::operator=(GetObjectResult&& toMove)
m_lastModified = std::move(toMove.m_lastModified);
m_contentLength = toMove.m_contentLength;
m_eTag = std::move(toMove.m_eTag);
+ m_checksumCRC32 = std::move(toMove.m_checksumCRC32);
+ m_checksumCRC32C = std::move(toMove.m_checksumCRC32C);
+ m_checksumSHA1 = std::move(toMove.m_checksumSHA1);
+ m_checksumSHA256 = std::move(toMove.m_checksumSHA256);
m_missingMeta = toMove.m_missingMeta;
m_versionId = std::move(toMove.m_versionId);
m_cacheControl = std::move(toMove.m_cacheControl);
@@ -164,7 +172,7 @@ GetObjectResult& GetObjectResult::operator =(Aws::AmazonWebServiceResult<Respons
const auto& lastModifiedIter = headers.find("last-modified");
if(lastModifiedIter != headers.end())
{
- m_lastModified = DateTime(lastModifiedIter->second, DateFormat::RFC822);
+ m_lastModified = DateTime(lastModifiedIter->second, Aws::Utils::DateFormat::RFC822);
}
const auto& contentLengthIter = headers.find("content-length");
@@ -179,6 +187,30 @@ GetObjectResult& GetObjectResult::operator =(Aws::AmazonWebServiceResult<Respons
m_eTag = eTagIter->second;
}
+ const auto& checksumCRC32Iter = headers.find("x-amz-checksum-crc32");
+ if(checksumCRC32Iter != headers.end())
+ {
+ m_checksumCRC32 = checksumCRC32Iter->second;
+ }
+
+ const auto& checksumCRC32CIter = headers.find("x-amz-checksum-crc32c");
+ if(checksumCRC32CIter != headers.end())
+ {
+ m_checksumCRC32C = checksumCRC32CIter->second;
+ }
+
+ const auto& checksumSHA1Iter = headers.find("x-amz-checksum-sha1");
+ if(checksumSHA1Iter != headers.end())
+ {
+ m_checksumSHA1 = checksumSHA1Iter->second;
+ }
+
+ const auto& checksumSHA256Iter = headers.find("x-amz-checksum-sha256");
+ if(checksumSHA256Iter != headers.end())
+ {
+ m_checksumSHA256 = checksumSHA256Iter->second;
+ }
+
const auto& missingMetaIter = headers.find("x-amz-missing-meta");
if(missingMetaIter != headers.end())
{
@@ -230,7 +262,7 @@ GetObjectResult& GetObjectResult::operator =(Aws::AmazonWebServiceResult<Respons
const auto& expiresIter = headers.find("expires");
if(expiresIter != headers.end())
{
- m_expires = DateTime(expiresIter->second, DateFormat::RFC822);
+ m_expires = DateTime(expiresIter->second, Aws::Utils::DateFormat::RFC822);
}
const auto& websiteRedirectLocationIter = headers.find("x-amz-website-redirect-location");
@@ -319,7 +351,7 @@ GetObjectResult& GetObjectResult::operator =(Aws::AmazonWebServiceResult<Respons
const auto& objectLockRetainUntilDateIter = headers.find("x-amz-object-lock-retain-until-date");
if(objectLockRetainUntilDateIter != headers.end())
{
- m_objectLockRetainUntilDate = DateTime(objectLockRetainUntilDateIter->second, DateFormat::ISO_8601);
+ m_objectLockRetainUntilDate = DateTime(objectLockRetainUntilDateIter->second, Aws::Utils::DateFormat::ISO_8601);
}
const auto& objectLockLegalHoldStatusIter = headers.find("x-amz-object-lock-legal-hold");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRetentionRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRetentionRequest.cpp
index d1917da723..a825b3ba4c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRetentionRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectRetentionRequest.cpp
@@ -79,3 +79,13 @@ Aws::Http::HeaderValueCollection GetObjectRetentionRequest::GetRequestSpecificHe
return headers;
}
+
+GetObjectRetentionRequest::EndpointParameters GetObjectRetentionRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTaggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTaggingRequest.cpp
index fb9a5d55e4..d2f2fa1127 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTaggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTaggingRequest.cpp
@@ -79,3 +79,13 @@ Aws::Http::HeaderValueCollection GetObjectTaggingRequest::GetRequestSpecificHead
return headers;
}
+
+GetObjectTaggingRequest::EndpointParameters GetObjectTaggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTorrentRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTorrentRequest.cpp
index 05aae88aa2..f0975e6b60 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTorrentRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetObjectTorrentRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection GetObjectTorrentRequest::GetRequestSpecificHead
return headers;
}
+
+GetObjectTorrentRequest::EndpointParameters GetObjectTorrentRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetPublicAccessBlockRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetPublicAccessBlockRequest.cpp
index 6616e19c0b..137a2d2a36 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetPublicAccessBlockRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/GetPublicAccessBlockRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection GetPublicAccessBlockRequest::GetRequestSpecific
return headers;
}
+
+GetPublicAccessBlockRequest::EndpointParameters GetPublicAccessBlockRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadBucketRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadBucketRequest.cpp
index b503f37b81..9cc24f6e14 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadBucketRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadBucketRequest.cpp
@@ -63,3 +63,13 @@ Aws::Http::HeaderValueCollection HeadBucketRequest::GetRequestSpecificHeaders()
return headers;
}
+
+HeadBucketRequest::EndpointParameters HeadBucketRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp
index 7bf7b7239e..e40ec1f7cf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectRequest.cpp
@@ -33,6 +33,8 @@ HeadObjectRequest::HeadObjectRequest() :
m_partNumber(0),
m_partNumberHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_checksumMode(ChecksumMode::NOT_SET),
+ m_checksumModeHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -91,7 +93,7 @@ Aws::Http::HeaderValueCollection HeadObjectRequest::GetRequestSpecificHeaders()
if(m_ifModifiedSinceHasBeenSet)
{
- headers.emplace("if-modified-since", m_ifModifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("if-modified-since", m_ifModifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_ifNoneMatchHasBeenSet)
@@ -103,7 +105,7 @@ Aws::Http::HeaderValueCollection HeadObjectRequest::GetRequestSpecificHeaders()
if(m_ifUnmodifiedSinceHasBeenSet)
{
- headers.emplace("if-unmodified-since", m_ifUnmodifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("if-unmodified-since", m_ifUnmodifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_rangeHasBeenSet)
@@ -146,5 +148,20 @@ Aws::Http::HeaderValueCollection HeadObjectRequest::GetRequestSpecificHeaders()
ss.str("");
}
+ if(m_checksumModeHasBeenSet)
+ {
+ headers.emplace("x-amz-checksum-mode", ChecksumModeMapper::GetNameForChecksumMode(m_checksumMode));
+ }
+
return headers;
}
+
+HeadObjectRequest::EndpointParameters HeadObjectRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectResult.cpp
index f498d56aab..f68958d355 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/HeadObjectResult.cpp
@@ -92,7 +92,7 @@ HeadObjectResult& HeadObjectResult::operator =(const Aws::AmazonWebServiceResult
const auto& lastModifiedIter = headers.find("last-modified");
if(lastModifiedIter != headers.end())
{
- m_lastModified = DateTime(lastModifiedIter->second, DateFormat::RFC822);
+ m_lastModified = DateTime(lastModifiedIter->second, Aws::Utils::DateFormat::RFC822);
}
const auto& contentLengthIter = headers.find("content-length");
@@ -101,6 +101,30 @@ HeadObjectResult& HeadObjectResult::operator =(const Aws::AmazonWebServiceResult
m_contentLength = StringUtils::ConvertToInt64(contentLengthIter->second.c_str());
}
+ const auto& checksumCRC32Iter = headers.find("x-amz-checksum-crc32");
+ if(checksumCRC32Iter != headers.end())
+ {
+ m_checksumCRC32 = checksumCRC32Iter->second;
+ }
+
+ const auto& checksumCRC32CIter = headers.find("x-amz-checksum-crc32c");
+ if(checksumCRC32CIter != headers.end())
+ {
+ m_checksumCRC32C = checksumCRC32CIter->second;
+ }
+
+ const auto& checksumSHA1Iter = headers.find("x-amz-checksum-sha1");
+ if(checksumSHA1Iter != headers.end())
+ {
+ m_checksumSHA1 = checksumSHA1Iter->second;
+ }
+
+ const auto& checksumSHA256Iter = headers.find("x-amz-checksum-sha256");
+ if(checksumSHA256Iter != headers.end())
+ {
+ m_checksumSHA256 = checksumSHA256Iter->second;
+ }
+
const auto& eTagIter = headers.find("etag");
if(eTagIter != headers.end())
{
@@ -152,7 +176,7 @@ HeadObjectResult& HeadObjectResult::operator =(const Aws::AmazonWebServiceResult
const auto& expiresIter = headers.find("expires");
if(expiresIter != headers.end())
{
- m_expires = DateTime(expiresIter->second, DateFormat::RFC822);
+ m_expires = DateTime(expiresIter->second, Aws::Utils::DateFormat::RFC822);
}
const auto& websiteRedirectLocationIter = headers.find("x-amz-website-redirect-location");
@@ -235,7 +259,7 @@ HeadObjectResult& HeadObjectResult::operator =(const Aws::AmazonWebServiceResult
const auto& objectLockRetainUntilDateIter = headers.find("x-amz-object-lock-retain-until-date");
if(objectLockRetainUntilDateIter != headers.end())
{
- m_objectLockRetainUntilDate = DateTime(objectLockRetainUntilDateIter->second, DateFormat::ISO_8601);
+ m_objectLockRetainUntilDate = DateTime(objectLockRetainUntilDateIter->second, Aws::Utils::DateFormat::ISO_8601);
}
const auto& objectLockLegalHoldStatusIter = headers.find("x-amz-object-lock-legal-hold");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/InventoryOptionalField.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/InventoryOptionalField.cpp
index 00ae5fda49..a9b64dd1f1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/InventoryOptionalField.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/InventoryOptionalField.cpp
@@ -31,6 +31,8 @@ namespace Aws
static const int ObjectLockMode_HASH = HashingUtils::HashString("ObjectLockMode");
static const int ObjectLockLegalHoldStatus_HASH = HashingUtils::HashString("ObjectLockLegalHoldStatus");
static const int IntelligentTieringAccessTier_HASH = HashingUtils::HashString("IntelligentTieringAccessTier");
+ static const int BucketKeyStatus_HASH = HashingUtils::HashString("BucketKeyStatus");
+ static const int ChecksumAlgorithm_HASH = HashingUtils::HashString("ChecksumAlgorithm");
InventoryOptionalField GetInventoryOptionalFieldForName(const Aws::String& name)
@@ -80,6 +82,14 @@ namespace Aws
{
return InventoryOptionalField::IntelligentTieringAccessTier;
}
+ else if (hashCode == BucketKeyStatus_HASH)
+ {
+ return InventoryOptionalField::BucketKeyStatus;
+ }
+ else if (hashCode == ChecksumAlgorithm_HASH)
+ {
+ return InventoryOptionalField::ChecksumAlgorithm;
+ }
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
@@ -116,6 +126,10 @@ namespace Aws
return "ObjectLockLegalHoldStatus";
case InventoryOptionalField::IntelligentTieringAccessTier:
return "IntelligentTieringAccessTier";
+ case InventoryOptionalField::BucketKeyStatus:
+ return "BucketKeyStatus";
+ case InventoryOptionalField::ChecksumAlgorithm:
+ return "ChecksumAlgorithm";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleExpiration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleExpiration.cpp
index be347d18f0..9d579d1c5c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleExpiration.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleExpiration.cpp
@@ -48,7 +48,7 @@ LifecycleExpiration& LifecycleExpiration::operator =(const XmlNode& xmlNode)
XmlNode dateNode = resultNode.FirstChild("Date");
if(!dateNode.IsNull())
{
- m_date = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(dateNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_date = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(dateNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_dateHasBeenSet = true;
}
XmlNode daysNode = resultNode.FirstChild("Days");
@@ -74,7 +74,7 @@ void LifecycleExpiration::AddToNode(XmlNode& parentNode) const
if(m_dateHasBeenSet)
{
XmlNode dateNode = parentNode.CreateChildElement("Date");
- dateNode.SetText(m_date.ToGmtString(DateFormat::ISO_8601));
+ dateNode.SetText(m_date.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_daysHasBeenSet)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleAndOperator.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleAndOperator.cpp
index 833c1d99ea..b3358b10c3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleAndOperator.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleAndOperator.cpp
@@ -22,13 +22,21 @@ namespace Model
LifecycleRuleAndOperator::LifecycleRuleAndOperator() :
m_prefixHasBeenSet(false),
- m_tagsHasBeenSet(false)
+ m_tagsHasBeenSet(false),
+ m_objectSizeGreaterThan(0),
+ m_objectSizeGreaterThanHasBeenSet(false),
+ m_objectSizeLessThan(0),
+ m_objectSizeLessThanHasBeenSet(false)
{
}
LifecycleRuleAndOperator::LifecycleRuleAndOperator(const XmlNode& xmlNode) :
m_prefixHasBeenSet(false),
- m_tagsHasBeenSet(false)
+ m_tagsHasBeenSet(false),
+ m_objectSizeGreaterThan(0),
+ m_objectSizeGreaterThanHasBeenSet(false),
+ m_objectSizeLessThan(0),
+ m_objectSizeLessThanHasBeenSet(false)
{
*this = xmlNode;
}
@@ -57,6 +65,18 @@ LifecycleRuleAndOperator& LifecycleRuleAndOperator::operator =(const XmlNode& xm
m_tagsHasBeenSet = true;
}
+ XmlNode objectSizeGreaterThanNode = resultNode.FirstChild("ObjectSizeGreaterThan");
+ if(!objectSizeGreaterThanNode.IsNull())
+ {
+ m_objectSizeGreaterThan = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(objectSizeGreaterThanNode.GetText()).c_str()).c_str());
+ m_objectSizeGreaterThanHasBeenSet = true;
+ }
+ XmlNode objectSizeLessThanNode = resultNode.FirstChild("ObjectSizeLessThan");
+ if(!objectSizeLessThanNode.IsNull())
+ {
+ m_objectSizeLessThan = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(objectSizeLessThanNode.GetText()).c_str()).c_str());
+ m_objectSizeLessThanHasBeenSet = true;
+ }
}
return *this;
@@ -81,6 +101,22 @@ void LifecycleRuleAndOperator::AddToNode(XmlNode& parentNode) const
}
}
+ if(m_objectSizeGreaterThanHasBeenSet)
+ {
+ XmlNode objectSizeGreaterThanNode = parentNode.CreateChildElement("ObjectSizeGreaterThan");
+ ss << m_objectSizeGreaterThan;
+ objectSizeGreaterThanNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_objectSizeLessThanHasBeenSet)
+ {
+ XmlNode objectSizeLessThanNode = parentNode.CreateChildElement("ObjectSizeLessThan");
+ ss << m_objectSizeLessThan;
+ objectSizeLessThanNode.SetText(ss.str());
+ ss.str("");
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleFilter.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleFilter.cpp
index 0f56fb9044..ee9aedeed5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleFilter.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/LifecycleRuleFilter.cpp
@@ -23,6 +23,10 @@ namespace Model
LifecycleRuleFilter::LifecycleRuleFilter() :
m_prefixHasBeenSet(false),
m_tagHasBeenSet(false),
+ m_objectSizeGreaterThan(0),
+ m_objectSizeGreaterThanHasBeenSet(false),
+ m_objectSizeLessThan(0),
+ m_objectSizeLessThanHasBeenSet(false),
m_andHasBeenSet(false)
{
}
@@ -30,6 +34,10 @@ LifecycleRuleFilter::LifecycleRuleFilter() :
LifecycleRuleFilter::LifecycleRuleFilter(const XmlNode& xmlNode) :
m_prefixHasBeenSet(false),
m_tagHasBeenSet(false),
+ m_objectSizeGreaterThan(0),
+ m_objectSizeGreaterThanHasBeenSet(false),
+ m_objectSizeLessThan(0),
+ m_objectSizeLessThanHasBeenSet(false),
m_andHasBeenSet(false)
{
*this = xmlNode;
@@ -53,6 +61,18 @@ LifecycleRuleFilter& LifecycleRuleFilter::operator =(const XmlNode& xmlNode)
m_tag = tagNode;
m_tagHasBeenSet = true;
}
+ XmlNode objectSizeGreaterThanNode = resultNode.FirstChild("ObjectSizeGreaterThan");
+ if(!objectSizeGreaterThanNode.IsNull())
+ {
+ m_objectSizeGreaterThan = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(objectSizeGreaterThanNode.GetText()).c_str()).c_str());
+ m_objectSizeGreaterThanHasBeenSet = true;
+ }
+ XmlNode objectSizeLessThanNode = resultNode.FirstChild("ObjectSizeLessThan");
+ if(!objectSizeLessThanNode.IsNull())
+ {
+ m_objectSizeLessThan = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(objectSizeLessThanNode.GetText()).c_str()).c_str());
+ m_objectSizeLessThanHasBeenSet = true;
+ }
XmlNode andNode = resultNode.FirstChild("And");
if(!andNode.IsNull())
{
@@ -79,6 +99,22 @@ void LifecycleRuleFilter::AddToNode(XmlNode& parentNode) const
m_tag.AddToNode(tagNode);
}
+ if(m_objectSizeGreaterThanHasBeenSet)
+ {
+ XmlNode objectSizeGreaterThanNode = parentNode.CreateChildElement("ObjectSizeGreaterThan");
+ ss << m_objectSizeGreaterThan;
+ objectSizeGreaterThanNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_objectSizeLessThanHasBeenSet)
+ {
+ XmlNode objectSizeLessThanNode = parentNode.CreateChildElement("ObjectSizeLessThan");
+ ss << m_objectSizeLessThan;
+ objectSizeLessThanNode.SetText(ss.str());
+ ss.str("");
+ }
+
if(m_andHasBeenSet)
{
XmlNode andNode = parentNode.CreateChildElement("And");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketAnalyticsConfigurationsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketAnalyticsConfigurationsRequest.cpp
index 5b114920e1..3c79cd9684 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketAnalyticsConfigurationsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketAnalyticsConfigurationsRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection ListBucketAnalyticsConfigurationsRequest::GetRe
return headers;
}
+
+ListBucketAnalyticsConfigurationsRequest::EndpointParameters ListBucketAnalyticsConfigurationsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketIntelligentTieringConfigurationsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketIntelligentTieringConfigurationsRequest.cpp
index 8e83bdec35..80991f96d3 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketIntelligentTieringConfigurationsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketIntelligentTieringConfigurationsRequest.cpp
@@ -57,3 +57,13 @@ void ListBucketIntelligentTieringConfigurationsRequest::AddQueryStringParameters
}
}
+
+ListBucketIntelligentTieringConfigurationsRequest::EndpointParameters ListBucketIntelligentTieringConfigurationsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketInventoryConfigurationsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketInventoryConfigurationsRequest.cpp
index fbd8f9ff83..4487984600 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketInventoryConfigurationsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketInventoryConfigurationsRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection ListBucketInventoryConfigurationsRequest::GetRe
return headers;
}
+
+ListBucketInventoryConfigurationsRequest::EndpointParameters ListBucketInventoryConfigurationsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketMetricsConfigurationsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketMetricsConfigurationsRequest.cpp
index 3978ec46f8..db5bd39a59 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketMetricsConfigurationsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListBucketMetricsConfigurationsRequest.cpp
@@ -71,3 +71,13 @@ Aws::Http::HeaderValueCollection ListBucketMetricsConfigurationsRequest::GetRequ
return headers;
}
+
+ListBucketMetricsConfigurationsRequest::EndpointParameters ListBucketMetricsConfigurationsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListMultipartUploadsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListMultipartUploadsRequest.cpp
index 9723e16a9d..528104a503 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListMultipartUploadsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListMultipartUploadsRequest.cpp
@@ -113,3 +113,13 @@ Aws::Http::HeaderValueCollection ListMultipartUploadsRequest::GetRequestSpecific
return headers;
}
+
+ListMultipartUploadsRequest::EndpointParameters ListMultipartUploadsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectVersionsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectVersionsRequest.cpp
index 2336f09ef8..471cb4a33d 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectVersionsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectVersionsRequest.cpp
@@ -113,3 +113,13 @@ Aws::Http::HeaderValueCollection ListObjectVersionsRequest::GetRequestSpecificHe
return headers;
}
+
+ListObjectVersionsRequest::EndpointParameters ListObjectVersionsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsRequest.cpp
index 33abec092f..a32047ac5f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsRequest.cpp
@@ -112,3 +112,13 @@ Aws::Http::HeaderValueCollection ListObjectsRequest::GetRequestSpecificHeaders()
return headers;
}
+
+ListObjectsRequest::EndpointParameters ListObjectsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsV2Request.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsV2Request.cpp
index 2feb9b83c1..68a4ddc2df 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsV2Request.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListObjectsV2Request.cpp
@@ -129,3 +129,13 @@ Aws::Http::HeaderValueCollection ListObjectsV2Request::GetRequestSpecificHeaders
return headers;
}
+
+ListObjectsV2Request::EndpointParameters ListObjectsV2Request::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsRequest.cpp
index 59ff12e4d1..2ddc183789 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsRequest.cpp
@@ -27,6 +27,9 @@ ListPartsRequest::ListPartsRequest() :
m_requestPayer(RequestPayer::NOT_SET),
m_requestPayerHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_sSECustomerAlgorithmHasBeenSet(false),
+ m_sSECustomerKeyHasBeenSet(false),
+ m_sSECustomerKeyMD5HasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -95,5 +98,36 @@ Aws::Http::HeaderValueCollection ListPartsRequest::GetRequestSpecificHeaders() c
ss.str("");
}
+ if(m_sSECustomerAlgorithmHasBeenSet)
+ {
+ ss << m_sSECustomerAlgorithm;
+ headers.emplace("x-amz-server-side-encryption-customer-algorithm", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerKeyHasBeenSet)
+ {
+ ss << m_sSECustomerKey;
+ headers.emplace("x-amz-server-side-encryption-customer-key", ss.str());
+ ss.str("");
+ }
+
+ if(m_sSECustomerKeyMD5HasBeenSet)
+ {
+ ss << m_sSECustomerKeyMD5;
+ headers.emplace("x-amz-server-side-encryption-customer-key-md5", ss.str());
+ ss.str("");
+ }
+
return headers;
}
+
+ListPartsRequest::EndpointParameters ListPartsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsResult.cpp
index f4638f2a06..d61990cf1b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ListPartsResult.cpp
@@ -22,7 +22,8 @@ ListPartsResult::ListPartsResult() :
m_maxParts(0),
m_isTruncated(false),
m_storageClass(StorageClass::NOT_SET),
- m_requestCharged(RequestCharged::NOT_SET)
+ m_requestCharged(RequestCharged::NOT_SET),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET)
{
}
@@ -32,7 +33,8 @@ ListPartsResult::ListPartsResult(const Aws::AmazonWebServiceResult<XmlDocument>&
m_maxParts(0),
m_isTruncated(false),
m_storageClass(StorageClass::NOT_SET),
- m_requestCharged(RequestCharged::NOT_SET)
+ m_requestCharged(RequestCharged::NOT_SET),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET)
{
*this = result;
}
@@ -105,13 +107,18 @@ ListPartsResult& ListPartsResult::operator =(const Aws::AmazonWebServiceResult<X
{
m_storageClass = StorageClassMapper::GetStorageClassForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(storageClassNode.GetText()).c_str()).c_str());
}
+ XmlNode checksumAlgorithmNode = resultNode.FirstChild("ChecksumAlgorithm");
+ if(!checksumAlgorithmNode.IsNull())
+ {
+ m_checksumAlgorithm = ChecksumAlgorithmMapper::GetChecksumAlgorithmForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(checksumAlgorithmNode.GetText()).c_str()).c_str());
+ }
}
const auto& headers = result.GetHeaderValueCollection();
const auto& abortDateIter = headers.find("x-amz-abort-date");
if(abortDateIter != headers.end())
{
- m_abortDate = DateTime(abortDateIter->second, DateFormat::RFC822);
+ m_abortDate = DateTime(abortDateIter->second, Aws::Utils::DateFormat::RFC822);
}
const auto& abortRuleIdIter = headers.find("x-amz-abort-rule-id");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsAndOperator.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsAndOperator.cpp
index 74951cd0eb..711740443b 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsAndOperator.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsAndOperator.cpp
@@ -22,13 +22,15 @@ namespace Model
MetricsAndOperator::MetricsAndOperator() :
m_prefixHasBeenSet(false),
- m_tagsHasBeenSet(false)
+ m_tagsHasBeenSet(false),
+ m_accessPointArnHasBeenSet(false)
{
}
MetricsAndOperator::MetricsAndOperator(const XmlNode& xmlNode) :
m_prefixHasBeenSet(false),
- m_tagsHasBeenSet(false)
+ m_tagsHasBeenSet(false),
+ m_accessPointArnHasBeenSet(false)
{
*this = xmlNode;
}
@@ -57,6 +59,12 @@ MetricsAndOperator& MetricsAndOperator::operator =(const XmlNode& xmlNode)
m_tagsHasBeenSet = true;
}
+ XmlNode accessPointArnNode = resultNode.FirstChild("AccessPointArn");
+ if(!accessPointArnNode.IsNull())
+ {
+ m_accessPointArn = Aws::Utils::Xml::DecodeEscapedXmlText(accessPointArnNode.GetText());
+ m_accessPointArnHasBeenSet = true;
+ }
}
return *this;
@@ -81,6 +89,12 @@ void MetricsAndOperator::AddToNode(XmlNode& parentNode) const
}
}
+ if(m_accessPointArnHasBeenSet)
+ {
+ XmlNode accessPointArnNode = parentNode.CreateChildElement("AccessPointArn");
+ accessPointArnNode.SetText(m_accessPointArn);
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsFilter.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsFilter.cpp
index fa7ade5849..90cda86e09 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsFilter.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MetricsFilter.cpp
@@ -23,6 +23,7 @@ namespace Model
MetricsFilter::MetricsFilter() :
m_prefixHasBeenSet(false),
m_tagHasBeenSet(false),
+ m_accessPointArnHasBeenSet(false),
m_andHasBeenSet(false)
{
}
@@ -30,6 +31,7 @@ MetricsFilter::MetricsFilter() :
MetricsFilter::MetricsFilter(const XmlNode& xmlNode) :
m_prefixHasBeenSet(false),
m_tagHasBeenSet(false),
+ m_accessPointArnHasBeenSet(false),
m_andHasBeenSet(false)
{
*this = xmlNode;
@@ -53,6 +55,12 @@ MetricsFilter& MetricsFilter::operator =(const XmlNode& xmlNode)
m_tag = tagNode;
m_tagHasBeenSet = true;
}
+ XmlNode accessPointArnNode = resultNode.FirstChild("AccessPointArn");
+ if(!accessPointArnNode.IsNull())
+ {
+ m_accessPointArn = Aws::Utils::Xml::DecodeEscapedXmlText(accessPointArnNode.GetText());
+ m_accessPointArnHasBeenSet = true;
+ }
XmlNode andNode = resultNode.FirstChild("And");
if(!andNode.IsNull())
{
@@ -79,6 +87,12 @@ void MetricsFilter::AddToNode(XmlNode& parentNode) const
m_tag.AddToNode(tagNode);
}
+ if(m_accessPointArnHasBeenSet)
+ {
+ XmlNode accessPointArnNode = parentNode.CreateChildElement("AccessPointArn");
+ accessPointArnNode.SetText(m_accessPointArn);
+ }
+
if(m_andHasBeenSet)
{
XmlNode andNode = parentNode.CreateChildElement("And");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MultipartUpload.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MultipartUpload.cpp
index 6d92ea2cb6..c64926a272 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MultipartUpload.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/MultipartUpload.cpp
@@ -27,7 +27,9 @@ MultipartUpload::MultipartUpload() :
m_storageClass(StorageClass::NOT_SET),
m_storageClassHasBeenSet(false),
m_ownerHasBeenSet(false),
- m_initiatorHasBeenSet(false)
+ m_initiatorHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false)
{
}
@@ -38,7 +40,9 @@ MultipartUpload::MultipartUpload(const XmlNode& xmlNode) :
m_storageClass(StorageClass::NOT_SET),
m_storageClassHasBeenSet(false),
m_ownerHasBeenSet(false),
- m_initiatorHasBeenSet(false)
+ m_initiatorHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false)
{
*this = xmlNode;
}
@@ -64,7 +68,7 @@ MultipartUpload& MultipartUpload::operator =(const XmlNode& xmlNode)
XmlNode initiatedNode = resultNode.FirstChild("Initiated");
if(!initiatedNode.IsNull())
{
- m_initiated = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(initiatedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_initiated = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(initiatedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_initiatedHasBeenSet = true;
}
XmlNode storageClassNode = resultNode.FirstChild("StorageClass");
@@ -85,6 +89,12 @@ MultipartUpload& MultipartUpload::operator =(const XmlNode& xmlNode)
m_initiator = initiatorNode;
m_initiatorHasBeenSet = true;
}
+ XmlNode checksumAlgorithmNode = resultNode.FirstChild("ChecksumAlgorithm");
+ if(!checksumAlgorithmNode.IsNull())
+ {
+ m_checksumAlgorithm = ChecksumAlgorithmMapper::GetChecksumAlgorithmForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(checksumAlgorithmNode.GetText()).c_str()).c_str());
+ m_checksumAlgorithmHasBeenSet = true;
+ }
}
return *this;
@@ -108,7 +118,7 @@ void MultipartUpload::AddToNode(XmlNode& parentNode) const
if(m_initiatedHasBeenSet)
{
XmlNode initiatedNode = parentNode.CreateChildElement("Initiated");
- initiatedNode.SetText(m_initiated.ToGmtString(DateFormat::ISO_8601));
+ initiatedNode.SetText(m_initiated.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_storageClassHasBeenSet)
@@ -129,6 +139,12 @@ void MultipartUpload::AddToNode(XmlNode& parentNode) const
m_initiator.AddToNode(initiatorNode);
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ XmlNode checksumAlgorithmNode = parentNode.CreateChildElement("ChecksumAlgorithm");
+ checksumAlgorithmNode.SetText(ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionExpiration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionExpiration.cpp
index ab6db39f61..a3282e5fc9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionExpiration.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionExpiration.cpp
@@ -22,13 +22,17 @@ namespace Model
NoncurrentVersionExpiration::NoncurrentVersionExpiration() :
m_noncurrentDays(0),
- m_noncurrentDaysHasBeenSet(false)
+ m_noncurrentDaysHasBeenSet(false),
+ m_newerNoncurrentVersions(0),
+ m_newerNoncurrentVersionsHasBeenSet(false)
{
}
NoncurrentVersionExpiration::NoncurrentVersionExpiration(const XmlNode& xmlNode) :
m_noncurrentDays(0),
- m_noncurrentDaysHasBeenSet(false)
+ m_noncurrentDaysHasBeenSet(false),
+ m_newerNoncurrentVersions(0),
+ m_newerNoncurrentVersionsHasBeenSet(false)
{
*this = xmlNode;
}
@@ -45,6 +49,12 @@ NoncurrentVersionExpiration& NoncurrentVersionExpiration::operator =(const XmlNo
m_noncurrentDays = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(noncurrentDaysNode.GetText()).c_str()).c_str());
m_noncurrentDaysHasBeenSet = true;
}
+ XmlNode newerNoncurrentVersionsNode = resultNode.FirstChild("NewerNoncurrentVersions");
+ if(!newerNoncurrentVersionsNode.IsNull())
+ {
+ m_newerNoncurrentVersions = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(newerNoncurrentVersionsNode.GetText()).c_str()).c_str());
+ m_newerNoncurrentVersionsHasBeenSet = true;
+ }
}
return *this;
@@ -61,6 +71,14 @@ void NoncurrentVersionExpiration::AddToNode(XmlNode& parentNode) const
ss.str("");
}
+ if(m_newerNoncurrentVersionsHasBeenSet)
+ {
+ XmlNode newerNoncurrentVersionsNode = parentNode.CreateChildElement("NewerNoncurrentVersions");
+ ss << m_newerNoncurrentVersions;
+ newerNoncurrentVersionsNode.SetText(ss.str());
+ ss.str("");
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionTransition.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionTransition.cpp
index e7027d679b..71d5880890 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionTransition.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NoncurrentVersionTransition.cpp
@@ -24,7 +24,9 @@ NoncurrentVersionTransition::NoncurrentVersionTransition() :
m_noncurrentDays(0),
m_noncurrentDaysHasBeenSet(false),
m_storageClass(TransitionStorageClass::NOT_SET),
- m_storageClassHasBeenSet(false)
+ m_storageClassHasBeenSet(false),
+ m_newerNoncurrentVersions(0),
+ m_newerNoncurrentVersionsHasBeenSet(false)
{
}
@@ -32,7 +34,9 @@ NoncurrentVersionTransition::NoncurrentVersionTransition(const XmlNode& xmlNode)
m_noncurrentDays(0),
m_noncurrentDaysHasBeenSet(false),
m_storageClass(TransitionStorageClass::NOT_SET),
- m_storageClassHasBeenSet(false)
+ m_storageClassHasBeenSet(false),
+ m_newerNoncurrentVersions(0),
+ m_newerNoncurrentVersionsHasBeenSet(false)
{
*this = xmlNode;
}
@@ -55,6 +59,12 @@ NoncurrentVersionTransition& NoncurrentVersionTransition::operator =(const XmlNo
m_storageClass = TransitionStorageClassMapper::GetTransitionStorageClassForName(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(storageClassNode.GetText()).c_str()).c_str());
m_storageClassHasBeenSet = true;
}
+ XmlNode newerNoncurrentVersionsNode = resultNode.FirstChild("NewerNoncurrentVersions");
+ if(!newerNoncurrentVersionsNode.IsNull())
+ {
+ m_newerNoncurrentVersions = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(newerNoncurrentVersionsNode.GetText()).c_str()).c_str());
+ m_newerNoncurrentVersionsHasBeenSet = true;
+ }
}
return *this;
@@ -77,6 +87,14 @@ void NoncurrentVersionTransition::AddToNode(XmlNode& parentNode) const
storageClassNode.SetText(TransitionStorageClassMapper::GetNameForTransitionStorageClass(m_storageClass));
}
+ if(m_newerNoncurrentVersionsHasBeenSet)
+ {
+ XmlNode newerNoncurrentVersionsNode = parentNode.CreateChildElement("NewerNoncurrentVersions");
+ ss << m_newerNoncurrentVersions;
+ newerNoncurrentVersionsNode.SetText(ss.str());
+ ss.str("");
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfiguration.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfiguration.cpp
index bb62ecb84d..b3b79d4a5f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfiguration.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/NotificationConfiguration.cpp
@@ -23,14 +23,16 @@ namespace Model
NotificationConfiguration::NotificationConfiguration() :
m_topicConfigurationsHasBeenSet(false),
m_queueConfigurationsHasBeenSet(false),
- m_lambdaFunctionConfigurationsHasBeenSet(false)
+ m_lambdaFunctionConfigurationsHasBeenSet(false),
+ m_eventBridgeConfigurationHasBeenSet(false)
{
}
NotificationConfiguration::NotificationConfiguration(const XmlNode& xmlNode) :
m_topicConfigurationsHasBeenSet(false),
m_queueConfigurationsHasBeenSet(false),
- m_lambdaFunctionConfigurationsHasBeenSet(false)
+ m_lambdaFunctionConfigurationsHasBeenSet(false),
+ m_eventBridgeConfigurationHasBeenSet(false)
{
*this = xmlNode;
}
@@ -77,6 +79,12 @@ NotificationConfiguration& NotificationConfiguration::operator =(const XmlNode&
m_lambdaFunctionConfigurationsHasBeenSet = true;
}
+ XmlNode eventBridgeConfigurationNode = resultNode.FirstChild("EventBridgeConfiguration");
+ if(!eventBridgeConfigurationNode.IsNull())
+ {
+ m_eventBridgeConfiguration = eventBridgeConfigurationNode;
+ m_eventBridgeConfigurationHasBeenSet = true;
+ }
}
return *this;
@@ -112,6 +120,12 @@ void NotificationConfiguration::AddToNode(XmlNode& parentNode) const
}
}
+ if(m_eventBridgeConfigurationHasBeenSet)
+ {
+ XmlNode eventBridgeConfigurationNode = parentNode.CreateChildElement("EventBridgeConfiguration");
+ m_eventBridgeConfiguration.AddToNode(eventBridgeConfigurationNode);
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
index d7d963fc21..41f985e168 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Object.cpp
@@ -24,6 +24,7 @@ Object::Object() :
m_keyHasBeenSet(false),
m_lastModifiedHasBeenSet(false),
m_eTagHasBeenSet(false),
+ m_checksumAlgorithmHasBeenSet(false),
m_size(0),
m_sizeHasBeenSet(false),
m_storageClass(ObjectStorageClass::NOT_SET),
@@ -36,6 +37,7 @@ Object::Object(const XmlNode& xmlNode) :
m_keyHasBeenSet(false),
m_lastModifiedHasBeenSet(false),
m_eTagHasBeenSet(false),
+ m_checksumAlgorithmHasBeenSet(false),
m_size(0),
m_sizeHasBeenSet(false),
m_storageClass(ObjectStorageClass::NOT_SET),
@@ -60,7 +62,7 @@ Object& Object::operator =(const XmlNode& xmlNode)
XmlNode lastModifiedNode = resultNode.FirstChild("LastModified");
if(!lastModifiedNode.IsNull())
{
- m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_lastModifiedHasBeenSet = true;
}
XmlNode eTagNode = resultNode.FirstChild("ETag");
@@ -69,6 +71,18 @@ Object& Object::operator =(const XmlNode& xmlNode)
m_eTag = Aws::Utils::Xml::DecodeEscapedXmlText(eTagNode.GetText());
m_eTagHasBeenSet = true;
}
+ XmlNode checksumAlgorithmNode = resultNode.FirstChild("ChecksumAlgorithm");
+ if(!checksumAlgorithmNode.IsNull())
+ {
+ XmlNode checksumAlgorithmMember = checksumAlgorithmNode;
+ while(!checksumAlgorithmMember.IsNull())
+ {
+ m_checksumAlgorithm.push_back(ChecksumAlgorithmMapper::GetChecksumAlgorithmForName(StringUtils::Trim(checksumAlgorithmMember.GetText().c_str())));
+ checksumAlgorithmMember = checksumAlgorithmMember.NextNode("ChecksumAlgorithm");
+ }
+
+ m_checksumAlgorithmHasBeenSet = true;
+ }
XmlNode sizeNode = resultNode.FirstChild("Size");
if(!sizeNode.IsNull())
{
@@ -104,7 +118,7 @@ void Object::AddToNode(XmlNode& parentNode) const
if(m_lastModifiedHasBeenSet)
{
XmlNode lastModifiedNode = parentNode.CreateChildElement("LastModified");
- lastModifiedNode.SetText(m_lastModified.ToGmtString(DateFormat::ISO_8601));
+ lastModifiedNode.SetText(m_lastModified.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_eTagHasBeenSet)
@@ -113,6 +127,16 @@ void Object::AddToNode(XmlNode& parentNode) const
eTagNode.SetText(m_eTag);
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ XmlNode checksumAlgorithmParentNode = parentNode.CreateChildElement("ChecksumAlgorithm");
+ for(const auto& item : m_checksumAlgorithm)
+ {
+ XmlNode checksumAlgorithmNode = checksumAlgorithmParentNode.CreateChildElement("ChecksumAlgorithm");
+ checksumAlgorithmNode.SetText(ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(item));
+ }
+ }
+
if(m_sizeHasBeenSet)
{
XmlNode sizeNode = parentNode.CreateChildElement("Size");
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
new file mode 100644
index 0000000000..c4159dcde2
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectAttributes.cpp
@@ -0,0 +1,91 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/ObjectAttributes.h>
+#include <aws/core/utils/HashingUtils.h>
+#include <aws/core/Globals.h>
+#include <aws/core/utils/EnumParseOverflowContainer.h>
+
+using namespace Aws::Utils;
+
+
+namespace Aws
+{
+ namespace S3
+ {
+ namespace Model
+ {
+ namespace ObjectAttributesMapper
+ {
+
+ static const int ETag_HASH = HashingUtils::HashString("ETag");
+ static const int Checksum_HASH = HashingUtils::HashString("Checksum");
+ static const int ObjectParts_HASH = HashingUtils::HashString("ObjectParts");
+ static const int StorageClass_HASH = HashingUtils::HashString("StorageClass");
+ static const int ObjectSize_HASH = HashingUtils::HashString("ObjectSize");
+
+
+ ObjectAttributes GetObjectAttributesForName(const Aws::String& name)
+ {
+ int hashCode = HashingUtils::HashString(name.c_str());
+ if (hashCode == ETag_HASH)
+ {
+ return ObjectAttributes::ETag;
+ }
+ else if (hashCode == Checksum_HASH)
+ {
+ return ObjectAttributes::Checksum;
+ }
+ else if (hashCode == ObjectParts_HASH)
+ {
+ return ObjectAttributes::ObjectParts;
+ }
+ else if (hashCode == StorageClass_HASH)
+ {
+ return ObjectAttributes::StorageClass;
+ }
+ else if (hashCode == ObjectSize_HASH)
+ {
+ return ObjectAttributes::ObjectSize;
+ }
+ EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
+ if(overflowContainer)
+ {
+ overflowContainer->StoreOverflow(hashCode, name);
+ return static_cast<ObjectAttributes>(hashCode);
+ }
+
+ return ObjectAttributes::NOT_SET;
+ }
+
+ Aws::String GetNameForObjectAttributes(ObjectAttributes enumValue)
+ {
+ switch(enumValue)
+ {
+ case ObjectAttributes::ETag:
+ return "ETag";
+ case ObjectAttributes::Checksum:
+ return "Checksum";
+ case ObjectAttributes::ObjectParts:
+ return "ObjectParts";
+ case ObjectAttributes::StorageClass:
+ return "StorageClass";
+ case ObjectAttributes::ObjectSize:
+ return "ObjectSize";
+ default:
+ EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
+ if(overflowContainer)
+ {
+ return overflowContainer->RetrieveOverflow(static_cast<int>(enumValue));
+ }
+
+ return {};
+ }
+ }
+
+ } // namespace ObjectAttributesMapper
+ } // namespace Model
+ } // namespace S3
+} // namespace Aws
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetention.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetention.cpp
index 3ba5d5590e..f9b3d70c7f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetention.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectLockRetention.cpp
@@ -50,7 +50,7 @@ ObjectLockRetention& ObjectLockRetention::operator =(const XmlNode& xmlNode)
XmlNode retainUntilDateNode = resultNode.FirstChild("RetainUntilDate");
if(!retainUntilDateNode.IsNull())
{
- m_retainUntilDate = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(retainUntilDateNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_retainUntilDate = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(retainUntilDateNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_retainUntilDateHasBeenSet = true;
}
}
@@ -70,7 +70,7 @@ void ObjectLockRetention::AddToNode(XmlNode& parentNode) const
if(m_retainUntilDateHasBeenSet)
{
XmlNode retainUntilDateNode = parentNode.CreateChildElement("RetainUntilDate");
- retainUntilDateNode.SetText(m_retainUntilDate.ToGmtString(DateFormat::ISO_8601));
+ retainUntilDateNode.SetText(m_retainUntilDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
index 4e2a106c8e..7fba54e6c8 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectOwnership.cpp
@@ -22,6 +22,7 @@ namespace Aws
static const int BucketOwnerPreferred_HASH = HashingUtils::HashString("BucketOwnerPreferred");
static const int ObjectWriter_HASH = HashingUtils::HashString("ObjectWriter");
+ static const int BucketOwnerEnforced_HASH = HashingUtils::HashString("BucketOwnerEnforced");
ObjectOwnership GetObjectOwnershipForName(const Aws::String& name)
@@ -35,6 +36,10 @@ namespace Aws
{
return ObjectOwnership::ObjectWriter;
}
+ else if (hashCode == BucketOwnerEnforced_HASH)
+ {
+ return ObjectOwnership::BucketOwnerEnforced;
+ }
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
@@ -53,6 +58,8 @@ namespace Aws
return "BucketOwnerPreferred";
case ObjectOwnership::ObjectWriter:
return "ObjectWriter";
+ case ObjectOwnership::BucketOwnerEnforced:
+ return "BucketOwnerEnforced";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
new file mode 100644
index 0000000000..fdbd9eebc0
--- /dev/null
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectPart.cpp
@@ -0,0 +1,142 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/model/ObjectPart.h>
+#include <aws/core/utils/xml/XmlSerializer.h>
+#include <aws/core/utils/StringUtils.h>
+#include <aws/core/utils/memory/stl/AWSStringStream.h>
+
+#include <utility>
+
+using namespace Aws::Utils::Xml;
+using namespace Aws::Utils;
+
+namespace Aws
+{
+namespace S3
+{
+namespace Model
+{
+
+ObjectPart::ObjectPart() :
+ m_partNumber(0),
+ m_partNumberHasBeenSet(false),
+ m_size(0),
+ m_sizeHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
+{
+}
+
+ObjectPart::ObjectPart(const XmlNode& xmlNode) :
+ m_partNumber(0),
+ m_partNumberHasBeenSet(false),
+ m_size(0),
+ m_sizeHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
+{
+ *this = xmlNode;
+}
+
+ObjectPart& ObjectPart::operator =(const XmlNode& xmlNode)
+{
+ XmlNode resultNode = xmlNode;
+
+ if(!resultNode.IsNull())
+ {
+ XmlNode partNumberNode = resultNode.FirstChild("PartNumber");
+ if(!partNumberNode.IsNull())
+ {
+ m_partNumber = StringUtils::ConvertToInt32(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(partNumberNode.GetText()).c_str()).c_str());
+ m_partNumberHasBeenSet = true;
+ }
+ XmlNode sizeNode = resultNode.FirstChild("Size");
+ if(!sizeNode.IsNull())
+ {
+ m_size = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(sizeNode.GetText()).c_str()).c_str());
+ m_sizeHasBeenSet = true;
+ }
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ m_checksumCRC32HasBeenSet = true;
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ m_checksumCRC32CHasBeenSet = true;
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ m_checksumSHA1HasBeenSet = true;
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ m_checksumSHA256HasBeenSet = true;
+ }
+ }
+
+ return *this;
+}
+
+void ObjectPart::AddToNode(XmlNode& parentNode) const
+{
+ Aws::StringStream ss;
+ if(m_partNumberHasBeenSet)
+ {
+ XmlNode partNumberNode = parentNode.CreateChildElement("PartNumber");
+ ss << m_partNumber;
+ partNumberNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_sizeHasBeenSet)
+ {
+ XmlNode sizeNode = parentNode.CreateChildElement("Size");
+ ss << m_size;
+ sizeNode.SetText(ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumCRC32HasBeenSet)
+ {
+ XmlNode checksumCRC32Node = parentNode.CreateChildElement("ChecksumCRC32");
+ checksumCRC32Node.SetText(m_checksumCRC32);
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ XmlNode checksumCRC32CNode = parentNode.CreateChildElement("ChecksumCRC32C");
+ checksumCRC32CNode.SetText(m_checksumCRC32C);
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ XmlNode checksumSHA1Node = parentNode.CreateChildElement("ChecksumSHA1");
+ checksumSHA1Node.SetText(m_checksumSHA1);
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ XmlNode checksumSHA256Node = parentNode.CreateChildElement("ChecksumSHA256");
+ checksumSHA256Node.SetText(m_checksumSHA256);
+ }
+
+}
+
+} // namespace Model
+} // namespace S3
+} // namespace Aws
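Note: ObjectPart is new model state for the GetObjectAttributes object-parts listing; it deserializes PartNumber, Size, and the four checksum members from XML and writes them back in AddToNode. A small parsing sketch, assuming the conventional generated accessors (GetPartNumber, GetSize) declared in aws/s3/model/ObjectPart.h:

    // Sketch: builds an ObjectPart from an XML fragment, exercising the
    // operator=(const XmlNode&) defined above.
    #include <aws/s3/model/ObjectPart.h>
    #include <aws/core/utils/xml/XmlSerializer.h>
    #include <iostream>

    int main()
    {
        const Aws::String xml =
            "<Part><PartNumber>1</PartNumber><Size>5242880</Size>"
            "<ChecksumSHA256>examplebase64digest==</ChecksumSHA256></Part>";

        Aws::Utils::Xml::XmlDocument doc = Aws::Utils::Xml::XmlDocument::CreateFromXmlString(xml);
        Aws::S3::Model::ObjectPart part(doc.GetRootElement());

        std::cout << part.GetPartNumber() << " " << part.GetSize() << std::endl; // 1 5242880
        return 0;
    }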
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
index 572041c798..fd0e06fbc4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectStorageClass.cpp
@@ -28,6 +28,7 @@ namespace Aws
static const int INTELLIGENT_TIERING_HASH = HashingUtils::HashString("INTELLIGENT_TIERING");
static const int DEEP_ARCHIVE_HASH = HashingUtils::HashString("DEEP_ARCHIVE");
static const int OUTPOSTS_HASH = HashingUtils::HashString("OUTPOSTS");
+ static const int GLACIER_IR_HASH = HashingUtils::HashString("GLACIER_IR");
ObjectStorageClass GetObjectStorageClassForName(const Aws::String& name)
@@ -65,6 +66,10 @@ namespace Aws
{
return ObjectStorageClass::OUTPOSTS;
}
+ else if (hashCode == GLACIER_IR_HASH)
+ {
+ return ObjectStorageClass::GLACIER_IR;
+ }
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
@@ -95,6 +100,8 @@ namespace Aws
return "DEEP_ARCHIVE";
case ObjectStorageClass::OUTPOSTS:
return "OUTPOSTS";
+ case ObjectStorageClass::GLACIER_IR:
+ return "GLACIER_IR";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
index c7b720f252..11a8542a02 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/ObjectVersion.cpp
@@ -22,6 +22,7 @@ namespace Model
ObjectVersion::ObjectVersion() :
m_eTagHasBeenSet(false),
+ m_checksumAlgorithmHasBeenSet(false),
m_size(0),
m_sizeHasBeenSet(false),
m_storageClass(ObjectVersionStorageClass::NOT_SET),
@@ -37,6 +38,7 @@ ObjectVersion::ObjectVersion() :
ObjectVersion::ObjectVersion(const XmlNode& xmlNode) :
m_eTagHasBeenSet(false),
+ m_checksumAlgorithmHasBeenSet(false),
m_size(0),
m_sizeHasBeenSet(false),
m_storageClass(ObjectVersionStorageClass::NOT_SET),
@@ -63,6 +65,18 @@ ObjectVersion& ObjectVersion::operator =(const XmlNode& xmlNode)
m_eTag = Aws::Utils::Xml::DecodeEscapedXmlText(eTagNode.GetText());
m_eTagHasBeenSet = true;
}
+ XmlNode checksumAlgorithmNode = resultNode.FirstChild("ChecksumAlgorithm");
+ if(!checksumAlgorithmNode.IsNull())
+ {
+ XmlNode checksumAlgorithmMember = checksumAlgorithmNode;
+ while(!checksumAlgorithmMember.IsNull())
+ {
+ m_checksumAlgorithm.push_back(ChecksumAlgorithmMapper::GetChecksumAlgorithmForName(StringUtils::Trim(checksumAlgorithmMember.GetText().c_str())));
+ checksumAlgorithmMember = checksumAlgorithmMember.NextNode("ChecksumAlgorithm");
+ }
+
+ m_checksumAlgorithmHasBeenSet = true;
+ }
XmlNode sizeNode = resultNode.FirstChild("Size");
if(!sizeNode.IsNull())
{
@@ -96,7 +110,7 @@ ObjectVersion& ObjectVersion::operator =(const XmlNode& xmlNode)
XmlNode lastModifiedNode = resultNode.FirstChild("LastModified");
if(!lastModifiedNode.IsNull())
{
- m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_lastModifiedHasBeenSet = true;
}
XmlNode ownerNode = resultNode.FirstChild("Owner");
@@ -119,6 +133,16 @@ void ObjectVersion::AddToNode(XmlNode& parentNode) const
eTagNode.SetText(m_eTag);
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ XmlNode checksumAlgorithmParentNode = parentNode.CreateChildElement("ChecksumAlgorithm");
+ for(const auto& item : m_checksumAlgorithm)
+ {
+ XmlNode checksumAlgorithmNode = checksumAlgorithmParentNode.CreateChildElement("ChecksumAlgorithm");
+ checksumAlgorithmNode.SetText(ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(item));
+ }
+ }
+
if(m_sizeHasBeenSet)
{
XmlNode sizeNode = parentNode.CreateChildElement("Size");
@@ -156,7 +180,7 @@ void ObjectVersion::AddToNode(XmlNode& parentNode) const
if(m_lastModifiedHasBeenSet)
{
XmlNode lastModifiedNode = parentNode.CreateChildElement("LastModified");
- lastModifiedNode.SetText(m_lastModified.ToGmtString(DateFormat::ISO_8601));
+ lastModifiedNode.SetText(m_lastModified.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_ownerHasBeenSet)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Part.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Part.cpp
index a243f72d10..d0cd0f01cf 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Part.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Part.cpp
@@ -26,7 +26,11 @@ Part::Part() :
m_lastModifiedHasBeenSet(false),
m_eTagHasBeenSet(false),
m_size(0),
- m_sizeHasBeenSet(false)
+ m_sizeHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
{
}
@@ -36,7 +40,11 @@ Part::Part(const XmlNode& xmlNode) :
m_lastModifiedHasBeenSet(false),
m_eTagHasBeenSet(false),
m_size(0),
- m_sizeHasBeenSet(false)
+ m_sizeHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false)
{
*this = xmlNode;
}
@@ -56,7 +64,7 @@ Part& Part::operator =(const XmlNode& xmlNode)
XmlNode lastModifiedNode = resultNode.FirstChild("LastModified");
if(!lastModifiedNode.IsNull())
{
- m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_lastModified = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(lastModifiedNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_lastModifiedHasBeenSet = true;
}
XmlNode eTagNode = resultNode.FirstChild("ETag");
@@ -71,6 +79,30 @@ Part& Part::operator =(const XmlNode& xmlNode)
m_size = StringUtils::ConvertToInt64(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(sizeNode.GetText()).c_str()).c_str());
m_sizeHasBeenSet = true;
}
+ XmlNode checksumCRC32Node = resultNode.FirstChild("ChecksumCRC32");
+ if(!checksumCRC32Node.IsNull())
+ {
+ m_checksumCRC32 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32Node.GetText());
+ m_checksumCRC32HasBeenSet = true;
+ }
+ XmlNode checksumCRC32CNode = resultNode.FirstChild("ChecksumCRC32C");
+ if(!checksumCRC32CNode.IsNull())
+ {
+ m_checksumCRC32C = Aws::Utils::Xml::DecodeEscapedXmlText(checksumCRC32CNode.GetText());
+ m_checksumCRC32CHasBeenSet = true;
+ }
+ XmlNode checksumSHA1Node = resultNode.FirstChild("ChecksumSHA1");
+ if(!checksumSHA1Node.IsNull())
+ {
+ m_checksumSHA1 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA1Node.GetText());
+ m_checksumSHA1HasBeenSet = true;
+ }
+ XmlNode checksumSHA256Node = resultNode.FirstChild("ChecksumSHA256");
+ if(!checksumSHA256Node.IsNull())
+ {
+ m_checksumSHA256 = Aws::Utils::Xml::DecodeEscapedXmlText(checksumSHA256Node.GetText());
+ m_checksumSHA256HasBeenSet = true;
+ }
}
return *this;
@@ -90,7 +122,7 @@ void Part::AddToNode(XmlNode& parentNode) const
if(m_lastModifiedHasBeenSet)
{
XmlNode lastModifiedNode = parentNode.CreateChildElement("LastModified");
- lastModifiedNode.SetText(m_lastModified.ToGmtString(DateFormat::ISO_8601));
+ lastModifiedNode.SetText(m_lastModified.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_eTagHasBeenSet)
@@ -107,6 +139,30 @@ void Part::AddToNode(XmlNode& parentNode) const
ss.str("");
}
+ if(m_checksumCRC32HasBeenSet)
+ {
+ XmlNode checksumCRC32Node = parentNode.CreateChildElement("ChecksumCRC32");
+ checksumCRC32Node.SetText(m_checksumCRC32);
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ XmlNode checksumCRC32CNode = parentNode.CreateChildElement("ChecksumCRC32C");
+ checksumCRC32CNode.SetText(m_checksumCRC32C);
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ XmlNode checksumSHA1Node = parentNode.CreateChildElement("ChecksumSHA1");
+ checksumSHA1Node.SetText(m_checksumSHA1);
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ XmlNode checksumSHA256Node = parentNode.CreateChildElement("ChecksumSHA256");
+ checksumSHA256Node.SetText(m_checksumSHA256);
+ }
+
}
} // namespace Model
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAccelerateConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAccelerateConfigurationRequest.cpp
index 15da0c32a1..c58ed72a04 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAccelerateConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAccelerateConfigurationRequest.cpp
@@ -20,6 +20,8 @@ PutBucketAccelerateConfigurationRequest::PutBucketAccelerateConfigurationRequest
m_bucketHasBeenSet(false),
m_accelerateConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -73,5 +75,33 @@ Aws::Http::HeaderValueCollection PutBucketAccelerateConfigurationRequest::GetReq
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
return headers;
}
+
+PutBucketAccelerateConfigurationRequest::EndpointParameters PutBucketAccelerateConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketAccelerateConfigurationRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAclRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAclRequest.cpp
index 5a3efcac19..a7273a85a4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAclRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAclRequest.cpp
@@ -22,6 +22,8 @@ PutBucketAclRequest::PutBucketAclRequest() :
m_accessControlPolicyHasBeenSet(false),
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_grantFullControlHasBeenSet(false),
m_grantReadHasBeenSet(false),
m_grantReadACPHasBeenSet(false),
@@ -86,6 +88,11 @@ Aws::Http::HeaderValueCollection PutBucketAclRequest::GetRequestSpecificHeaders(
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_grantFullControlHasBeenSet)
{
ss << m_grantFullControl;
@@ -130,3 +137,26 @@ Aws::Http::HeaderValueCollection PutBucketAclRequest::GetRequestSpecificHeaders(
return headers;
}
+
+PutBucketAclRequest::EndpointParameters PutBucketAclRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketAclRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAnalyticsConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAnalyticsConfigurationRequest.cpp
index 73f9111f63..29d6e03e59 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAnalyticsConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketAnalyticsConfigurationRequest.cpp
@@ -83,3 +83,13 @@ Aws::Http::HeaderValueCollection PutBucketAnalyticsConfigurationRequest::GetRequ
return headers;
}
+
+PutBucketAnalyticsConfigurationRequest::EndpointParameters PutBucketAnalyticsConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketCorsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketCorsRequest.cpp
index 6f90824e2c..4e8ee9730f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketCorsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketCorsRequest.cpp
@@ -20,6 +20,8 @@ PutBucketCorsRequest::PutBucketCorsRequest() :
m_bucketHasBeenSet(false),
m_cORSConfigurationHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutBucketCorsRequest::GetRequestSpecificHeaders
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutBucketCorsRequest::GetRequestSpecificHeaders
return headers;
}
+
+PutBucketCorsRequest::EndpointParameters PutBucketCorsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketCorsRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketEncryptionRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketEncryptionRequest.cpp
index 26b60f385e..7a9e14da68 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketEncryptionRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketEncryptionRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutBucketEncryptionRequest::PutBucketEncryptionRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_serverSideEncryptionConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutBucketEncryptionRequest::GetRequestSpecificH
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutBucketEncryptionRequest::GetRequestSpecificH
return headers;
}
+
+PutBucketEncryptionRequest::EndpointParameters PutBucketEncryptionRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketEncryptionRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketIntelligentTieringConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketIntelligentTieringConfigurationRequest.cpp
index 24bc947b71..17c63d7456 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketIntelligentTieringConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketIntelligentTieringConfigurationRequest.cpp
@@ -69,3 +69,13 @@ void PutBucketIntelligentTieringConfigurationRequest::AddQueryStringParameters(U
}
}
+
+PutBucketIntelligentTieringConfigurationRequest::EndpointParameters PutBucketIntelligentTieringConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketInventoryConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketInventoryConfigurationRequest.cpp
index 31f0365528..5ec6f7d24f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketInventoryConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketInventoryConfigurationRequest.cpp
@@ -83,3 +83,13 @@ Aws::Http::HeaderValueCollection PutBucketInventoryConfigurationRequest::GetRequ
return headers;
}
+
+PutBucketInventoryConfigurationRequest::EndpointParameters PutBucketInventoryConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLifecycleConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLifecycleConfigurationRequest.cpp
index 2d8b5471f2..a197ebe9fb 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLifecycleConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLifecycleConfigurationRequest.cpp
@@ -18,6 +18,8 @@ using namespace Aws::Http;
PutBucketLifecycleConfigurationRequest::PutBucketLifecycleConfigurationRequest() :
m_bucketHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_lifecycleConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
@@ -66,6 +68,11 @@ Aws::Http::HeaderValueCollection PutBucketLifecycleConfigurationRequest::GetRequ
{
Aws::Http::HeaderValueCollection headers;
Aws::StringStream ss;
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -75,3 +82,26 @@ Aws::Http::HeaderValueCollection PutBucketLifecycleConfigurationRequest::GetRequ
return headers;
}
+
+PutBucketLifecycleConfigurationRequest::EndpointParameters PutBucketLifecycleConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketLifecycleConfigurationRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLoggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLoggingRequest.cpp
index 1057a8f122..34ca2d83ca 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLoggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketLoggingRequest.cpp
@@ -20,6 +20,8 @@ PutBucketLoggingRequest::PutBucketLoggingRequest() :
m_bucketHasBeenSet(false),
m_bucketLoggingStatusHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutBucketLoggingRequest::GetRequestSpecificHead
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutBucketLoggingRequest::GetRequestSpecificHead
return headers;
}
+
+PutBucketLoggingRequest::EndpointParameters PutBucketLoggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketLoggingRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketMetricsConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketMetricsConfigurationRequest.cpp
index 48e5714b3a..0f8ac13170 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketMetricsConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketMetricsConfigurationRequest.cpp
@@ -83,3 +83,13 @@ Aws::Http::HeaderValueCollection PutBucketMetricsConfigurationRequest::GetReques
return headers;
}
+
+PutBucketMetricsConfigurationRequest::EndpointParameters PutBucketMetricsConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketNotificationConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketNotificationConfigurationRequest.cpp
index 52f921af4f..0a2e9ba847 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketNotificationConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketNotificationConfigurationRequest.cpp
@@ -20,6 +20,8 @@ PutBucketNotificationConfigurationRequest::PutBucketNotificationConfigurationReq
m_bucketHasBeenSet(false),
m_notificationConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
+ m_skipDestinationValidation(false),
+ m_skipDestinationValidationHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
}
@@ -69,5 +71,22 @@ Aws::Http::HeaderValueCollection PutBucketNotificationConfigurationRequest::GetR
ss.str("");
}
+ if(m_skipDestinationValidationHasBeenSet)
+ {
+ ss << std::boolalpha << m_skipDestinationValidation;
+ headers.emplace("x-amz-skip-destination-validation", ss.str());
+ ss.str("");
+ }
+
return headers;
}
+
+PutBucketNotificationConfigurationRequest::EndpointParameters PutBucketNotificationConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketOwnershipControlsRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketOwnershipControlsRequest.cpp
index 5a6119e233..46dde75d47 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketOwnershipControlsRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketOwnershipControlsRequest.cpp
@@ -83,3 +83,13 @@ Aws::Http::HeaderValueCollection PutBucketOwnershipControlsRequest::GetRequestSp
return headers;
}
+
+PutBucketOwnershipControlsRequest::EndpointParameters PutBucketOwnershipControlsRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketPolicyRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketPolicyRequest.cpp
index d7fc444c2d..92f16a6381 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketPolicyRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketPolicyRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws;
PutBucketPolicyRequest::PutBucketPolicyRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_confirmRemoveSelfBucketAccess(false),
m_confirmRemoveSelfBucketAccessHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
@@ -60,6 +62,11 @@ Aws::Http::HeaderValueCollection PutBucketPolicyRequest::GetRequestSpecificHeade
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_confirmRemoveSelfBucketAccessHasBeenSet)
{
ss << std::boolalpha << m_confirmRemoveSelfBucketAccess;
@@ -77,3 +84,26 @@ Aws::Http::HeaderValueCollection PutBucketPolicyRequest::GetRequestSpecificHeade
return headers;
}
+
+PutBucketPolicyRequest::EndpointParameters PutBucketPolicyRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketPolicyRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketReplicationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketReplicationRequest.cpp
index 44244a8560..50b1abbebd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketReplicationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketReplicationRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutBucketReplicationRequest::PutBucketReplicationRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_replicationConfigurationHasBeenSet(false),
m_tokenHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
@@ -75,6 +77,11 @@ Aws::Http::HeaderValueCollection PutBucketReplicationRequest::GetRequestSpecific
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_tokenHasBeenSet)
{
ss << m_token;
@@ -91,3 +98,26 @@ Aws::Http::HeaderValueCollection PutBucketReplicationRequest::GetRequestSpecific
return headers;
}
+
+PutBucketReplicationRequest::EndpointParameters PutBucketReplicationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketReplicationRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketRequestPaymentRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketRequestPaymentRequest.cpp
index f6be9cc855..643f26b8f1 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketRequestPaymentRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketRequestPaymentRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutBucketRequestPaymentRequest::PutBucketRequestPaymentRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_requestPaymentConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutBucketRequestPaymentRequest::GetRequestSpeci
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutBucketRequestPaymentRequest::GetRequestSpeci
return headers;
}
+
+PutBucketRequestPaymentRequest::EndpointParameters PutBucketRequestPaymentRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketRequestPaymentRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketTaggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketTaggingRequest.cpp
index 748ede6e75..fa0861e2cd 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketTaggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketTaggingRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutBucketTaggingRequest::PutBucketTaggingRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_taggingHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutBucketTaggingRequest::GetRequestSpecificHead
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutBucketTaggingRequest::GetRequestSpecificHead
return headers;
}
+
+PutBucketTaggingRequest::EndpointParameters PutBucketTaggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketTaggingRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketVersioningRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketVersioningRequest.cpp
index 5e72efb1e1..6dbcd39433 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketVersioningRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketVersioningRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutBucketVersioningRequest::PutBucketVersioningRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_mFAHasBeenSet(false),
m_versioningConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
@@ -75,6 +77,11 @@ Aws::Http::HeaderValueCollection PutBucketVersioningRequest::GetRequestSpecificH
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_mFAHasBeenSet)
{
ss << m_mFA;
@@ -91,3 +98,26 @@ Aws::Http::HeaderValueCollection PutBucketVersioningRequest::GetRequestSpecificH
return headers;
}
+
+PutBucketVersioningRequest::EndpointParameters PutBucketVersioningRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketVersioningRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketWebsiteRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketWebsiteRequest.cpp
index d20032e466..adea6161f5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketWebsiteRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutBucketWebsiteRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutBucketWebsiteRequest::PutBucketWebsiteRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_websiteConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutBucketWebsiteRequest::GetRequestSpecificHead
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutBucketWebsiteRequest::GetRequestSpecificHead
return headers;
}
+
+PutBucketWebsiteRequest::EndpointParameters PutBucketWebsiteRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutBucketWebsiteRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectAclRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectAclRequest.cpp
index 5dbd1a3fa6..fb859d91e4 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectAclRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectAclRequest.cpp
@@ -22,6 +22,8 @@ PutObjectAclRequest::PutObjectAclRequest() :
m_accessControlPolicyHasBeenSet(false),
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_grantFullControlHasBeenSet(false),
m_grantReadHasBeenSet(false),
m_grantReadACPHasBeenSet(false),
@@ -97,6 +99,11 @@ Aws::Http::HeaderValueCollection PutObjectAclRequest::GetRequestSpecificHeaders(
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_grantFullControlHasBeenSet)
{
ss << m_grantFullControl;
@@ -146,3 +153,26 @@ Aws::Http::HeaderValueCollection PutObjectAclRequest::GetRequestSpecificHeaders(
return headers;
}
+
+PutObjectAclRequest::EndpointParameters PutObjectAclRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutObjectAclRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLegalHoldRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLegalHoldRequest.cpp
index 8c07b952d7..4ecc41790e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLegalHoldRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLegalHoldRequest.cpp
@@ -24,6 +24,8 @@ PutObjectLegalHoldRequest::PutObjectLegalHoldRequest() :
m_requestPayerHasBeenSet(false),
m_versionIdHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
@@ -90,6 +92,11 @@ Aws::Http::HeaderValueCollection PutObjectLegalHoldRequest::GetRequestSpecificHe
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -99,3 +106,26 @@ Aws::Http::HeaderValueCollection PutObjectLegalHoldRequest::GetRequestSpecificHe
return headers;
}
+
+PutObjectLegalHoldRequest::EndpointParameters PutObjectLegalHoldRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutObjectLegalHoldRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLockConfigurationRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLockConfigurationRequest.cpp
index 347b9e668d..b0e89445dc 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLockConfigurationRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectLockConfigurationRequest.cpp
@@ -23,6 +23,8 @@ PutObjectLockConfigurationRequest::PutObjectLockConfigurationRequest() :
m_requestPayerHasBeenSet(false),
m_tokenHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
@@ -89,6 +91,11 @@ Aws::Http::HeaderValueCollection PutObjectLockConfigurationRequest::GetRequestSp
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -98,3 +105,26 @@ Aws::Http::HeaderValueCollection PutObjectLockConfigurationRequest::GetRequestSp
return headers;
}
+
+PutObjectLockConfigurationRequest::EndpointParameters PutObjectLockConfigurationRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutObjectLockConfigurationRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRequest.cpp
index 18cdc3866f..1c2aa6e34f 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRequest.cpp
@@ -28,6 +28,12 @@ PutObjectRequest::PutObjectRequest() :
m_contentLength(0),
m_contentLengthHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false),
m_expiresHasBeenSet(false),
m_grantFullControlHasBeenSet(false),
m_grantReadHasBeenSet(false),
@@ -134,9 +140,42 @@ Aws::Http::HeaderValueCollection PutObjectRequest::GetRequestSpecificHeaders() c
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
+ if(m_checksumCRC32HasBeenSet)
+ {
+ ss << m_checksumCRC32;
+ headers.emplace("x-amz-checksum-crc32", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ ss << m_checksumCRC32C;
+ headers.emplace("x-amz-checksum-crc32c", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ ss << m_checksumSHA1;
+ headers.emplace("x-amz-checksum-sha1", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ ss << m_checksumSHA256;
+ headers.emplace("x-amz-checksum-sha256", ss.str());
+ ss.str("");
+ }
+
if(m_expiresHasBeenSet)
{
- headers.emplace("expires", m_expires.ToGmtString(DateFormat::RFC822));
+ headers.emplace("expires", m_expires.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_grantFullControlHasBeenSet)
@@ -255,7 +294,7 @@ Aws::Http::HeaderValueCollection PutObjectRequest::GetRequestSpecificHeaders() c
if(m_objectLockRetainUntilDateHasBeenSet)
{
- headers.emplace("x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(DateFormat::ISO_8601));
+ headers.emplace("x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_objectLockLegalHoldStatusHasBeenSet)
@@ -273,3 +312,26 @@ Aws::Http::HeaderValueCollection PutObjectRequest::GetRequestSpecificHeaders() c
return headers;
}
+
+PutObjectRequest::EndpointParameters PutObjectRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutObjectRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
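Note: taken together, the PutObjectRequest changes wire up the flexible-checksum feature — an x-amz-sdk-checksum-algorithm header, the per-algorithm x-amz-checksum-* headers, and a GetChecksumAlgorithmName() that reports "md5" when no algorithm is selected. A hedged usage sketch, assuming the conventional generated setters (SetBucket, SetKey, SetChecksumAlgorithm) from aws/s3/model/PutObjectRequest.h and an already-configured S3Client:

    // Sketch: opts an upload into CRC32 checksums so the request emits the
    // x-amz-sdk-checksum-algorithm header added in this diff.
    #include <aws/s3/S3Client.h>
    #include <aws/s3/model/PutObjectRequest.h>
    #include <aws/core/utils/memory/stl/AWSAllocator.h>
    #include <aws/core/utils/memory/stl/AWSStringStream.h>

    void PutWithCrc32(const Aws::S3::S3Client& client)
    {
        Aws::S3::Model::PutObjectRequest request;
        request.SetBucket("example-bucket");   // hypothetical bucket/key names
        request.SetKey("example-key");
        request.SetChecksumAlgorithm(Aws::S3::Model::ChecksumAlgorithm::CRC32);

        request.SetBody(Aws::MakeShared<Aws::StringStream>("put-body", "hello"));

        auto outcome = client.PutObject(request);
        // On success the service echoes the checksum back; the generated
        // PutObjectResult now stores it (see m_checksumCRC32 in the next file).
        (void)outcome;
    }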
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectResult.cpp
index 0dfcb17280..21a34122e5 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectResult.cpp
@@ -53,6 +53,30 @@ PutObjectResult& PutObjectResult::operator =(const Aws::AmazonWebServiceResult<X
m_eTag = eTagIter->second;
}
+ const auto& checksumCRC32Iter = headers.find("x-amz-checksum-crc32");
+ if(checksumCRC32Iter != headers.end())
+ {
+ m_checksumCRC32 = checksumCRC32Iter->second;
+ }
+
+ const auto& checksumCRC32CIter = headers.find("x-amz-checksum-crc32c");
+ if(checksumCRC32CIter != headers.end())
+ {
+ m_checksumCRC32C = checksumCRC32CIter->second;
+ }
+
+ const auto& checksumSHA1Iter = headers.find("x-amz-checksum-sha1");
+ if(checksumSHA1Iter != headers.end())
+ {
+ m_checksumSHA1 = checksumSHA1Iter->second;
+ }
+
+ const auto& checksumSHA256Iter = headers.find("x-amz-checksum-sha256");
+ if(checksumSHA256Iter != headers.end())
+ {
+ m_checksumSHA256 = checksumSHA256Iter->second;
+ }
+
const auto& serverSideEncryptionIter = headers.find("x-amz-server-side-encryption");
if(serverSideEncryptionIter != headers.end())
{
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRetentionRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRetentionRequest.cpp
index 779ac68bd3..47e0cd7039 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRetentionRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectRetentionRequest.cpp
@@ -26,6 +26,8 @@ PutObjectRetentionRequest::PutObjectRetentionRequest() :
m_bypassGovernanceRetention(false),
m_bypassGovernanceRetentionHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
@@ -99,6 +101,11 @@ Aws::Http::HeaderValueCollection PutObjectRetentionRequest::GetRequestSpecificHe
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -108,3 +115,26 @@ Aws::Http::HeaderValueCollection PutObjectRetentionRequest::GetRequestSpecificHe
return headers;
}
+
+PutObjectRetentionRequest::EndpointParameters PutObjectRetentionRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutObjectRetentionRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectTaggingRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectTaggingRequest.cpp
index 612eb0eb8e..a2da55ab0e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectTaggingRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutObjectTaggingRequest.cpp
@@ -21,6 +21,8 @@ PutObjectTaggingRequest::PutObjectTaggingRequest() :
m_keyHasBeenSet(false),
m_versionIdHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_taggingHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_requestPayer(RequestPayer::NOT_SET),
@@ -85,6 +87,11 @@ Aws::Http::HeaderValueCollection PutObjectTaggingRequest::GetRequestSpecificHead
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -99,3 +106,26 @@ Aws::Http::HeaderValueCollection PutObjectTaggingRequest::GetRequestSpecificHead
return headers;
}
+
+PutObjectTaggingRequest::EndpointParameters PutObjectTaggingRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutObjectTaggingRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutPublicAccessBlockRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutPublicAccessBlockRequest.cpp
index f06ab1ed3a..f8479fbe44 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutPublicAccessBlockRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/PutPublicAccessBlockRequest.cpp
@@ -19,6 +19,8 @@ using namespace Aws::Http;
PutPublicAccessBlockRequest::PutPublicAccessBlockRequest() :
m_bucketHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_publicAccessBlockConfigurationHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
@@ -74,6 +76,11 @@ Aws::Http::HeaderValueCollection PutPublicAccessBlockRequest::GetRequestSpecific
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -83,3 +90,26 @@ Aws::Http::HeaderValueCollection PutPublicAccessBlockRequest::GetRequestSpecific
return headers;
}
+
+PutPublicAccessBlockRequest::EndpointParameters PutPublicAccessBlockRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String PutPublicAccessBlockRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/RestoreObjectRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/RestoreObjectRequest.cpp
index a755fc7828..85f5f73706 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/RestoreObjectRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/RestoreObjectRequest.cpp
@@ -23,6 +23,8 @@ RestoreObjectRequest::RestoreObjectRequest() :
m_restoreRequestHasBeenSet(false),
m_requestPayer(RequestPayer::NOT_SET),
m_requestPayerHasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false)
{
@@ -82,6 +84,11 @@ Aws::Http::HeaderValueCollection RestoreObjectRequest::GetRequestSpecificHeaders
headers.emplace("x-amz-request-payer", RequestPayerMapper::GetNameForRequestPayer(m_requestPayer));
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
if(m_expectedBucketOwnerHasBeenSet)
{
ss << m_expectedBucketOwner;
@@ -91,3 +98,26 @@ Aws::Http::HeaderValueCollection RestoreObjectRequest::GetRequestSpecificHeaders
return headers;
}
+
+RestoreObjectRequest::EndpointParameters RestoreObjectRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String RestoreObjectRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/SelectObjectContentRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/SelectObjectContentRequest.cpp
index f3443311dd..20f0007fd9 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/SelectObjectContentRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/SelectObjectContentRequest.cpp
@@ -31,7 +31,7 @@ SelectObjectContentRequest::SelectObjectContentRequest() :
m_scanRangeHasBeenSet(false),
m_expectedBucketOwnerHasBeenSet(false),
m_customizedAccessLogTagHasBeenSet(false),
- m_decoder(Aws::Utils::Event::EventStreamDecoder(&m_handler))
+ m_handler(), m_decoder(Aws::Utils::Event::EventStreamDecoder(&m_handler))
{
}
@@ -138,3 +138,13 @@ Aws::Http::HeaderValueCollection SelectObjectContentRequest::GetRequestSpecificH
return headers;
}
+
+SelectObjectContentRequest::EndpointParameters SelectObjectContentRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/StorageClass.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/StorageClass.cpp
index 0ab7edd5de..886777a137 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/StorageClass.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/StorageClass.cpp
@@ -28,6 +28,7 @@ namespace Aws
static const int GLACIER_HASH = HashingUtils::HashString("GLACIER");
static const int DEEP_ARCHIVE_HASH = HashingUtils::HashString("DEEP_ARCHIVE");
static const int OUTPOSTS_HASH = HashingUtils::HashString("OUTPOSTS");
+ static const int GLACIER_IR_HASH = HashingUtils::HashString("GLACIER_IR");
StorageClass GetStorageClassForName(const Aws::String& name)
@@ -65,6 +66,10 @@ namespace Aws
{
return StorageClass::OUTPOSTS;
}
+ else if (hashCode == GLACIER_IR_HASH)
+ {
+ return StorageClass::GLACIER_IR;
+ }
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
@@ -95,6 +100,8 @@ namespace Aws
return "DEEP_ARCHIVE";
case StorageClass::OUTPOSTS:
return "OUTPOSTS";
+ case StorageClass::GLACIER_IR:
+ return "GLACIER_IR";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Transition.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Transition.cpp
index 8dcefe5df2..e837cd964e 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Transition.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/Transition.cpp
@@ -48,7 +48,7 @@ Transition& Transition::operator =(const XmlNode& xmlNode)
XmlNode dateNode = resultNode.FirstChild("Date");
if(!dateNode.IsNull())
{
- m_date = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(dateNode.GetText()).c_str()).c_str(), DateFormat::ISO_8601);
+ m_date = DateTime(StringUtils::Trim(Aws::Utils::Xml::DecodeEscapedXmlText(dateNode.GetText()).c_str()).c_str(), Aws::Utils::DateFormat::ISO_8601);
m_dateHasBeenSet = true;
}
XmlNode daysNode = resultNode.FirstChild("Days");
@@ -74,7 +74,7 @@ void Transition::AddToNode(XmlNode& parentNode) const
if(m_dateHasBeenSet)
{
XmlNode dateNode = parentNode.CreateChildElement("Date");
- dateNode.SetText(m_date.ToGmtString(DateFormat::ISO_8601));
+ dateNode.SetText(m_date.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_daysHasBeenSet)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/TransitionStorageClass.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/TransitionStorageClass.cpp
index 2149177071..59cc36b0b7 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/TransitionStorageClass.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/TransitionStorageClass.cpp
@@ -25,6 +25,7 @@ namespace Aws
static const int ONEZONE_IA_HASH = HashingUtils::HashString("ONEZONE_IA");
static const int INTELLIGENT_TIERING_HASH = HashingUtils::HashString("INTELLIGENT_TIERING");
static const int DEEP_ARCHIVE_HASH = HashingUtils::HashString("DEEP_ARCHIVE");
+ static const int GLACIER_IR_HASH = HashingUtils::HashString("GLACIER_IR");
TransitionStorageClass GetTransitionStorageClassForName(const Aws::String& name)
@@ -50,6 +51,10 @@ namespace Aws
{
return TransitionStorageClass::DEEP_ARCHIVE;
}
+ else if (hashCode == GLACIER_IR_HASH)
+ {
+ return TransitionStorageClass::GLACIER_IR;
+ }
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
{
@@ -74,6 +79,8 @@ namespace Aws
return "INTELLIGENT_TIERING";
case TransitionStorageClass::DEEP_ARCHIVE:
return "DEEP_ARCHIVE";
+ case TransitionStorageClass::GLACIER_IR:
+ return "GLACIER_IR";
default:
EnumParseOverflowContainer* overflowContainer = Aws::GetEnumOverflowContainer();
if(overflowContainer)
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartCopyRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartCopyRequest.cpp
index 364af9b8fc..f45d88ac80 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartCopyRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartCopyRequest.cpp
@@ -42,6 +42,28 @@ UploadPartCopyRequest::UploadPartCopyRequest() :
{
}
+bool UploadPartCopyRequest::HasEmbeddedError(Aws::IOStream &body,
+ const Aws::Http::HeaderValueCollection &header) const
+{
+ // Header is unused
+ (void) header;
+
+ auto readPointer = body.tellg();
+ XmlDocument doc = XmlDocument::CreateFromXmlStream(body);
+
+ if (!doc.WasParseSuccessful()) {
+ body.seekg(readPointer);
+ return false;
+ }
+
+ if (doc.GetRootElement().GetName() == "Error") {
+ body.seekg(readPointer);
+ return true;
+ }
+ body.seekg(readPointer);
+ return false;
+}
+
Aws::String UploadPartCopyRequest::SerializePayload() const
{
return {};
@@ -103,7 +125,7 @@ Aws::Http::HeaderValueCollection UploadPartCopyRequest::GetRequestSpecificHeader
if(m_copySourceIfModifiedSinceHasBeenSet)
{
- headers.emplace("x-amz-copy-source-if-modified-since", m_copySourceIfModifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("x-amz-copy-source-if-modified-since", m_copySourceIfModifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_copySourceIfNoneMatchHasBeenSet)
@@ -115,7 +137,7 @@ Aws::Http::HeaderValueCollection UploadPartCopyRequest::GetRequestSpecificHeader
if(m_copySourceIfUnmodifiedSinceHasBeenSet)
{
- headers.emplace("x-amz-copy-source-if-unmodified-since", m_copySourceIfUnmodifiedSince.ToGmtString(DateFormat::RFC822));
+ headers.emplace("x-amz-copy-source-if-unmodified-since", m_copySourceIfUnmodifiedSince.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_copySourceRangeHasBeenSet)
@@ -188,3 +210,13 @@ Aws::Http::HeaderValueCollection UploadPartCopyRequest::GetRequestSpecificHeader
return headers;
}
+
+UploadPartCopyRequest::EndpointParameters UploadPartCopyRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartRequest.cpp
index ee4a67ee07..59edcc90e0 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartRequest.cpp
@@ -22,6 +22,12 @@ UploadPartRequest::UploadPartRequest() :
m_contentLength(0),
m_contentLengthHasBeenSet(false),
m_contentMD5HasBeenSet(false),
+ m_checksumAlgorithm(ChecksumAlgorithm::NOT_SET),
+ m_checksumAlgorithmHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false),
m_keyHasBeenSet(false),
m_partNumber(0),
m_partNumberHasBeenSet(false),
@@ -91,6 +97,39 @@ Aws::Http::HeaderValueCollection UploadPartRequest::GetRequestSpecificHeaders()
ss.str("");
}
+ if(m_checksumAlgorithmHasBeenSet)
+ {
+ headers.emplace("x-amz-sdk-checksum-algorithm", ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm));
+ }
+
+ if(m_checksumCRC32HasBeenSet)
+ {
+ ss << m_checksumCRC32;
+ headers.emplace("x-amz-checksum-crc32", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ ss << m_checksumCRC32C;
+ headers.emplace("x-amz-checksum-crc32c", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ ss << m_checksumSHA1;
+ headers.emplace("x-amz-checksum-sha1", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ ss << m_checksumSHA256;
+ headers.emplace("x-amz-checksum-sha256", ss.str());
+ ss.str("");
+ }
+
if(m_sSECustomerAlgorithmHasBeenSet)
{
ss << m_sSECustomerAlgorithm;
@@ -127,3 +166,26 @@ Aws::Http::HeaderValueCollection UploadPartRequest::GetRequestSpecificHeaders()
return headers;
}
+
+UploadPartRequest::EndpointParameters UploadPartRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Operation context parameters
+ if (BucketHasBeenSet()) {
+ parameters.emplace_back(Aws::String("Bucket"), this->GetBucket(), Aws::Endpoint::EndpointParameter::ParameterOrigin::OPERATION_CONTEXT);
+ }
+ return parameters;
+}
+
+Aws::String UploadPartRequest::GetChecksumAlgorithmName() const
+{
+ if (m_checksumAlgorithm == ChecksumAlgorithm::NOT_SET)
+ {
+ return "md5";
+ }
+ else
+ {
+ return ChecksumAlgorithmMapper::GetNameForChecksumAlgorithm(m_checksumAlgorithm);
+ }
+}
+
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartResult.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartResult.cpp
index 13ad60a463..fa24c00520 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartResult.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/UploadPartResult.cpp
@@ -53,6 +53,30 @@ UploadPartResult& UploadPartResult::operator =(const Aws::AmazonWebServiceResult
m_eTag = eTagIter->second;
}
+ const auto& checksumCRC32Iter = headers.find("x-amz-checksum-crc32");
+ if(checksumCRC32Iter != headers.end())
+ {
+ m_checksumCRC32 = checksumCRC32Iter->second;
+ }
+
+ const auto& checksumCRC32CIter = headers.find("x-amz-checksum-crc32c");
+ if(checksumCRC32CIter != headers.end())
+ {
+ m_checksumCRC32C = checksumCRC32CIter->second;
+ }
+
+ const auto& checksumSHA1Iter = headers.find("x-amz-checksum-sha1");
+ if(checksumSHA1Iter != headers.end())
+ {
+ m_checksumSHA1 = checksumSHA1Iter->second;
+ }
+
+ const auto& checksumSHA256Iter = headers.find("x-amz-checksum-sha256");
+ if(checksumSHA256Iter != headers.end())
+ {
+ m_checksumSHA256 = checksumSHA256Iter->second;
+ }
+
const auto& sSECustomerAlgorithmIter = headers.find("x-amz-server-side-encryption-customer-algorithm");
if(sSECustomerAlgorithmIter != headers.end())
{
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/WriteGetObjectResponseRequest.cpp b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/WriteGetObjectResponseRequest.cpp
index 49893d5823..fd1a33573c 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/WriteGetObjectResponseRequest.cpp
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/source/model/WriteGetObjectResponseRequest.cpp
@@ -32,6 +32,10 @@ WriteGetObjectResponseRequest::WriteGetObjectResponseRequest() :
m_contentLength(0),
m_contentLengthHasBeenSet(false),
m_contentRangeHasBeenSet(false),
+ m_checksumCRC32HasBeenSet(false),
+ m_checksumCRC32CHasBeenSet(false),
+ m_checksumSHA1HasBeenSet(false),
+ m_checksumSHA256HasBeenSet(false),
m_deleteMarker(false),
m_deleteMarkerHasBeenSet(false),
m_eTagHasBeenSet(false),
@@ -180,6 +184,34 @@ Aws::Http::HeaderValueCollection WriteGetObjectResponseRequest::GetRequestSpecif
ss.str("");
}
+ if(m_checksumCRC32HasBeenSet)
+ {
+ ss << m_checksumCRC32;
+ headers.emplace("x-amz-fwd-header-x-amz-checksum-crc32", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumCRC32CHasBeenSet)
+ {
+ ss << m_checksumCRC32C;
+ headers.emplace("x-amz-fwd-header-x-amz-checksum-crc32c", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA1HasBeenSet)
+ {
+ ss << m_checksumSHA1;
+ headers.emplace("x-amz-fwd-header-x-amz-checksum-sha1", ss.str());
+ ss.str("");
+ }
+
+ if(m_checksumSHA256HasBeenSet)
+ {
+ ss << m_checksumSHA256;
+ headers.emplace("x-amz-fwd-header-x-amz-checksum-sha256", ss.str());
+ ss.str("");
+ }
+
if(m_deleteMarkerHasBeenSet)
{
ss << std::boolalpha << m_deleteMarker;
@@ -196,7 +228,7 @@ Aws::Http::HeaderValueCollection WriteGetObjectResponseRequest::GetRequestSpecif
if(m_expiresHasBeenSet)
{
- headers.emplace("x-amz-fwd-header-expires", m_expires.ToGmtString(DateFormat::RFC822));
+ headers.emplace("x-amz-fwd-header-expires", m_expires.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_expirationHasBeenSet)
@@ -208,7 +240,7 @@ Aws::Http::HeaderValueCollection WriteGetObjectResponseRequest::GetRequestSpecif
if(m_lastModifiedHasBeenSet)
{
- headers.emplace("x-amz-fwd-header-last-modified", m_lastModified.ToGmtString(DateFormat::RFC822));
+ headers.emplace("x-amz-fwd-header-last-modified", m_lastModified.ToGmtString(Aws::Utils::DateFormat::RFC822));
}
if(m_missingMetaHasBeenSet)
@@ -240,7 +272,7 @@ Aws::Http::HeaderValueCollection WriteGetObjectResponseRequest::GetRequestSpecif
if(m_objectLockRetainUntilDateHasBeenSet)
{
- headers.emplace("x-amz-fwd-header-x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(DateFormat::ISO_8601));
+ headers.emplace("x-amz-fwd-header-x-amz-object-lock-retain-until-date", m_objectLockRetainUntilDate.ToGmtString(Aws::Utils::DateFormat::ISO_8601));
}
if(m_partsCountHasBeenSet)
@@ -322,3 +354,11 @@ Aws::Http::HeaderValueCollection WriteGetObjectResponseRequest::GetRequestSpecif
return headers;
}
+
+WriteGetObjectResponseRequest::EndpointParameters WriteGetObjectResponseRequest::GetEndpointContextParams() const
+{
+ EndpointParameters parameters;
+ // Static context parameters
+ parameters.emplace_back(Aws::String("UseObjectLambdaEndpoint"), true, Aws::Endpoint::EndpointParameter::ParameterOrigin::STATIC_CONTEXT);
+ return parameters;
+}
diff --git a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/ya.make b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/ya.make
index 154df74f77..88082e0010 100644
--- a/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/ya.make
+++ b/contrib/libs/aws-sdk-cpp/aws-cpp-sdk-s3/ya.make
@@ -8,8 +8,13 @@ LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
PEERDIR(
contrib/libs/aws-sdk-cpp/aws-cpp-sdk-core
+ contrib/restricted/aws/aws-c-auth
contrib/restricted/aws/aws-c-common
contrib/restricted/aws/aws-c-event-stream
+ contrib/restricted/aws/aws-c-io
+ contrib/restricted/aws/aws-c-mqtt
+ contrib/restricted/aws/aws-c-sdkutils
+ contrib/restricted/aws/aws-crt-cpp
)
ADDINCL(
@@ -22,38 +27,52 @@ NO_COMPILER_WARNINGS()
NO_UTIL()
CFLAGS(
+ -DAWS_AUTH_USE_IMPORT_EXPORT
-DAWS_CAL_USE_IMPORT_EXPORT
-DAWS_CHECKSUMS_USE_IMPORT_EXPORT
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
-DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
-DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
-DAWS_SDK_VERSION_MAJOR=1
- -DAWS_SDK_VERSION_MINOR=8
- -DAWS_SDK_VERSION_PATCH=186
+ -DAWS_SDK_VERSION_MINOR=11
+ -DAWS_SDK_VERSION_PATCH=37
+ -DAWS_TEST_REGION=US_EAST_1
-DAWS_USE_EPOLL
+ -DENABLED_REQUEST_COMPRESSION
+ -DENABLED_ZLIB_REQUEST_COMPRESSION
-DENABLE_CURL_CLIENT
-DENABLE_OPENSSL_ENCRYPTION
-DHAS_PATHCONF
-DHAS_UMASK
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
- -DS2N_BIKE_R3_VPCLMUL
+ -DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
- -DS2N_HAVE_EXECINFO
+ -DS2N_FEATURES_AVAILABLE
-DS2N_KYBER512R3_AVX2_BMI2
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
)
SRCS(
- source/S3ARN.cpp
source/S3Client.cpp
- source/S3Endpoint.cpp
+ source/S3ClientConfiguration.cpp
+ source/S3EndpointProvider.cpp
+ source/S3EndpointRules.cpp
source/S3ErrorMarshaller.cpp
source/S3Errors.cpp
+ source/S3Request.cpp
source/model/AbortIncompleteMultipartUpload.cpp
source/model/AbortMultipartUploadRequest.cpp
source/model/AbortMultipartUploadResult.cpp
@@ -79,6 +98,9 @@ SRCS(
source/model/CORSRule.cpp
source/model/CSVInput.cpp
source/model/CSVOutput.cpp
+ source/model/Checksum.cpp
+ source/model/ChecksumAlgorithm.cpp
+ source/model/ChecksumMode.cpp
source/model/CloudFunctionConfiguration.cpp
source/model/CommonPrefix.cpp
source/model/CompleteMultipartUploadRequest.cpp
@@ -129,6 +151,7 @@ SRCS(
source/model/Error.cpp
source/model/ErrorDocument.cpp
source/model/Event.cpp
+ source/model/EventBridgeConfiguration.cpp
source/model/ExistingObjectReplication.cpp
source/model/ExistingObjectReplicationStatus.cpp
source/model/ExpirationStatus.cpp
@@ -178,6 +201,9 @@ SRCS(
source/model/GetBucketWebsiteResult.cpp
source/model/GetObjectAclRequest.cpp
source/model/GetObjectAclResult.cpp
+ source/model/GetObjectAttributesParts.cpp
+ source/model/GetObjectAttributesRequest.cpp
+ source/model/GetObjectAttributesResult.cpp
source/model/GetObjectLegalHoldRequest.cpp
source/model/GetObjectLegalHoldResult.cpp
source/model/GetObjectLockConfigurationRequest.cpp
@@ -262,6 +288,7 @@ SRCS(
source/model/NotificationConfigurationDeprecated.cpp
source/model/NotificationConfigurationFilter.cpp
source/model/Object.cpp
+ source/model/ObjectAttributes.cpp
source/model/ObjectCannedACL.cpp
source/model/ObjectIdentifier.cpp
source/model/ObjectLockConfiguration.cpp
@@ -273,6 +300,7 @@ SRCS(
source/model/ObjectLockRetentionMode.cpp
source/model/ObjectLockRule.cpp
source/model/ObjectOwnership.cpp
+ source/model/ObjectPart.cpp
source/model/ObjectStorageClass.cpp
source/model/ObjectVersion.cpp
source/model/ObjectVersionStorageClass.cpp
diff --git a/contrib/restricted/aws/CMakeLists.txt b/contrib/restricted/aws/CMakeLists.txt
index 87ffccf907..be12f5d90f 100644
--- a/contrib/restricted/aws/CMakeLists.txt
+++ b/contrib/restricted/aws/CMakeLists.txt
@@ -6,9 +6,16 @@
# original buildsystem will not be accepted.
+add_subdirectory(aws-c-auth)
add_subdirectory(aws-c-cal)
add_subdirectory(aws-c-common)
+add_subdirectory(aws-c-compression)
add_subdirectory(aws-c-event-stream)
+add_subdirectory(aws-c-http)
add_subdirectory(aws-c-io)
+add_subdirectory(aws-c-mqtt)
+add_subdirectory(aws-c-s3)
+add_subdirectory(aws-c-sdkutils)
add_subdirectory(aws-checksums)
+add_subdirectory(aws-crt-cpp)
add_subdirectory(s2n)
diff --git a/contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..ec91345687
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,77 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-auth)
+target_compile_options(restricted-aws-aws-c-auth PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-auth PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/include
+)
+target_link_libraries(restricted-aws-aws-c-auth PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-c-auth PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/auth.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_config.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_result.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
+)
diff --git a/contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..ec91345687
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,77 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-auth)
+target_compile_options(restricted-aws-aws-c-auth PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-auth PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/include
+)
+target_link_libraries(restricted-aws-aws-c-auth PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-c-auth PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/auth.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_config.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_result.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
+)
diff --git a/contrib/restricted/aws/aws-c-auth/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-auth/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..7a5fcbf2c6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,78 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-auth)
+target_compile_options(restricted-aws-aws-c-auth PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-auth PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/include
+)
+target_link_libraries(restricted-aws-aws-c-auth PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-c-auth PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/auth.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_config.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_result.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
+)
diff --git a/contrib/restricted/aws/aws-c-auth/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-auth/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..7a5fcbf2c6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,78 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-auth)
+target_compile_options(restricted-aws-aws-c-auth PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-auth PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/include
+)
+target_link_libraries(restricted-aws-aws-c-auth PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-c-auth PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/auth.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_config.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_result.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
+)
diff --git a/contrib/restricted/aws/aws-c-auth/CMakeLists.txt b/contrib/restricted/aws/aws-c-auth/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-auth/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-auth/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..ec91345687
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,77 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-auth)
+target_compile_options(restricted-aws-aws-c-auth PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-auth PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/include
+)
+target_link_libraries(restricted-aws-aws-c-auth PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-c-auth PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/auth.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_config.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/signing_result.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
+)
diff --git a/contrib/restricted/aws/aws-c-auth/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-auth/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..3b64466870
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-auth/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-auth/CONTRIBUTING.md
new file mode 100644
index 0000000000..c3e7edf2f1
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-auth/issues), or [recently closed](https://github.com/awslabs/aws-c-auth/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute to. As our projects use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-auth/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-c-auth/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-auth/LICENSE b/contrib/restricted/aws/aws-c-auth/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/restricted/aws/aws-c-auth/NOTICE b/contrib/restricted/aws/aws-c-auth/NOTICE
new file mode 100644
index 0000000000..7b3f69846b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/NOTICE
@@ -0,0 +1,3 @@
+AWS C Auth
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0.
diff --git a/contrib/restricted/aws/aws-c-auth/README.md b/contrib/restricted/aws/aws-c-auth/README.md
new file mode 100644
index 0000000000..088feba5c0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/README.md
@@ -0,0 +1,71 @@
+## AWS C Auth
+
+C99 library implementation of AWS client-side authentication: standard credentials providers and signing.
+
+From a cryptographic perspective, only functions with the suffix "_constant_time" should be considered constant
+time.
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.1+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
+
+```
+git clone git@github.com:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone git@github.com:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
+```
+
+#### Building aws-c-auth and Remaining Dependencies
+
+```
+git clone git@github.com:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone git@github.com:awslabs/aws-c-cal.git
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-cal/build --target install
+
+git clone git@github.com:awslabs/aws-c-io.git
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-io/build --target install
+
+git clone git@github.com:awslabs/aws-c-compression.git
+cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-compression/build --target install
+
+git clone git@github.com:awslabs/aws-c-http.git
+cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-http/build --target install
+
+git clone git@github.com:awslabs/aws-c-auth.git
+cmake -S aws-c-auth -B aws-c-auth/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-auth/build --target install
+```
+
+### Testing
+Certain tests require a specific environment setup in order to run successfully. This may be a specific execution
+environment (EC2, ECS, etc.) or it may require certain environment variables to be set that configure properties
+(often sensitive materials, such as keys). Whether or not these tests are enabled is controlled by the following CMake
+properties:
+* AWS_BUILDING_ON_EC2 - indicates real IMDS credentials provider test(s) should run
+* AWS_BUILDING_ON_ECS - indicates real ECS credentials provider test(s) should run
+* AWS_HAS_CI_ENVIRONMENT - indicates that all tests that require environmentally injected secrets/properties should run
+
+Environment properties are injected by the CRT builder process via the custom builder step defined in `./.builder/action/aws-c-auth-test.py`
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h
new file mode 100644
index 0000000000..4411d8ff25
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/auth.h
@@ -0,0 +1,83 @@
+#ifndef AWS_AUTH_AUTH_H
+#define AWS_AUTH_AUTH_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/exports.h>
+
+#include <aws/io/logging.h>
+#include <aws/sdkutils/sdkutils.h>
+
+#define AWS_C_AUTH_PACKAGE_ID 6
+
+/**
+ * Auth-specific error codes
+ */
+enum aws_auth_errors {
+ AWS_AUTH_PROFILE_PARSE_RECOVERABLE_ERROR = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE,
+ AWS_AUTH_PROFILE_PARSE_FATAL_ERROR = AWS_ERROR_SDKUTILS_PARSE_FATAL,
+ AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_AUTH_PACKAGE_ID),
+ AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION,
+ AWS_AUTH_SIGNING_NO_CREDENTIALS,
+ AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM,
+ AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER,
+ AWS_AUTH_SIGNING_INVALID_CONFIGURATION,
+ AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT,
+ AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_DELEGATE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_PROFILE_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE,
+ AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_ECS_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_X509_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_PROCESS_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE,
+ AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE,
+ AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE,
+ AWS_AUTH_SIGNING_INVALID_CREDENTIALS,
+ AWS_AUTH_CANONICAL_REQUEST_MISMATCH,
+ AWS_AUTH_SIGV4A_SIGNATURE_VALIDATION_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_COGNITO_SOURCE_FAILURE,
+ AWS_AUTH_CREDENTIALS_PROVIDER_DELEGATE_FAILURE,
+
+ AWS_AUTH_ERROR_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_AUTH_PACKAGE_ID)
+};
+
+/**
+ * Auth-specific logging subjects
+ */
+enum aws_auth_log_subject {
+ AWS_LS_AUTH_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_AUTH_PACKAGE_ID),
+ AWS_LS_AUTH_PROFILE,
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ AWS_LS_AUTH_SIGNING,
+ AWS_LS_IMDS_CLIENT,
+
+ AWS_LS_AUTH_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_AUTH_PACKAGE_ID)
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes internal data structures used by aws-c-auth.
+ * Must be called before using any functionality in aws-c-auth.
+ *
+ * @param allocator memory allocator to use for any module-level memory allocation
+ */
+AWS_AUTH_API
+void aws_auth_library_init(struct aws_allocator *allocator);
+
+/**
+ * Cleans up internal data structures used by aws-c-auth.
+ * Must not be called until the application is done using aws-c-auth functionality.
+ */
+AWS_AUTH_API
+void aws_auth_library_clean_up(void);
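+
+/*
+ * A minimal usage sketch, assuming the default allocator from aws-c-common;
+ * error handling is omitted.
+ *
+ *     #include <aws/auth/auth.h>
+ *     #include <aws/common/common.h>
+ *
+ *     int main(void) {
+ *         struct aws_allocator *allocator = aws_default_allocator();
+ *         aws_auth_library_init(allocator);
+ *         // ... create credentials providers, sign requests, etc. ...
+ *         aws_auth_library_clean_up();
+ *         return 0;
+ *     }
+ */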
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_AUTH_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h
new file mode 100644
index 0000000000..b7eb198574
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/aws_imds_client.h
@@ -0,0 +1,481 @@
+#ifndef AWS_AUTH_IMDS_CLIENT_H
+#define AWS_AUTH_IMDS_CLIENT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/auth/auth.h>
+#include <aws/auth/credentials.h>
+#include <aws/common/array_list.h>
+#include <aws/common/date_time.h>
+#include <aws/http/connection_manager.h>
+#include <aws/io/retry_strategy.h>
+
+typedef void(aws_imds_client_shutdown_completed_fn)(void *user_data);
+
+/**
+ * Optional callback and user data to be invoked when an imds client has fully shut down
+ */
+struct aws_imds_client_shutdown_options {
+ aws_imds_client_shutdown_completed_fn *shutdown_callback;
+ void *shutdown_user_data;
+};
+
+/**
+ * Configuration options when creating an imds client
+ */
+struct aws_imds_client_options {
+ /*
+ * Completion callback to be invoked when the client has fully shut down
+ */
+ struct aws_imds_client_shutdown_options shutdown_options;
+
+ /*
+ * Client bootstrap to use when this client makes network connections
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Retry strategy instance that governs how failed requests are retried
+ */
+ struct aws_retry_strategy *retry_strategy;
+
+ /*
+ * What version of the imds protocol to use
+ *
+ * Defaults to IMDS_PROTOCOL_V2
+ */
+ enum aws_imds_protocol_version imds_version;
+
+ /*
+ * Table holding all cross-system functional dependencies for an imds client.
+ *
+ * For mocking the http layer in tests, leave NULL otherwise
+ */
+ struct aws_auth_http_system_vtable *function_table;
+};
+
+/*
+ * Standard callback for instance metadata queries
+ */
+typedef void(
+ aws_imds_client_on_get_resource_callback_fn)(const struct aws_byte_buf *resource, int error_code, void *user_data);
+
+/**
+ * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html
+ */
+struct aws_imds_iam_profile {
+ struct aws_date_time last_updated;
+ struct aws_byte_cursor instance_profile_arn;
+ struct aws_byte_cursor instance_profile_id;
+};
+
+/**
+ * Block of per-instance EC2-specific data
+ *
+ * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+ */
+struct aws_imds_instance_info {
+ /* an array of aws_byte_cursor */
+ struct aws_array_list marketplace_product_codes;
+ struct aws_byte_cursor availability_zone;
+ struct aws_byte_cursor private_ip;
+ struct aws_byte_cursor version;
+ struct aws_byte_cursor instance_id;
+ /* an array of aws_byte_cursor */
+ struct aws_array_list billing_products;
+ struct aws_byte_cursor instance_type;
+ struct aws_byte_cursor account_id;
+ struct aws_byte_cursor image_id;
+ struct aws_date_time pending_time;
+ struct aws_byte_cursor architecture;
+ struct aws_byte_cursor kernel_id;
+ struct aws_byte_cursor ramdisk_id;
+ struct aws_byte_cursor region;
+};
+
+/* the item type stored in the array is a pointer to aws_byte_cursor */
+typedef void(
+ aws_imds_client_on_get_array_callback_fn)(const struct aws_array_list *array, int error_code, void *user_data);
+
+typedef void(aws_imds_client_on_get_credentials_callback_fn)(
+ const struct aws_credentials *credentials,
+ int error_code,
+ void *user_data);
+
+typedef void(aws_imds_client_on_get_iam_profile_callback_fn)(
+ const struct aws_imds_iam_profile *iam_profile_info,
+ int error_code,
+ void *user_data);
+
+typedef void(aws_imds_client_on_get_instance_info_callback_fn)(
+ const struct aws_imds_instance_info *instance_info,
+ int error_code,
+ void *user_data);
+
+/**
+ * AWS EC2 Metadata Client is used to retrieve AWS EC2 Instance Metadata info.
+ */
+struct aws_imds_client;
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new imds client
+ *
+ * @param allocator memory allocator to use for creation and queries
+ * @param options configuration options for the imds client
+ *
+ * @return a newly-constructed imds client, or NULL on failure
+ */
+AWS_AUTH_API
+struct aws_imds_client *aws_imds_client_new(
+ struct aws_allocator *allocator,
+ const struct aws_imds_client_options *options);
+
+/**
+ * Increments the ref count on the client
+ *
+ * @param client imds client to acquire a reference to
+ */
+AWS_AUTH_API
+void aws_imds_client_acquire(struct aws_imds_client *client);
+
+/**
+ * Decrements the ref count on the client
+ *
+ * @param client imds client to release a reference to
+ */
+AWS_AUTH_API
+void aws_imds_client_release(struct aws_imds_client *client);
+
+/**
+ * Queries a generic resource (string) from the ec2 instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param resource_path path of the resource to query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_resource_async(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor resource_path,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
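+
+/*
+ * A minimal usage sketch, assuming the caller already owns a configured
+ * `struct aws_client_bootstrap *bootstrap` and `struct aws_retry_strategy *retry_strategy`
+ * and has called aws_auth_library_init(); the resource path shown is illustrative
+ * and error handling is omitted.
+ *
+ *     static void s_on_instance_id(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ *         (void)user_data;
+ *         if (error_code == AWS_ERROR_SUCCESS) {
+ *             // consume resource->buffer / resource->len here
+ *         }
+ *     }
+ *
+ *     struct aws_imds_client_options options = {
+ *         .bootstrap = bootstrap,
+ *         .retry_strategy = retry_strategy,
+ *         .imds_version = IMDS_PROTOCOL_V2,
+ *     };
+ *     struct aws_imds_client *client = aws_imds_client_new(allocator, &options);
+ *     aws_imds_client_get_resource_async(
+ *         client,
+ *         aws_byte_cursor_from_c_str("/latest/meta-data/instance-id"),
+ *         s_on_instance_id,
+ *         NULL);
+ *     // ... wait for the callback to fire, then ...
+ *     aws_imds_client_release(client);
+ */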
+
+/**
+ * Gets the ami id of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_ami_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the ami launch index of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_ami_launch_index(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the ami manifest path of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_ami_manifest_path(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the list of ancestor ami ids of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_ancestor_ami_ids(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_array_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the instance-action of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_instance_action(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the instance id of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_instance_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the instance type of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_instance_type(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the mac address of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_mac_address(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the private ip address of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_private_ip_address(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the availability zone of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_availability_zone(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the product codes of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_product_codes(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the public key of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_public_key(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the ramdisk id of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_ramdisk_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the reservation id of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_reservation_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the list of the security groups of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_security_groups(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_array_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the list of block device mappings of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_block_device_mapping(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_array_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the attached iam role of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_attached_iam_role(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets temporary credentials based on the attached iam role of the ec2 instance
+ *
+ * @param client imds client to use for the query
+ * @param iam_role_name iam role name to get temporary credentials through
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_credentials(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor iam_role_name,
+ aws_imds_client_on_get_credentials_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the iam profile information of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_iam_profile(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_iam_profile_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the user data of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_user_data(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the signature of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_instance_signature(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data);
+
+/**
+ * Gets the instance information data block of the ec2 instance from the instance metadata document
+ *
+ * @param client imds client to use for the query
+ * @param callback callback function to invoke on query success or failure
+ * @param user_data opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_imds_client_get_instance_info(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_instance_info_callback_fn callback,
+ void *user_data);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_IMDS_CLIENT_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h
new file mode 100644
index 0000000000..3fa13911f6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/credentials.h
@@ -0,0 +1,1011 @@
+#ifndef AWS_AUTH_CREDENTIALS_H
+#define AWS_AUTH_CREDENTIALS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+#include <aws/common/array_list.h>
+#include <aws/common/atomics.h>
+#include <aws/common/linked_list.h>
+#include <aws/io/io.h>
+
+struct aws_client_bootstrap;
+struct aws_auth_http_system_vtable;
+struct aws_credentials;
+struct aws_credentials_provider;
+struct aws_ecc_key_pair;
+struct aws_string;
+
+extern const uint16_t aws_sts_assume_role_default_duration_secs;
+
+/*
+ * Signature for the credentials sourcing callback
+ */
+typedef void(aws_on_get_credentials_callback_fn)(struct aws_credentials *credentials, int error_code, void *user_data);
+
+typedef int(aws_credentials_provider_get_credentials_fn)(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data);
+typedef void(aws_credentials_provider_destroy_fn)(struct aws_credentials_provider *provider);
+
+/*
+ * Common function table that all credentials provider implementations must support
+ */
+struct aws_credentials_provider_vtable {
+ aws_credentials_provider_get_credentials_fn *get_credentials;
+ aws_credentials_provider_destroy_fn *destroy;
+};
+
+typedef void(aws_credentials_provider_shutdown_completed_fn)(void *user_data);
+
+/*
+ * All credentials providers support an optional shutdown callback that
+ * gets invoked, with appropriate user data, when the resources used by the provider
+ * are no longer in use. For example, the imds provider uses this to
+ * signal when it is no longer using the client bootstrap used in its
+ * internal connection manager.
+ */
+struct aws_credentials_provider_shutdown_options {
+ aws_credentials_provider_shutdown_completed_fn *shutdown_callback;
+ void *shutdown_user_data;
+};
+
+/**
+ * A baseclass for credentials providers. A credentials provider is an object that has an asynchronous
+ * query function for retrieving AWS credentials.
+ *
+ * Ref-counted. Thread-safe.
+ */
+struct aws_credentials_provider {
+ struct aws_credentials_provider_vtable *vtable;
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+ void *impl;
+ struct aws_atomic_var ref_count;
+};
+
+/*
+ * Config structs for creating all the different credentials providers
+ */
+
+/**
+ * Configuration options for a provider that returns a fixed set of credentials
+ */
+struct aws_credentials_provider_static_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+ struct aws_byte_cursor access_key_id;
+ struct aws_byte_cursor secret_access_key;
+ struct aws_byte_cursor session_token;
+};
+
+/**
+ * Configuration options for a provider that returns credentials based on environment variable values
+ */
+struct aws_credentials_provider_environment_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+};
+
+/**
+ * Configuration options for a provider that sources credentials from the aws profile and credentials files
+ * (by default ~/.aws/config and ~/.aws/credentials)
+ */
+struct aws_credentials_provider_profile_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Override of what profile to use to source credentials from ('default' by default)
+ */
+ struct aws_byte_cursor profile_name_override;
+
+ /*
+ * Override path to the profile config file (~/.aws/config by default)
+ */
+ struct aws_byte_cursor config_file_name_override;
+
+ /*
+ * Override path to the profile credentials file (~/.aws/credentials by default)
+ */
+ struct aws_byte_cursor credentials_file_name_override;
+
+ /**
+ * (Optional)
+     * Use a cached merged profile collection. A merged collection combines the config file
+     * (~/.aws/config) and the credentials file (~/.aws/credentials) profile collections via
+     * `aws_profile_collection_new_from_merge`.
+ * If this option is provided, `config_file_name_override` and `credentials_file_name_override` will be ignored.
+ */
+ struct aws_profile_collection *profile_collection_cached;
+
+ /*
+ * Bootstrap to use for any network connections made while sourcing credentials (for example,
+ * a profile that uses assume-role will need to hit STS)
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Client TLS context to use for any secure network connections made while sourcing credentials
+ * (for example, a profile that uses assume-role will need to hit STS).
+ *
+ * If a TLS context is needed, and you did not pass one in, it will be created automatically.
+ * However, you are encouraged to pass in a shared one since these are expensive objects.
+ * If using BYO_CRYPTO, you must provide the TLS context since it cannot be created automatically.
+ */
+ struct aws_tls_ctx *tls_ctx;
+
+ /* For mocking the http layer in tests, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+};
+
+/**
+ * Configuration options for a provider that functions as a caching decorator. Credentials sourced through this
+ * provider will be cached within it until their expiration time. When the cached credentials expire, new
+ * credentials will be fetched when next queried.
+ */
+struct aws_credentials_provider_cached_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * The provider to cache credentials query results from
+ */
+ struct aws_credentials_provider *source;
+
+ /*
+ * An optional expiration time period for sourced credentials. For a given set of cached credentials,
+ * the refresh time period will be the minimum of this time and any expiration timestamp on the credentials
+ * themselves.
+ */
+ uint64_t refresh_time_in_milliseconds;
+
+ /* For mocking, leave NULL otherwise */
+ aws_io_clock_fn *high_res_clock_fn;
+ aws_io_clock_fn *system_clock_fn;
+};
+
+/**
+ * Configuration options for a provider that queries, in order, a list of providers. This provider uses the
+ * first set of credentials successfully queried. Providers are queried one at a time; a provider is not queried
+ * until the preceding provider has failed to source credentials.
+ */
+struct aws_credentials_provider_chain_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Pointer to an array of credentials providers to use
+ */
+ struct aws_credentials_provider **providers;
+
+ /*
+ * Number of elements in the array of credentials providers
+ */
+ size_t provider_count;
+};
+
+/*
+ * EC2 IMDS_V1 takes one HTTP request to get a resource, while IMDS_V2 requires an additional token (HTTP PUT)
+ * request to obtain a secure token that is used in the subsequent request.
+ */
+enum aws_imds_protocol_version {
+ /**
+     * Defaults to IMDS_PROTOCOL_V2. It can be set to either version and the IMDS client
+     * will figure out (by looking at the response code) which protocol an instance
+     * is using, but an explicit setting reduces unnecessary network requests.
+ */
+ IMDS_PROTOCOL_V2,
+ IMDS_PROTOCOL_V1,
+};
+
+/**
+ * Configuration options for the provider that sources credentials from ec2 instance metadata
+ */
+struct aws_credentials_provider_imds_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Connection bootstrap to use for any network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Which version of the imds query protocol to use.
+ */
+ enum aws_imds_protocol_version imds_version;
+
+ /* For mocking the http layer in tests, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+};
+
+/*
+ * Configuration options for the provider that sources credentials from ECS container metadata
+ *
+ * The ECS credentials provider can access credentials either via a
+ * relative URI to the fixed endpoint http://169.254.170.2,
+ * or via a full URI specified by environment variables:
+ * AWS_CONTAINER_CREDENTIALS_RELATIVE_URI
+ * AWS_CONTAINER_CREDENTIALS_FULL_URI
+ * AWS_CONTAINER_AUTHORIZATION_TOKEN
+ * If both the relative URI and the full URI are set, the relative URI
+ * takes priority. The token is sent in the auth header, but only for
+ * the full URI.
+ * While the information above is used only in the request, endpoint info
+ * is also needed when creating the ECS provider in order to initialize the
+ * connection manager; specifically, the host and HTTP scheme (TLS or not)
+ * are taken from the endpoint.
+ */
+struct aws_credentials_provider_ecs_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Connection bootstrap to use for any network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Host to query credentials from
+ */
+ struct aws_byte_cursor host;
+
+ /*
+ * Http path and query string for the credentials query
+ */
+ struct aws_byte_cursor path_and_query;
+
+ /*
+ * Authorization token to include in the credentials query
+ */
+ struct aws_byte_cursor auth_token;
+
+ /*
+ * Client TLS context to use when making query.
+ * If set, port 443 is used. If NULL, port 80 is used.
+ */
+ struct aws_tls_ctx *tls_ctx;
+
+ /* For mocking the http layer in tests, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+
+ /*
+ * Port to query credentials from. If zero, 80/443 will be used based on whether or not tls is enabled.
+ */
+ uint16_t port;
+};
+
+/**
+ * Configuration options for the X509 credentials provider
+ *
+ * The x509 credentials provider sources temporary credentials from AWS IoT Core using TLS mutual authentication.
+ * See details: https://docs.aws.amazon.com/iot/latest/developerguide/authorizing-direct-aws.html
+ * An end to end demo with detailed steps can be found here:
+ * https://aws.amazon.com/blogs/security/how-to-eliminate-the-need-for-hardcoded-aws-credentials-in-devices-by-using-the-aws-iot-credentials-provider/
+ */
+struct aws_credentials_provider_x509_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Connection bootstrap to use for any network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /* TLS connection options that have been initialized with your x509 certificate and private key */
+ const struct aws_tls_connection_options *tls_connection_options;
+
+    /* IoT thing name you registered with AWS IoT for your device; it will be used in the HTTP request header */
+ struct aws_byte_cursor thing_name;
+
+    /* IoT role alias you created with AWS IoT for your IAM role; it will be used in the HTTP request path */
+ struct aws_byte_cursor role_alias;
+
+ /**
+ * Per-account X509 credentials sourcing endpoint.
+ */
+ struct aws_byte_cursor endpoint;
+
+ /**
+ * (Optional) Http proxy configuration for the http request that fetches credentials
+ */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /* For mocking the http layer in tests, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+};
+
+/**
+ * Configuration options for the STS web identity provider
+ *
+ * The STS web identity credentials provider sources a set of temporary security credentials for users who have been
+ * authenticated in a mobile or web application with a web identity provider.
+ * Example providers include Amazon Cognito, Login with Amazon, Facebook, Google, or any OpenID Connect-compatible
+ * identity provider like Elastic Kubernetes Service
+ * https://docs.aws.amazon.com/STS/latest/APIReference/API_AssumeRoleWithWebIdentity.html
+ * The required parameters used in the request (region, roleArn, sessionName, tokenFilePath) are automatically resolved
+ * by the SDK from environment variables or the config file.
+ ---------------------------------------------------------------------------------
+ | Parameter | Environment Variable Name | Config File Property Name |
+ ----------------------------------------------------------------------------------
+ | region | AWS_DEFAULT_REGION | region |
+ | role_arn | AWS_ROLE_ARN | role_arn |
+ | role_session_name | AWS_ROLE_SESSION_NAME | role_session_name |
+ | token_file_path | AWS_WEB_IDENTITY_TOKEN_FILE | web_identity_token_file |
+ |--------------------------------------------------------------------------------|
+ */
+struct aws_credentials_provider_sts_web_identity_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Connection bootstrap to use for any network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /**
+ * (Optional)
+ * Use a cached config profile collection. You can also pass a merged collection.
+ */
+ struct aws_profile_collection *config_profile_collection_cached;
+
+ /*
+ * Client TLS context to use when querying STS web identity provider.
+ * Required.
+ */
+ struct aws_tls_ctx *tls_ctx;
+
+ /* For mocking the http layer in tests, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+};
+
+/**
+ * Configuration options for the STS credentials provider
+ */
+struct aws_credentials_provider_sts_options {
+ /*
+ * Connection bootstrap to use for any network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Client TLS context to use when querying STS.
+ * Required.
+ */
+ struct aws_tls_ctx *tls_ctx;
+
+ /*
+ * Credentials provider to be used to sign the requests made to STS to fetch credentials.
+ */
+ struct aws_credentials_provider *creds_provider;
+
+ /*
+     * ARN of the role to assume when fetching credentials
+ */
+ struct aws_byte_cursor role_arn;
+
+ /*
+ * Assumed role session identifier to be associated with the sourced credentials
+ */
+ struct aws_byte_cursor session_name;
+
+ /*
+ * How long sourced credentials should remain valid for, in seconds. 900 is the minimum allowed value.
+ */
+ uint16_t duration_seconds;
+
+ /**
+ * (Optional) Http proxy configuration for the AssumeRole http request that fetches credentials
+ */
+ const struct aws_http_proxy_options *http_proxy_options;
+
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /* For mocking, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+ aws_io_clock_fn *system_clock_fn;
+};
+
+/**
+ *
+ * Configuration options for the process credentials provider
+ *
+ * The process credentials provider sources credentials from running a command or process.
+ * The command to run is sourced from a profile in the AWS config file, using the standard
+ * profile selection rules. The profile key the command is read from is "credential_process."
+ * E.g.:
+ * [default]
+ * credential_process=/opt/amazon/bin/my-credential-fetcher --argsA=abc
+ * On successfully running the command, the output should be JSON data in the following
+ * format:
+ * {
+ "Version": 1,
+ "AccessKeyId": "accesskey",
+      "SecretAccessKey": "secretAccessKey",
+ "SessionToken": "....",
+ "Expiration": "2019-05-29T00:21:43Z"
+ }
+ * Version here identifies the command output format version.
+ * This provider is not part of the default provider chain.
+ */
+struct aws_credentials_provider_process_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+ /**
+     * The profile name in which to look for credential_process;
+     * if not provided, the AWS_PROFILE environment variable is tried.
+ */
+ struct aws_byte_cursor profile_to_use;
+};
+
+/**
+ * Configuration options for the default credentials provider chain.
+ */
+struct aws_credentials_provider_chain_default_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /*
+ * Connection bootstrap to use for any network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Client TLS context to use for any secure network connections made while sourcing credentials.
+ *
+ * If not provided the default chain will construct a new one, but these
+ * are expensive objects so you are encouraged to pass in a shared one.
+ *
+ * Must be provided if using BYO_CRYPTO.
+ */
+ struct aws_tls_ctx *tls_ctx;
+
+ /**
+ * (Optional)
+     * Use a cached merged profile collection. A merged collection combines the config file
+     * (~/.aws/config) and the credentials file (~/.aws/credentials) profile collections via
+     * `aws_profile_collection_new_from_merge`.
+ * If this option is provided, `config_file_name_override` and `credentials_file_name_override` will be ignored.
+ */
+ struct aws_profile_collection *profile_collection_cached;
+};
+
+typedef int(aws_credentials_provider_delegate_get_credentials_fn)(
+ void *delegate_user_data,
+ aws_on_get_credentials_callback_fn callback,
+ void *callback_user_data);
+
+/**
+ * Configuration options for the delegate credentials provider.
+ */
+struct aws_credentials_provider_delegate_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /**
+ * Delegated get_credentials() callback.
+ */
+ aws_credentials_provider_delegate_get_credentials_fn *get_credentials;
+
+ /**
+ * User data for delegated callbacks.
+ */
+ void *delegate_user_data;
+};
+
+/**
+ * A (string) pair defining an identity provider and a valid login token sourced from it.
+ */
+struct aws_cognito_identity_provider_token_pair {
+
+ /**
+ * Name of an identity provider
+ */
+ struct aws_byte_cursor identity_provider_name;
+
+ /**
+     * Valid login token sourced from the identity provider
+ */
+ struct aws_byte_cursor identity_provider_token;
+};
+
+/**
+ * Configuration options needed to create a Cognito-based Credentials Provider
+ */
+struct aws_credentials_provider_cognito_options {
+ struct aws_credentials_provider_shutdown_options shutdown_options;
+
+ /**
+ * Cognito service regional endpoint to source credentials from.
+ */
+ struct aws_byte_cursor endpoint;
+
+ /**
+ * Cognito identity to fetch credentials relative to.
+ */
+ struct aws_byte_cursor identity;
+
+ /**
+ * Optional set of identity provider token pairs to allow for authenticated identity access.
+ */
+ struct aws_cognito_identity_provider_token_pair *logins;
+ size_t login_count;
+
+ /**
+ * Optional ARN of the role to be assumed when multiple roles were received in the token from the identity provider.
+ */
+ struct aws_byte_cursor *custom_role_arn;
+
+ /*
+ * Connection bootstrap to use for network connections made while sourcing credentials
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /*
+ * Client TLS context to use when querying cognito credentials.
+ * Required.
+ */
+ struct aws_tls_ctx *tls_ctx;
+
+ /**
+ * (Optional) Http proxy configuration for the http request that fetches credentials
+ */
+ const struct aws_http_proxy_options *http_proxy_options;
+
+ /* For mocking the http layer in tests, leave NULL otherwise */
+ struct aws_auth_http_system_vtable *function_table;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Credentials APIs
+ *
+ * expiration_timepoint_seconds is the timepoint, in seconds since epoch, that the credentials will no longer
+ * be valid. For credentials that do not expire, use UINT64_MAX.
+ */
+
+/**
+ * Creates a new set of aws credentials
+ *
+ * @param allocator memory allocator to use
+ * @param access_key_id_cursor value for the aws access key id field
+ * @param secret_access_key_cursor value for the secret access key field
+ * @param session_token_cursor (optional) security token associated with the credentials
+ * @param expiration_timepoint_seconds timepoint, in seconds since epoch, that the credentials will no longer
+ * be valid past. For credentials that do not expire, use UINT64_MAX
+ *
+ * @return a valid credentials object, or NULL
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_credentials_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor access_key_id_cursor,
+ struct aws_byte_cursor secret_access_key_cursor,
+ struct aws_byte_cursor session_token_cursor,
+ uint64_t expiration_timepoint_seconds);
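+
+/*
+ * A minimal sketch of building a credentials object directly; the key material
+ * below is a placeholder and `allocator` is assumed to exist.
+ *
+ *     struct aws_credentials *credentials = aws_credentials_new(
+ *         allocator,
+ *         aws_byte_cursor_from_c_str("AKIDEXAMPLE"),
+ *         aws_byte_cursor_from_c_str("example-secret-key"),
+ *         aws_byte_cursor_from_c_str(""), // no session token
+ *         UINT64_MAX);                    // credentials never expire
+ *     // ... use the credentials ...
+ *     aws_credentials_release(credentials);
+ */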
+
+/**
+ * Creates a new set of anonymous AWS credentials.
+ * Use anonymous credentials when you want to skip the signing process.
+ *
+ * @param allocator memory allocator to use
+ *
+ * @return a valid credentials object, or NULL
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_credentials_new_anonymous(struct aws_allocator *allocator);
+
+/**
+ * Creates a new set of AWS credentials
+ *
+ * @param allocator memory allocator to use
+ * @param access_key_id value for the aws access key id field
+ * @param secret_access_key value for the secret access key field
+ * @param session_token (optional) security token associated with the credentials
+ * @param expiration_timepoint_seconds timepoint, in seconds since epoch, that the credentials will no longer
+ * be valid past. For credentials that do not expire, use UINT64_MAX
+ *
+ * @return a valid credentials object, or NULL
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_credentials_new_from_string(
+ struct aws_allocator *allocator,
+ const struct aws_string *access_key_id,
+ const struct aws_string *secret_access_key,
+ const struct aws_string *session_token,
+ uint64_t expiration_timepoint_seconds);
+
+/**
+ * Creates a set of AWS credentials that includes an ECC key pair. These credentials do not have a value for
+ * the secret access key; the ecc key takes over that field's role in sigv4a signing.
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param access_key_id access key id for the credential set
+ * @param ecc_key ecc key to use during signing when using these credentials
+ * @param session_token (optional) session token associated with the credentials
+ * @param expiration_timepoint_in_seconds (optional) if session-based, time at which these credentials expire
+ * @return a new pair of AWS credentials, or NULL
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_credentials_new_ecc(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor access_key_id,
+ struct aws_ecc_key_pair *ecc_key,
+ struct aws_byte_cursor session_token,
+ uint64_t expiration_timepoint_in_seconds);
+
+/*
+ * Takes a pair of AWS credentials and performs the sigv4a key expansion algorithm to generate a unique
+ * ecc P256 key pair based on the credentials. The ecc key is written to the buffer in DER format.
+ *
+ * Sigv4a signing takes the raw DER-encoded ecc key as an optional parameter in signing (if not present,
+ * key expansion will be done for the caller before signing).
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_credentials_new_ecc_from_aws_credentials(
+ struct aws_allocator *allocator,
+ const struct aws_credentials *credentials);
+
+/**
+ * Add a reference to some credentials
+ *
+ * @param credentials credentials to increment the ref count on
+ */
+AWS_AUTH_API
+void aws_credentials_acquire(const struct aws_credentials *credentials);
+
+/**
+ * Remove a reference to some credentials
+ *
+ * @param credentials credentials to decrement the ref count on
+ */
+AWS_AUTH_API
+void aws_credentials_release(const struct aws_credentials *credentials);
+
+/**
+ * Get the AWS access key id from a set of credentials
+ *
+ * @param credentials credentials to get the access key id from
+ * @return a byte cursor to the access key id
+ */
+AWS_AUTH_API
+struct aws_byte_cursor aws_credentials_get_access_key_id(const struct aws_credentials *credentials);
+
+/**
+ * Get the AWS secret access key from a set of credentials
+ *
+ * @param credentials credentials to get the secret access key from
+ * @return a byte cursor to the secret access key
+ */
+AWS_AUTH_API
+struct aws_byte_cursor aws_credentials_get_secret_access_key(const struct aws_credentials *credentials);
+
+/**
+ * Get the AWS session token from a set of credentials
+ *
+ * @param credentials credentials to get the session token from
+ * @return a byte cursor to the session token or an empty byte cursor if there is no session token
+ */
+AWS_AUTH_API
+struct aws_byte_cursor aws_credentials_get_session_token(const struct aws_credentials *credentials);
+
+/**
+ * Get the expiration timepoint (in seconds since epoch) associated with a set of credentials
+ *
+ * @param credentials credentials to get the expiration timepoint for
+ * @return the time, in seconds since epoch, the credentials will expire; UINT64_MAX for credentials
+ * without a specific expiration time
+ */
+AWS_AUTH_API
+uint64_t aws_credentials_get_expiration_timepoint_seconds(const struct aws_credentials *credentials);
+
+/**
+ * Get the elliptic curve key associated with this set of credentials
+ * @param credentials credentials to get the elliptic curve key for
+ * @return the elliptic curve key associated with the credentials, or NULL if no key is associated with
+ * these credentials
+ */
+AWS_AUTH_API
+struct aws_ecc_key_pair *aws_credentials_get_ecc_key_pair(const struct aws_credentials *credentials);
+
+/**
+ * If credentials are anonymous, then the signing process is skipped.
+ *
+ * @param credentials credentials to check
+ *
+ * @return true if the credentials are anonymous; false otherwise.
+ */
+AWS_AUTH_API
+bool aws_credentials_is_anonymous(const struct aws_credentials *credentials);
+
+/**
+ * Derives an ecc key pair (based on the nist P256 curve) from the access key id and secret access key components
+ * of a set of AWS credentials using an internal key derivation specification. Used to perform sigv4a signing in
+ * the hybrid mode based on AWS credentials.
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param credentials AWS credentials to derive the ECC key from using the AWS sigv4a key derivation specification
+ * @return a new ecc key pair or NULL on failure
+ */
+AWS_AUTH_API
+struct aws_ecc_key_pair *aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(
+ struct aws_allocator *allocator,
+ const struct aws_credentials *credentials);
+
+/*
+ * Credentials provider APIs
+ */
+
+/**
+ * Release a reference to a credentials provider
+ *
+ * @param provider provider to decrement the ref count on
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_release(struct aws_credentials_provider *provider);
+
+/*
+ * Add a reference to a credentials provider
+ *
+ * @param provider provider to increment the ref count on
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_acquire(struct aws_credentials_provider *provider);
+
+/*
+ * Async function for retrieving credentials from a provider
+ *
+ * @param provider credentials provider to source from
+ * @param callback completion callback to invoke when the fetch has completed or failed
+ * @param user_data user data to pass to the completion callback
+ *
+ * @return AWS_OP_SUCCESS if the fetch was successfully started, AWS_OP_ERR otherwise. The completion
+ * callback will be invoked if and only if the return value was AWS_OP_SUCCESS.
+ *
+ */
+AWS_AUTH_API
+int aws_credentials_provider_get_credentials(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data);
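+
+/*
+ * A minimal sketch of the asynchronous query pattern; `provider` is assumed to
+ * have been created by one of the constructors below.
+ *
+ *     static void s_on_credentials(struct aws_credentials *credentials, int error_code, void *user_data) {
+ *         (void)user_data;
+ *         if (error_code == AWS_ERROR_SUCCESS && credentials != NULL) {
+ *             struct aws_byte_cursor access_key_id = aws_credentials_get_access_key_id(credentials);
+ *             // consume access_key_id.ptr / access_key_id.len here
+ *         }
+ *     }
+ *
+ *     if (aws_credentials_provider_get_credentials(provider, s_on_credentials, NULL) != AWS_OP_SUCCESS) {
+ *         // the fetch could not be started; the callback will not be invoked
+ *     }
+ */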
+
+/*
+ * Credentials provider variant creation
+ */
+
+/**
+ * Creates a simple provider that just returns a fixed set of credentials
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_static(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_static_options *options);
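+
+/*
+ * A minimal sketch; the key material below is a placeholder and `allocator` is
+ * assumed to exist.
+ *
+ *     struct aws_credentials_provider_static_options options = {
+ *         .access_key_id = aws_byte_cursor_from_c_str("AKIDEXAMPLE"),
+ *         .secret_access_key = aws_byte_cursor_from_c_str("example-secret-key"),
+ *     };
+ *     struct aws_credentials_provider *provider =
+ *         aws_credentials_provider_new_static(allocator, &options);
+ *     // ... query via aws_credentials_provider_get_credentials(), then ...
+ *     aws_credentials_provider_release(provider);
+ */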
+
+/**
+ * Creates a simple anonymous credentials provider
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param shutdown_options an optional shutdown callback that gets
+ * invoked when the resources used by the provider are no longer in use.
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_anonymous(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_shutdown_options *shutdown_options);
+
+/**
+ * Creates a provider that returns credentials sourced from the environment variables:
+ *
+ * AWS_ACCESS_KEY_ID
+ * AWS_SECRET_ACCESS_KEY
+ * AWS_SESSION_TOKEN
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_environment(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_environment_options *options);
+
+/**
+ * Creates a provider that functions as a caching decorator around another provider.
+ *
+ * For example, the default chain is implemented as:
+ *
+ * CachedProvider -> ProviderChain(EnvironmentProvider -> ProfileProvider -> ECS/EC2IMD etc...)
+ *
+ * A reference is taken on the target provider
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_cached(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_cached_options *options);
+
+/**
+ * Creates a provider that sources credentials from key-value profiles loaded from the aws credentials
+ * file ("~/.aws/credentials" by default) and the aws config file ("~/.aws/config" by
+ * default)
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_profile(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_profile_options *options);
+
+/**
+ * Creates a provider that assumes an IAM role via the STS AssumeRole() API. This provider will fetch new credentials
+ * upon each call to aws_credentials_provider_get_credentials().
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_sts(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_sts_options *options);
+
+/**
+ * Creates a provider that sources credentials from an ordered sequence of providers, with the overall result
+ * being from the first provider to return a valid set of credentials
+ *
+ * References are taken on all supplied providers
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_chain(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_chain_options *options);
+
+/**
+ * Creates a provider that sources credentials from the ec2 instance metadata service
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_imds(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_imds_options *options);
+
+/**
+ * Creates a provider that sources credentials from the ecs role credentials service
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_ecs(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_ecs_options *options);
+
+/**
+ * Creates a provider that sources credentials from IoT Core
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_x509(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_x509_options *options);
+
+/**
+ * Creates a provider that sources credentials from STS using AssumeRoleWithWebIdentity
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_sts_web_identity(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_sts_web_identity_options *options);
+
+/**
+ * Creates a provider that sources credentials from running an external command or process
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_process(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_process_options *options);
+
+/**
+ * Creates a credentials provider that relies on a caller-provided vtable (delegate) to fetch credentials.
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_delegate(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_delegate_options *options);
+
+/**
+ * Creates a provider that sources credentials from the Cognito-Identity service via an
+ * invocation of the GetCredentialsForIdentity API call.
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_cognito(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_cognito_options *options);
+
+/**
+ * Creates a cognito-based provider that has a caching layer wrapped around it
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options cognito-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_cognito_caching(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_cognito_options *options);
+
+/**
+ * Creates the default provider chain used by most AWS SDKs.
+ *
+ * Generally:
+ *
+ * (1) Environment
+ * (2) Profile
+ * (3) STS web identity
+ * (4) (conditional, off by default) ECS
+ * (5) (conditional, on by default) EC2 Instance Metadata
+ *
+ * Support for environmental control of the default provider chain is not yet
+ * implemented.
+ *
+ * @param allocator memory allocator to use for all memory allocation
+ * @param options provider-specific configuration options
+ *
+ * @return the newly-constructed credentials provider, or NULL if an error occurred.
+ */
+AWS_AUTH_API
+struct aws_credentials_provider *aws_credentials_provider_new_chain_default(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_chain_default_options *options);
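A sketch of standing up the default chain; the bootstrap and tls_ctx members are assumptions about aws_credentials_provider_chain_default_options, which is declared earlier in this header:

    struct aws_credentials_provider_chain_default_options options = {
        .bootstrap = bootstrap, /* an existing struct aws_client_bootstrap *, set up elsewhere */
        .tls_ctx = tls_ctx,     /* optional struct aws_tls_ctx * for providers that need TLS */
    };

    struct aws_credentials_provider *default_chain =
        aws_credentials_provider_new_chain_default(allocator, &options);

    /* per the new_cached() documentation above, the returned chain is already wrapped in a caching provider */
    aws_credentials_provider_release(default_chain);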
+
+AWS_AUTH_API extern const struct aws_auth_http_system_vtable *g_aws_credentials_provider_http_function_table;
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_CREDENTIALS_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/exports.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/exports.h
new file mode 100644
index 0000000000..9f8fe013fc
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/exports.h
@@ -0,0 +1,29 @@
+#ifndef AWS_AUTH_EXPORTS_H
+#define AWS_AUTH_EXPORTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_AUTH_USE_IMPORT_EXPORT
+# ifdef AWS_AUTH_EXPORTS
+# define AWS_AUTH_API __declspec(dllexport)
+# else
+# define AWS_AUTH_API __declspec(dllimport)
+# endif /* AWS_AUTH_EXPORTS */
+# else
+# define AWS_AUTH_API
+# endif /*USE_IMPORT_EXPORT */
+
+#else
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_AUTH_USE_IMPORT_EXPORT) && defined(AWS_AUTH_EXPORTS)
+# define AWS_AUTH_API __attribute__((visibility("default")))
+# else
+# define AWS_AUTH_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */
+
+#endif /* AWS_AUTH_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_profile.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_profile.h
new file mode 100644
index 0000000000..91e77d5af6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_profile.h
@@ -0,0 +1,25 @@
+#ifndef AWS_AUTH_AWS_PROFILE_H
+#define AWS_AUTH_AWS_PROFILE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+
+#include <aws/sdkutils/aws_profile.h>
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Returns a set of credentials associated with a profile, based on the properties within the profile
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_credentials_new_from_profile(
+ struct aws_allocator *allocator,
+ const struct aws_profile *profile);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_AWS_PROFILE_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h
new file mode 100644
index 0000000000..3ba0fd2c74
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/aws_signing.h
@@ -0,0 +1,128 @@
+#ifndef AWS_AUTH_SIGNING_SIGV4_H
+#define AWS_AUTH_SIGNING_SIGV4_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+#include <aws/auth/signing.h>
+#include <aws/auth/signing_result.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/hash_table.h>
+
+struct aws_ecc_key_pair;
+struct aws_signable;
+struct aws_signing_config_aws;
+struct aws_signing_result;
+
+/*
+ * Private signing API
+ *
+ * Technically this could be folded directly into signing.c but it's useful to be able
+ * to call the individual stages of the signing process for testing.
+ */
+
+/*
+ * A structure that contains all the state related to signing a request for AWS. We pass
+ * this around rather than a million parameters.
+ */
+struct aws_signing_state_aws {
+ struct aws_allocator *allocator;
+
+ const struct aws_signable *signable;
+ aws_signing_complete_fn *on_complete;
+ void *userdata;
+
+ struct aws_signing_config_aws config;
+ struct aws_byte_buf config_string_buffer;
+
+ struct aws_signing_result result;
+ int error_code;
+
+ /* persistent, constructed values that are either/or
+ * (1) consumed by later stages of the signing process,
+ * (2) used in multiple places
+ */
+ struct aws_byte_buf canonical_request;
+ struct aws_byte_buf string_to_sign;
+ struct aws_byte_buf signed_headers;
+ struct aws_byte_buf canonical_header_block;
+ struct aws_byte_buf payload_hash;
+ struct aws_byte_buf credential_scope;
+ struct aws_byte_buf access_credential_scope;
+ struct aws_byte_buf date;
+ struct aws_byte_buf signature;
+ /* The "payload" to be used in the string-to-sign.
+ * For a normal HTTP request, this is the hashed canonical-request.
+ * But for other types of signing (i.e chunk, event) it's something else. */
+ struct aws_byte_buf string_to_sign_payload;
+
+ /* temp buf for writing out strings */
+ struct aws_byte_buf scratch_buf;
+
+ char expiration_array[32]; /* serialization of the pre-signing expiration duration value */
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_AUTH_API
+struct aws_signing_state_aws *aws_signing_state_new(
+ struct aws_allocator *allocator,
+ const struct aws_signing_config_aws *config,
+ const struct aws_signable *signable,
+ aws_signing_complete_fn *on_complete,
+ void *userdata);
+
+AWS_AUTH_API
+void aws_signing_state_destroy(struct aws_signing_state_aws *state);
+
+/*
+ * A set of functions that together performs the AWS signing process based
+ * on the algorithm and signature type requested in the shared config.
+ *
+ * These must be called (presumably by the signer) in sequential order:
+ *
+ * (1) aws_signing_build_canonical_request
+ * (2) aws_signing_build_string_to_sign
+ * (3) aws_signing_build_authorization_value
+ */
+
+AWS_AUTH_API
+int aws_signing_build_canonical_request(struct aws_signing_state_aws *state);
+
+AWS_AUTH_API
+int aws_signing_build_string_to_sign(struct aws_signing_state_aws *state);
+
+AWS_AUTH_API
+int aws_signing_build_authorization_value(struct aws_signing_state_aws *state);
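A test-style sketch of driving the three stages in order using only the declarations above; allocator, config, signable and s_on_signing_complete are assumed to exist:

    struct aws_signing_state_aws *state =
        aws_signing_state_new(allocator, &config, signable, s_on_signing_complete, NULL);
    if (state == NULL) {
        return AWS_OP_ERR;
    }

    if (aws_signing_build_canonical_request(state) != AWS_OP_SUCCESS ||
        aws_signing_build_string_to_sign(state) != AWS_OP_SUCCESS ||
        aws_signing_build_authorization_value(state) != AWS_OP_SUCCESS) {
        aws_signing_state_destroy(state);
        return AWS_OP_ERR;
    }

    /* state->result now holds the computed signing properties */
    aws_signing_state_destroy(state);
    return AWS_OP_SUCCESS;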
+
+/*
+ * Named constants particular to the sigv4 signing algorithm. Can be moved to a public header
+ * as needed.
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_content_header_name;
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_algorithm_query_param_name;
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_credential_query_param_name;
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_date_name;
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_signed_headers_query_param_name;
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_security_token_name;
+AWS_AUTH_API extern const struct aws_string *g_signature_type_sigv4a_http_request;
+
+/**
+ * Initializes the internal table of headers that should not be signed
+ */
+AWS_AUTH_API
+int aws_signing_init_signing_tables(struct aws_allocator *allocator);
+
+/**
+ * Cleans up the internal table of headers that should not be signed
+ */
+AWS_AUTH_API
+void aws_signing_clean_up_signing_tables(void);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_SIGNING_SIGV4_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h
new file mode 100644
index 0000000000..7bba24331e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/credentials_utils.h
@@ -0,0 +1,165 @@
+#ifndef AWS_AUTH_CREDENTIALS_PRIVATE_H
+#define AWS_AUTH_CREDENTIALS_PRIVATE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+#include <aws/auth/credentials.h>
+#include <aws/http/connection_manager.h>
+#include <aws/io/retry_strategy.h>
+
+struct aws_http_connection;
+struct aws_http_connection_manager;
+struct aws_http_make_request_options;
+struct aws_http_stream;
+struct aws_json_value;
+
+/*
+ * Internal struct tracking an asynchronous credentials query.
+ * Used by both the cached provider and the test mocks.
+ *
+ */
+struct aws_credentials_query {
+ struct aws_linked_list_node node;
+ struct aws_credentials_provider *provider;
+ aws_on_get_credentials_callback_fn *callback;
+ void *user_data;
+};
+
+typedef struct aws_http_connection_manager *(aws_http_connection_manager_new_fn)(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options);
+typedef void(aws_http_connection_manager_release_fn)(struct aws_http_connection_manager *manager);
+typedef void(aws_http_connection_manager_acquire_connection_fn)(
+ struct aws_http_connection_manager *manager,
+ aws_http_connection_manager_on_connection_setup_fn *callback,
+ void *user_data);
+typedef int(aws_http_connection_manager_release_connection_fn)(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection *connection);
+typedef struct aws_http_stream *(aws_http_connection_make_request_fn)(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+typedef int(aws_http_stream_activate_fn)(struct aws_http_stream *stream);
+typedef struct aws_http_connection *(aws_http_stream_get_connection_fn)(const struct aws_http_stream *stream);
+
+typedef int(aws_http_stream_get_incoming_response_status_fn)(const struct aws_http_stream *stream, int *out_status);
+typedef void(aws_http_stream_release_fn)(struct aws_http_stream *stream);
+typedef void(aws_http_connection_close_fn)(struct aws_http_connection *connection);
+
+/*
+ * Table of all downstream http functions used by the credentials providers that make http calls. Allows for simple
+ * mocking.
+ */
+struct aws_auth_http_system_vtable {
+ aws_http_connection_manager_new_fn *aws_http_connection_manager_new;
+ aws_http_connection_manager_release_fn *aws_http_connection_manager_release;
+
+ aws_http_connection_manager_acquire_connection_fn *aws_http_connection_manager_acquire_connection;
+ aws_http_connection_manager_release_connection_fn *aws_http_connection_manager_release_connection;
+
+ aws_http_connection_make_request_fn *aws_http_connection_make_request;
+ aws_http_stream_activate_fn *aws_http_stream_activate;
+ aws_http_stream_get_connection_fn *aws_http_stream_get_connection;
+ aws_http_stream_get_incoming_response_status_fn *aws_http_stream_get_incoming_response_status;
+ aws_http_stream_release_fn *aws_http_stream_release;
+
+ aws_http_connection_close_fn *aws_http_connection_close;
+};
+
+enum aws_parse_credentials_expiration_format {
+ AWS_PCEF_STRING_ISO_8601_DATE,
+ AWS_PCEF_NUMBER_UNIX_EPOCH,
+};
+
+struct aws_parse_credentials_from_json_doc_options {
+ const char *access_key_id_name;
+ const char *secret_access_key_name;
+ const char *token_name;
+ const char *expiration_name;
+ enum aws_parse_credentials_expiration_format expiration_format;
+ bool token_required;
+ bool expiration_required;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Misc. credentials-related APIs
+ */
+
+AWS_AUTH_API
+void aws_credentials_query_init(
+ struct aws_credentials_query *query,
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn *callback,
+ void *user_data);
+
+AWS_AUTH_API
+void aws_credentials_query_clean_up(struct aws_credentials_query *query);
+
+AWS_AUTH_API
+void aws_credentials_provider_init_base(
+ struct aws_credentials_provider *provider,
+ struct aws_allocator *allocator,
+ struct aws_credentials_provider_vtable *vtable,
+ void *impl);
+
+AWS_AUTH_API
+void aws_credentials_provider_destroy(struct aws_credentials_provider *provider);
+
+AWS_AUTH_API
+void aws_credentials_provider_invoke_shutdown_callback(struct aws_credentials_provider *provider);
+
+/**
+ * This API is used internally to parse credentials from a json document.
+ * It _ONLY_ parses the first level of the json structure. A json document like
+ * this will produce valid credentials:
+ {
+ "accessKeyId" : "...",
+ "secretAccessKey" : "...",
+ "Token" : "...",
+ "expiration" : "2019-05-29T00:21:43Z"
+ }
+ * but a json document like this won't:
+ {
+ "credentials": {
+ "accessKeyId" : "...",
+ "secretAccessKey" : "...",
+ "sessionToken" : "...",
+ "expiration" : "2019-05-29T00:21:43Z"
+ }
+ }
+ * In general, the credential keys in the json document are named
+ * "AccessKeyId", "SecretAccessKey", "Token" and "Expiration",
+ * but some services use different keys, like "sessionToken".
+ * Valid credentials must contain an access key id and a secret access key.
+ * For some services, the token and expiration are not required.
+ * So the key names are provided by the caller and this API
+ * performs a case-insensitive search.
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_parse_credentials_from_aws_json_object(
+ struct aws_allocator *allocator,
+ struct aws_json_value *document_root,
+ const struct aws_parse_credentials_from_json_doc_options *options);
+
+/**
+ * This API is similar to aws_parse_credentials_from_aws_json_object,
+ * except it accepts a char buffer json document as its input.
+ */
+AWS_AUTH_API
+struct aws_credentials *aws_parse_credentials_from_json_document(
+ struct aws_allocator *allocator,
+ const char *json_document,
+ const struct aws_parse_credentials_from_json_doc_options *options);
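A sketch of parsing an IMDS-style document with the options struct defined above; the key names and the json_document string are illustrative:

    struct aws_parse_credentials_from_json_doc_options options = {
        .access_key_id_name = "AccessKeyId",
        .secret_access_key_name = "SecretAccessKey",
        .token_name = "Token",
        .expiration_name = "Expiration",
        .expiration_format = AWS_PCEF_STRING_ISO_8601_DATE,
        .token_required = true,
        .expiration_required = true,
    };

    struct aws_credentials *credentials =
        aws_parse_credentials_from_json_document(allocator, json_document, &options);
    if (credentials == NULL) {
        /* a required key was missing or the document failed to parse */
    }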
+
+AWS_AUTH_API
+enum aws_retry_error_type aws_credentials_provider_compute_retry_error_type(int response_code, int error_code);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_CREDENTIALS_PRIVATE_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/key_derivation.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/key_derivation.h
new file mode 100644
index 0000000000..4cd25dfd00
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/key_derivation.h
@@ -0,0 +1,51 @@
+#ifndef AWS_AUTH_KEY_DERIVATION_H
+#define AWS_AUTH_KEY_DERIVATION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+
+struct aws_byte_buf;
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Some utility functions used while deriving an ecc key from aws credentials.
+ *
+ * The functions operate on the raw bytes of a buffer, treating them as a (base 256) big-endian
+ * integer.
+ */
+
+/**
+ * Compares two byte buffers lexically. The buffers must be of equal size. Lexical comparison from front-to-back
+ * corresponds to arithmetic comparison when the byte sequences are considered to be big-endian large integers.
+ * The output parameter comparison_result is set to:
+ * -1 if lhs_raw_be_bigint < rhs_raw_be_bigint
+ * 0 if lhs_raw_be_bigint == rhs_raw_be_bigint
+ * 1 if lhs_raw_be_bigint > rhs_raw_be_bigint
+ *
+ * @return AWS_OP_SUCCESS or AWS_OP_ERR
+ *
+ * This is a constant-time operation.
+ */
+AWS_AUTH_API
+int aws_be_bytes_compare_constant_time(
+ const struct aws_byte_buf *lhs_raw_be_bigint,
+ const struct aws_byte_buf *rhs_raw_be_bigint,
+ int *comparison_result);
+
+/**
+ * Adds one to a big integer represented as a sequence of bytes (in big-endian order). A maximal (unsigned) value
+ * will roll over to 0.
+ *
+ * This is a constant-time operation.
+ */
+AWS_AUTH_API
+void aws_be_bytes_add_one_constant_time(struct aws_byte_buf *raw_be_bigint);
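A sketch of how the two helpers compose; lhs and rhs are equal-length aws_byte_buf values, initialized elsewhere, treated as big-endian integers:

    int comparison = 0;
    if (aws_be_bytes_compare_constant_time(&lhs, &rhs, &comparison) != AWS_OP_SUCCESS) {
        return AWS_OP_ERR;
    }

    if (comparison < 0) {
        /* lhs is numerically smaller; increment it by one in constant time */
        aws_be_bytes_add_one_constant_time(&lhs);
    }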
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_KEY_DERIVATION_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/sigv4_http_request.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/sigv4_http_request.h
new file mode 100644
index 0000000000..72da03a6bf
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/private/sigv4_http_request.h
@@ -0,0 +1,14 @@
+#ifndef AWS_AUTH_SIGV4_HTTP_REQUEST_H
+#define AWS_AUTH_SIGV4_HTTP_REQUEST_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+
+AWS_EXTERN_C_BEGIN
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_SIGV4_HTTP_REQUEST_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h
new file mode 100644
index 0000000000..46f65378d7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signable.h
@@ -0,0 +1,234 @@
+#ifndef AWS_AUTH_SIGNABLE_H
+#define AWS_AUTH_SIGNABLE_H
+
+#include <aws/auth/auth.h>
+
+struct aws_http_message;
+struct aws_http_headers;
+struct aws_input_stream;
+struct aws_signable;
+struct aws_string;
+
+/*
+ * While not referenced directly in this file, this is the structure expected to be in the property lists
+ */
+struct aws_signable_property_list_pair {
+ struct aws_byte_cursor name;
+ struct aws_byte_cursor value;
+};
+
+typedef int(aws_signable_get_property_fn)(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value);
+
+typedef int(aws_signable_get_property_list_fn)(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_list);
+
+typedef int(aws_signable_get_payload_stream_fn)(
+ const struct aws_signable *signable,
+ struct aws_input_stream **out_input_stream);
+
+typedef void(aws_signable_destroy_fn)(struct aws_signable *signable);
+
+struct aws_signable_vtable {
+ aws_signable_get_property_fn *get_property;
+ aws_signable_get_property_list_fn *get_property_list;
+ aws_signable_get_payload_stream_fn *get_payload_stream;
+ aws_signable_destroy_fn *destroy;
+};
+
+/**
+ * Signable is a generic interface for any kind of object that can be cryptographically signed.
+ *
+ * Like signing_result, the signable interface presents
+ *
+ * (1) Properties - A set of key-value pairs
+ * (2) Property Lists - A set of named key-value pair lists
+ *
+ * as well as
+ *
+ * (3) A message payload modeled as a stream
+ *
+ * When creating a signable "subclass" the query interface should map to retrieving
+ * the properties of the underlying object needed by signing algorithms that can operate on it.
+ *
+ * As an example, if a signable implementation wrapped an http request, you would query
+ * request elements like method and uri from the property interface, headers would be queried
+ * via the property list interface, and the request body would map to the payload stream.
+ *
+ * String constants that map to agreed on keys for particular signable types
+ * ("METHOD", "URI", "HEADERS", etc...) are exposed in appropriate header files.
+ */
+struct aws_signable {
+ struct aws_allocator *allocator;
+ void *impl;
+ struct aws_signable_vtable *vtable;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Cleans up and frees all resources associated with a signable instance
+ *
+ * @param signable signable object to destroy
+ */
+AWS_AUTH_API
+void aws_signable_destroy(struct aws_signable *signable);
+
+/**
+ * Retrieves a property (key-value pair) from a signable. Global property name constants are
+ * included below.
+ *
+ * @param signable signable object to retrieve a property from
+ * @param name name of the property to query
+ * @param out_value output parameter for the property's value
+ *
+ * @return AWS_OP_SUCCESS if the property was successfully fetched, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signable_get_property(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value);
+
+/**
+ * Retrieves a named property list (list of key-value pairs) from a signable. Global property list name
+ * constants are included below.
+ *
+ * @param signable signable object to retrieve a property list from
+ * @param name name of the property list to fetch
+ * @param out_property_list output parameter for the fetched property list
+ *
+ * @return AWS_OP_SUCCESS if the property list was successfully fetched, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signable_get_property_list(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_property_list);
+
+/**
+ * Retrieves the signable's message payload as a stream.
+ *
+ * @param signable signable to get the payload of
+ * @param out_input_stream output parameter for the payload stream
+ *
+ * @return AWS_OP_SUCCESS if successful, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signable_get_payload_stream(const struct aws_signable *signable, struct aws_input_stream **out_input_stream);
+
+/*
+ * Some global property and property-list name constants
+ */
+
+/**
+ * Name of the property list that wraps the headers of an http request
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_http_headers_property_list_name;
+
+/**
+ * Name of the property list that wraps the query params of an http request. Only used by signing_result.
+ * For input to a http signing algorithm, query params are assumed to be part of the uri.
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_http_query_params_property_list_name;
+
+/**
+ * Name of the property that holds the method of an http request
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_http_method_property_name;
+
+/**
+ * Name of the property that holds the URI of an http request
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_http_uri_property_name;
+
+/**
+ * Name of the property that holds the signature value. This is always added to signing results.
+ * Depending on the requested signature type, the signature may be padded or encoded differently:
+ * (1) Header - hex encoding of the binary signature value
+ * (2) QueryParam - hex encoding of the binary signature value
+ * (3) Chunk/Sigv4 - hex encoding of the binary signature value
+ * (4) Chunk/Sigv4a - fixed-size-rhs-padded (with AWS_SIGV4A_SIGNATURE_PADDING_BYTE) hex encoding of the
+ * binary signature value
+ * (5) Event - binary signature value (NYI)
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_signature_property_name;
+
+/**
+ * Name of the property that holds the (hex-encoded) signature value of the signing event that preceded this one.
+ * This property must appear on signables that represent chunks or events.
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_previous_signature_property_name;
+
+/**
+ * Name of the property that holds the canonical request associated with this signable.
+ * This property must appear on signables that represent an http request's canonical request.
+ */
+AWS_AUTH_API extern const struct aws_string *g_aws_canonical_request_property_name;
+
+/*
+ * Common signable constructors
+ */
+
+/**
+ * Creates a signable wrapper around an http request.
+ *
+ * @param allocator memory allocator to use to create the signable
+ * @param request http request to create a signable for
+ *
+ * @return the new signable object, or NULL if failure
+ */
+AWS_AUTH_API
+struct aws_signable *aws_signable_new_http_request(struct aws_allocator *allocator, struct aws_http_message *request);
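A sketch of wrapping a request and querying one of its properties; the aws_http_message helpers come from aws-c-http and are assumptions here, while the property name constant is declared above in this header:

    struct aws_http_message *request = aws_http_message_new_request(allocator);
    aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"));
    aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/"));

    struct aws_signable *signable = aws_signable_new_http_request(allocator, request);

    struct aws_byte_cursor method;
    AWS_ZERO_STRUCT(method);
    if (aws_signable_get_property(signable, g_aws_http_method_property_name, &method) == AWS_OP_SUCCESS) {
        /* method now points at "GET", owned by the wrapped request */
    }

    aws_signable_destroy(signable);
    aws_http_message_release(request);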
+
+/**
+ * Creates a signable that represents a unit of chunked encoding within an http request.
+ * This can also be used for Transcribe event signing with encoded payload as chunk_data.
+ *
+ * @param allocator memory allocator used to create the signable
+ * @param chunk_data stream representing the data in the chunk; it should be in its final, encoded form
+ * @param previous_signature the signature computed in the most recent signing that preceded this one. It can be
+ * found by copying the "signature" property from the signing_result of that most recent signing.
+ *
+ * @return the new signable object, or NULL if failure
+ */
+AWS_AUTH_API
+struct aws_signable *aws_signable_new_chunk(
+ struct aws_allocator *allocator,
+ struct aws_input_stream *chunk_data,
+ struct aws_byte_cursor previous_signature);
+
+/**
+ * Creates a signable wrapper around a set of headers.
+ *
+ * @param allocator memory allocator used to create the signable
+ * @param trailing_headers http headers to create a signable for
+ * @param previous_signature the signature computed in the most recent signing that preceded this one. It can be
+ * found by copying the "signature" property from the signing_result of that most recent signing.
+ *
+ * @return the new signable object, or NULL if failure
+ */
+AWS_AUTH_API
+struct aws_signable *aws_signable_new_trailing_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *trailing_headers,
+ struct aws_byte_cursor previous_signature);
+
+/**
+ * Creates a signable that represents a pre-computed canonical request from an http request
+ * @param allocator memory allocator used to create the signable
+ * @param canonical_request text of the canonical request
+ * @return the new signable object, or NULL if failure
+ */
+AWS_AUTH_API
+struct aws_signable *aws_signable_new_canonical_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor canonical_request);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_SIGNABLE_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h
new file mode 100644
index 0000000000..185a0abb54
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing.h
@@ -0,0 +1,135 @@
+#ifndef AWS_AUTH_SIGNER_H
+#define AWS_AUTH_SIGNER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+
+#include <aws/auth/signing_config.h>
+
+struct aws_ecc_key_pair;
+struct aws_signable;
+struct aws_signing_result;
+
+/**
+ * Gets called by the signing function when the signing is complete.
+ *
+ * Note that result will be destroyed after this function returns, so either copy it,
+ * or do all necessary adjustments inside the callback.
+ *
+ * When performing event or chunk signing, you will need to copy out the signature value in order
+ * to correctly configure the signable that wraps the event or chunk you want signed next. The signature is
+ * found in the "signature" property on the signing result. This value must be added as the
+ * "previous-signature" property on the next signable.
+ */
+typedef void(aws_signing_complete_fn)(struct aws_signing_result *result, int error_code, void *userdata);
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Takes a signable object and a configuration struct and computes the changes to the signable necessary
+ * for compliance with the signer's signing algorithm.
+ *
+ * This signing function currently supports only the sigv4 algorithm.
+ *
+ * When using this signing function to sign AWS http requests:
+ *
+ * (1) Do not add the following headers to requests before signing:
+ * x-amz-content-sha256,
+ * X-Amz-Date,
+ * Authorization
+ *
+ * (2) Do not add the following query params to requests before signing:
+ * X-Amz-Signature,
+ * X-Amz-Date,
+ * X-Amz-Credential,
+ * X-Amz-Algorithm,
+ * X-Amz-SignedHeaders
+ *
+ * The signing result will tell you exactly which headers and/or query params to add to the request
+ * to make it a fully-signed AWS http request.
+ *
+ *
+ * When using this signing function to sign chunks:
+ *
+ * (1) Use aws_signable_new_chunk() to create the signable object representing the chunk
+ *
+ * The signing result will include the chunk's signature as the "signature" property.
+ */
+
+/**
+ * (Asynchronous) entry point to sign something (a request, a chunk, an event) with an AWS signing process.
+ * Depending on the configuration, the signing process may or may not complete synchronously.
+ *
+ * @param allocator memory allocator to use throughout the signing process
+ * @param signable the thing to be signed. See signable.h for common constructors for signables that
+ * wrap different types.
+ * @param base_config pointer to a signing configuration, currently this must be of type aws_signing_config_aws
+ * @param on_complete completion callback to be invoked when signing has finished
+ * @param user_data opaque user data that will be passed to the completion callback
+ *
+ * @return AWS_OP_SUCCESS if the signing attempt was *initiated* successfully, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_sign_request_aws(
+ struct aws_allocator *allocator,
+ const struct aws_signable *signable,
+ const struct aws_signing_config_base *base_config,
+ aws_signing_complete_fn *on_complete,
+ void *userdata);
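A sketch of the asynchronous flow described above; config is a populated aws_signing_config_aws, and s_on_signing_complete is a hypothetical callback that applies the result to the request it receives as user data:

    static void s_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) {
        struct aws_http_message *request = userdata;
        if (error_code == AWS_ERROR_SUCCESS) {
            /* copy the header/query updates onto the request; result is destroyed after this returns */
            aws_apply_signing_result_to_http_request(request, aws_default_allocator(), result);
        }
    }

    /* later, e.g. while preparing a request */
    if (aws_sign_request_aws(
            allocator,
            signable,
            (const struct aws_signing_config_base *)&config,
            s_on_signing_complete,
            request) != AWS_OP_SUCCESS) {
        /* signing never started; the callback will not fire */
    }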
+
+/**
+ * Test-only API used for cross-library signing verification tests
+ *
+ * Verifies:
+ * (1) The canonical request generated during sigv4a signing of the request matches what is passed in
+ * (2) The signature passed in is a valid ECDSA signature of the hashed string-to-sign derived from the
+ * canonical request
+ *
+ * @param allocator memory allocator to use throughout the signing verification process
+ * @param signable the thing to be signed. See signable.h for common constructors for signables that
+ * wrap different types.
+ * @param base_config pointer to a signing configuration, currently this must be of type aws_signing_config_aws
+ * @param expected_canonical_request_cursor expected result when building the canonical request
+ * @param signature_cursor the actual signature computed from a previous signing of the signable
+ * @param ecc_key_pub_x the x coordinate of the public part of the ecc key to verify the signature
+ * @param ecc_key_pub_y the y coordinate of the public part of the ecc key to verify the signature
+ *
+ * @return AWS_OP_SUCCESS if the signing attempt was *initiated* successfully, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_verify_sigv4a_signing(
+ struct aws_allocator *allocator,
+ const struct aws_signable *signable,
+ const struct aws_signing_config_base *base_config,
+ struct aws_byte_cursor expected_canonical_request_cursor,
+ struct aws_byte_cursor signature_cursor,
+ struct aws_byte_cursor ecc_key_pub_x,
+ struct aws_byte_cursor ecc_key_pub_y);
+
+/**
+ * Another helper function to check a computed sigv4a signature.
+ */
+AWS_AUTH_API
+int aws_validate_v4a_authorization_value(
+ struct aws_allocator *allocator,
+ struct aws_ecc_key_pair *ecc_key,
+ struct aws_byte_cursor string_to_sign_cursor,
+ struct aws_byte_cursor signature_value_cursor);
+
+/**
+ * Removes any padding added to the end of a sigv4a signature. Signature must be hex-encoded.
+ * @param signature signature to remove padding from
+ * @return cursor that ranges over only the valid hex encoding of the sigv4a signature
+ */
+AWS_AUTH_API
+struct aws_byte_cursor aws_trim_padded_sigv4a_signature(struct aws_byte_cursor signature);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_SIGNER_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h
new file mode 100644
index 0000000000..e7ce06321c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_config.h
@@ -0,0 +1,310 @@
+#ifndef AWS_AUTH_SIGNING_CONFIG_H
+#define AWS_AUTH_SIGNING_CONFIG_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/date_time.h>
+
+struct aws_credentials;
+
+typedef bool(aws_should_sign_header_fn)(const struct aws_byte_cursor *name, void *userdata);
+
+/**
+ * A primitive RTTI indicator for signing configuration structs
+ *
+ * There must be one entry per config structure type and it's a fatal error
+ * to put the wrong value in the "config_type" member of your config structure.
+ */
+enum aws_signing_config_type { AWS_SIGNING_CONFIG_AWS = 1 };
+
+/**
+ * All signing configuration structs must match this by having
+ * the config_type member as the first member.
+ */
+struct aws_signing_config_base {
+ enum aws_signing_config_type config_type;
+};
+
+/**
+ * What version of the AWS signing process should we use.
+ */
+enum aws_signing_algorithm {
+ AWS_SIGNING_ALGORITHM_V4,
+ AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC,
+};
+
+/**
+ * What sort of signature should be computed from the signable?
+ */
+enum aws_signature_type {
+ /**
+ * A signature for a full http request should be computed, with header updates applied to the signing result.
+ */
+ AWS_ST_HTTP_REQUEST_HEADERS,
+
+ /**
+ * A signature for a full http request should be computed, with query param updates applied to the signing result.
+ */
+ AWS_ST_HTTP_REQUEST_QUERY_PARAMS,
+
+ /**
+ * Compute a signature for a payload chunk. The signable's input stream should be the chunk data and the
+ * signable should contain the most recent signature value (either the original http request or the most recent
+ * chunk) in the "previous-signature" property.
+ */
+ AWS_ST_HTTP_REQUEST_CHUNK,
+
+ /**
+ * Compute a signature for an event stream event. The signable's input stream should be the encoded event-stream
+ * message (headers + payload), the signable should contain the most recent signature value (either the original
+ * http request or the most recent event) in the "previous-signature" property.
+ *
+ * This option is only supported for Sigv4 for now.
+ */
+ AWS_ST_HTTP_REQUEST_EVENT,
+
+ /**
+     * Compute a signature for an http request via its already-computed canonical request. Only the authorization
+ * signature header is added to the signing result.
+ */
+ AWS_ST_CANONICAL_REQUEST_HEADERS,
+
+ /**
+     * Compute a signature for an http request via its already-computed canonical request. Only the authorization
+ * signature query param is added to the signing result.
+ */
+ AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS,
+
+ /**
+ * Compute a signature for the trailing headers.
+ * the signable should contain the most recent signature value (either the original http request or the most recent
+ * chunk) in the "previous-signature" property.
+ */
+ AWS_ST_HTTP_REQUEST_TRAILING_HEADERS
+};
+
+/**
+ * The SHA-256 of an empty string:
+ * 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_empty_sha256;
+
+/**
+ * 'UNSIGNED-PAYLOAD'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_unsigned_payload;
+
+/**
+ * 'STREAMING-UNSIGNED-PAYLOAD-TRAILER'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_unsigned_payload_trailer;
+
+/**
+ * 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload;
+
+/**
+ * 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload_trailer;
+
+/**
+ * 'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload;
+
+/**
+ * 'STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor
+ g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload_trailer;
+
+/**
+ * 'STREAMING-AWS4-HMAC-SHA256-EVENTS'
+ * For use with `aws_signing_config_aws.signed_body_value`.
+ *
+ * Event signing is only supported for Sigv4 for now.
+ */
+AWS_AUTH_API extern const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_events;
+
+/**
+ * Controls if signing adds a header containing the canonical request's body value
+ */
+enum aws_signed_body_header_type {
+ /**
+ * Do not add a header
+ */
+ AWS_SBHT_NONE,
+
+ /**
+ * Add the "x-amz-content-sha256" header with the canonical request's body value
+ */
+ AWS_SBHT_X_AMZ_CONTENT_SHA256,
+};
+
+/**
+ * A configuration structure for use in AWS-related signing. Currently covers sigv4 only, but is not required to.
+ */
+struct aws_signing_config_aws {
+
+ /**
+ * What kind of config structure is this?
+ */
+ enum aws_signing_config_type config_type;
+
+ /**
+ * What signing algorithm to use.
+ */
+ enum aws_signing_algorithm algorithm;
+
+ /**
+ * What sort of signature should be computed?
+ */
+ enum aws_signature_type signature_type;
+
+ /*
+ * Region-related configuration
+ * (1) If Sigv4, the region to sign against
+ * (2) If Sigv4a, the value of the X-amzn-region-set header (added in signing)
+ */
+ struct aws_byte_cursor region;
+
+ /**
+ * name of service to sign a request for
+ */
+ struct aws_byte_cursor service;
+
+ /**
+ * Raw date to use during the signing process.
+ */
+ struct aws_date_time date;
+
+ /**
+ * Optional function to control which headers are a part of the canonical request.
+ * Skipping auth-required headers will result in an unusable signature. Headers injected by the signing process
+ * are not skippable.
+ *
+ * This function does not override the internal check function (x-amzn-trace-id, user-agent), but rather
+ * supplements it. In particular, a header will get signed if and only if it returns true to both
+ * the internal check (skips x-amzn-trace-id, user-agent) and this function (if defined).
+ */
+ aws_should_sign_header_fn *should_sign_header;
+ void *should_sign_header_ud;
+
+ /*
+     * Put all flags in here at the end. If this grows, stay aware of bit-space overflow and ABI compatibility.
+ */
+ struct {
+ /**
+ * We assume the uri will be encoded once in preparation for transmission. Certain services
+ * do not decode before checking signature, requiring us to actually double-encode the uri in the canonical
+ * request in order to pass a signature check.
+ */
+ uint32_t use_double_uri_encode : 1;
+
+ /**
+ * Controls whether or not the uri paths should be normalized when building the canonical request
+ */
+ uint32_t should_normalize_uri_path : 1;
+
+ /**
+ * Controls whether "X-Amz-Security-Token" is omitted from the canonical request.
+ * "X-Amz-Security-Token" is added during signing, as a header or
+ * query param, when credentials have a session token.
+ * If false (the default), this parameter is included in the canonical request.
+ * If true, this parameter is still added, but omitted from the canonical request.
+ */
+ uint32_t omit_session_token : 1;
+ } flags;
+
+ /**
+ * Optional string to use as the canonical request's body value.
+ * If string is empty, a value will be calculated from the payload during signing.
+ * Typically, this is the SHA-256 of the (request/chunk/event) payload, written as lowercase hex.
+ * If this has been precalculated, it can be set here. Special values used by certain services can also be set
+ * (e.g. "UNSIGNED-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-PAYLOAD" "STREAMING-AWS4-HMAC-SHA256-EVENTS").
+ */
+ struct aws_byte_cursor signed_body_value;
+
+ /**
+ * Controls what body "hash" header, if any, should be added to the canonical request and the signed request:
+ * AWS_SBHT_NONE - no header should be added
+ * AWS_SBHT_X_AMZ_CONTENT_SHA256 - the body "hash" should be added in the X-Amz-Content-Sha256 header
+ */
+ enum aws_signed_body_header_type signed_body_header;
+
+ /*
+ * Signing key control:
+ *
+ * If "credentials" is valid:
+ * use it
+ * Else if "credentials_provider" is valid
+ * query credentials from the provider
+ * If sigv4a is being used
+ * use the ecc-based credentials derived from the query result
+ * Else
+ * use the query result
+ * Else
+ * fail
+ *
+ */
+
+ /*
+ * AWS Credentials to sign with. If Sigv4a is the algorithm and the credentials supplied are not ecc-based,
+ * a temporary ecc-based credentials object will be built and used instead.
+ */
+ const struct aws_credentials *credentials;
+
+ /*
+ * AWS credentials provider to fetch credentials from. If the signing algorithm is asymmetric sigv4, then the
+ * ecc-based credentials will be derived from the fetched credentials.
+ */
+ struct aws_credentials_provider *credentials_provider;
+
+ /**
+ * If non-zero and the signing transform is query param, then signing will add X-Amz-Expires to the query
+ * string, equal to the value specified here. If this value is zero or if header signing is being used then
+ * this parameter has no effect.
+ */
+ uint64_t expiration_in_seconds;
+};
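A sketch of a sigv4, header-based configuration built only from the fields defined above; the region, service and provider values are illustrative:

    struct aws_signing_config_aws config;
    AWS_ZERO_STRUCT(config);

    config.config_type = AWS_SIGNING_CONFIG_AWS;
    config.algorithm = AWS_SIGNING_ALGORITHM_V4;
    config.signature_type = AWS_ST_HTTP_REQUEST_HEADERS;
    config.region = aws_byte_cursor_from_c_str("us-east-1");
    config.service = aws_byte_cursor_from_c_str("s3");
    aws_date_time_init_now(&config.date);
    config.flags.use_double_uri_encode = 0; /* S3-style single encoding */
    config.flags.should_normalize_uri_path = 1;
    config.signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256;
    config.signed_body_value = g_aws_signed_body_value_unsigned_payload;
    config.credentials_provider = provider; /* an existing struct aws_credentials_provider * */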
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Returns a c-string that describes the supplied signing algorithm
+ *
+ * @param algorithm signing algorithm to get a friendly string name for
+ *
+ * @return friendly string name of the supplied algorithm, or "Unknown" if the algorithm is not recognized
+ */
+AWS_AUTH_API
+const char *aws_signing_algorithm_to_string(enum aws_signing_algorithm algorithm);
+
+/**
+ * Checks a signing configuration for invalid settings combinations.
+ *
+ * @param config signing configuration to validate
+ *
+ * @return - AWS_OP_SUCCESS if the configuration is valid, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_validate_aws_signing_config_aws(const struct aws_signing_config_aws *config);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_SIGNING_CONFIG_H */
diff --git a/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h
new file mode 100644
index 0000000000..7e3ba8cf98
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/include/aws/auth/signing_result.h
@@ -0,0 +1,166 @@
+#ifndef AWS_AUTH_SIGNING_RESULT_H
+#define AWS_AUTH_SIGNING_RESULT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/auth.h>
+
+#include <aws/common/hash_table.h>
+
+struct aws_array_list;
+struct aws_byte_cursor;
+struct aws_http_message;
+struct aws_string;
+
+struct aws_signing_result_property {
+ struct aws_string *name;
+ struct aws_string *value;
+};
+
+/**
+ * A structure for tracking all the signer-requested changes to a signable. Interpreting
+ * these changes is signing-algorithm specific.
+ *
+ * A signing result consists of
+ *
+ * (1) Properties - A set of key-value pairs
+ * (2) Property Lists - A set of named key-value pair lists
+ *
+ * The hope is that these two generic structures are enough to model the changes required
+ * by any generic message-signing algorithm.
+ *
+ * Note that the key-value pairs of a signing_result are different types (but same intent) as
+ * the key-value pairs in the signable interface. This is because the signing result stands alone
+ * and owns its own copies of all values, whereas a signable can wrap an existing object and thus
+ * use non-owning references (like byte cursors) if appropriate to its implementation.
+ */
+struct aws_signing_result {
+ struct aws_allocator *allocator;
+ struct aws_hash_table properties;
+ struct aws_hash_table property_lists;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initialize a signing result to its starting state
+ *
+ * @param result signing result to initialize
+ * @param allocator allocator to use for all memory allocation
+ *
+ * @return AWS_OP_SUCCESS if initialization was successful, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signing_result_init(struct aws_signing_result *result, struct aws_allocator *allocator);
+
+/**
+ * Clean up all resources held by the signing result
+ *
+ * @param result signing result to clean up resources for
+ */
+AWS_AUTH_API
+void aws_signing_result_clean_up(struct aws_signing_result *result);
+
+/**
+ * Sets the value of a property on a signing result
+ *
+ * @param result signing result to modify
+ * @param property_name name of the property to set
+ * @param property_value value that the property should assume
+ *
+ * @return AWS_OP_SUCCESS if the set was successful, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signing_result_set_property(
+ struct aws_signing_result *result,
+ const struct aws_string *property_name,
+ const struct aws_byte_cursor *property_value);
+
+/**
+ * Gets the value of a property on a signing result
+ *
+ * @param result signing result to query from
+ * @param property_name name of the property to query the value of
+ * @param out_property_value output parameter for the property value
+ *
+ * @return AWS_OP_SUCCESS if the get was successful, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signing_result_get_property(
+ const struct aws_signing_result *result,
+ const struct aws_string *property_name,
+ struct aws_string **out_property_value);
+
+/**
+ * Adds a key-value pair to a named property list. If the named list does not yet exist, it will be created as
+ * an empty list before the pair is added. No uniqueness checks are made against existing pairs.
+ *
+ * @param result signing result to modify
+ * @param list_name name of the list to add the property key-value pair to
+ * @param property_name key value of the key-value pair to append
+ * @param property_value property value of the key-value pair to append
+ *
+ * @return AWS_OP_SUCCESS if the operation was successful, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_signing_result_append_property_list(
+ struct aws_signing_result *result,
+ const struct aws_string *list_name,
+ const struct aws_byte_cursor *property_name,
+ const struct aws_byte_cursor *property_value);
+
+/**
+ * Gets a named property list on the signing result. If the list does not exist, *out_list will be set to null
+ *
+ * @param result signing result to query
+ * @param list_name name of the list of key-value pairs to get
+ * @param out_list output parameter for the list of key-value pairs
+ *
+ */
+AWS_AUTH_API
+void aws_signing_result_get_property_list(
+ const struct aws_signing_result *result,
+ const struct aws_string *list_name,
+ struct aws_array_list **out_list);
+
+/**
+ * Looks for a property within a named property list on the signing result. If the list does not exist, or the property
+ * does not exist within the list, *out_value will be set to NULL.
+ *
+ * @param result signing result to query
+ * @param list_name name of the list of key-value pairs to search through for the property
+ * @param property_name name of the property to search for within the list
+ * @param out_value output parameter for the property value, if found
+ *
+ */
+AWS_AUTH_API
+void aws_signing_result_get_property_value_in_property_list(
+ const struct aws_signing_result *result,
+ const struct aws_string *list_name,
+ const struct aws_string *property_name,
+ struct aws_string **out_value);
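For illustration, pulling the computed Authorization header out of a signing result using the accessor above together with the name constants declared in this header and in signable.h:

    struct aws_string *authorization_value = NULL;
    aws_signing_result_get_property_value_in_property_list(
        result,
        g_aws_http_headers_property_list_name,
        g_aws_signing_authorization_header_name,
        &authorization_value);

    if (authorization_value != NULL) {
        /* the result still owns the string; copy it if it must outlive the result */
    }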
+
+/**
+ * Specific implementation that applies a signing result to a mutable http request
+ *
+ * @param request http request to apply the signing result to
+ * @param allocator memory allocator to use for all memory allocation
+ * @param result signing result to apply to the request
+ *
+ * @return AWS_OP_SUCCESS if the application operation was successful, AWS_OP_ERR otherwise
+ */
+AWS_AUTH_API
+int aws_apply_signing_result_to_http_request(
+ struct aws_http_message *request,
+ struct aws_allocator *allocator,
+ const struct aws_signing_result *result);
+
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_authorization_header_name;
+AWS_AUTH_API extern const struct aws_string *g_aws_signing_authorization_query_param_name;
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_AUTH_SIGNING_RESULT_H */
diff --git a/contrib/restricted/aws/aws-c-auth/source/auth.c b/contrib/restricted/aws/aws-c-auth/source/auth.c
new file mode 100644
index 0000000000..b3fb4d3f5c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/auth.c
@@ -0,0 +1,160 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/auth/auth.h>
+
+#include <aws/auth/private/aws_signing.h>
+
+#include <aws/cal/cal.h>
+
+#include <aws/http/http.h>
+
+#include <aws/sdkutils/sdkutils.h>
+
+#include <aws/common/error.h>
+#include <aws/common/json.h>
+
+#define AWS_DEFINE_ERROR_INFO_AUTH(CODE, STR) AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-auth")
+
+/* clang-format off */
+static struct aws_error_info s_errors[] = {
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM,
+ "Attempt to sign an http request with an unsupported version of the AWS signing protocol"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION,
+ "Attempt to sign an http request with a signing configuration unrecognized by the invoked signer"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_NO_CREDENTIALS,
+ "Attempt to sign an http request without credentials"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM,
+ "Attempt to sign an http request that includes a query param that signing may add"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER,
+ "Attempt to sign an http request that includes a header that signing may add"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_INVALID_CONFIGURATION,
+ "Attempt to sign an http request with an invalid signing configuration"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT,
+ "Valid credentials could not be sourced from process environment"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_DELEGATE,
+ "Valid credentials could not be sourced from the provided vtable"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_PROFILE_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by a profile provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the IMDS provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_STS_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the STS provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE,
+ "Unsuccessful status code returned from credentials-fetching http request"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE,
+ "Invalid response document encountered while querying credentials via http"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_ECS_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the ECS provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_X509_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the X509 provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_PROCESS_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the process provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the sts web identity provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE,
+ "Attempt to sign using an unusupported signature type"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE,
+ "Attempt to sign a streaming item without supplying a previous signature"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGNING_INVALID_CREDENTIALS,
+ "Attempt to perform a signing operation with invalid credentials"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CANONICAL_REQUEST_MISMATCH,
+ "Expected canonical request did not match the computed canonical request"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_SIGV4A_SIGNATURE_VALIDATION_FAILURE,
+ "The supplied sigv4a signature was not a valid signature for the hashed string to sign"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_COGNITO_SOURCE_FAILURE,
+ "Valid credentials could not be sourced by the cognito provider"),
+ AWS_DEFINE_ERROR_INFO_AUTH(
+ AWS_AUTH_CREDENTIALS_PROVIDER_DELEGATE_FAILURE,
+ "Valid credentials could not be sourced by the delegate provider"),
+
+};
+/* clang-format on */
+
+static struct aws_error_info_list s_error_list = {
+ .error_list = s_errors,
+ .count = sizeof(s_errors) / sizeof(struct aws_error_info),
+};
+
+static struct aws_log_subject_info s_auth_log_subject_infos[] = {
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_AUTH_GENERAL,
+ "AuthGeneral",
+ "Subject for aws-c-auth logging that defies categorization."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_AUTH_PROFILE, "AuthProfile", "Subject for config profile related logging."),
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "AuthCredentialsProvider",
+ "Subject for credentials provider related logging."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_AUTH_SIGNING, "AuthSigning", "Subject for AWS request signing logging."),
+};
+
+static struct aws_log_subject_info_list s_auth_log_subject_list = {
+ .subject_list = s_auth_log_subject_infos,
+ .count = AWS_ARRAY_SIZE(s_auth_log_subject_infos),
+};
+
+static bool s_library_initialized = false;
+static struct aws_allocator *s_library_allocator = NULL;
+
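+/*
+ * A minimal lifecycle sketch (illustrative only): initialize the library once before any other
+ * aws-c-auth call and clean it up once at shutdown.
+ *
+ *   aws_auth_library_init(aws_default_allocator());
+ *   // ... create signers, credentials providers, IMDS clients, etc. ...
+ *   aws_auth_library_clean_up();
+ */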
+void aws_auth_library_init(struct aws_allocator *allocator) {
+ if (s_library_initialized) {
+ return;
+ }
+
+ if (allocator) {
+ s_library_allocator = allocator;
+ } else {
+ s_library_allocator = aws_default_allocator();
+ }
+
+ aws_sdkutils_library_init(s_library_allocator);
+ aws_cal_library_init(s_library_allocator);
+ aws_http_library_init(s_library_allocator);
+
+ aws_register_error_info(&s_error_list);
+ aws_register_log_subject_info_list(&s_auth_log_subject_list);
+
+ AWS_FATAL_ASSERT(aws_signing_init_signing_tables(allocator) == AWS_OP_SUCCESS);
+ s_library_initialized = true;
+}
+
+void aws_auth_library_clean_up(void) {
+ if (!s_library_initialized) {
+ return;
+ }
+
+ s_library_initialized = false;
+
+ aws_signing_clean_up_signing_tables();
+ aws_unregister_log_subject_info_list(&s_auth_log_subject_list);
+ aws_unregister_error_info(&s_error_list);
+ aws_http_library_clean_up();
+ aws_cal_library_clean_up();
+ aws_sdkutils_library_clean_up();
+ s_library_allocator = NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c b/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
new file mode 100644
index 0000000000..d5e7352dd0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/aws_imds_client.c
@@ -0,0 +1,1753 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/aws_imds_client.h>
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/connection.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/socket.h>
+#include <ctype.h>
+
+#include <aws/common/json.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+# pragma warning(disable : 4232)
+#endif /* _MSC_VER */
+
+/* instance role credentials body response is currently ~ 1300 characters + name length */
+#define IMDS_RESPONSE_SIZE_INITIAL 2048
+#define IMDS_RESPONSE_TOKEN_SIZE_INITIAL 64
+#define IMDS_RESPONSE_SIZE_LIMIT 65535
+#define IMDS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2
+#define IMDS_DEFAULT_RETRIES 1
+
+enum imds_token_state {
+ AWS_IMDS_TS_INVALID,
+ AWS_IMDS_TS_VALID,
+ AWS_IMDS_TS_UPDATE_IN_PROGRESS,
+};
+
+enum imds_token_copy_result {
+ /* Token is valid and copied to requester */
+ AWS_IMDS_TCR_SUCCESS,
+ /* Token is updating, so the requester is added to the waiting queue */
+ AWS_IMDS_TCR_WAITING_IN_QUEUE,
+ /* Unexpected error, like a memory allocation failure */
+ AWS_IMDS_TCR_UNEXPECTED_ERROR,
+};
+
+struct imds_token_query {
+ struct aws_linked_list_node node;
+ void *user_data;
+};
+
+struct aws_imds_client {
+ struct aws_allocator *allocator;
+ struct aws_http_connection_manager *connection_manager;
+ struct aws_retry_strategy *retry_strategy;
+ const struct aws_auth_http_system_vtable *function_table;
+ struct aws_imds_client_shutdown_options shutdown_options;
+ /* set to true by default, which means IMDS V2 is used */
+ bool token_required;
+ struct aws_byte_buf cached_token;
+ enum imds_token_state token_state;
+ struct aws_linked_list pending_queries;
+ struct aws_mutex token_lock;
+ struct aws_condition_variable token_signal;
+
+ struct aws_atomic_var ref_count;
+};
+
+static void s_aws_imds_client_destroy(struct aws_imds_client *client) {
+ if (!client) {
+ return;
+ }
+ /**
+ * s_aws_imds_client_destroy is only called after all in-flight requests are finished,
+ * thus nothing is going to try and access retry_strategy again at this point.
+ */
+ aws_retry_strategy_release(client->retry_strategy);
+ aws_condition_variable_clean_up(&client->token_signal);
+ aws_mutex_clean_up(&client->token_lock);
+ aws_byte_buf_clean_up(&client->cached_token);
+ client->function_table->aws_http_connection_manager_release(client->connection_manager);
+ /* freeing the client takes place in the shutdown callback below */
+}
+
+static void s_on_connection_manager_shutdown(void *user_data) {
+ struct aws_imds_client *client = user_data;
+
+ if (client && client->shutdown_options.shutdown_callback) {
+ client->shutdown_options.shutdown_callback(client->shutdown_options.shutdown_user_data);
+ }
+
+ aws_mem_release(client->allocator, client);
+}
+
+void aws_imds_client_release(struct aws_imds_client *client) {
+ if (!client) {
+ return;
+ }
+
+ size_t old_value = aws_atomic_fetch_sub(&client->ref_count, 1);
+ if (old_value == 1) {
+ s_aws_imds_client_destroy(client);
+ }
+}
+
+void aws_imds_client_acquire(struct aws_imds_client *client) {
+ aws_atomic_fetch_add(&client->ref_count, 1);
+}
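+
+/*
+ * Reference-counting note: code that hands the client to asynchronous work is expected to hold
+ * an extra reference for the duration of that work, as s_user_data_new()/s_user_data_destroy()
+ * do further below.
+ */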
+
+struct aws_imds_client *aws_imds_client_new(
+ struct aws_allocator *allocator,
+ const struct aws_imds_client_options *options) {
+
+ if (!options->bootstrap) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Client bootstrap is required for querying IMDS");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_imds_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_imds_client));
+ if (!client) {
+ return NULL;
+ }
+
+ if (aws_mutex_init(&client->token_lock)) {
+ goto on_error;
+ }
+
+ if (aws_condition_variable_init(&client->token_signal)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init(&client->cached_token, allocator, IMDS_RESPONSE_TOKEN_SIZE_INITIAL)) {
+ goto on_error;
+ }
+
+ aws_linked_list_init(&client->pending_queries);
+
+ aws_atomic_store_int(&client->ref_count, 1);
+ client->allocator = allocator;
+ client->function_table =
+ options->function_table ? options->function_table : g_aws_credentials_provider_http_function_table;
+ client->token_required = options->imds_version == IMDS_PROTOCOL_V1 ? false : true;
+ client->shutdown_options = options->shutdown_options;
+
+ struct aws_socket_options socket_options;
+ AWS_ZERO_STRUCT(socket_options);
+ socket_options.type = AWS_SOCKET_STREAM;
+ socket_options.domain = AWS_SOCKET_IPV4;
+ socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert(
+ IMDS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+
+ struct aws_http_connection_manager_options manager_options;
+ AWS_ZERO_STRUCT(manager_options);
+ manager_options.bootstrap = options->bootstrap;
+ manager_options.initial_window_size = IMDS_RESPONSE_SIZE_LIMIT;
+ manager_options.socket_options = &socket_options;
+ manager_options.tls_connection_options = NULL;
+ manager_options.host = aws_byte_cursor_from_c_str("169.254.169.254");
+ manager_options.port = 80;
+ manager_options.max_connections = 10;
+ manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown;
+ manager_options.shutdown_complete_user_data = client;
+
+ struct aws_http_connection_monitoring_options monitor_options;
+ AWS_ZERO_STRUCT(monitor_options);
+ monitor_options.allowable_throughput_failure_interval_seconds = 1;
+ monitor_options.minimum_throughput_bytes_per_second = 1;
+ manager_options.monitoring_options = &monitor_options;
+
+ client->connection_manager = client->function_table->aws_http_connection_manager_new(allocator, &manager_options);
+ if (!client->connection_manager) {
+ goto on_error;
+ }
+
+ if (options->retry_strategy) {
+ client->retry_strategy = options->retry_strategy;
+ aws_retry_strategy_acquire(client->retry_strategy);
+ } else {
+ struct aws_exponential_backoff_retry_options retry_options = {
+ .el_group = options->bootstrap->event_loop_group,
+ .max_retries = IMDS_DEFAULT_RETRIES,
+ };
+ /* exponential backoff is plenty here. We're hitting a local endpoint and do not run the risk of bringing
+ * down more than the local VM. */
+ client->retry_strategy = aws_retry_strategy_new_exponential_backoff(allocator, &retry_options);
+ }
+ if (!client->retry_strategy) {
+ goto on_error;
+ }
+
+ return client;
+
+on_error:
+ s_aws_imds_client_destroy(client);
+ return NULL;
+}
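+
+/*
+ * A minimal construction sketch (illustrative only; `bootstrap` stands in for an already-created
+ * client bootstrap, which is the only required option here):
+ *
+ *   struct aws_imds_client_options options;
+ *   AWS_ZERO_STRUCT(options);
+ *   options.bootstrap = bootstrap;
+ *
+ *   struct aws_imds_client *client = aws_imds_client_new(allocator, &options);
+ *   // ... issue queries ...
+ *   aws_imds_client_release(client);
+ */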
+
+/*
+ * Tracking structure for each outstanding async query to an imds client
+ */
+struct imds_user_data {
+ /* immutable post-creation */
+ struct aws_allocator *allocator;
+ struct aws_imds_client *client;
+ aws_imds_client_on_get_resource_callback_fn *original_callback;
+ void *original_user_data;
+
+ /* mutable */
+ struct aws_http_connection *connection;
+ struct aws_http_message *request;
+ struct aws_byte_buf current_result;
+ struct aws_byte_buf imds_token;
+ struct aws_string *resource_path;
+ struct aws_retry_token *retry_token;
+ /*
+ * initial value is copy of client->token_required,
+ * will be adapted according to response.
+ */
+ bool imds_token_required;
+ bool is_imds_token_request;
+ int status_code;
+ int error_code;
+
+ struct aws_atomic_var ref_count;
+};
+
+static void s_user_data_destroy(struct imds_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+ struct aws_imds_client *client = user_data->client;
+
+ if (user_data->connection) {
+ client->function_table->aws_http_connection_manager_release_connection(
+ client->connection_manager, user_data->connection);
+ }
+
+ aws_byte_buf_clean_up(&user_data->current_result);
+ aws_byte_buf_clean_up(&user_data->imds_token);
+ aws_string_destroy(user_data->resource_path);
+
+ if (user_data->request) {
+ aws_http_message_destroy(user_data->request);
+ }
+ aws_retry_token_release(user_data->retry_token);
+ aws_imds_client_release(client);
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct imds_user_data *s_user_data_new(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor resource_path,
+ aws_imds_client_on_get_resource_callback_fn *callback,
+ void *user_data) {
+
+ struct imds_user_data *wrapped_user_data = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_user_data));
+ if (!wrapped_user_data) {
+ goto on_error;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->client = client;
+ aws_imds_client_acquire(client);
+ wrapped_user_data->original_user_data = user_data;
+ wrapped_user_data->original_callback = callback;
+
+ if (aws_byte_buf_init(&wrapped_user_data->current_result, client->allocator, IMDS_RESPONSE_SIZE_INITIAL)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init(&wrapped_user_data->imds_token, client->allocator, IMDS_RESPONSE_TOKEN_SIZE_INITIAL)) {
+ goto on_error;
+ }
+
+ wrapped_user_data->resource_path =
+ aws_string_new_from_array(client->allocator, resource_path.ptr, resource_path.len);
+ if (!wrapped_user_data->resource_path) {
+ goto on_error;
+ }
+
+ wrapped_user_data->imds_token_required = client->token_required;
+ aws_atomic_store_int(&wrapped_user_data->ref_count, 1);
+ return wrapped_user_data;
+
+on_error:
+ s_user_data_destroy(wrapped_user_data);
+ return NULL;
+}
+
+static void s_user_data_acquire(struct imds_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+ aws_atomic_fetch_add(&user_data->ref_count, 1);
+}
+
+static void s_user_data_release(struct imds_user_data *user_data) {
+ if (!user_data) {
+ return;
+ }
+ size_t old_value = aws_atomic_fetch_sub(&user_data->ref_count, 1);
+ if (old_value == 1) {
+ s_user_data_destroy(user_data);
+ }
+}
+
+static void s_reset_scratch_user_data(struct imds_user_data *user_data) {
+ user_data->current_result.len = 0;
+ user_data->status_code = 0;
+
+ if (user_data->request) {
+ aws_http_message_destroy(user_data->request);
+ user_data->request = NULL;
+ }
+}
+
+static enum imds_token_copy_result s_copy_token_safely(struct imds_user_data *user_data);
+static void s_invalidate_cached_token_safely(struct imds_user_data *user_data);
+static bool s_update_token_safely(struct aws_imds_client *client, struct aws_byte_buf *token, bool token_required);
+static void s_query_complete(struct imds_user_data *user_data);
+static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data);
+static void s_on_retry_token_acquired(struct aws_retry_strategy *, int, struct aws_retry_token *, void *);
+
+static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+ (void)stream;
+ (void)data;
+
+ struct imds_user_data *imds_user_data = user_data;
+ struct aws_imds_client *client = imds_user_data->client;
+
+ if (data->len + imds_user_data->current_result.len > IMDS_RESPONSE_SIZE_LIMIT) {
+ client->function_table->aws_http_connection_close(imds_user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client query response exceeded maximum allowed length", (void *)client);
+
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_dynamic(&imds_user_data->current_result, data)) {
+ client->function_table->aws_http_connection_close(imds_user_data->connection);
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client query error appending response", (void *)client);
+
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_incoming_headers_fn(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data) {
+
+ (void)header_array;
+ (void)num_headers;
+
+ if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct imds_user_data *imds_user_data = user_data;
+ struct aws_imds_client *client = imds_user_data->client;
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ if (imds_user_data->status_code == 0) {
+ if (client->function_table->aws_http_stream_get_incoming_response_status(
+ stream, &imds_user_data->status_code)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to get http status code", (void *)client);
+ return AWS_OP_ERR;
+ }
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client query received http status code %d for requester %p.",
+ (void *)client,
+ imds_user_data->status_code,
+ user_data);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_accept_header, "Accept");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_accept_header_value, "*/*");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_user_agent_header, "User-Agent");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_user_agent_header_value, "aws-sdk-crt/aws-imds-client");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_h1_0_keep_alive_header, "Connection");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_h1_0_keep_alive_header_value, "keep-alive");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_resource_path, "/latest/api/token");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_ttl_header, "x-aws-ec2-metadata-token-ttl-seconds");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_header, "x-aws-ec2-metadata-token");
+AWS_STATIC_STRING_FROM_LITERAL(s_imds_token_ttl_default_value, "21600");
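+
+/*
+ * Together these constants implement the IMDS v2 token exchange: the client first sends
+ * PUT /latest/api/token with the x-aws-ec2-metadata-token-ttl-seconds header (21600 seconds
+ * by default), then passes the returned token on resource requests via the
+ * x-aws-ec2-metadata-token header. If the token request fails in a way that suggests tokens
+ * are not required, the client falls back to token-less (v1) requests; see
+ * s_client_on_token_response below.
+ */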
+
+static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data);
+
+static int s_make_imds_http_query(
+ struct imds_user_data *user_data,
+ const struct aws_byte_cursor *verb,
+ const struct aws_byte_cursor *uri,
+ const struct aws_http_header *headers,
+ size_t header_count) {
+
+ AWS_FATAL_ASSERT(user_data->connection);
+ struct aws_imds_client *client = user_data->client;
+ struct aws_http_stream *stream = NULL;
+ struct aws_http_message *request = aws_http_message_new_request(user_data->allocator);
+
+ if (request == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (headers && aws_http_message_add_header_array(request, headers, header_count)) {
+ goto on_error;
+ }
+
+ struct aws_http_header accept_header = {
+ .name = aws_byte_cursor_from_string(s_imds_accept_header),
+ .value = aws_byte_cursor_from_string(s_imds_accept_header_value),
+ };
+ if (aws_http_message_add_header(request, accept_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header user_agent_header = {
+ .name = aws_byte_cursor_from_string(s_imds_user_agent_header),
+ .value = aws_byte_cursor_from_string(s_imds_user_agent_header_value),
+ };
+ if (aws_http_message_add_header(request, user_agent_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header keep_alive_header = {
+ .name = aws_byte_cursor_from_string(s_imds_h1_0_keep_alive_header),
+ .value = aws_byte_cursor_from_string(s_imds_h1_0_keep_alive_header_value),
+ };
+ if (aws_http_message_add_header(request, keep_alive_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_method(request, *verb)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_path(request, *uri)) {
+ goto on_error;
+ }
+
+ user_data->request = request;
+
+ struct aws_http_make_request_options request_options = {
+ .self_size = sizeof(request_options),
+ .on_response_headers = s_on_incoming_headers_fn,
+ .on_response_header_block_done = NULL,
+ .on_response_body = s_on_incoming_body_fn,
+ .on_complete = s_on_stream_complete_fn,
+ .user_data = user_data,
+ .request = request,
+ };
+
+ /* For tests with a mocked http stack, where make request finishes
+ immediately and releases the client before the stream activate call. */
+ s_user_data_acquire(user_data);
+ stream = client->function_table->aws_http_connection_make_request(user_data->connection, &request_options);
+ if (!stream || client->function_table->aws_http_stream_activate(stream)) {
+ goto on_error;
+ }
+ s_user_data_release(user_data);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ user_data->client->function_table->aws_http_stream_release(stream);
+ aws_http_message_destroy(request);
+ user_data->request = NULL;
+ s_user_data_release(user_data);
+ return AWS_OP_ERR;
+}
+
+/*
+ * Process the http response from the token put request.
+ */
+static void s_client_on_token_response(struct imds_user_data *user_data) {
+ /* A 400 response means a token is required but this token request itself failed. */
+ if (user_data->status_code == AWS_HTTP_STATUS_CODE_400_BAD_REQUEST) {
+ s_update_token_safely(user_data->client, NULL, true);
+ return;
+ }
+ /*
+ * For any other error, the token is treated as not required and we
+ * fall back to the insecure (IMDS v1) request flow. Otherwise, the
+ * token is used in subsequent requests.
+ */
+ if (user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || user_data->current_result.len == 0) {
+ s_update_token_safely(user_data->client, NULL, false);
+ } else {
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&(user_data->current_result));
+ aws_byte_cursor_trim_pred(&cursor, aws_char_is_space);
+ aws_byte_buf_reset(&user_data->imds_token, true /*zero contents*/);
+ if (aws_byte_buf_append_and_update(&user_data->imds_token, &cursor)) {
+ s_update_token_safely(user_data->client, NULL, true);
+ return;
+ }
+ s_update_token_safely(user_data->client, cursor.len == 0 ? NULL : &user_data->imds_token, cursor.len != 0);
+ }
+}
+
+static int s_client_start_query_token(struct aws_imds_client *client) {
+ struct imds_user_data *user_data = s_user_data_new(client, aws_byte_cursor_from_c_str(""), NULL, (void *)client);
+ if (!user_data) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client failed to query token with error: %s.",
+ (void *)client,
+ aws_error_str(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ user_data->is_imds_token_request = true;
+ if (aws_retry_strategy_acquire_retry_token(
+ client->retry_strategy, NULL, s_on_retry_token_acquired, user_data, 100)) {
+ s_user_data_release(user_data);
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Make an http request to put a ttl and hopefully get a token back. */
+static void s_client_do_query_token(struct imds_user_data *user_data) {
+ /* start query token for imds client */
+ struct aws_byte_cursor uri = aws_byte_cursor_from_string(s_imds_token_resource_path);
+
+ struct aws_http_header token_ttl_header = {
+ .name = aws_byte_cursor_from_string(s_imds_token_ttl_header),
+ .value = aws_byte_cursor_from_string(s_imds_token_ttl_default_value),
+ };
+
+ struct aws_http_header headers[] = {
+ token_ttl_header,
+ };
+
+ struct aws_byte_cursor verb = aws_byte_cursor_from_c_str("PUT");
+
+ if (s_make_imds_http_query(user_data, &verb, &uri, headers, AWS_ARRAY_SIZE(headers))) {
+ user_data->error_code = aws_last_error();
+ if (user_data->error_code == AWS_ERROR_SUCCESS) {
+ user_data->error_code = AWS_ERROR_UNKNOWN;
+ }
+ s_query_complete(user_data);
+ }
+}
+
+/*
+ * Make the http request to fetch the resource
+ */
+static void s_do_query_resource(struct imds_user_data *user_data) {
+
+ struct aws_http_header token_header = {
+ .name = aws_byte_cursor_from_string(s_imds_token_header),
+ .value = aws_byte_cursor_from_buf(&user_data->imds_token),
+ };
+
+ struct aws_http_header headers[] = {
+ token_header,
+ };
+
+ size_t headers_count = 0;
+ struct aws_http_header *headers_array_ptr = NULL;
+
+ if (user_data->imds_token_required) {
+ headers_count = 1;
+ headers_array_ptr = headers;
+ }
+
+ struct aws_byte_cursor verb = aws_byte_cursor_from_c_str("GET");
+
+ struct aws_byte_cursor path_cursor = aws_byte_cursor_from_string(user_data->resource_path);
+ if (s_make_imds_http_query(user_data, &verb, &path_cursor, headers_array_ptr, headers_count)) {
+ user_data->error_code = aws_last_error();
+ if (user_data->error_code == AWS_ERROR_SUCCESS) {
+ user_data->error_code = AWS_ERROR_UNKNOWN;
+ }
+ s_query_complete(user_data);
+ }
+}
+
+int s_get_resource_async_with_imds_token(struct imds_user_data *user_data);
+
+static void s_query_complete(struct imds_user_data *user_data) {
+ if (user_data->is_imds_token_request) {
+ s_client_on_token_response(user_data);
+ s_user_data_release(user_data);
+ return;
+ }
+
+ /* In this case we fallback to the secure imds flow. */
+ if (user_data->status_code == AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED) {
+ s_invalidate_cached_token_safely(user_data);
+ s_reset_scratch_user_data(user_data);
+ aws_retry_token_release(user_data->retry_token);
+ if (s_get_resource_async_with_imds_token(user_data)) {
+ s_user_data_release(user_data);
+ }
+ return;
+ }
+
+ user_data->original_callback(
+ user_data->error_code ? NULL : &user_data->current_result,
+ user_data->error_code,
+ user_data->original_user_data);
+
+ s_user_data_release(user_data);
+}
+
+static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) {
+ struct imds_user_data *imds_user_data = user_data;
+ imds_user_data->connection = connection;
+
+ if (!connection) {
+ AWS_LOGF_WARN(
+ AWS_LS_IMDS_CLIENT,
+ "id=%p: IMDS Client failed to acquire a connection, error code %d(%s)",
+ (void *)imds_user_data->client,
+ error_code,
+ aws_error_str(error_code));
+ imds_user_data->error_code = error_code;
+ s_query_complete(imds_user_data);
+ return;
+ }
+
+ if (imds_user_data->is_imds_token_request) {
+ s_client_do_query_token(imds_user_data);
+ } else {
+ s_do_query_resource(imds_user_data);
+ }
+}
+
+static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
+ (void)token;
+
+ struct imds_user_data *imds_user_data = user_data;
+ struct aws_imds_client *client = imds_user_data->client;
+
+ if (!error_code) {
+ client->function_table->aws_http_connection_manager_acquire_connection(
+ client->connection_manager, s_on_acquire_connection, user_data);
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_IMDS_CLIENT,
+ "id=%p: IMDS Client failed to retry the request with error code %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_str(error_code));
+ imds_user_data->error_code = error_code;
+ s_query_complete(imds_user_data);
+ }
+}
+
+static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) {
+ struct imds_user_data *imds_user_data = user_data;
+ struct aws_imds_client *client = imds_user_data->client;
+
+ aws_http_message_destroy(imds_user_data->request);
+ imds_user_data->request = NULL;
+ imds_user_data->connection = NULL;
+
+ struct aws_http_connection *connection = client->function_table->aws_http_stream_get_connection(stream);
+ client->function_table->aws_http_stream_release(stream);
+ client->function_table->aws_http_connection_manager_release_connection(client->connection_manager, connection);
+
+ /* on encountering error, see if we could try again */
+ if (error_code) {
+ AWS_LOGF_WARN(
+ AWS_LS_IMDS_CLIENT,
+ "id=%p: Stream completed with error code %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_str(error_code));
+
+ if (!aws_retry_strategy_schedule_retry(
+ imds_user_data->retry_token, AWS_RETRY_ERROR_TYPE_TRANSIENT, s_on_retry_ready, user_data)) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT,
+ "id=%p: Stream completed, retrying the last request on a new connection.",
+ (void *)client);
+ return;
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "id=%p: Stream completed, retries have been exhausted.", (void *)client);
+ imds_user_data->error_code = error_code;
+ }
+ } else if (aws_retry_token_record_success(imds_user_data->retry_token)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT,
+ "id=%p: Error while recording successful retry: %s",
+ (void *)client,
+ aws_error_str(aws_last_error()));
+ }
+
+ s_query_complete(imds_user_data);
+}
+
+static void s_on_retry_token_acquired(
+ struct aws_retry_strategy *strategy,
+ int error_code,
+ struct aws_retry_token *token,
+ void *user_data) {
+ (void)strategy;
+
+ struct imds_user_data *imds_user_data = user_data;
+ struct aws_imds_client *client = imds_user_data->client;
+
+ if (!error_code) {
+ AWS_LOGF_DEBUG(AWS_LS_IMDS_CLIENT, "id=%p: IMDS Client successfully acquired retry token.", (void *)client);
+ imds_user_data->retry_token = token;
+ client->function_table->aws_http_connection_manager_acquire_connection(
+ client->connection_manager, s_on_acquire_connection, imds_user_data);
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_IMDS_CLIENT,
+ "id=%p: IMDS Client failed to acquire retry token, error code %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_str(error_code));
+ imds_user_data->error_code = error_code;
+ s_query_complete(imds_user_data);
+ }
+}
+
+static void s_complete_pending_queries(
+ struct aws_imds_client *client,
+ struct aws_linked_list *queries,
+ bool token_required,
+ struct aws_byte_buf *token) {
+
+ /* poll swapped-out pending queries, if there are any */
+ while (!aws_linked_list_empty(queries)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_back(queries);
+ struct imds_token_query *query = AWS_CONTAINER_OF(node, struct imds_token_query, node);
+ struct imds_user_data *requester = query->user_data;
+ aws_mem_release(client->allocator, query);
+
+ requester->imds_token_required = token_required;
+ bool should_continue = true;
+ if (token) {
+ aws_byte_buf_reset(&requester->imds_token, true);
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(token);
+ if (aws_byte_buf_append_dynamic(&requester->imds_token, &cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client failed to copy IMDS token for requester %p.",
+ (void *)client,
+ (void *)requester);
+ should_continue = false;
+ }
+ } else if (token_required) {
+ should_continue = false;
+ }
+
+ if (should_continue && aws_retry_strategy_acquire_retry_token(
+ client->retry_strategy, NULL, s_on_retry_token_acquired, requester, 100)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client failed to allocate retry token for requester %p to send resource request.",
+ (void *)client,
+ (void *)requester);
+ should_continue = false;
+ }
+
+ if (!should_continue) {
+ requester->error_code = aws_last_error();
+ if (requester->error_code == AWS_ERROR_SUCCESS) {
+ requester->error_code = AWS_ERROR_UNKNOWN;
+ }
+ s_query_complete(requester);
+ }
+ }
+}
+
+static enum imds_token_copy_result s_copy_token_safely(struct imds_user_data *user_data) {
+ struct aws_imds_client *client = user_data->client;
+ enum imds_token_copy_result ret = AWS_IMDS_TCR_UNEXPECTED_ERROR;
+
+ struct aws_linked_list pending_queries;
+ aws_linked_list_init(&pending_queries);
+ aws_mutex_lock(&client->token_lock);
+
+ if (client->token_state == AWS_IMDS_TS_VALID) {
+ aws_byte_buf_reset(&user_data->imds_token, true);
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&client->cached_token);
+ if (aws_byte_buf_append_dynamic(&user_data->imds_token, &cursor)) {
+ ret = AWS_IMDS_TCR_UNEXPECTED_ERROR;
+ } else {
+ ret = AWS_IMDS_TCR_SUCCESS;
+ }
+ } else {
+ ret = AWS_IMDS_TCR_WAITING_IN_QUEUE;
+ struct imds_token_query *query = aws_mem_calloc(client->allocator, 1, sizeof(struct imds_token_query));
+ if (query != NULL) {
+ query->user_data = user_data;
+ aws_linked_list_push_back(&client->pending_queries, &query->node);
+ } else {
+ ret = AWS_IMDS_TCR_UNEXPECTED_ERROR;
+ }
+
+ if (client->token_state == AWS_IMDS_TS_INVALID) {
+ if (s_client_start_query_token(client)) {
+ ret = AWS_IMDS_TCR_UNEXPECTED_ERROR;
+ aws_linked_list_swap_contents(&pending_queries, &client->pending_queries);
+ } else {
+ client->token_state = AWS_IMDS_TS_UPDATE_IN_PROGRESS;
+ }
+ }
+ }
+ aws_mutex_unlock(&client->token_lock);
+
+ s_complete_pending_queries(client, &pending_queries, true, NULL);
+
+ switch (ret) {
+ case AWS_IMDS_TCR_SUCCESS:
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client copied token to requester %p successfully.",
+ (void *)client,
+ (void *)user_data);
+ break;
+
+ case AWS_IMDS_TCR_WAITING_IN_QUEUE:
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client's token is invalid and is now updating.", (void *)client);
+ break;
+
+ case AWS_IMDS_TCR_UNEXPECTED_ERROR:
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client encountered unexpected error when processing token query for requester %p, error: "
+ "%s.",
+ (void *)client,
+ (void *)user_data,
+ aws_error_str(aws_last_error()));
+ break;
+ }
+ return ret;
+}
+
+static void s_invalidate_cached_token_safely(struct imds_user_data *user_data) {
+ bool invalidated = false;
+ struct aws_imds_client *client = user_data->client;
+ aws_mutex_lock(&client->token_lock);
+ if (aws_byte_buf_eq(&user_data->imds_token, &client->cached_token)) {
+ client->token_state = AWS_IMDS_TS_INVALID;
+ invalidated = true;
+ }
+ aws_mutex_unlock(&client->token_lock);
+ if (invalidated) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT,
+ "(id=%p) IMDS client's cached token is set to be invalid by requester %p.",
+ (void *)client,
+ (void *)user_data);
+ }
+}
+
+/**
+ * Once a requester returns from the token request, it should call this function to unblock all other
+ * waiting requesters. A NULL token parameter means the token request failed, and a new requester
+ * will need to acquire the token again.
+ */
+static bool s_update_token_safely(struct aws_imds_client *client, struct aws_byte_buf *token, bool token_required) {
+ AWS_FATAL_ASSERT(client);
+ bool updated = false;
+
+ struct aws_linked_list pending_queries;
+ aws_linked_list_init(&pending_queries);
+
+ aws_mutex_lock(&client->token_lock);
+ client->token_required = token_required;
+ if (token) {
+ aws_byte_buf_reset(&client->cached_token, true);
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(token);
+ if (aws_byte_buf_append_dynamic(&client->cached_token, &cursor) == AWS_OP_SUCCESS) {
+ client->token_state = AWS_IMDS_TS_VALID;
+ updated = true;
+ }
+ } else {
+ client->token_state = AWS_IMDS_TS_INVALID;
+ }
+ aws_linked_list_swap_contents(&pending_queries, &client->pending_queries);
+ aws_mutex_unlock(&client->token_lock);
+
+ s_complete_pending_queries(client, &pending_queries, token_required, token);
+
+ if (updated) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client updated the cached token successfully.", (void *)client);
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "(id=%p) IMDS client failed to update the token from IMDS.", (void *)client);
+ }
+ return updated;
+}
+
+int s_get_resource_async_with_imds_token(struct imds_user_data *user_data) {
+ enum imds_token_copy_result res = s_copy_token_safely(user_data);
+ if (res == AWS_IMDS_TCR_UNEXPECTED_ERROR) {
+ return AWS_OP_ERR;
+ }
+
+ if (res == AWS_IMDS_TCR_WAITING_IN_QUEUE) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (aws_retry_strategy_acquire_retry_token(
+ user_data->client->retry_strategy, NULL, s_on_retry_token_acquired, user_data, 100)) {
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+int aws_imds_client_get_resource_async(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor resource_path,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+
+ struct imds_user_data *wrapped_user_data = s_user_data_new(client, resource_path, callback, user_data);
+ if (wrapped_user_data == NULL) {
+ goto error;
+ }
+
+ if (!wrapped_user_data->imds_token_required) {
+ if (aws_retry_strategy_acquire_retry_token(
+ client->retry_strategy, NULL, s_on_retry_token_acquired, wrapped_user_data, 100)) {
+ goto error;
+ }
+ } else if (s_get_resource_async_with_imds_token(wrapped_user_data)) {
+ goto error;
+ }
+ return AWS_OP_SUCCESS;
+
+error:
+ s_user_data_release(wrapped_user_data);
+
+ return AWS_OP_ERR;
+}
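+
+/*
+ * A minimal caller sketch (illustrative only; `s_on_instance_id` is a hypothetical callback
+ * name, and `resource` may be NULL when error_code is non-zero):
+ *
+ *   static void s_on_instance_id(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ *       (void)user_data;
+ *       if (!error_code && resource) {
+ *           // copy the bytes out here; the buffer is only guaranteed valid for the duration of the callback
+ *       }
+ *   }
+ *
+ *   aws_imds_client_get_resource_async(
+ *       client, aws_byte_cursor_from_c_str("/latest/meta-data/instance-id"), s_on_instance_id, NULL);
+ */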
+
+/**
+ * Higher level API definitions to get specific IMDS info
+ * Reference:
+ * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html
+ * https://docs.aws.amazon.com/AWSJavaSDK/latest/javadoc/com/amazonaws/util/EC2MetadataUtils.html
+ * https://github.com/aws/aws-sdk-java-v2/blob/25f640c3b4f2e339c93a7da1494ab3310e128248/core/regions/src/main/java/software/amazon/awssdk/regions/internal/util/EC2MetadataUtils.java
+ * The IMDS client only implements resource acquisition that needs a single resource request.
+ * Complex resources like the network interface information defined in the Java V2 SDK are not implemented here.
+ * Building a full map of network interface information would take more than ten requests, while callers typically
+ * only care about one or two of them.
+ */
+static struct aws_byte_cursor s_instance_identity_document =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("instance-identity/document");
+static struct aws_byte_cursor s_instance_identity_signature =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("instance-identity/signature");
+static struct aws_byte_cursor s_ec2_metadata_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/meta-data");
+static struct aws_byte_cursor s_ec2_credentials_root =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/meta-data/iam/security-credentials/");
+static struct aws_byte_cursor s_ec2_userdata_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/user-data/");
+static struct aws_byte_cursor s_ec2_dynamicdata_root = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/latest/dynamic/");
+
+struct imds_get_array_user_data {
+ struct aws_allocator *allocator;
+ aws_imds_client_on_get_array_callback_fn *callback;
+ void *user_data;
+};
+
+struct imds_get_credentials_user_data {
+ struct aws_allocator *allocator;
+ aws_imds_client_on_get_credentials_callback_fn *callback;
+ void *user_data;
+};
+
+struct imds_get_iam_user_data {
+ struct aws_allocator *allocator;
+ aws_imds_client_on_get_iam_profile_callback_fn *callback;
+ void *user_data;
+};
+
+struct imds_get_instance_user_data {
+ struct aws_allocator *allocator;
+ aws_imds_client_on_get_instance_info_callback_fn *callback;
+ void *user_data;
+};
+
+static void s_process_array_resource(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ struct imds_get_array_user_data *wrapped_user_data = user_data;
+ struct aws_array_list resource_array;
+ AWS_ZERO_STRUCT(resource_array);
+
+ if (resource && !error_code) {
+ struct aws_byte_cursor resource_cursor = aws_byte_cursor_from_buf(resource);
+ if (aws_array_list_init_dynamic(
+ &resource_array, wrapped_user_data->allocator, 10, sizeof(struct aws_byte_cursor))) {
+ goto on_finish;
+ }
+ aws_byte_cursor_split_on_char(&resource_cursor, '\n', &resource_array);
+ }
+
+on_finish:
+ wrapped_user_data->callback(&resource_array, error_code, wrapped_user_data->user_data);
+ aws_array_list_clean_up_secure(&resource_array);
+ aws_mem_release(wrapped_user_data->allocator, wrapped_user_data);
+}
+
+static void s_process_credentials_resource(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ struct imds_get_credentials_user_data *wrapped_user_data = user_data;
+ struct aws_credentials *credentials = NULL;
+
+ struct aws_byte_buf json_data;
+ AWS_ZERO_STRUCT(json_data);
+
+ if (!resource || error_code) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_init_copy(&json_data, wrapped_user_data->allocator, resource)) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_append_null_terminator(&json_data)) {
+ goto on_finish;
+ }
+
+ struct aws_parse_credentials_from_json_doc_options parse_options = {
+ .access_key_id_name = "AccessKeyId",
+ .secret_access_key_name = "SecretAccessKey",
+ .token_name = "Token",
+ .expiration_name = "Expiration",
+ .token_required = true,
+ .expiration_required = true,
+ };
+
+ credentials = aws_parse_credentials_from_json_document(
+ wrapped_user_data->allocator, (const char *)json_data.buffer, &parse_options);
+
+on_finish:
+ wrapped_user_data->callback(credentials, error_code, wrapped_user_data->user_data);
+ aws_credentials_release(credentials);
+ aws_byte_buf_clean_up_secure(&json_data);
+ aws_mem_release(wrapped_user_data->allocator, wrapped_user_data);
+}
+
+/**
+ * {
+ "LastUpdated" : "2020-06-03T20:42:19Z",
+ "InstanceProfileArn" : "arn:aws:iam::030535792909:instance-profile/CloudWatchAgentServerRole",
+ "InstanceProfileId" : "AIPAQOHATHEGTGNQ5THQB"
+}
+ */
+static int s_parse_iam_profile(struct aws_json_value *document_root, struct aws_imds_iam_profile *dest) {
+
+ bool success = false;
+
+ struct aws_byte_cursor last_updated_cursor;
+ struct aws_json_value *last_updated =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("LastUpdated"));
+ if (last_updated == NULL) {
+ last_updated = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("lastupdated"));
+ }
+ if (!aws_json_value_is_string(last_updated) ||
+ (aws_json_value_get_string(last_updated, &last_updated_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse LastUpdated from Json document for iam profile.");
+ goto done;
+ }
+
+ struct aws_byte_cursor profile_arn_cursor;
+ struct aws_json_value *profile_arn =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceProfileArn"));
+ if (profile_arn == NULL) {
+ profile_arn = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceprofilearn"));
+ }
+ if (!aws_json_value_is_string(profile_arn) ||
+ (aws_json_value_get_string(profile_arn, &profile_arn_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse InstanceProfileArn from Json document for iam profile.");
+ goto done;
+ }
+
+ struct aws_byte_cursor profile_id_cursor;
+ struct aws_json_value *profile_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceProfileId"));
+ if (profile_id == NULL) {
+ profile_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceprofileid"));
+ }
+ if (!aws_json_value_is_string(profile_id) ||
+ (aws_json_value_get_string(profile_id, &profile_id_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse InstanceProfileId from Json document for iam profile.");
+ goto done;
+ }
+
+ if (last_updated_cursor.len == 0 || profile_arn_cursor.len == 0 || profile_id_cursor.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Parsed an unexpected Json document fro iam profile.");
+ goto done;
+ }
+
+ if (aws_date_time_init_from_str_cursor(&dest->last_updated, &last_updated_cursor, AWS_DATE_FORMAT_ISO_8601)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT, "LastUpdate in iam profile Json document is not a valid ISO_8601 date string.");
+ goto done;
+ }
+
+ dest->instance_profile_arn = profile_arn_cursor;
+ dest->instance_profile_id = profile_id_cursor;
+
+ success = true;
+
+done:
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+static void s_process_iam_profile(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ struct imds_get_iam_user_data *wrapped_user_data = user_data;
+ struct aws_json_value *document_root = NULL;
+ struct aws_imds_iam_profile iam;
+ AWS_ZERO_STRUCT(iam);
+
+ struct aws_byte_buf json_data;
+ AWS_ZERO_STRUCT(json_data);
+
+ if (!resource || error_code) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_init_copy(&json_data, wrapped_user_data->allocator, resource)) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_append_null_terminator(&json_data)) {
+ goto on_finish;
+ }
+
+ struct aws_byte_cursor json_data_cursor = aws_byte_cursor_from_buf(&json_data);
+ document_root = aws_json_value_new_from_string(aws_default_allocator(), json_data_cursor);
+ if (document_root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse document as Json document for iam profile.");
+ goto on_finish;
+ }
+
+ if (s_parse_iam_profile(document_root, &iam)) {
+ goto on_finish;
+ }
+
+on_finish:
+ wrapped_user_data->callback(&iam, error_code, wrapped_user_data->user_data);
+ aws_byte_buf_clean_up_secure(&json_data);
+ aws_mem_release(wrapped_user_data->allocator, wrapped_user_data);
+ if (document_root != NULL) {
+ aws_json_value_destroy(document_root);
+ }
+}
+
+/**
+ * {
+ "accountId" : "030535792909",
+ "architecture" : "x86_64",
+ "availabilityZone" : "us-west-2a",
+ "billingProducts" : null, ------------>array
+ "devpayProductCodes" : null, ----------->deprecated
+ "marketplaceProductCodes" : null, -------->array
+ "imageId" : "ami-5b70e323",
+ "instanceId" : "i-022a93b5e640c0248",
+ "instanceType" : "c4.8xlarge",
+ "kernelId" : null,
+ "pendingTime" : "2020-05-27T08:41:17Z",
+ "privateIp" : "172.31.22.164",
+ "ramdiskId" : null,
+ "region" : "us-west-2",
+ "version" : "2017-09-30"
+ }
+ */
+static int s_parse_instance_info(struct aws_json_value *document_root, struct aws_imds_instance_info *dest) {
+
+ bool success = false;
+
+ struct aws_byte_cursor account_id_cursor;
+ struct aws_json_value *account_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("accountId"));
+ if (account_id == NULL) {
+ account_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("accountid"));
+ if (account_id == NULL) {
+ account_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("AccountId"));
+ }
+ }
+ if (!aws_json_value_is_string(account_id) ||
+ (aws_json_value_get_string(account_id, &account_id_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse accountId from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->account_id = account_id_cursor;
+
+ struct aws_byte_cursor architecture_cursor;
+ struct aws_json_value *architecture =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("architecture"));
+ if (architecture == NULL) {
+ architecture = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Architecture"));
+ }
+ if (!aws_json_value_is_string(architecture) ||
+ (aws_json_value_get_string(architecture, &architecture_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse architecture from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->architecture = architecture_cursor;
+
+ struct aws_byte_cursor availability_zone_cursor;
+ struct aws_json_value *availability_zone =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("availabilityZone"));
+ if (availability_zone == NULL) {
+ availability_zone =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("availabilityzone"));
+ if (availability_zone == NULL) {
+ availability_zone =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("AvailabilityZone"));
+ }
+ }
+ if (!aws_json_value_is_string(availability_zone) ||
+ (aws_json_value_get_string(availability_zone, &availability_zone_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT, "Failed to parse availabilityZone from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->availability_zone = availability_zone_cursor;
+
+ struct aws_byte_cursor billing_products_cursor;
+ struct aws_json_value *billing_products =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("billingProducts"));
+ if (billing_products == NULL) {
+ billing_products = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("billingproducts"));
+ if (billing_products == NULL) {
+ billing_products =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("BillingProducts"));
+ }
+ }
+ if (aws_json_value_is_array(billing_products)) {
+ struct aws_json_value *element;
+ for (size_t i = 0; i < aws_json_get_array_size(billing_products); i++) {
+ element = aws_json_get_array_element(billing_products, i);
+ if (aws_json_value_is_string(element) &&
+ aws_json_value_get_string(element, &billing_products_cursor) != AWS_OP_ERR) {
+ struct aws_byte_cursor item = billing_products_cursor;
+ aws_array_list_push_back(&dest->billing_products, (const void *)&item);
+ }
+ }
+ }
+
+ struct aws_byte_cursor marketplace_product_codes_cursor;
+ struct aws_json_value *marketplace_product_codes =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("marketplaceProductCodes"));
+ if (marketplace_product_codes == NULL) {
+ marketplace_product_codes =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("marketplaceproductcodes"));
+ if (marketplace_product_codes == NULL) {
+ marketplace_product_codes =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("MarketplaceProductCodes"));
+ }
+ }
+ if (aws_json_value_is_array(marketplace_product_codes)) {
+ struct aws_json_value *element;
+ for (size_t i = 0; i < aws_json_get_array_size(marketplace_product_codes); i++) {
+ element = aws_json_get_array_element(marketplace_product_codes, i);
+ if (aws_json_value_is_string(element) &&
+ aws_json_value_get_string(element, &marketplace_product_codes_cursor) != AWS_OP_ERR) {
+ struct aws_byte_cursor item = marketplace_product_codes_cursor;
+ aws_array_list_push_back(&dest->marketplace_product_codes, (const void *)&item);
+ }
+ }
+ }
+
+ struct aws_byte_cursor image_id_cursor;
+ struct aws_json_value *image_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("imageId"));
+ if (image_id == NULL) {
+ image_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("imageid"));
+ if (image_id == NULL) {
+ image_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("ImageId"));
+ }
+ }
+ if (aws_json_value_is_string(image_id) && (aws_json_value_get_string(image_id, &image_id_cursor) != AWS_OP_ERR)) {
+ dest->image_id = image_id_cursor;
+ }
+
+ struct aws_byte_cursor instance_id_cursor;
+ struct aws_json_value *instance_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceId"));
+ if (instance_id == NULL) {
+ instance_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceid"));
+ if (instance_id == NULL) {
+ instance_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceId"));
+ }
+ }
+ if (!aws_json_value_is_string(instance_id) ||
+ (aws_json_value_get_string(instance_id, &instance_id_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse instanceId from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->instance_id = instance_id_cursor;
+
+ struct aws_byte_cursor instance_type_cursor;
+ struct aws_json_value *instance_type =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instanceType"));
+ if (instance_type == NULL) {
+ instance_type = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("instancetype"));
+ if (instance_type == NULL) {
+ instance_type = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("InstanceType"));
+ }
+ }
+ if (!aws_json_value_is_string(instance_type) ||
+ (aws_json_value_get_string(instance_type, &instance_type_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse instanceType from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->instance_type = instance_type_cursor;
+
+ struct aws_byte_cursor kernel_id_cursor;
+ struct aws_json_value *kernel_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("kernelId"));
+ if (kernel_id == NULL) {
+ kernel_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("kernelid"));
+ if (kernel_id == NULL) {
+ kernel_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("KernelId"));
+ }
+ }
+ if (aws_json_value_is_string(kernel_id) &&
+ (aws_json_value_get_string(kernel_id, &kernel_id_cursor) != AWS_OP_ERR)) {
+ dest->kernel_id = kernel_id_cursor;
+ }
+
+ struct aws_byte_cursor private_ip_cursor;
+ struct aws_json_value *private_ip =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("privateIp"));
+ if (private_ip == NULL) {
+ private_ip = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("privateip"));
+ if (private_ip == NULL) {
+ private_ip = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("PrivateIp"));
+ }
+ }
+ if (aws_json_value_is_string(private_ip) &&
+ (aws_json_value_get_string(private_ip, &private_ip_cursor) != AWS_OP_ERR)) {
+ dest->private_ip = private_ip_cursor;
+ }
+
+ struct aws_byte_cursor ramdisk_id_cursor;
+ struct aws_json_value *ramdisk_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("ramdiskId"));
+ if (ramdisk_id == NULL) {
+ ramdisk_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("ramdiskid"));
+ if (ramdisk_id == NULL) {
+ ramdisk_id = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("RamdiskId"));
+ }
+ }
+ if (aws_json_value_is_string(ramdisk_id) &&
+ (aws_json_value_get_string(ramdisk_id, &ramdisk_id_cursor) != AWS_OP_ERR)) {
+ dest->ramdisk_id = ramdisk_id_cursor;
+ }
+
+ struct aws_byte_cursor region_cursor;
+ struct aws_json_value *region = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("region"));
+ if (region == NULL) {
+ region = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Region"));
+ }
+ if (!aws_json_value_is_string(region) || (aws_json_value_get_string(region, &region_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse region from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->region = region_cursor;
+
+ struct aws_byte_cursor version_cursor;
+ struct aws_json_value *version =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("version"));
+ if (version == NULL) {
+ version = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("Version"));
+ }
+ if (!aws_json_value_is_string(version) || (aws_json_value_get_string(version, &version_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse version from Json document for ec2 instance info.");
+ goto done;
+ }
+ dest->version = version_cursor;
+
+ struct aws_byte_cursor pending_time_cursor;
+ struct aws_json_value *pending_time =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("pendingTime"));
+ if (pending_time == NULL) {
+ pending_time = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("pendingtime"));
+ if (pending_time == NULL) {
+ pending_time = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("PendingTime"));
+ }
+ }
+ if (!aws_json_value_is_string(pending_time) ||
+ (aws_json_value_get_string(pending_time, &pending_time_cursor) == AWS_OP_ERR)) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse pendingTime from Json document for ec2 instance info.");
+ goto done;
+ }
+
+ if (aws_date_time_init_from_str_cursor(&dest->pending_time, &pending_time_cursor, AWS_DATE_FORMAT_ISO_8601)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_IMDS_CLIENT, "pendingTime in instance info Json document is not a valid ISO_8601 date string.");
+ goto done;
+ }
+
+ success = true;
+
+done:
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+static void s_process_instance_info(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ struct imds_get_instance_user_data *wrapped_user_data = user_data;
+ struct aws_imds_instance_info instance_info;
+ AWS_ZERO_STRUCT(instance_info);
+ struct aws_byte_buf json_data;
+ AWS_ZERO_STRUCT(json_data);
+
+ struct aws_json_value *document_root = NULL;
+
+ if (aws_array_list_init_dynamic(
+ &instance_info.billing_products, wrapped_user_data->allocator, 10, sizeof(struct aws_byte_cursor))) {
+ goto on_finish;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &instance_info.marketplace_product_codes,
+ wrapped_user_data->allocator,
+ 10,
+ sizeof(struct aws_byte_cursor))) {
+ goto on_finish;
+ }
+
+ if (!resource || error_code) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_init_copy(&json_data, wrapped_user_data->allocator, resource)) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_append_null_terminator(&json_data)) {
+ goto on_finish;
+ }
+
+ struct aws_byte_cursor json_data_cursor = aws_byte_cursor_from_buf(&json_data);
+ document_root = aws_json_value_new_from_string(aws_default_allocator(), json_data_cursor);
+ if (document_root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_IMDS_CLIENT, "Failed to parse document as Json document for ec2 instance info.");
+ goto on_finish;
+ }
+
+ if (s_parse_instance_info(document_root, &instance_info)) {
+ goto on_finish;
+ }
+
+on_finish:
+ wrapped_user_data->callback(&instance_info, error_code, wrapped_user_data->user_data);
+ aws_array_list_clean_up_secure(&instance_info.billing_products);
+ aws_array_list_clean_up_secure(&instance_info.marketplace_product_codes);
+ aws_byte_buf_clean_up_secure(&json_data);
+ aws_mem_release(wrapped_user_data->allocator, wrapped_user_data);
+ if (document_root != NULL) {
+ aws_json_value_destroy(document_root);
+ }
+}
+
+static int s_aws_imds_get_resource(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor path,
+ struct aws_byte_cursor name,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+
+ struct aws_byte_buf resource;
+ if (aws_byte_buf_init_copy_from_cursor(&resource, client->allocator, path)) {
+ return AWS_OP_ERR;
+ }
+ if (aws_byte_buf_append_dynamic(&resource, &name)) {
+ goto error;
+ }
+ if (aws_imds_client_get_resource_async(client, aws_byte_cursor_from_buf(&resource), callback, user_data)) {
+ goto error;
+ }
+ aws_byte_buf_clean_up(&resource);
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_byte_buf_clean_up(&resource);
+ return AWS_OP_ERR;
+}
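+
+/*
+ * Illustrative sketch (the root cursors are defined earlier in this file and not
+ * shown in this excerpt; "/latest/meta-data" is assumed): with path = s_ec2_metadata_root
+ * and name = "/ami-id", the buffer built above would hold "/latest/meta-data/ami-id"
+ * before being handed to aws_imds_client_get_resource_async().
+ */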
+
+static int s_aws_imds_get_converted_resource(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor path,
+ struct aws_byte_cursor name,
+ aws_imds_client_on_get_resource_callback_fn conversion_fn,
+ void *user_data) {
+ return s_aws_imds_get_resource(client, path, name, conversion_fn, user_data);
+}
+
+int aws_imds_client_get_ami_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ami-id"), callback, user_data);
+}
+
+int aws_imds_client_get_ami_launch_index(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ami-launch-index"), callback, user_data);
+}
+
+int aws_imds_client_get_ami_manifest_path(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ami-manifest-path"), callback, user_data);
+}
+
+int aws_imds_client_get_ancestor_ami_ids(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_array_callback_fn callback,
+ void *user_data) {
+
+ struct imds_get_array_user_data *wrapped_user_data =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_array_user_data));
+ if (!wrapped_user_data) {
+ return AWS_OP_ERR;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->callback = callback;
+ wrapped_user_data->user_data = user_data;
+
+ return s_aws_imds_get_converted_resource(
+ client,
+ s_ec2_metadata_root,
+ aws_byte_cursor_from_c_str("/ancestor-ami-ids"),
+ s_process_array_resource,
+ wrapped_user_data);
+}
+
+int aws_imds_client_get_instance_action(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/instance-action"), callback, user_data);
+}
+
+int aws_imds_client_get_instance_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/instance-id"), callback, user_data);
+}
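+
+/*
+ * Illustrative usage sketch, assuming a client built elsewhere with the
+ * aws_imds_client API (stdio is used only for the demonstration):
+ *
+ *   static void s_on_instance_id(const struct aws_byte_buf *resource, int error_code, void *user_data) {
+ *       (void)user_data;
+ *       if (error_code == AWS_ERROR_SUCCESS && resource != NULL) {
+ *           printf("instance-id: %.*s\n", (int)resource->len, (const char *)resource->buffer);
+ *       }
+ *   }
+ *
+ *   aws_imds_client_get_instance_id(client, s_on_instance_id, NULL);
+ */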
+
+int aws_imds_client_get_instance_type(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/instance-type"), callback, user_data);
+}
+
+int aws_imds_client_get_mac_address(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/mac"), callback, user_data);
+}
+
+int aws_imds_client_get_private_ip_address(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/local-ipv4"), callback, user_data);
+}
+
+int aws_imds_client_get_availability_zone(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/placement/availability-zone"), callback, user_data);
+}
+
+int aws_imds_client_get_product_codes(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/product-codes"), callback, user_data);
+}
+
+int aws_imds_client_get_public_key(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/public-keys/0/openssh-key"), callback, user_data);
+}
+
+int aws_imds_client_get_ramdisk_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/ramdisk-id"), callback, user_data);
+}
+
+int aws_imds_client_get_reservation_id(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/reservation-id"), callback, user_data);
+}
+
+int aws_imds_client_get_security_groups(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_array_callback_fn callback,
+ void *user_data) {
+
+ struct imds_get_array_user_data *wrapped_user_data =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_array_user_data));
+ if (!wrapped_user_data) {
+ return AWS_OP_ERR;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->callback = callback;
+ wrapped_user_data->user_data = user_data;
+
+ return s_aws_imds_get_converted_resource(
+ client,
+ s_ec2_metadata_root,
+ aws_byte_cursor_from_c_str("/security-groups"),
+ s_process_array_resource,
+ wrapped_user_data);
+}
+
+int aws_imds_client_get_block_device_mapping(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_array_callback_fn callback,
+ void *user_data) {
+
+ struct imds_get_array_user_data *wrapped_user_data =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_array_user_data));
+
+ if (!wrapped_user_data) {
+ return AWS_OP_ERR;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->callback = callback;
+ wrapped_user_data->user_data = user_data;
+ return s_aws_imds_get_converted_resource(
+ client,
+ s_ec2_metadata_root,
+ aws_byte_cursor_from_c_str("/block-device-mapping"),
+ s_process_array_resource,
+ wrapped_user_data);
+}
+
+int aws_imds_client_get_attached_iam_role(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/iam/security-credentials/"), callback, user_data);
+}
+
+int aws_imds_client_get_credentials(
+ struct aws_imds_client *client,
+ struct aws_byte_cursor iam_role_name,
+ aws_imds_client_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct imds_get_credentials_user_data *wrapped_user_data =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_credentials_user_data));
+ if (!wrapped_user_data) {
+ return AWS_OP_ERR;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->callback = callback;
+ wrapped_user_data->user_data = user_data;
+
+ return s_aws_imds_get_converted_resource(
+ client, s_ec2_credentials_root, iam_role_name, s_process_credentials_resource, wrapped_user_data);
+}
+
+int aws_imds_client_get_iam_profile(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_iam_profile_callback_fn callback,
+ void *user_data) {
+
+ struct imds_get_iam_user_data *wrapped_user_data =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_iam_user_data));
+ if (!wrapped_user_data) {
+ return AWS_OP_ERR;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->callback = callback;
+ wrapped_user_data->user_data = user_data;
+
+ return s_aws_imds_get_converted_resource(
+ client, s_ec2_metadata_root, aws_byte_cursor_from_c_str("/iam/info"), s_process_iam_profile, wrapped_user_data);
+}
+
+int aws_imds_client_get_user_data(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(client, s_ec2_userdata_root, aws_byte_cursor_from_c_str(""), callback, user_data);
+}
+
+int aws_imds_client_get_instance_signature(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_resource_callback_fn callback,
+ void *user_data) {
+ return s_aws_imds_get_resource(client, s_ec2_dynamicdata_root, s_instance_identity_signature, callback, user_data);
+}
+
+int aws_imds_client_get_instance_info(
+ struct aws_imds_client *client,
+ aws_imds_client_on_get_instance_info_callback_fn callback,
+ void *user_data) {
+
+ struct imds_get_instance_user_data *wrapped_user_data =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct imds_get_instance_user_data));
+ if (!wrapped_user_data) {
+ return AWS_OP_ERR;
+ }
+
+ wrapped_user_data->allocator = client->allocator;
+ wrapped_user_data->callback = callback;
+ wrapped_user_data->user_data = user_data;
+
+ return s_aws_imds_get_converted_resource(
+ client, s_ec2_dynamicdata_root, s_instance_identity_document, s_process_instance_info, wrapped_user_data);
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/aws_profile.c b/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
new file mode 100644
index 0000000000..26a6b15318
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/aws_profile.c
@@ -0,0 +1,40 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/private/aws_profile.h>
+
+#include <aws/auth/credentials.h>
+#include <aws/common/environment.h>
+#include <aws/common/string.h>
+
+static const struct aws_string *s_profile_get_property_value(
+ const struct aws_profile *profile,
+ const struct aws_string *property_name) {
+
+ const struct aws_profile_property *property = aws_profile_get_property(profile, property_name);
+ if (property == NULL) {
+ return NULL;
+ }
+
+ return aws_profile_property_get_value(property);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_profile_var, "aws_access_key_id");
+AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_profile_var, "aws_secret_access_key");
+AWS_STATIC_STRING_FROM_LITERAL(s_session_token_profile_var, "aws_session_token");
+
+struct aws_credentials *aws_credentials_new_from_profile(
+ struct aws_allocator *allocator,
+ const struct aws_profile *profile) {
+ const struct aws_string *access_key = s_profile_get_property_value(profile, s_access_key_id_profile_var);
+ const struct aws_string *secret_key = s_profile_get_property_value(profile, s_secret_access_key_profile_var);
+ if (access_key == NULL || secret_key == NULL) {
+ return NULL;
+ }
+
+ const struct aws_string *session_token = s_profile_get_property_value(profile, s_session_token_profile_var);
+
+ return aws_credentials_new_from_string(allocator, access_key, secret_key, session_token, UINT64_MAX);
+}
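+
+/*
+ * Illustrative sketch: a credentials-file profile carrying the three property
+ * names referenced above (values are placeholders; the session token line is
+ * optional, matching the NULL-tolerant lookup above):
+ *
+ *   [default]
+ *   aws_access_key_id     = AKIDEXAMPLE
+ *   aws_secret_access_key = wJalrXUtnFEMI/K7MDENGbPxRfiCYEXAMPLEKEY
+ *   aws_session_token     = AQoDYXdzEXAMPLETOKEN
+ */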
diff --git a/contrib/restricted/aws/aws-c-auth/source/aws_signing.c b/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
new file mode 100644
index 0000000000..aa38b8c683
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/aws_signing.c
@@ -0,0 +1,2669 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/private/aws_signing.h>
+
+#include <aws/auth/credentials.h>
+#include <aws/auth/private/key_derivation.h>
+#include <aws/auth/signable.h>
+#include <aws/auth/signing.h>
+#include <aws/cal/ecc.h>
+#include <aws/cal/hash.h>
+#include <aws/cal/hmac.h>
+#include <aws/common/date_time.h>
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/io/stream.h>
+#include <aws/io/uri.h>
+
+#include <ctype.h>
+#include <inttypes.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+#endif /* _MSC_VER */
+
+/*
+ * A bunch of initial size values for various buffers used throughout the signing process
+ *
+ * We want them to be sufficient-but-not-wasting-significant-amounts-of-memory for "most"
+ * requests. The body read buffer is an exception since it will just be holding windows rather than
+ * the entire thing.
+ */
+#define BODY_READ_BUFFER_SIZE 4096
+#define CANONICAL_REQUEST_STARTING_SIZE 1024
+#define STRING_TO_SIGN_STARTING_SIZE 256
+#define SIGNED_HEADERS_STARTING_SIZE 256
+#define CANONICAL_HEADER_BLOCK_STARTING_SIZE 1024
+#define AUTHORIZATION_VALUE_STARTING_SIZE 512
+#define PAYLOAD_HASH_STARTING_SIZE (AWS_SHA256_LEN * 2)
+#define CREDENTIAL_SCOPE_STARTING_SIZE 128
+#define ACCESS_CREDENTIAL_SCOPE_STARTING_SIZE 149
+#define SCRATCH_BUF_STARTING_SIZE 256
+#define MAX_AUTHORIZATION_HEADER_COUNT 4
+#define MAX_AUTHORIZATION_QUERY_PARAM_COUNT 6
+#define DEFAULT_PATH_COMPONENT_COUNT 10
+#define CANONICAL_REQUEST_SPLIT_OVER_ESTIMATE 20
+#define HEX_ENCODED_SIGNATURE_OVER_ESTIMATE 256
+#define MAX_ECDSA_P256_SIGNATURE_AS_BINARY_LENGTH 72
+#define MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH (MAX_ECDSA_P256_SIGNATURE_AS_BINARY_LENGTH * 2)
+#define AWS_SIGV4A_SIGNATURE_PADDING_BYTE ('*')
+
+AWS_STRING_FROM_LITERAL(g_aws_signing_content_header_name, "x-amz-content-sha256");
+AWS_STRING_FROM_LITERAL(g_aws_signing_authorization_header_name, "Authorization");
+AWS_STRING_FROM_LITERAL(g_aws_signing_authorization_query_param_name, "X-Amz-Signature");
+AWS_STRING_FROM_LITERAL(g_aws_signing_algorithm_query_param_name, "X-Amz-Algorithm");
+AWS_STRING_FROM_LITERAL(g_aws_signing_credential_query_param_name, "X-Amz-Credential");
+AWS_STRING_FROM_LITERAL(g_aws_signing_date_name, "X-Amz-Date");
+AWS_STRING_FROM_LITERAL(g_aws_signing_signed_headers_query_param_name, "X-Amz-SignedHeaders");
+AWS_STRING_FROM_LITERAL(g_aws_signing_security_token_name, "X-Amz-Security-Token");
+AWS_STRING_FROM_LITERAL(g_aws_signing_expires_query_param_name, "X-Amz-Expires");
+AWS_STRING_FROM_LITERAL(g_aws_signing_region_set_name, "X-Amz-Region-Set");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4_http_request, "AWS4-HMAC-SHA256");
+AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4_s3_chunked_payload, "AWS4-HMAC-SHA256-PAYLOAD");
+AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4a_s3_chunked_payload, "AWS4-ECDSA-P256-SHA256-PAYLOAD");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4_s3_chunked_trailer_payload, "AWS4-HMAC-SHA256-TRAILER");
+AWS_STATIC_STRING_FROM_LITERAL(s_signature_type_sigv4a_s3_chunked_trailer_payload, "AWS4-ECDSA-P256-SHA256-TRAILER");
+
+/* aws-related query param and header tables */
+static struct aws_hash_table s_forbidden_headers;
+static struct aws_hash_table s_forbidden_params;
+static struct aws_hash_table s_skipped_headers;
+
+static struct aws_byte_cursor s_amzn_trace_id_header_name;
+static struct aws_byte_cursor s_user_agent_header_name;
+static struct aws_byte_cursor s_connection_header_name;
+static struct aws_byte_cursor s_expect_header_name;
+static struct aws_byte_cursor s_sec_websocket_key_header_name;
+static struct aws_byte_cursor s_sec_websocket_protocol_header_name;
+static struct aws_byte_cursor s_sec_websocket_version_header_name;
+static struct aws_byte_cursor s_upgrade_header_name;
+
+static struct aws_byte_cursor s_amz_content_sha256_header_name;
+static struct aws_byte_cursor s_amz_date_header_name;
+static struct aws_byte_cursor s_authorization_header_name;
+static struct aws_byte_cursor s_region_set_header_name;
+static struct aws_byte_cursor s_amz_security_token_header_name;
+
+static struct aws_byte_cursor s_amz_signature_param_name;
+static struct aws_byte_cursor s_amz_date_param_name;
+static struct aws_byte_cursor s_amz_credential_param_name;
+static struct aws_byte_cursor s_amz_algorithm_param_name;
+static struct aws_byte_cursor s_amz_signed_headers_param_name;
+static struct aws_byte_cursor s_amz_security_token_param_name;
+static struct aws_byte_cursor s_amz_expires_param_name;
+static struct aws_byte_cursor s_amz_region_set_param_name;
+
+/*
+ * Build a set of library-static tables for quick lookup.
+ *
+ * Construction errors are considered fatal.
+ */
+int aws_signing_init_signing_tables(struct aws_allocator *allocator) {
+
+ if (aws_hash_table_init(
+ &s_skipped_headers,
+ allocator,
+ 10,
+ aws_hash_byte_cursor_ptr_ignore_case,
+ (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case,
+ NULL,
+ NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amzn_trace_id_header_name = aws_byte_cursor_from_c_str("x-amzn-trace-id");
+ if (aws_hash_table_put(&s_skipped_headers, &s_amzn_trace_id_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_user_agent_header_name = aws_byte_cursor_from_c_str("User-Agent");
+ if (aws_hash_table_put(&s_skipped_headers, &s_user_agent_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_connection_header_name = aws_byte_cursor_from_c_str("connection");
+ if (aws_hash_table_put(&s_skipped_headers, &s_connection_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_expect_header_name = aws_byte_cursor_from_c_str("expect");
+ if (aws_hash_table_put(&s_skipped_headers, &s_expect_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_sec_websocket_key_header_name = aws_byte_cursor_from_c_str("sec-websocket-key");
+ if (aws_hash_table_put(&s_skipped_headers, &s_sec_websocket_key_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_sec_websocket_protocol_header_name = aws_byte_cursor_from_c_str("sec-websocket-protocol");
+ if (aws_hash_table_put(&s_skipped_headers, &s_sec_websocket_protocol_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_sec_websocket_version_header_name = aws_byte_cursor_from_c_str("sec-websocket-version");
+ if (aws_hash_table_put(&s_skipped_headers, &s_sec_websocket_version_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_upgrade_header_name = aws_byte_cursor_from_c_str("upgrade");
+ if (aws_hash_table_put(&s_skipped_headers, &s_upgrade_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_hash_table_init(
+ &s_forbidden_headers,
+ allocator,
+ 10,
+ aws_hash_byte_cursor_ptr_ignore_case,
+ (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case,
+ NULL,
+ NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_content_sha256_header_name = aws_byte_cursor_from_string(g_aws_signing_content_header_name);
+ if (aws_hash_table_put(&s_forbidden_headers, &s_amz_content_sha256_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_date_header_name = aws_byte_cursor_from_string(g_aws_signing_date_name);
+ if (aws_hash_table_put(&s_forbidden_headers, &s_amz_date_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_authorization_header_name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name);
+ if (aws_hash_table_put(&s_forbidden_headers, &s_authorization_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_region_set_header_name = aws_byte_cursor_from_string(g_aws_signing_region_set_name);
+ if (aws_hash_table_put(&s_forbidden_headers, &s_region_set_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_security_token_header_name = aws_byte_cursor_from_string(g_aws_signing_security_token_name);
+ if (aws_hash_table_put(&s_forbidden_headers, &s_amz_security_token_header_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_hash_table_init(
+ &s_forbidden_params,
+ allocator,
+ 10,
+ aws_hash_byte_cursor_ptr_ignore_case,
+ (aws_hash_callback_eq_fn *)aws_byte_cursor_eq_ignore_case,
+ NULL,
+ NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_signature_param_name = aws_byte_cursor_from_string(g_aws_signing_authorization_query_param_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_signature_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_date_param_name = aws_byte_cursor_from_string(g_aws_signing_date_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_date_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_credential_param_name = aws_byte_cursor_from_string(g_aws_signing_credential_query_param_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_credential_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_algorithm_param_name = aws_byte_cursor_from_string(g_aws_signing_algorithm_query_param_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_algorithm_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_signed_headers_param_name = aws_byte_cursor_from_string(g_aws_signing_signed_headers_query_param_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_signed_headers_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_security_token_param_name = aws_byte_cursor_from_string(g_aws_signing_security_token_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_security_token_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_expires_param_name = aws_byte_cursor_from_string(g_aws_signing_expires_query_param_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_expires_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ s_amz_region_set_param_name = aws_byte_cursor_from_string(g_aws_signing_region_set_name);
+ if (aws_hash_table_put(&s_forbidden_params, &s_amz_region_set_param_name, NULL, NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_signing_clean_up_signing_tables(void) {
+ aws_hash_table_clean_up(&s_skipped_headers);
+ aws_hash_table_clean_up(&s_forbidden_headers);
+ aws_hash_table_clean_up(&s_forbidden_params);
+}
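+
+/*
+ * Lifecycle sketch (assumption: following the usual aws-c-auth pattern, these are
+ * intended to be called once per process by the library-wide init/clean-up paths):
+ *
+ *   aws_signing_init_signing_tables(allocator);    // before any signing
+ *   ...sign requests...
+ *   aws_signing_clean_up_signing_tables();         // at shutdown
+ */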
+
+static bool s_is_header_based_signature_value(enum aws_signature_type signature_type) {
+ switch (signature_type) {
+ case AWS_ST_HTTP_REQUEST_HEADERS:
+ case AWS_ST_CANONICAL_REQUEST_HEADERS:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool s_is_query_param_based_signature_value(enum aws_signature_type signature_type) {
+ switch (signature_type) {
+ case AWS_ST_HTTP_REQUEST_QUERY_PARAMS:
+ case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static int s_get_signature_type_cursor(struct aws_signing_state_aws *state, struct aws_byte_cursor *cursor) {
+ switch (state->config.signature_type) {
+ case AWS_ST_HTTP_REQUEST_HEADERS:
+ case AWS_ST_HTTP_REQUEST_QUERY_PARAMS:
+ case AWS_ST_CANONICAL_REQUEST_HEADERS:
+ case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS:
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4) {
+ *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_http_request);
+ } else {
+ *cursor = aws_byte_cursor_from_string(g_signature_type_sigv4a_http_request);
+ }
+ break;
+ case AWS_ST_HTTP_REQUEST_CHUNK:
+ case AWS_ST_HTTP_REQUEST_EVENT:
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4) {
+ *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_payload);
+ } else {
+ *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4a_s3_chunked_payload);
+ }
+ break;
+ case AWS_ST_HTTP_REQUEST_TRAILING_HEADERS:
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4) {
+ *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4_s3_chunked_trailer_payload);
+ } else {
+ *cursor = aws_byte_cursor_from_string(s_signature_type_sigv4a_s3_chunked_trailer_payload);
+ }
+ break;
+
+ default:
+ return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_SIGNATURE_TYPE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
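+
+/*
+ * For reference, the mapping implemented by the switch above (column chosen by
+ * state->config.algorithm: SigV4 vs SigV4a):
+ *
+ *   HEADERS / QUERY_PARAMS / CANONICAL_*  ->  "AWS4-HMAC-SHA256"          or  g_signature_type_sigv4a_http_request
+ *   CHUNK / EVENT                         ->  "AWS4-HMAC-SHA256-PAYLOAD"  or  "AWS4-ECDSA-P256-SHA256-PAYLOAD"
+ *   TRAILING_HEADERS                      ->  "AWS4-HMAC-SHA256-TRAILER"  or  "AWS4-ECDSA-P256-SHA256-TRAILER"
+ */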
+
+static int s_append_sts_signature_type(struct aws_signing_state_aws *state, struct aws_byte_buf *dest) {
+ struct aws_byte_cursor algorithm_cursor;
+ if (s_get_signature_type_cursor(state, &algorithm_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return aws_byte_buf_append_dynamic(dest, &algorithm_cursor);
+}
+
+/*
+ * signing state management
+ */
+struct aws_signing_state_aws *aws_signing_state_new(
+ struct aws_allocator *allocator,
+ const struct aws_signing_config_aws *config,
+ const struct aws_signable *signable,
+ aws_signing_complete_fn *on_complete,
+ void *userdata) {
+
+ if (aws_validate_aws_signing_config_aws(config)) {
+ return NULL;
+ }
+
+ struct aws_signing_state_aws *state = aws_mem_calloc(allocator, 1, sizeof(struct aws_signing_state_aws));
+ if (!state) {
+ return NULL;
+ }
+
+ state->allocator = allocator;
+
+ /* Make our own copy of the signing config */
+ state->config = *config;
+
+ if (state->config.credentials_provider != NULL) {
+ aws_credentials_provider_acquire(state->config.credentials_provider);
+ }
+
+ if (state->config.credentials != NULL) {
+ aws_credentials_acquire(state->config.credentials);
+ }
+
+ if (aws_byte_buf_init_cache_and_update_cursors(
+ &state->config_string_buffer,
+ allocator,
+ &state->config.region,
+ &state->config.service,
+ &state->config.signed_body_value,
+ NULL /*end*/)) {
+ goto on_error;
+ }
+
+ state->signable = signable;
+ state->on_complete = on_complete;
+ state->userdata = userdata;
+
+ if (aws_signing_result_init(&state->result, allocator)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init(&state->canonical_request, allocator, CANONICAL_REQUEST_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->string_to_sign, allocator, STRING_TO_SIGN_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->signed_headers, allocator, SIGNED_HEADERS_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->canonical_header_block, allocator, CANONICAL_HEADER_BLOCK_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->payload_hash, allocator, PAYLOAD_HASH_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->credential_scope, allocator, CREDENTIAL_SCOPE_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->access_credential_scope, allocator, ACCESS_CREDENTIAL_SCOPE_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->date, allocator, AWS_DATE_TIME_STR_MAX_LEN) ||
+ aws_byte_buf_init(&state->signature, allocator, PAYLOAD_HASH_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->string_to_sign_payload, allocator, PAYLOAD_HASH_STARTING_SIZE) ||
+ aws_byte_buf_init(&state->scratch_buf, allocator, SCRATCH_BUF_STARTING_SIZE)) {
+
+ goto on_error;
+ }
+
+ snprintf(
+ state->expiration_array, AWS_ARRAY_SIZE(state->expiration_array), "%" PRIu64 "", config->expiration_in_seconds);
+
+ return state;
+
+on_error:
+ aws_signing_state_destroy(state);
+ return NULL;
+}
+
+void aws_signing_state_destroy(struct aws_signing_state_aws *state) {
+ aws_signing_result_clean_up(&state->result);
+
+ aws_credentials_provider_release(state->config.credentials_provider);
+ aws_credentials_release(state->config.credentials);
+
+ aws_byte_buf_clean_up(&state->config_string_buffer);
+ aws_byte_buf_clean_up(&state->canonical_request);
+ aws_byte_buf_clean_up(&state->string_to_sign);
+ aws_byte_buf_clean_up(&state->signed_headers);
+ aws_byte_buf_clean_up(&state->canonical_header_block);
+ aws_byte_buf_clean_up(&state->payload_hash);
+ aws_byte_buf_clean_up(&state->credential_scope);
+ aws_byte_buf_clean_up(&state->access_credential_scope);
+ aws_byte_buf_clean_up(&state->date);
+ aws_byte_buf_clean_up(&state->signature);
+ aws_byte_buf_clean_up(&state->string_to_sign_payload);
+ aws_byte_buf_clean_up(&state->scratch_buf);
+
+ aws_mem_release(state->allocator, state);
+}
+
+/*
+ * canonical request utility functions:
+ *
+ * various appends, conversion/encoding, etc...
+ *
+ */
+
+static int s_append_canonical_method(struct aws_signing_state_aws *state) {
+ const struct aws_signable *signable = state->signable;
+ struct aws_byte_buf *buffer = &state->canonical_request;
+
+ struct aws_byte_cursor method_cursor;
+ aws_signable_get_property(signable, g_aws_http_method_property_name, &method_cursor);
+
+ if (aws_byte_buf_append_dynamic(buffer, &method_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(buffer, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_append_with_lookup(
+ struct aws_byte_buf *dst,
+ const struct aws_byte_cursor *src,
+ const uint8_t *lookup_table) {
+
+ if (aws_byte_buf_reserve_relative(dst, src->len)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_with_lookup(dst, src, lookup_table)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * A function that builds a normalized path (removes redundant '/' characters and '.' components, and properly pops
+ * off components in response to '..' components)
+ *
+ * We use a simple algorithm to do this:
+ *
+ * First split the path into components
+ * Then, using a secondary stack of components, build the final path by pushing and popping (on '..') components
+ * on the stack. The final path is then the concatenation of the secondary stack.
+ */
+static int s_append_normalized_path(
+ const struct aws_byte_cursor *raw_path,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *dest) {
+
+ struct aws_array_list raw_split;
+ AWS_ZERO_STRUCT(raw_split);
+
+ struct aws_array_list normalized_split;
+ AWS_ZERO_STRUCT(normalized_split);
+
+ int result = AWS_OP_ERR;
+
+ if (aws_array_list_init_dynamic(
+ &raw_split, allocator, DEFAULT_PATH_COMPONENT_COUNT, sizeof(struct aws_byte_cursor))) {
+ goto cleanup;
+ }
+
+ if (aws_byte_cursor_split_on_char(raw_path, '/', &raw_split)) {
+ goto cleanup;
+ }
+
+ const size_t raw_split_count = aws_array_list_length(&raw_split);
+ if (aws_array_list_init_dynamic(&normalized_split, allocator, raw_split_count, sizeof(struct aws_byte_cursor))) {
+ goto cleanup;
+ }
+
+ /*
+ * Iterate the raw split to build a list of path components that make up the
+ * normalized path
+ */
+ for (size_t i = 0; i < raw_split_count; ++i) {
+ struct aws_byte_cursor path_component;
+ AWS_ZERO_STRUCT(path_component);
+ if (aws_array_list_get_at(&raw_split, &path_component, i)) {
+ goto cleanup;
+ }
+
+ if (path_component.len == 0 || (path_component.len == 1 && *path_component.ptr == '.')) {
+ /* '.' and '' contribute nothing to a normalized path */
+ continue;
+ }
+
+ if (path_component.len == 2 && *path_component.ptr == '.' && *(path_component.ptr + 1) == '.') {
+ /* '..' causes us to remove the last valid path component */
+ aws_array_list_pop_back(&normalized_split);
+ } else {
+ aws_array_list_push_back(&normalized_split, &path_component);
+ }
+ }
+
+ /*
+ * Special case preserve whether or not the path ended with a '/'
+ */
+ bool ends_with_slash = raw_path->len > 0 && raw_path->ptr[raw_path->len - 1] == '/';
+
+ /*
+ * Paths always start with a single '/'
+ */
+ if (aws_byte_buf_append_byte_dynamic(dest, '/')) {
+ goto cleanup;
+ }
+
+ /*
+ * build the final normalized path from the normalized split by joining
+ * the components together with '/'
+ */
+ const size_t normalized_split_count = aws_array_list_length(&normalized_split);
+ for (size_t i = 0; i < normalized_split_count; ++i) {
+ struct aws_byte_cursor normalized_path_component;
+ AWS_ZERO_STRUCT(normalized_path_component);
+ if (aws_array_list_get_at(&normalized_split, &normalized_path_component, i)) {
+ goto cleanup;
+ }
+
+ if (aws_byte_buf_append_dynamic(dest, &normalized_path_component)) {
+ goto cleanup;
+ }
+
+ if (i + 1 < normalized_split_count || ends_with_slash) {
+ if (aws_byte_buf_append_byte_dynamic(dest, '/')) {
+ goto cleanup;
+ }
+ }
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_array_list_clean_up(&raw_split);
+ aws_array_list_clean_up(&normalized_split);
+
+ return result;
+}
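+
+/*
+ * A few worked examples of the normalization above (trailing-slash behavior
+ * preserved as described):
+ *
+ *   "/a/./b/../c"  ->  "/a/c"
+ *   "//a///b/"     ->  "/a/b/"
+ *   ".."           ->  "/"
+ */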
+
+static int s_append_canonical_path(const struct aws_uri *uri, struct aws_signing_state_aws *state) {
+ const struct aws_signing_config_aws *config = &state->config;
+ struct aws_byte_buf *canonical_request_buffer = &state->canonical_request;
+ struct aws_allocator *allocator = state->allocator;
+ int result = AWS_OP_ERR;
+
+ /*
+ * Put this at function global scope so that it gets cleaned up even though it's only used inside
+ * a single branch. Allows error handling and cleanup to follow the pattern established
+ * throughout this file.
+ */
+ struct aws_byte_buf normalized_path;
+ AWS_ZERO_STRUCT(normalized_path);
+
+ /*
+ * We assume the request's uri path has already been encoded once (in order to go out on the wire).
+ * Some services do not decode the path before performing the sig v4 calculation, resulting in the
+ * service actually performing sigv4 on a double-encoding of the path. In order to match those
+ * services, we must double encode in our calculation as well.
+ */
+ if (config->flags.use_double_uri_encode) {
+ struct aws_byte_cursor path_cursor;
+
+ /*
+ * We need to transform the normalized path, so we can't just append it into the canonical
+ * request. Instead we append it into a temporary buffer and perform the transformation from
+ * it.
+ *
+ * All this does is skip the temporary normalized path in the case where we don't need to
+ * double encode.
+ */
+ if (config->flags.should_normalize_uri_path) {
+ if (aws_byte_buf_init(&normalized_path, state->allocator, uri->path.len)) {
+ goto cleanup;
+ }
+
+ if (s_append_normalized_path(&uri->path, allocator, &normalized_path)) {
+ goto cleanup;
+ }
+
+ path_cursor = aws_byte_cursor_from_buf(&normalized_path);
+ } else {
+ path_cursor = uri->path;
+ }
+
+ if (aws_byte_buf_append_encoding_uri_path(canonical_request_buffer, &path_cursor)) {
+ goto cleanup;
+ }
+ } else {
+ /*
+ * If we don't need to perform any kind of transformation on the normalized path, just append it directly
+ * into the canonical request buffer
+ */
+ if (config->flags.should_normalize_uri_path) {
+ if (s_append_normalized_path(&uri->path, allocator, canonical_request_buffer)) {
+ goto cleanup;
+ }
+ } else {
+ if (aws_byte_buf_append_dynamic(canonical_request_buffer, &uri->path)) {
+ goto cleanup;
+ }
+ }
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(canonical_request_buffer, '\n')) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_byte_buf_clean_up(&normalized_path);
+
+ return result;
+}
+
+/*
+ * URI-encoded query params are compared first by key, then by value
+ */
+int s_canonical_query_param_comparator(const void *lhs, const void *rhs) {
+ const struct aws_uri_param *left_param = lhs;
+ const struct aws_uri_param *right_param = rhs;
+
+ int key_compare = aws_byte_cursor_compare_lexical(&left_param->key, &right_param->key);
+ if (key_compare != 0) {
+ return key_compare;
+ }
+
+ return aws_byte_cursor_compare_lexical(&left_param->value, &right_param->value);
+}
+
+/*
+ * We need to sort the headers in a stable fashion, but the default sorting methods available in the c library are not
+ * guaranteed to be stable. We can make the sort stable by instead sorting a wrapper object that includes the original
+ * index of the wrapped object and using that index to break lexical ties.
+ *
+ * We sort a copy of the header (rather than pointers) so that we can easily inject secondary headers into
+ * the canonical request.
+ */
+struct stable_header {
+ struct aws_signable_property_list_pair header;
+ size_t original_index;
+};
+
+int s_canonical_header_comparator(const void *lhs, const void *rhs) {
+ const struct stable_header *left_header = lhs;
+ const struct stable_header *right_header = rhs;
+
+ int result = aws_byte_cursor_compare_lookup(
+ &left_header->header.name, &right_header->header.name, aws_lookup_table_to_lower_get());
+ if (result != 0) {
+ return result;
+ }
+
+ /* they're the same header, use the original index to keep the sort stable */
+ if (left_header->original_index < right_header->original_index) {
+ return -1;
+ }
+
+ /* equality should never happen */
+ AWS_ASSERT(left_header->original_index > right_header->original_index);
+
+ return 1;
+}
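+
+/*
+ * Example of the stable tie-break: given headers in request order
+ *   ("Host", "h"), ("x-amz-meta-a", "1"), ("X-Amz-Meta-A", "2"),
+ * the case-insensitive sort groups the two x-amz-meta-a entries together and the
+ * original_index comparison keeps "1" before "2", so repeated values are later
+ * joined in the order they appeared on the request.
+ */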
+
+/**
+ * Given URI-encoded query param, write it to canonical buffer.
+ */
+static int s_append_canonical_query_param(struct aws_uri_param *encoded_param, struct aws_byte_buf *buffer) {
+ if (aws_byte_buf_append_dynamic(buffer, &encoded_param->key)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(buffer, '=')) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_dynamic(buffer, &encoded_param->value)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Given unencoded authorization query param:
+ * Add it, URI-encoded to final signing result (to be added to signable later).
+ */
+static int s_add_query_param_to_signing_result(
+ struct aws_signing_state_aws *state,
+ const struct aws_uri_param *unencoded_param) {
+ /* URI-Encode, and add to final signing result */
+ state->scratch_buf.len = 0;
+ if (aws_byte_buf_append_encoding_uri_param(&state->scratch_buf, &unencoded_param->key)) {
+ return AWS_OP_ERR;
+ }
+ size_t key_len = state->scratch_buf.len;
+ if (aws_byte_buf_append_encoding_uri_param(&state->scratch_buf, &unencoded_param->value)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_cursor encoded_val = aws_byte_cursor_from_buf(&state->scratch_buf);
+ struct aws_byte_cursor encoded_key = aws_byte_cursor_advance(&encoded_val, key_len);
+
+ if (aws_signing_result_append_property_list(
+ &state->result, g_aws_http_query_params_property_list_name, &encoded_key, &encoded_val)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Given unencoded authorization query param:
+ * 1) Add it to list of all unencoded query params (to be canonicalized later).
+ * 2) Add it, URI-encoded to final signing result (to be added to signable later).
+ */
+static int s_add_authorization_query_param(
+ struct aws_signing_state_aws *state,
+ struct aws_array_list *unencoded_auth_params,
+ const struct aws_uri_param *unencoded_auth_param) {
+
+ /* Add to unencoded list */
+ if (aws_array_list_push_back(unencoded_auth_params, unencoded_auth_param)) {
+ return AWS_OP_ERR;
+ }
+
+ return s_add_query_param_to_signing_result(state, unencoded_auth_param);
+}
+
+/*
+ * Checks the header against both an internal skip list as well as an optional user-supplied filter
+ * function. Only sign the header if both functions allow it.
+ */
+static bool s_should_sign_header(struct aws_signing_state_aws *state, struct aws_byte_cursor *name) {
+ if (state->config.should_sign_header) {
+ if (!state->config.should_sign_header(name, state->config.should_sign_header_ud)) {
+ return false;
+ }
+ }
+
+ struct aws_hash_element *element = NULL;
+ if (aws_hash_table_find(&s_skipped_headers, name, &element) == AWS_OP_ERR || element != NULL) {
+ return false;
+ }
+
+ return true;
+}
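+
+/*
+ * Example: with the tables built in aws_signing_init_signing_tables(), headers such
+ * as "User-Agent" or "x-amzn-trace-id" are never signed, and a user-supplied
+ * should_sign_header callback can further exclude headers (say, a hypothetical
+ * "x-internal-debug") by returning false for them.
+ */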
+
+/*
+ * If the auth type was query param then this function adds all the required query params and values with the
+ * exception of X-Amz-Signature (because we're still computing its value) Parameters are added to both the
+ * canonical request and the final signing result.
+ */
+static int s_add_authorization_query_params(
+ struct aws_signing_state_aws *state,
+ struct aws_array_list *unencoded_query_params) {
+
+ if (state->config.signature_type != AWS_ST_HTTP_REQUEST_QUERY_PARAMS) {
+ return AWS_OP_SUCCESS;
+ }
+
+ int result = AWS_OP_ERR;
+
+ /* X-Amz-Algorithm */
+ struct aws_uri_param algorithm_param = {
+ .key = aws_byte_cursor_from_string(g_aws_signing_algorithm_query_param_name),
+ };
+
+ if (s_get_signature_type_cursor(state, &algorithm_param.value)) {
+ goto done;
+ }
+
+ if (s_add_authorization_query_param(state, unencoded_query_params, &algorithm_param)) {
+ goto done;
+ }
+
+ /* X-Amz-Credential */
+ struct aws_uri_param credential_param = {
+ .key = aws_byte_cursor_from_string(g_aws_signing_credential_query_param_name),
+ .value = aws_byte_cursor_from_buf(&state->access_credential_scope),
+ };
+
+ if (s_add_authorization_query_param(state, unencoded_query_params, &credential_param)) {
+ goto done;
+ }
+
+ /* X-Amz-Date */
+ struct aws_uri_param date_param = {
+ .key = aws_byte_cursor_from_string(g_aws_signing_date_name),
+ .value = aws_byte_cursor_from_buf(&state->date),
+ };
+
+ if (s_add_authorization_query_param(state, unencoded_query_params, &date_param)) {
+ goto done;
+ }
+
+ /* X-Amz-SignedHeaders */
+ struct aws_uri_param signed_headers_param = {
+ .key = aws_byte_cursor_from_string(g_aws_signing_signed_headers_query_param_name),
+ .value = aws_byte_cursor_from_buf(&state->signed_headers),
+ };
+
+ if (s_add_authorization_query_param(state, unencoded_query_params, &signed_headers_param)) {
+ goto done;
+ }
+
+ /* X-Amz-Expires */
+ uint64_t expiration_in_seconds = state->config.expiration_in_seconds;
+ if (expiration_in_seconds > 0) {
+ struct aws_uri_param expires_param = {
+ .key = aws_byte_cursor_from_string(g_aws_signing_expires_query_param_name),
+ .value = aws_byte_cursor_from_c_str(state->expiration_array),
+ };
+
+ if (s_add_authorization_query_param(state, unencoded_query_params, &expires_param)) {
+ goto done;
+ }
+ }
+
+ /* X-Amz-Security-token */
+ struct aws_byte_cursor security_token_name_cur = aws_byte_cursor_from_string(g_aws_signing_security_token_name);
+ struct aws_byte_cursor session_token_cursor = aws_credentials_get_session_token(state->config.credentials);
+ if (session_token_cursor.len > 0) {
+ struct aws_uri_param security_token_param = {
+ .key = security_token_name_cur,
+ .value = session_token_cursor,
+ };
+
+ /* If omit_session_token is true, then security token is added to the
+ * final signing result, but is treated as "unsigned" and does not
+ * contribute to the authorization signature */
+ if (state->config.flags.omit_session_token) {
+ if (s_add_query_param_to_signing_result(state, &security_token_param)) {
+ goto done;
+ }
+ } else {
+ if (s_add_authorization_query_param(state, unencoded_query_params, &security_token_param)) {
+ goto done;
+ }
+ }
+ }
+
+ /* X-Amz-Region-Set */
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) {
+ struct aws_uri_param region_set_param = {
+ .key = aws_byte_cursor_from_string(g_aws_signing_region_set_name),
+ .value = state->config.region,
+ };
+
+ if (s_add_authorization_query_param(state, unencoded_query_params, &region_set_param)) {
+ goto done;
+ }
+ }
+
+ /* NOTE: Update MAX_AUTHORIZATION_QUERY_PARAM_COUNT if more params added */
+
+ result = AWS_OP_SUCCESS;
+
+done:
+ return result;
+}
+
+static int s_validate_query_params(struct aws_array_list *unencoded_query_params) {
+ const size_t param_count = aws_array_list_length(unencoded_query_params);
+ for (size_t i = 0; i < param_count; ++i) {
+ struct aws_uri_param param;
+ AWS_ZERO_STRUCT(param);
+ aws_array_list_get_at(unencoded_query_params, &param, i);
+
+ struct aws_hash_element *forbidden_element = NULL;
+ aws_hash_table_find(&s_forbidden_params, &param.key, &forbidden_element);
+
+ if (forbidden_element != NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "AWS authorization query param \"" PRInSTR "\" found in request while signing",
+ AWS_BYTE_CURSOR_PRI(param.key));
+ return aws_raise_error(AWS_AUTH_SIGNING_ILLEGAL_REQUEST_QUERY_PARAM);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Apply or remove URI-encoding to each aws_uri_param in a list.
+ * (new strings are added to temp_strings)
+ * Append function must grow buffer if necessary.
+ */
+static int s_transform_query_params(
+ struct aws_signing_state_aws *state,
+ struct aws_array_list *param_list,
+ struct aws_array_list *temp_strings,
+ int (*byte_buf_append_dynamic_param_fn)(struct aws_byte_buf *, const struct aws_byte_cursor *)) {
+
+ const size_t param_count = aws_array_list_length(param_list);
+ struct aws_uri_param *param = NULL;
+ for (size_t i = 0; i < param_count; ++i) {
+ aws_array_list_get_at_ptr(param_list, (void **)&param, i);
+
+ /* encode/decode key and save string */
+ state->scratch_buf.len = 0;
+ if (byte_buf_append_dynamic_param_fn(&state->scratch_buf, &param->key)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_string *key_str = aws_string_new_from_buf(state->allocator, &state->scratch_buf);
+ if (!key_str) {
+ return AWS_OP_ERR;
+ }
+ if (aws_array_list_push_back(temp_strings, &key_str)) {
+ aws_string_destroy(key_str);
+ return AWS_OP_ERR;
+ }
+
+ /* encode/decode value and save string */
+ state->scratch_buf.len = 0;
+ if (byte_buf_append_dynamic_param_fn(&state->scratch_buf, &param->value)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_string *value_str = aws_string_new_from_buf(state->allocator, &state->scratch_buf);
+ if (!value_str) {
+ return AWS_OP_ERR;
+ }
+ if (aws_array_list_push_back(temp_strings, &value_str)) {
+ aws_string_destroy(value_str);
+ return AWS_OP_ERR;
+ }
+
+ /* save encoded/decoded param */
+ param->key = aws_byte_cursor_from_string(key_str);
+ param->value = aws_byte_cursor_from_string(value_str);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Adds the full canonical query string to the canonical request.
+ * Note that aws-c-auth takes query params from the URI, so they should already be URI-encoded.
+ * To ensure that the signature uses "canonical" URI-encoding, we decode and then re-encode the params.
+ */
+static int s_append_canonical_query_string(struct aws_uri *uri, struct aws_signing_state_aws *state) {
+ struct aws_allocator *allocator = state->allocator;
+ struct aws_byte_buf *canonical_request_buffer = &state->canonical_request;
+
+ int result = AWS_OP_ERR;
+ struct aws_array_list query_params;
+ AWS_ZERO_STRUCT(query_params);
+ struct aws_array_list temp_strings;
+ AWS_ZERO_STRUCT(temp_strings);
+
+ /* Determine max number of query parameters.
+ * If none, skip to end of function */
+ size_t max_param_count = 0;
+ struct aws_uri_param param_i;
+ AWS_ZERO_STRUCT(param_i);
+ while (aws_uri_query_string_next_param(uri, &param_i)) {
+ ++max_param_count;
+ }
+ if (state->config.signature_type == AWS_ST_HTTP_REQUEST_QUERY_PARAMS) {
+ max_param_count += MAX_AUTHORIZATION_QUERY_PARAM_COUNT;
+ }
+ if (max_param_count == 0) {
+ goto finish;
+ }
+
+ /* Allocate storage for mutable list of query params */
+ if (aws_array_list_init_dynamic(&query_params, allocator, max_param_count, sizeof(struct aws_uri_param))) {
+ goto cleanup;
+ }
+
+ /* Allocate storage for both the decoded, and re-encoded, key and value strings */
+ if (aws_array_list_init_dynamic(
+ &temp_strings, state->allocator, max_param_count * 4, sizeof(struct aws_string *))) {
+ goto cleanup;
+ }
+
+ /* Get existing query params */
+ if (aws_uri_query_string_params(uri, &query_params)) {
+ goto cleanup;
+ }
+
+ /* Remove URI-encoding */
+ if (s_transform_query_params(state, &query_params, &temp_strings, aws_byte_buf_append_decoding_uri)) {
+ goto cleanup;
+ }
+
+ /* Validate existing query params */
+ if (s_validate_query_params(&query_params)) {
+ goto cleanup;
+ }
+
+ /* Add authorization query params */
+ if (s_add_authorization_query_params(state, &query_params)) {
+ goto cleanup;
+ }
+
+ /* Apply canonical URI-encoding to the query params */
+ if (s_transform_query_params(state, &query_params, &temp_strings, aws_byte_buf_append_encoding_uri_param)) {
+ goto cleanup;
+ }
+
+ const size_t param_count = aws_array_list_length(&query_params);
+
+ /* Sort the encoded params and append to canonical request */
+ qsort(query_params.data, param_count, sizeof(struct aws_uri_param), s_canonical_query_param_comparator);
+ for (size_t i = 0; i < param_count; ++i) {
+ struct aws_uri_param param;
+ AWS_ZERO_STRUCT(param);
+ if (aws_array_list_get_at(&query_params, &param, i)) {
+ goto cleanup;
+ }
+
+ if (s_append_canonical_query_param(&param, canonical_request_buffer)) {
+ goto cleanup;
+ }
+
+ if (i + 1 < param_count) {
+ if (aws_byte_buf_append_byte_dynamic(canonical_request_buffer, '&')) {
+ goto cleanup;
+ }
+ }
+ }
+
+finish:
+ if (aws_byte_buf_append_byte_dynamic(canonical_request_buffer, '\n')) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_array_list_clean_up(&query_params);
+
+ if (aws_array_list_is_valid(&temp_strings)) {
+ const size_t string_count = aws_array_list_length(&temp_strings);
+ for (size_t i = 0; i < string_count; ++i) {
+ struct aws_string *string = NULL;
+ aws_array_list_get_at(&temp_strings, &string, i);
+ aws_string_destroy(string);
+ }
+ aws_array_list_clean_up(&temp_strings);
+ }
+
+ return result;
+}
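+
+/*
+ * Worked example (sketch): an incoming query string of
+ *   "b=banana&a=%7E"
+ * is decoded to ("b", "banana") and ("a", "~"), re-encoded with canonical
+ * URI-encoding (unreserved characters such as '~' stay literal), sorted by key and
+ * then value, and appended as
+ *   "a=~&b=banana"
+ * followed by the terminating '\n' written above.
+ */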
+
+/*
+ * It is unclear from the spec (and not resolved by the tests) whether other forms of whitespace (\t \v) should be
+ * included in the trimming done to headers
+ */
+static bool s_is_space(uint8_t value) {
+ return aws_isspace(value);
+}
+
+/*
+ * Appends a single header key-value pair to the canonical request. Multi-line and repeat headers make this more
+ * complicated than you'd expect.
+ *
+ * We call this function on a sorted collection, so header repeats are guaranteed to be consecutive.
+ *
+ * In particular, there are two cases:
+ * (1) This is a header whose name hasn't been seen before, in which case we start a new line and append both name and
+ * value. (2) This is a header we've previously seen, just append the value.
+ *
+ * The fact that we can't append the '\n' until we've moved to a new header name also complicates the logic.
+ *
+ * This function appends to a state buffer rather than the canonical request. This allows us to calculate the signed
+ * headers (so that it can go into the query param if needed) before the query params are put into the canonical
+ * request.
+ */
+static int s_append_canonical_header(
+ struct aws_signing_state_aws *state,
+ struct aws_signable_property_list_pair *header,
+ const struct aws_byte_cursor *last_seen_header_name) {
+
+ struct aws_byte_buf *canonical_header_buffer = &state->canonical_header_block;
+ struct aws_byte_buf *signed_headers_buffer = &state->signed_headers;
+ const uint8_t *to_lower_table = aws_lookup_table_to_lower_get();
+
+ /*
+ * Write to the signed_headers shared state for later use, copy
+ * to canonical header buffer as well
+ */
+ if (last_seen_header_name == NULL ||
+ aws_byte_cursor_compare_lookup(last_seen_header_name, &header->name, aws_lookup_table_to_lower_get()) != 0) {
+ /*
+ * The headers arrive in sorted order, so we know we've never seen this header before
+ */
+ if (last_seen_header_name) {
+ /*
+ * there's a previous header, add appropriate separator in both canonical header buffer
+ * and signed headers buffer
+ */
+ if (aws_byte_buf_append_byte_dynamic(canonical_header_buffer, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(signed_headers_buffer, ';')) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* add it to the signed headers buffer */
+ if (s_append_with_lookup(signed_headers_buffer, &header->name, to_lower_table)) {
+ return AWS_OP_ERR;
+ }
+
+ /* add it to the canonical header buffer */
+ if (s_append_with_lookup(canonical_header_buffer, &header->name, to_lower_table)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(canonical_header_buffer, ':')) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ /* we've seen this header before, add a comma before appending the value */
+ if (aws_byte_buf_append_byte_dynamic(canonical_header_buffer, ',')) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /*
+ * This is the unsafe, non-append write of the header value where consecutive whitespace
+ * is squashed into a single space. Since this can only shrink the value length and we've
+ * already reserved enough to hold the value, we can do raw buffer writes safely without
+ * worrying about capacity.
+ */
+ struct aws_byte_cursor trimmed_value = aws_byte_cursor_trim_pred(&header->value, s_is_space);
+
+ /* raw, unsafe write loop */
+ bool in_space = false;
+ uint8_t *start_ptr = trimmed_value.ptr;
+ uint8_t *end_ptr = trimmed_value.ptr + trimmed_value.len;
+ uint8_t *dest_ptr = canonical_header_buffer->buffer + canonical_header_buffer->len;
+ while (start_ptr < end_ptr) {
+ uint8_t value = *start_ptr;
+ bool is_space = s_is_space(value);
+ if (is_space) {
+ value = ' ';
+ }
+
+ if (!is_space || !in_space) {
+ *dest_ptr++ = value;
+ ++canonical_header_buffer->len;
+ }
+
+ in_space = is_space;
+
+ ++start_ptr;
+ }
+
+ return AWS_OP_SUCCESS;
+}
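+
+/*
+ * Folding example (sketch): request headers
+ *   "My-Header:  a   b " and "my-header: c"
+ * arrive adjacent after sorting and are emitted into the canonical block as
+ *   my-header:a b,c
+ * i.e. lowercased name, values trimmed, interior whitespace runs collapsed to a
+ * single space, and repeated values joined with ','.
+ */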
+
+/* Add header to stable_header_list to be canonicalized, and also to final signing result */
+static int s_add_authorization_header(
+ struct aws_signing_state_aws *state,
+ struct aws_array_list *stable_header_list,
+ size_t *out_required_capacity,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
+
+ /* Add to stable_header_list to be canonicalized */
+ struct stable_header stable_header = {
+ .original_index = aws_array_list_length(stable_header_list),
+ .header =
+ {
+ .name = name,
+ .value = value,
+ },
+ };
+ if (aws_array_list_push_back(stable_header_list, &stable_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Add to signing result */
+ if (aws_signing_result_append_property_list(&state->result, g_aws_http_headers_property_list_name, &name, &value)) {
+ return AWS_OP_ERR;
+ }
+
+ *out_required_capacity += name.len + value.len;
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Builds the list of header name-value pairs to be added to the canonical request. The list members are
+ * actually the header wrapper structs that allow for stable sorting.
+ *
+ * Takes the original request headers, adds X-Amz-Date, and optionally, x-amz-content-sha256
+ *
+ * If we add filtering/exclusion support, this is where it would go
+ */
+static int s_build_canonical_stable_header_list(
+ struct aws_signing_state_aws *state,
+ struct aws_array_list *stable_header_list,
+ size_t *out_required_capacity) {
+
+ AWS_ASSERT(aws_array_list_length(stable_header_list) == 0);
+
+ *out_required_capacity = 0;
+ const struct aws_signable *signable = state->signable;
+
+ /*
+ * request headers
+ */
+ struct aws_array_list *signable_header_list = NULL;
+ if (aws_signable_get_property_list(signable, g_aws_http_headers_property_list_name, &signable_header_list)) {
+ return AWS_OP_ERR;
+ }
+
+ const size_t signable_header_count = aws_array_list_length(signable_header_list);
+ for (size_t i = 0; i < signable_header_count; ++i) {
+ struct stable_header header_wrapper;
+ AWS_ZERO_STRUCT(header_wrapper);
+ header_wrapper.original_index = i;
+
+ if (aws_array_list_get_at(signable_header_list, &header_wrapper.header, i)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor *header_name_cursor = &header_wrapper.header.name;
+ if (!s_should_sign_header(state, header_name_cursor)) {
+ continue;
+ }
+
+ *out_required_capacity += header_wrapper.header.name.len + header_wrapper.header.value.len;
+
+ if (aws_array_list_push_back(stable_header_list, &header_wrapper)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* If doing HEADERS signature type, add required X-Amz-*** headers.
+ * NOTE: For QUERY_PARAMS signature type, X-Amz-*** params are added to query string instead. */
+ if (state->config.signature_type == AWS_ST_HTTP_REQUEST_HEADERS) {
+
+ /*
+ * X-Amz-Security-Token
+ */
+ struct aws_byte_cursor session_token_cursor = aws_credentials_get_session_token(state->config.credentials);
+ if (session_token_cursor.len > 0) {
+ /* Note that if omit_session_token is true, it is added to final
+ * signing result but NOT included in canonicalized headers. */
+ if (state->config.flags.omit_session_token) {
+ if (aws_signing_result_append_property_list(
+ &state->result,
+ g_aws_http_headers_property_list_name,
+ &s_amz_security_token_header_name,
+ &session_token_cursor)) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ if (s_add_authorization_header(
+ state,
+ stable_header_list,
+ out_required_capacity,
+ s_amz_security_token_header_name,
+ session_token_cursor)) {
+ return AWS_OP_ERR;
+ }
+ }
+ }
+
+ /*
+ * X-Amz-Date
+ */
+ if (s_add_authorization_header(
+ state,
+ stable_header_list,
+ out_required_capacity,
+ s_amz_date_header_name,
+ aws_byte_cursor_from_buf(&state->date))) {
+ return AWS_OP_ERR;
+ }
+
+ *out_required_capacity += g_aws_signing_date_name->len + state->date.len;
+
+ /*
+ * x-amz-region-set
+ */
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) {
+ if (s_add_authorization_header(
+ state,
+ stable_header_list,
+ out_required_capacity,
+ aws_byte_cursor_from_string(g_aws_signing_region_set_name),
+ state->config.region)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /*
+ * x-amz-content-sha256 (optional)
+ */
+ if (state->config.signed_body_header == AWS_SBHT_X_AMZ_CONTENT_SHA256) {
+ if (s_add_authorization_header(
+ state,
+ stable_header_list,
+ out_required_capacity,
+ s_amz_content_sha256_header_name,
+ aws_byte_cursor_from_buf(&state->payload_hash))) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* NOTE: Update MAX_AUTHORIZATION_HEADER_COUNT if more headers added */
+ }
+
+ *out_required_capacity += aws_array_list_length(stable_header_list) * 2; /* ':' + '\n' per header */
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_validate_signable_header_list(struct aws_array_list *header_list) {
+ const size_t header_count = aws_array_list_length(header_list);
+ for (size_t i = 0; i < header_count; ++i) {
+ struct aws_signable_property_list_pair header;
+ AWS_ZERO_STRUCT(header);
+
+ aws_array_list_get_at(header_list, &header, i);
+
+ struct aws_hash_element *forbidden_element = NULL;
+ aws_hash_table_find(&s_forbidden_headers, &header.name, &forbidden_element);
+
+ if (forbidden_element != NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "AWS authorization header \"" PRInSTR "\" found in request while signing",
+ AWS_BYTE_CURSOR_PRI(header.name));
+ return aws_raise_error(AWS_AUTH_SIGNING_ILLEGAL_REQUEST_HEADER);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_canonicalize_headers(struct aws_signing_state_aws *state) {
+ const struct aws_signable *signable = state->signable;
+ struct aws_allocator *allocator = state->allocator;
+ struct aws_byte_buf *header_buffer = &state->canonical_header_block;
+
+ AWS_ASSERT(header_buffer->len == 0);
+
+ int result = AWS_OP_ERR;
+
+ struct aws_array_list *signable_header_list = NULL;
+ if (aws_signable_get_property_list(signable, g_aws_http_headers_property_list_name, &signable_header_list)) {
+ return AWS_OP_ERR;
+ }
+
+ if (s_validate_signable_header_list(signable_header_list)) {
+ return AWS_OP_ERR;
+ }
+
+ const size_t signable_header_count = aws_array_list_length(signable_header_list);
+
+ /* Overestimate capacity to avoid re-allocation */
+ size_t headers_reserve_count = signable_header_count + MAX_AUTHORIZATION_HEADER_COUNT;
+
+ struct aws_array_list headers;
+ if (aws_array_list_init_dynamic(&headers, allocator, headers_reserve_count, sizeof(struct stable_header))) {
+ return AWS_OP_ERR;
+ }
+
+ size_t header_buffer_reserve_size = 0;
+ if (s_build_canonical_stable_header_list(state, &headers, &header_buffer_reserve_size)) {
+ goto on_cleanup;
+ }
+
+ /*
+ * Make sure there's enough room in the request buffer to hold a conservative overestimate of the room
+ * needed for canonical headers. There are places we'll be using an append function that does not resize.
+ */
+ if (aws_byte_buf_reserve(header_buffer, header_buffer_reserve_size)) {
+ goto on_cleanup;
+ }
+
+ const size_t header_count = aws_array_list_length(&headers);
+
+ /* Sort the arraylist via lowercase header name and original position */
+ qsort(headers.data, header_count, sizeof(struct stable_header), s_canonical_header_comparator);
+
+ /* Iterate the sorted list, writing the canonical representation into the request */
+ struct aws_byte_cursor *last_seen_header_name = NULL;
+ for (size_t i = 0; i < header_count; ++i) {
+ struct stable_header *wrapper = NULL;
+ if (aws_array_list_get_at_ptr(&headers, (void **)&wrapper, i)) {
+ goto on_cleanup;
+ }
+
+ if (s_append_canonical_header(state, &wrapper->header, last_seen_header_name)) {
+ goto on_cleanup;
+ }
+
+ last_seen_header_name = &wrapper->header.name;
+ }
+
+ /* End the last header line. The count can only be zero if someone attempts to canonicalize an
+ * empty list of trailing headers; for a full request there is always at least one entry (X-Amz-Date). */
+ if (header_count > 0) {
+ if (aws_byte_buf_append_byte_dynamic(header_buffer, '\n')) {
+ goto on_cleanup;
+ }
+ }
+
+ result = AWS_OP_SUCCESS;
+
+on_cleanup:
+
+ aws_array_list_clean_up(&headers);
+
+ return result;
+}
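+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. The canonical header block
+ * written above is one "lowercase-name:value\n" entry per signed header, sorted by lowercase name
+ * (the SigV4 rules also trim surrounding whitespace in values). The hypothetical helper below simply
+ * returns what that block looks like for an invented request carrying Host and X-Amz-Date headers.
+ */
+static const char *s_example_canonical_header_block(void) {
+    return "host:example.amazonaws.com\n"
+           "x-amz-date:20150830T123600Z\n";
+}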
+
+static int s_append_signed_headers(struct aws_signing_state_aws *state) {
+
+ struct aws_byte_buf *header_buffer = &state->canonical_header_block;
+ struct aws_byte_buf *signed_headers_buffer = &state->signed_headers;
+
+ if (aws_byte_buf_append_byte_dynamic(header_buffer, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor signed_headers_cursor = aws_byte_cursor_from_buf(signed_headers_buffer);
+ if (aws_byte_buf_append_dynamic(header_buffer, &signed_headers_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(header_buffer, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Top-level-ish function to write the canonical header set into a buffer as well as the signed header names
+ * into a separate buffer. We do this very early in the canonical request construction process so that the
+ * query params processing has the signed header names available to it.
+ */
+static int s_build_canonical_headers(struct aws_signing_state_aws *state) {
+ if (s_canonicalize_headers(state)) {
+ return AWS_OP_ERR;
+ }
+ if (s_append_signed_headers(state)) {
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Computes the canonical request payload value.
+ */
+static int s_build_canonical_payload(struct aws_signing_state_aws *state) {
+ const struct aws_signable *signable = state->signable;
+ struct aws_allocator *allocator = state->allocator;
+ struct aws_byte_buf *payload_hash_buffer = &state->payload_hash;
+
+ AWS_ASSERT(payload_hash_buffer->len == 0);
+
+ struct aws_byte_buf body_buffer;
+ AWS_ZERO_STRUCT(body_buffer);
+ struct aws_byte_buf digest_buffer;
+ AWS_ZERO_STRUCT(digest_buffer);
+
+ struct aws_hash *hash = NULL;
+
+ int result = AWS_OP_ERR;
+ if (state->config.signed_body_value.len == 0) {
+ /* No value provided by user, so we must calculate it */
+ hash = aws_sha256_new(allocator);
+ if (hash == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_init(&body_buffer, allocator, BODY_READ_BUFFER_SIZE) ||
+ aws_byte_buf_init(&digest_buffer, allocator, AWS_SHA256_LEN)) {
+ goto on_cleanup;
+ }
+
+ struct aws_input_stream *payload_stream = NULL;
+ if (aws_signable_get_payload_stream(signable, &payload_stream)) {
+ goto on_cleanup;
+ }
+
+ if (payload_stream != NULL) {
+ if (aws_input_stream_seek(payload_stream, 0, AWS_SSB_BEGIN)) {
+ goto on_cleanup;
+ }
+
+ struct aws_stream_status payload_status;
+ AWS_ZERO_STRUCT(payload_status);
+
+ while (!payload_status.is_end_of_stream) {
+ /* reset the temporary body buffer; we can calculate the hash in window chunks */
+ body_buffer.len = 0;
+ if (aws_input_stream_read(payload_stream, &body_buffer)) {
+ goto on_cleanup;
+ }
+
+ if (body_buffer.len > 0) {
+ struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&body_buffer);
+ aws_hash_update(hash, &body_cursor);
+ }
+
+ if (aws_input_stream_get_status(payload_stream, &payload_status)) {
+ goto on_cleanup;
+ }
+ }
+
+ /* reset the input stream for sending */
+ if (aws_input_stream_seek(payload_stream, 0, AWS_SSB_BEGIN)) {
+ goto on_cleanup;
+ }
+ }
+
+ if (aws_hash_finalize(hash, &digest_buffer, 0)) {
+ goto on_cleanup;
+ }
+
+ struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest_buffer);
+ if (aws_hex_encode_append_dynamic(&digest_cursor, payload_hash_buffer)) {
+ goto on_cleanup;
+ }
+ } else {
+ /* Use value provided in config */
+ if (aws_byte_buf_append_dynamic(payload_hash_buffer, &state->config.signed_body_value)) {
+ goto on_cleanup;
+ }
+ }
+
+ result = AWS_OP_SUCCESS;
+
+on_cleanup:
+
+ aws_byte_buf_clean_up(&digest_buffer);
+ aws_byte_buf_clean_up(&body_buffer);
+
+ if (hash) {
+ aws_hash_destroy(hash);
+ }
+
+ return result;
+}
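+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. It restates the
+ * "hex(SHA256(body))" computation above for a payload that is already in memory, using only calls
+ * already present in this file (aws_sha256_compute, aws_hex_encode_append_dynamic); the helper name
+ * is hypothetical. For an empty payload the result is the well-known constant
+ * "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".
+ */
+static int s_example_hash_payload_hex(
+    struct aws_allocator *allocator,
+    struct aws_byte_cursor payload,
+    struct aws_byte_buf *out_hex) {
+
+    int result = AWS_OP_ERR;
+
+    struct aws_byte_buf digest;
+    AWS_ZERO_STRUCT(digest);
+
+    if (aws_byte_buf_init(&digest, allocator, AWS_SHA256_LEN)) {
+        return AWS_OP_ERR;
+    }
+
+    /* hash the whole payload in one shot; the real code above streams it in BODY_READ_BUFFER_SIZE chunks */
+    if (aws_sha256_compute(allocator, &payload, &digest, 0)) {
+        goto cleanup;
+    }
+
+    /* hex-encode the 32-byte digest into the caller's buffer */
+    struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest);
+    if (aws_hex_encode_append_dynamic(&digest_cursor, out_hex)) {
+        goto cleanup;
+    }
+
+    result = AWS_OP_SUCCESS;
+
+cleanup:
+    aws_byte_buf_clean_up(&digest);
+    return result;
+}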
+
+/*
+ * Copies the previously-computed payload hash into the canonical request buffer
+ */
+static int s_append_canonical_payload_hash(struct aws_signing_state_aws *state) {
+ struct aws_byte_buf *canonical_request_buffer = &state->canonical_request;
+ struct aws_byte_buf *payload_hash_buffer = &state->payload_hash;
+
+ /*
+ * Copy the hex-encoded payload hash into the canonical request
+ */
+ struct aws_byte_cursor payload_hash_cursor = aws_byte_cursor_from_buf(payload_hash_buffer);
+ if (aws_byte_buf_append_dynamic(canonical_request_buffer, &payload_hash_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Sigv4 spec claims a newline should be included after the payload, but the implementation doesn't do this */
+
+ return AWS_OP_SUCCESS;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_credential_scope_sigv4_terminator, "aws4_request");
+
+static int s_append_credential_scope_terminator(enum aws_signing_algorithm algorithm, struct aws_byte_buf *dest) {
+ struct aws_byte_cursor terminator_cursor;
+
+ switch (algorithm) {
+ case AWS_SIGNING_ALGORITHM_V4:
+ case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC:
+ terminator_cursor = aws_byte_cursor_from_string(s_credential_scope_sigv4_terminator);
+ break;
+
+ default:
+ return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM);
+ }
+
+ return aws_byte_buf_append_dynamic(dest, &terminator_cursor);
+}
+
+/*
+ * Builds the credential scope string by appending a bunch of things together:
+ * Date, region, service, algorithm terminator
+ */
+static int s_build_credential_scope(struct aws_signing_state_aws *state) {
+ AWS_ASSERT(state->credential_scope.len == 0);
+
+ const struct aws_signing_config_aws *config = &state->config;
+ struct aws_byte_buf *dest = &state->credential_scope;
+
+ /*
+ * date output uses the non-dynamic append, so make sure there's enough room first
+ */
+ if (aws_byte_buf_reserve_relative(dest, AWS_DATE_TIME_STR_MAX_LEN)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_date_time_to_utc_time_short_str(&config->date, AWS_DATE_FORMAT_ISO_8601_BASIC, dest)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '/')) {
+ return AWS_OP_ERR;
+ }
+
+ if (config->algorithm != AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) {
+ if (aws_byte_buf_append_dynamic(dest, &config->region)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '/')) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ if (aws_byte_buf_append_dynamic(dest, &config->service)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '/')) {
+ return AWS_OP_ERR;
+ }
+
+ if (s_append_credential_scope_terminator(state->config.algorithm, dest)) {
+ return AWS_OP_ERR;
+ }
+
+ /* While we're at it, build the access key / credential scope string, which is used during query param signing */
+ struct aws_byte_cursor access_key_cursor = aws_credentials_get_access_key_id(state->config.credentials);
+ if (aws_byte_buf_append_dynamic(&state->access_credential_scope, &access_key_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(&state->access_credential_scope, '/')) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor credential_scope_cursor = aws_byte_cursor_from_buf(&state->credential_scope);
+ if (aws_byte_buf_append_dynamic(&state->access_credential_scope, &credential_scope_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
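+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. With the example values used
+ * elsewhere in this file's comments, the two buffers built above come out as the literals below; for
+ * SigV4A the region component is omitted from the scope. The helper names are hypothetical.
+ */
+static const char *s_example_credential_scope(void) {
+    /* date '/' region '/' service '/' terminator */
+    return "20150830/us-east-1/service/aws4_request";
+}
+
+static const char *s_example_access_credential_scope(void) {
+    /* access key id '/' credential scope, used during query param signing */
+    return "AKIDEXAMPLE/20150830/us-east-1/service/aws4_request";
+}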
+
+/*
+ * Hashes the canonical request and stores its hex representation
+ */
+static int s_build_canonical_request_hash(struct aws_signing_state_aws *state) {
+ struct aws_allocator *allocator = state->allocator;
+ struct aws_byte_buf *dest = &state->string_to_sign_payload;
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf digest_buffer;
+ AWS_ZERO_STRUCT(digest_buffer);
+
+ if (aws_byte_buf_init(&digest_buffer, allocator, AWS_SHA256_LEN)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor canonical_request_cursor = aws_byte_cursor_from_buf(&state->canonical_request);
+ if (aws_sha256_compute(allocator, &canonical_request_cursor, &digest_buffer, 0)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest_buffer);
+ if (aws_hex_encode_append_dynamic(&digest_cursor, dest)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+ aws_byte_buf_clean_up(&digest_buffer);
+
+ return result;
+}
+
+/**
+ * Note that there is no canonical request for event signing.
+ * The string to sign for events is detailed here:
+ * https://docs.aws.amazon.com/transcribe/latest/dg/streaming-http2.html
+ *
+ * String stringToSign =
+ * "AWS4-HMAC-SHA256" +
+ * "\n" +
+ * DateTime +
+ * "\n" +
+ * Keypath +
+ * "\n" +
+ * Hex(priorSignature) +
+ * "\n" +
+ * HexHash(nonSignatureHeaders) +
+ * "\n" +
+ * HexHash(payload);
+ *
+ * This function will build the string_to_sign_payload,
+ * aka "everything after the Keypath line in the string to sign".
+ */
+static int s_build_string_to_sign_payload_for_event(struct aws_signing_state_aws *state) {
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf *dest = &state->string_to_sign_payload;
+
+ /*
+ * Hex(priorSignature) + "\n"
+ *
+ * Fortunately, the prior signature is already hex.
+ */
+ struct aws_byte_cursor prev_signature_cursor;
+ AWS_ZERO_STRUCT(prev_signature_cursor);
+ if (aws_signable_get_property(state->signable, g_aws_previous_signature_property_name, &prev_signature_cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING, "(id=%p) Event signable missing previous signature property", (void *)state->signable);
+ return aws_raise_error(AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE);
+ }
+
+ /* strip any padding (AWS_SIGV4A_SIGNATURE_PADDING_BYTE) from the previous signature */
+ prev_signature_cursor = aws_trim_padded_sigv4a_signature(prev_signature_cursor);
+
+ if (aws_byte_buf_append_dynamic(dest, &prev_signature_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ /*
+ * HexHash(nonSignatureHeaders) + "\n"
+ *
+ * nonSignatureHeaders is just the ":date" header.
+ * We need to encode these headers in event-stream format, as described here:
+ * https://docs.aws.amazon.com/transcribe/latest/dg/streaming-setting-up.html
+ *
+ * | Header Name Length | Header Name | Header Value Type | Header Value Length | Header Value |
+ * | 1 byte | N bytes | 1 byte | 2 bytes | N bytes |
+ */
+ struct aws_byte_buf date_buffer;
+ AWS_ZERO_STRUCT(date_buffer);
+ struct aws_byte_buf digest_buffer;
+ AWS_ZERO_STRUCT(digest_buffer);
+
+ if (aws_byte_buf_init(&date_buffer, state->allocator, 15)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor header_name = aws_byte_cursor_from_c_str(":date");
+ AWS_FATAL_ASSERT(aws_byte_buf_write_u8(&date_buffer, (uint8_t)header_name.len));
+ if (aws_byte_buf_append_dynamic(&date_buffer, &header_name)) {
+ goto cleanup;
+ }
+
+ /* Type of timestamp header */
+ AWS_FATAL_ASSERT(aws_byte_buf_write_u8(&date_buffer, 8 /*AWS_EVENT_STREAM_HEADER_TIMESTAMP*/));
+ AWS_FATAL_ASSERT(aws_byte_buf_write_be64(&date_buffer, (int64_t)aws_date_time_as_millis(&state->config.date)));
+
+ /* calculate sha 256 of encoded buffer */
+ if (aws_byte_buf_init(&digest_buffer, state->allocator, AWS_SHA256_LEN)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(&date_buffer);
+ if (aws_sha256_compute(state->allocator, &date_cursor, &digest_buffer, 0)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest_buffer);
+ if (aws_hex_encode_append_dynamic(&digest_cursor, dest)) {
+ goto cleanup;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ goto cleanup;
+ }
+
+ /*
+ * HexHash(payload);
+ *
+ * The payload was already hashed in an earlier stage
+ */
+ struct aws_byte_cursor current_chunk_hash_cursor = aws_byte_cursor_from_buf(&state->payload_hash);
+ if (aws_byte_buf_append_dynamic(dest, &current_chunk_hash_cursor)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+ aws_byte_buf_clean_up(&date_buffer);
+ aws_byte_buf_clean_up(&digest_buffer);
+
+ return result;
+}
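+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. It repeats the ":date"
+ * encoding from above as a standalone helper so the event-stream wire layout is easy to see:
+ * [1-byte name length][name][1-byte value type, 8 = timestamp][8-byte big-endian milliseconds].
+ * Only calls already used in this file are assumed; the helper name is hypothetical.
+ */
+static int s_example_encode_date_event_header(
+    struct aws_allocator *allocator,
+    uint64_t timestamp_millis,
+    struct aws_byte_buf *out_encoded) {
+
+    /* 1 + 5 + 1 + 8 bytes */
+    if (aws_byte_buf_init(out_encoded, allocator, 15)) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_byte_cursor name = aws_byte_cursor_from_c_str(":date");
+
+    /* 1-byte name length followed by the name itself */
+    if (!aws_byte_buf_write_u8(out_encoded, (uint8_t)name.len) ||
+        aws_byte_buf_append_dynamic(out_encoded, &name)) {
+        goto error;
+    }
+
+    /* 1-byte value type (8 == timestamp) and the 8-byte big-endian value */
+    if (!aws_byte_buf_write_u8(out_encoded, 8) || !aws_byte_buf_write_be64(out_encoded, timestamp_millis)) {
+        goto error;
+    }
+
+    return AWS_OP_SUCCESS;
+
+error:
+    aws_byte_buf_clean_up(out_encoded);
+    return AWS_OP_ERR;
+}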
+
+static int s_build_canonical_request_body_chunk(struct aws_signing_state_aws *state) {
+
+ struct aws_byte_buf *dest = &state->string_to_sign_payload;
+
+ /* previous signature + \n */
+ struct aws_byte_cursor prev_signature_cursor;
+ AWS_ZERO_STRUCT(prev_signature_cursor);
+ if (aws_signable_get_property(state->signable, g_aws_previous_signature_property_name, &prev_signature_cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING, "(id=%p) Chunk signable missing previous signature property", (void *)state->signable);
+ return aws_raise_error(AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE);
+ }
+
+ /* strip any padding (AWS_SIGV4A_SIGNATURE_PADDING_BYTE) from the previous signature */
+ prev_signature_cursor = aws_trim_padded_sigv4a_signature(prev_signature_cursor);
+
+ if (aws_byte_buf_append_dynamic(dest, &prev_signature_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ /* empty hash + \n */
+ if (aws_byte_buf_append_dynamic(dest, &g_aws_signed_body_value_empty_sha256)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ /* current hash */
+ struct aws_byte_cursor current_chunk_hash_cursor = aws_byte_cursor_from_buf(&state->payload_hash);
+ if (aws_byte_buf_append_dynamic(dest, &current_chunk_hash_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
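+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. The string-to-sign payload
+ * assembled above for a body chunk is three lines: the hex signature of the previous chunk, the
+ * SHA-256 of the empty string, and the SHA-256 of the current chunk. The digests in angle brackets
+ * are placeholders; the empty-string digest is the well-known constant.
+ */
+static const char *s_example_chunk_string_to_sign_payload(void) {
+    return "<64 hex chars: previous chunk signature>\n"
+           "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n"
+           "<64 hex chars: SHA-256 of the current chunk body>";
+}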
+
+static int s_build_canonical_request_trailing_headers(struct aws_signing_state_aws *state) {
+
+ struct aws_byte_buf *dest = &state->string_to_sign_payload;
+
+ /* previous signature + \n */
+ struct aws_byte_cursor prev_signature_cursor;
+ AWS_ZERO_STRUCT(prev_signature_cursor);
+ if (aws_signable_get_property(state->signable, g_aws_previous_signature_property_name, &prev_signature_cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) trailing_headers signable missing previous signature property",
+ (void *)state->signable);
+ return aws_raise_error(AWS_AUTH_SIGNING_MISSING_PREVIOUS_SIGNATURE);
+ }
+
+ /* strip any padding (AWS_SIGV4A_SIGNATURE_PADDING_BYTE) from the previous signature */
+ prev_signature_cursor = aws_trim_padded_sigv4a_signature(prev_signature_cursor);
+
+ if (aws_byte_buf_append_dynamic(dest, &prev_signature_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ /* current hash */
+
+ if (s_canonicalize_headers(state)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_cursor header_block_cursor = aws_byte_cursor_from_buf(&state->canonical_header_block);
+ if (aws_byte_buf_append_dynamic(&state->canonical_request, &header_block_cursor)) {
+ return AWS_OP_ERR;
+ }
+ if (s_build_canonical_request_hash(state)) {
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Builds a sigv4-signed canonical request and its hashed value
+ */
+static int s_build_canonical_request_sigv4(struct aws_signing_state_aws *state) {
+ AWS_ASSERT(state->canonical_request.len == 0);
+ AWS_ASSERT(state->payload_hash.len > 0);
+
+ int result = AWS_OP_ERR;
+
+ struct aws_uri uri;
+ AWS_ZERO_STRUCT(uri);
+
+ struct aws_byte_cursor uri_cursor;
+ if (aws_signable_get_property(state->signable, g_aws_http_uri_property_name, &uri_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_uri_init_parse(&uri, state->allocator, &uri_cursor)) {
+ goto cleanup;
+ }
+
+ if (s_build_canonical_headers(state)) {
+ goto cleanup;
+ }
+
+ if (s_append_canonical_method(state)) {
+ goto cleanup;
+ }
+
+ if (s_append_canonical_path(&uri, state)) {
+ goto cleanup;
+ }
+
+ if (s_append_canonical_query_string(&uri, state)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor header_block_cursor = aws_byte_cursor_from_buf(&state->canonical_header_block);
+ if (aws_byte_buf_append_dynamic(&state->canonical_request, &header_block_cursor)) {
+ goto cleanup;
+ }
+
+ if (s_append_canonical_payload_hash(state)) {
+ goto cleanup;
+ }
+
+ if (s_build_canonical_request_hash(state)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_uri_clean_up(&uri);
+
+ return result;
+}
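+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. The canonical request built
+ * above is a line-oriented string: method, canonical path, canonical query string, the canonical
+ * header block (which ends with its own blank line), the signed-header list, and finally the
+ * hex-encoded payload hash with no trailing newline. All values below are invented for illustration.
+ */
+static const char *s_example_canonical_request(void) {
+    return "GET\n"
+           "/\n"
+           "Param1=value1\n"
+           "host:example.amazonaws.com\n"
+           "x-amz-date:20150830T123600Z\n"
+           "\n"
+           "host;x-amz-date\n"
+           "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+}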
+
+/*
+ * The canonical header list is the next-to-the-last line on the canonical request, so split by lines and take
+ * the penultimate value.
+ */
+static struct aws_byte_cursor s_get_signed_headers_from_canonical_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor canonical_request) {
+
+ struct aws_byte_cursor header_cursor;
+ AWS_ZERO_STRUCT(header_cursor);
+
+ struct aws_array_list splits;
+ AWS_ZERO_STRUCT(splits);
+
+ if (aws_array_list_init_dynamic(
+ &splits, allocator, CANONICAL_REQUEST_SPLIT_OVER_ESTIMATE, sizeof(struct aws_byte_cursor))) {
+ return header_cursor;
+ }
+
+ if (aws_byte_cursor_split_on_char(&canonical_request, '\n', &splits)) {
+ goto done;
+ }
+
+ size_t split_count = aws_array_list_length(&splits);
+
+ if (split_count > 1) {
+ aws_array_list_get_at(&splits, &header_cursor, split_count - 2);
+ }
+
+done:
+
+ aws_array_list_clean_up(&splits);
+
+ return header_cursor;
+}
+
+/*
+ * Fill in the signing state values that later stages need and that computing the canonical request would otherwise have produced.
+ */
+static int s_apply_existing_canonical_request(struct aws_signing_state_aws *state) {
+
+ struct aws_byte_cursor canonical_request_cursor;
+ AWS_ZERO_STRUCT(canonical_request_cursor);
+ if (aws_signable_get_property(state->signable, g_aws_canonical_request_property_name, &canonical_request_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_dynamic(&state->canonical_request, &canonical_request_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor signed_headers_cursor =
+ s_get_signed_headers_from_canonical_request(state->allocator, canonical_request_cursor);
+ if (aws_byte_buf_append_dynamic(&state->signed_headers, &signed_headers_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (s_build_canonical_request_hash(state)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Top-level canonical request construction function.
+ * For signature types not associated directly with an http request (chunks, events), this calculates the
+ * string-to-sign payload that replaces the hashed canonical request in those signing procedures.
+ */
+int aws_signing_build_canonical_request(struct aws_signing_state_aws *state) {
+
+ if (aws_date_time_to_utc_time_str(&state->config.date, AWS_DATE_FORMAT_ISO_8601_BASIC, &state->date)) {
+ return AWS_OP_ERR;
+ }
+
+ if (s_build_canonical_payload(state)) {
+ return AWS_OP_ERR;
+ }
+
+ if (s_build_credential_scope(state)) {
+ return AWS_OP_ERR;
+ }
+
+ switch (state->config.signature_type) {
+ case AWS_ST_HTTP_REQUEST_HEADERS:
+ case AWS_ST_HTTP_REQUEST_QUERY_PARAMS:
+ return s_build_canonical_request_sigv4(state);
+
+ case AWS_ST_HTTP_REQUEST_CHUNK:
+ return s_build_canonical_request_body_chunk(state);
+ case AWS_ST_HTTP_REQUEST_EVENT:
+ return s_build_string_to_sign_payload_for_event(state);
+
+ case AWS_ST_HTTP_REQUEST_TRAILING_HEADERS:
+ return s_build_canonical_request_trailing_headers(state);
+
+ case AWS_ST_CANONICAL_REQUEST_HEADERS:
+ case AWS_ST_CANONICAL_REQUEST_QUERY_PARAMS:
+ return s_apply_existing_canonical_request(state);
+
+ default:
+ return AWS_OP_ERR;
+ }
+}
+
+/*
+ * Top-level function for computing the string-to-sign in an AWS signing process.
+ */
+int aws_signing_build_string_to_sign(struct aws_signing_state_aws *state) {
+ /* We must have a canonical request and the credential scope. We must not have the string to sign */
+ AWS_ASSERT(state->string_to_sign_payload.len > 0);
+ AWS_ASSERT(state->credential_scope.len > 0);
+ AWS_ASSERT(state->string_to_sign.len == 0);
+
+ struct aws_byte_buf *dest = &state->string_to_sign;
+
+ if (s_append_sts_signature_type(state, dest)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ /* date_time output uses raw array writes, so ensure there's enough room beforehand */
+ if (aws_byte_buf_reserve_relative(dest, AWS_DATE_TIME_STR_MAX_LEN)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(&state->date);
+ if (aws_byte_buf_append_dynamic(dest, &date_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor credential_scope_cursor = aws_byte_cursor_from_buf(&state->credential_scope);
+ if (aws_byte_buf_append_dynamic(dest, &credential_scope_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '\n')) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor sts_payload_cursor = aws_byte_cursor_from_buf(&state->string_to_sign_payload);
+ if (aws_byte_buf_append_dynamic(dest, &sts_payload_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
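+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. For a header-signed SigV4
+ * request the string to sign produced above has four lines: algorithm name, request date, credential
+ * scope, and the hex SHA-256 of the canonical request. The final digest is a placeholder and the
+ * other values are invented for illustration.
+ */
+static const char *s_example_string_to_sign(void) {
+    return "AWS4-HMAC-SHA256\n"
+           "20150830T123600Z\n"
+           "20150830/us-east-1/service/aws4_request\n"
+           "<64 hex chars: SHA-256 of the canonical request>";
+}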
+
+/*
+ * Signature calculation utility functions
+ */
+
+AWS_STATIC_STRING_FROM_LITERAL(s_secret_key_prefix, "AWS4");
+
+/*
+ * Computes the key to sign with as a function of the secret access key in the credentials and
+ * the components of the credential scope: date, region, service, algorithm terminator
+ */
+static int s_compute_sigv4_signing_key(struct aws_signing_state_aws *state, struct aws_byte_buf *dest) {
+ /* dest should be empty */
+ AWS_ASSERT(dest->len == 0);
+
+ const struct aws_signing_config_aws *config = &state->config;
+ struct aws_allocator *allocator = state->allocator;
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf secret_key;
+ AWS_ZERO_STRUCT(secret_key);
+
+ struct aws_byte_buf output;
+ AWS_ZERO_STRUCT(output);
+
+ struct aws_byte_buf date_buf;
+ AWS_ZERO_STRUCT(date_buf);
+
+ struct aws_byte_cursor secret_access_key_cursor = aws_credentials_get_secret_access_key(state->config.credentials);
+ if (aws_byte_buf_init(&secret_key, allocator, s_secret_key_prefix->len + secret_access_key_cursor.len) ||
+ aws_byte_buf_init(&output, allocator, AWS_SHA256_LEN) ||
+ aws_byte_buf_init(&date_buf, allocator, AWS_DATE_TIME_STR_MAX_LEN)) {
+ goto cleanup;
+ }
+
+ /*
+ * Prep Key
+ */
+ struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_secret_key_prefix);
+ if (aws_byte_buf_append_dynamic(&secret_key, &prefix_cursor) ||
+ aws_byte_buf_append_dynamic(&secret_key, &secret_access_key_cursor)) {
+ goto cleanup;
+ }
+
+ /*
+ * Prep date
+ */
+ if (aws_date_time_to_utc_time_short_str(&config->date, AWS_DATE_FORMAT_ISO_8601_BASIC, &date_buf)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor date_cursor = aws_byte_cursor_from_buf(&date_buf);
+ struct aws_byte_cursor secret_key_cursor = aws_byte_cursor_from_buf(&secret_key);
+ if (aws_sha256_hmac_compute(allocator, &secret_key_cursor, &date_cursor, &output, 0)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor chained_key_cursor = aws_byte_cursor_from_buf(&output);
+ output.len = 0; /* necessary evil part 1*/
+ if (aws_sha256_hmac_compute(allocator, &chained_key_cursor, &config->region, &output, 0)) {
+ goto cleanup;
+ }
+
+ chained_key_cursor = aws_byte_cursor_from_buf(&output);
+ output.len = 0; /* necessary evil part 2 */
+ if (aws_sha256_hmac_compute(allocator, &chained_key_cursor, &config->service, &output, 0)) {
+ goto cleanup;
+ }
+
+ chained_key_cursor = aws_byte_cursor_from_buf(&output);
+ struct aws_byte_cursor scope_terminator_cursor = aws_byte_cursor_from_string(s_credential_scope_sigv4_terminator);
+ if (aws_sha256_hmac_compute(allocator, &chained_key_cursor, &scope_terminator_cursor, dest, 0)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+ aws_byte_buf_clean_up_secure(&secret_key);
+ aws_byte_buf_clean_up(&output);
+ aws_byte_buf_clean_up(&date_buf);
+
+ return result;
+}
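+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. The derivation above is the
+ * standard SigV4 key schedule; written out with the usual names it is:
+ *
+ *   kSecret  = "AWS4" + secret_access_key
+ *   kDate    = HMAC-SHA256(kSecret,  YYYYMMDD)
+ *   kRegion  = HMAC-SHA256(kDate,    region)
+ *   kService = HMAC-SHA256(kRegion,  service)
+ *   kSigning = HMAC-SHA256(kService, "aws4_request")
+ *
+ * The hypothetical helper below performs one link of that chain with aws_sha256_hmac_compute, the
+ * same call used above; four chained invocations reproduce kSigning.
+ */
+static int s_example_hmac_step(
+    struct aws_allocator *allocator,
+    struct aws_byte_cursor key,
+    struct aws_byte_cursor data,
+    struct aws_byte_buf *out_digest) {
+
+    /* out_digest must already be initialized with at least AWS_SHA256_LEN capacity */
+    return aws_sha256_hmac_compute(allocator, &key, &data, out_digest, 0);
+}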
+
+/*
+ * Calculates the hex-encoding of the final signature value from the sigv4 signing process
+ */
+static int s_calculate_sigv4_signature_value(struct aws_signing_state_aws *state) {
+ struct aws_allocator *allocator = state->allocator;
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf key;
+ AWS_ZERO_STRUCT(key);
+
+ struct aws_byte_buf digest;
+ AWS_ZERO_STRUCT(digest);
+
+ if (aws_byte_buf_init(&key, allocator, AWS_SHA256_LEN) || aws_byte_buf_init(&digest, allocator, AWS_SHA256_LEN)) {
+ goto cleanup;
+ }
+
+ if (s_compute_sigv4_signing_key(state, &key)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor key_cursor = aws_byte_cursor_from_buf(&key);
+ struct aws_byte_cursor string_to_sign_cursor = aws_byte_cursor_from_buf(&state->string_to_sign);
+ if (aws_sha256_hmac_compute(allocator, &key_cursor, &string_to_sign_cursor, &digest, 0)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest);
+ if (aws_hex_encode_append_dynamic(&digest_cursor, &state->signature)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_byte_buf_clean_up(&key);
+ aws_byte_buf_clean_up(&digest);
+
+ return result;
+}
+
+/*
+ * Calculates the hex-encoding of the final signature value from the sigv4a signing process
+ */
+static int s_calculate_sigv4a_signature_value(struct aws_signing_state_aws *state) {
+ struct aws_allocator *allocator = state->allocator;
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf ecdsa_digest;
+ AWS_ZERO_STRUCT(ecdsa_digest);
+
+ struct aws_byte_buf sha256_digest;
+ AWS_ZERO_STRUCT(sha256_digest);
+
+ struct aws_ecc_key_pair *ecc_key = aws_credentials_get_ecc_key_pair(state->config.credentials);
+ if (ecc_key == NULL) {
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CREDENTIALS);
+ }
+
+ if (aws_byte_buf_init(&ecdsa_digest, allocator, aws_ecc_key_pair_signature_length(ecc_key)) ||
+ aws_byte_buf_init(&sha256_digest, allocator, AWS_SHA256_LEN)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor string_to_sign_cursor = aws_byte_cursor_from_buf(&state->string_to_sign);
+ if (aws_sha256_compute(allocator, &string_to_sign_cursor, &sha256_digest, 0)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor sha256_digest_cursor = aws_byte_cursor_from_buf(&sha256_digest);
+ if (aws_ecc_key_pair_sign_message(ecc_key, &sha256_digest_cursor, &ecdsa_digest)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor ecdsa_digest_cursor = aws_byte_cursor_from_buf(&ecdsa_digest);
+ if (aws_hex_encode_append_dynamic(&ecdsa_digest_cursor, &state->signature)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_byte_buf_clean_up(&ecdsa_digest);
+ aws_byte_buf_clean_up(&sha256_digest);
+
+ return result;
+}
+
+/*
+ * Appends a final signature value to a buffer based on the requested signing algorithm
+ */
+static int s_calculate_signature_value(struct aws_signing_state_aws *state) {
+ switch (state->config.algorithm) {
+ case AWS_SIGNING_ALGORITHM_V4:
+ return s_calculate_sigv4_signature_value(state);
+
+ case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC:
+ return s_calculate_sigv4a_signature_value(state);
+
+ default:
+ return aws_raise_error(AWS_AUTH_SIGNING_UNSUPPORTED_ALGORITHM);
+ }
+}
+
+static int s_add_signature_property_to_result_set(struct aws_signing_state_aws *state) {
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf final_signature_buffer;
+ AWS_ZERO_STRUCT(final_signature_buffer);
+
+ if (aws_byte_buf_init(&final_signature_buffer, state->allocator, HEX_ENCODED_SIGNATURE_OVER_ESTIMATE)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor signature_value = aws_byte_cursor_from_buf(&state->signature);
+ if (aws_byte_buf_append_dynamic(&final_signature_buffer, &signature_value)) {
+ goto cleanup;
+ }
+
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC &&
+ (state->config.signature_type == AWS_ST_HTTP_REQUEST_CHUNK ||
+ state->config.signature_type == AWS_ST_HTTP_REQUEST_TRAILING_HEADERS)) {
+ if (aws_byte_buf_reserve(&final_signature_buffer, MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH)) {
+ goto cleanup;
+ }
+
+ if (signature_value.len < MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH) {
+ size_t padding_byte_count = MAX_ECDSA_P256_SIGNATURE_AS_HEX_LENGTH - signature_value.len;
+ if (!aws_byte_buf_write_u8_n(
+ &final_signature_buffer, AWS_SIGV4A_SIGNATURE_PADDING_BYTE, padding_byte_count)) {
+ goto cleanup;
+ }
+ }
+ }
+
+ signature_value = aws_byte_cursor_from_buf(&final_signature_buffer);
+ if (aws_signing_result_set_property(&state->result, g_aws_signature_property_name, &signature_value)) {
+ goto cleanup;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+
+ aws_byte_buf_clean_up(&final_signature_buffer);
+
+ return result;
+}
+
+/*
+ * Adds the appropriate authorization header or query param to the signing result
+ */
+static int s_add_authorization_to_result(
+ struct aws_signing_state_aws *state,
+ struct aws_byte_buf *authorization_value) {
+ struct aws_byte_cursor name;
+ struct aws_byte_cursor value = aws_byte_cursor_from_buf(authorization_value);
+
+ if (s_is_header_based_signature_value(state->config.signature_type)) {
+ name = aws_byte_cursor_from_string(g_aws_signing_authorization_header_name);
+ if (aws_signing_result_append_property_list(
+ &state->result, g_aws_http_headers_property_list_name, &name, &value)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ if (s_is_query_param_based_signature_value(state->config.signature_type)) {
+ name = aws_byte_cursor_from_string(g_aws_signing_authorization_query_param_name);
+ if (aws_signing_result_append_property_list(
+ &state->result, g_aws_http_query_params_property_list_name, &name, &value)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /*
+ * Unconditionally add the signature value as a top-level property.
+ */
+ if (s_add_signature_property_to_result_set(state)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_credential_prefix, " Credential=");
+AWS_STATIC_STRING_FROM_LITERAL(s_signed_headers_prefix, ", SignedHeaders=");
+AWS_STATIC_STRING_FROM_LITERAL(s_signature_prefix, ", Signature=");
+
+/*
+ * The Authorization has a lot more than just the final signature value in it. This function appends all those
+ * other values together ala:
+ *
+ * "AWS4-HMAC-SHA256 Credential=AKIDEXAMPLE/20150830/us-east-1/service/aws4_request, SignedHeaders=host;x-amz-date,
+ * Signature="
+ *
+ * The final header value is this with the signature value appended to the end.
+ */
+static int s_append_authorization_header_preamble(struct aws_signing_state_aws *state, struct aws_byte_buf *dest) {
+ if (s_append_sts_signature_type(state, dest)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor credential_cursor = aws_byte_cursor_from_string(s_credential_prefix);
+ if (aws_byte_buf_append_dynamic(dest, &credential_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor access_key_cursor = aws_credentials_get_access_key_id(state->config.credentials);
+ if (aws_byte_buf_append_dynamic(dest, &access_key_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(dest, '/')) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor credential_scope_cursor = aws_byte_cursor_from_buf(&state->credential_scope);
+ if (aws_byte_buf_append_dynamic(dest, &credential_scope_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor signed_headers_prefix_cursor = aws_byte_cursor_from_string(s_signed_headers_prefix);
+ if (aws_byte_buf_append_dynamic(dest, &signed_headers_prefix_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor signed_headers_cursor = aws_byte_cursor_from_buf(&state->signed_headers);
+ if (aws_byte_buf_append_dynamic(dest, &signed_headers_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor signature_prefix_cursor = aws_byte_cursor_from_string(s_signature_prefix);
+ if (aws_byte_buf_append_dynamic(dest, &signature_prefix_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Top-level function for constructing the final authorization header/query-param and adding it to the
+ * signing result.
+ */
+int aws_signing_build_authorization_value(struct aws_signing_state_aws *state) {
+ AWS_ASSERT(state->string_to_sign.len > 0);
+ AWS_ASSERT(state->credential_scope.len > 0);
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf authorization_value;
+
+ if (aws_byte_buf_init(&authorization_value, state->allocator, AUTHORIZATION_VALUE_STARTING_SIZE)) {
+ goto cleanup;
+ }
+
+ if (s_is_header_based_signature_value(state->config.signature_type) &&
+ s_append_authorization_header_preamble(state, &authorization_value)) {
+ goto cleanup;
+ }
+
+ if (s_calculate_signature_value(state)) {
+ goto cleanup;
+ }
+
+ struct aws_byte_cursor signature_cursor = aws_byte_cursor_from_buf(&state->signature);
+ if (aws_byte_buf_append_dynamic(&authorization_value, &signature_cursor)) {
+ goto cleanup;
+ }
+
+ if (s_add_authorization_to_result(state, &authorization_value)) {
+ goto cleanup;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Http request successfully built final authorization value via algorithm %s, with contents "
+ "\n" PRInSTR "\n",
+ (void *)state->signable,
+ aws_signing_algorithm_to_string(state->config.algorithm),
+ AWS_BYTE_BUF_PRI(authorization_value));
+
+ result = AWS_OP_SUCCESS;
+
+cleanup:
+ aws_byte_buf_clean_up(&authorization_value);
+
+ return result;
+}
+
+int aws_validate_v4a_authorization_value(
+ struct aws_allocator *allocator,
+ struct aws_ecc_key_pair *ecc_key,
+ struct aws_byte_cursor string_to_sign_cursor,
+ struct aws_byte_cursor signature_value_cursor) {
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Verifying v4a auth value: \n" PRInSTR "\n\nusing string-to-sign: \n" PRInSTR "\n\n",
+ (void *)ecc_key,
+ AWS_BYTE_CURSOR_PRI(signature_value_cursor),
+ AWS_BYTE_CURSOR_PRI(string_to_sign_cursor));
+
+ signature_value_cursor = aws_trim_padded_sigv4a_signature(signature_value_cursor);
+
+ size_t binary_length = 0;
+ if (aws_hex_compute_decoded_len(signature_value_cursor.len, &binary_length)) {
+ return AWS_OP_ERR;
+ }
+
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_buf binary_signature;
+ AWS_ZERO_STRUCT(binary_signature);
+
+ struct aws_byte_buf sha256_digest;
+ AWS_ZERO_STRUCT(sha256_digest);
+
+ if (aws_byte_buf_init(&binary_signature, allocator, binary_length) ||
+ aws_byte_buf_init(&sha256_digest, allocator, AWS_SHA256_LEN)) {
+ goto done;
+ }
+
+ if (aws_hex_decode(&signature_value_cursor, &binary_signature)) {
+ goto done;
+ }
+
+ if (aws_sha256_compute(allocator, &string_to_sign_cursor, &sha256_digest, 0)) {
+ goto done;
+ }
+
+ struct aws_byte_cursor binary_signature_cursor =
+ aws_byte_cursor_from_array(binary_signature.buffer, binary_signature.len);
+ struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&sha256_digest);
+ if (aws_ecc_key_pair_verify_signature(ecc_key, &digest_cursor, &binary_signature_cursor)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_byte_buf_clean_up(&binary_signature);
+ aws_byte_buf_clean_up(&sha256_digest);
+
+ return result;
+}
+
+int aws_verify_sigv4a_signing(
+ struct aws_allocator *allocator,
+ const struct aws_signable *signable,
+ const struct aws_signing_config_base *base_config,
+ struct aws_byte_cursor expected_canonical_request_cursor,
+ struct aws_byte_cursor signature_cursor,
+ struct aws_byte_cursor ecc_key_pub_x,
+ struct aws_byte_cursor ecc_key_pub_y) {
+
+ int result = AWS_OP_ERR;
+
+ if (base_config->config_type != AWS_SIGNING_CONFIG_AWS) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signing config is not an AWS signing config");
+ return aws_raise_error(AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION);
+ }
+
+ if (aws_validate_aws_signing_config_aws((void *)base_config)) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signing config failed validation");
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ const struct aws_signing_config_aws *config = (void *)base_config;
+ if (config->algorithm != AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signing algorithm is not V4_ASYMMETRIC");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (config->credentials == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "AWS credentials were not provided/null");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_signing_state_aws *signing_state = aws_signing_state_new(allocator, config, signable, NULL, NULL);
+ if (!signing_state) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to create new signing state");
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Verifying v4a signature: \n" PRInSTR "\n\nagainst expected canonical request: \n" PRInSTR
+ "\n\nusing ecc key:\n X:" PRInSTR "\n Y:" PRInSTR "\n\n",
+ (void *)signable,
+ AWS_BYTE_CURSOR_PRI(signature_cursor),
+ AWS_BYTE_CURSOR_PRI(expected_canonical_request_cursor),
+ AWS_BYTE_CURSOR_PRI(ecc_key_pub_x),
+ AWS_BYTE_CURSOR_PRI(ecc_key_pub_y));
+
+ struct aws_ecc_key_pair *verification_key =
+ aws_ecc_key_new_from_hex_coordinates(allocator, AWS_CAL_ECDSA_P256, ecc_key_pub_x, ecc_key_pub_y);
+ if (verification_key == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to create an ECC key from provided coordinates");
+ goto done;
+ }
+
+ if (aws_credentials_get_ecc_key_pair(signing_state->config.credentials) == NULL) {
+ struct aws_credentials *ecc_credentials =
+ aws_credentials_new_ecc_from_aws_credentials(allocator, signing_state->config.credentials);
+ aws_credentials_release(signing_state->config.credentials);
+ signing_state->config.credentials = ecc_credentials;
+ if (signing_state->config.credentials == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to create ECC from provided credentials");
+ goto done;
+ }
+ }
+
+ if (aws_signing_build_canonical_request(signing_state)) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to canonicalize request for signing");
+ goto done;
+ }
+
+ struct aws_byte_cursor canonical_request_cursor = aws_byte_cursor_from_buf(&signing_state->canonical_request);
+ if (aws_byte_cursor_compare_lexical(&expected_canonical_request_cursor, &canonical_request_cursor) != 0) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Canonicalized request and expected canonical request do not match");
+ aws_raise_error(AWS_AUTH_CANONICAL_REQUEST_MISMATCH);
+ goto done;
+ }
+
+ if (aws_signing_build_string_to_sign(signing_state)) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Unable to build string to sign from canonical request");
+ goto done;
+ }
+
+ if (aws_validate_v4a_authorization_value(
+ allocator, verification_key, aws_byte_cursor_from_buf(&signing_state->string_to_sign), signature_cursor)) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "Signature does not validate");
+ aws_raise_error(AWS_AUTH_SIGV4A_SIGNATURE_VALIDATION_FAILURE);
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (verification_key) {
+ aws_ecc_key_pair_release(verification_key);
+ }
+ aws_signing_state_destroy(signing_state);
+
+ return result;
+}
+
+static bool s_is_padding_byte(uint8_t byte) {
+ return byte == AWS_SIGV4A_SIGNATURE_PADDING_BYTE;
+}
+
+struct aws_byte_cursor aws_trim_padded_sigv4a_signature(struct aws_byte_cursor signature) {
+ return aws_byte_cursor_trim_pred(&signature, s_is_padding_byte);
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials.c b/contrib/restricted/aws/aws-c-auth/source/credentials.c
new file mode 100644
index 0000000000..f838c3e17e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials.c
@@ -0,0 +1,339 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/cal/ecc.h>
+#include <aws/common/environment.h>
+#include <aws/common/string.h>
+
+/*
+ * A structure that wraps the public/private data needed to sign an authenticated AWS request
+ */
+struct aws_credentials {
+ struct aws_allocator *allocator;
+
+ struct aws_atomic_var ref_count;
+
+ struct aws_string *access_key_id;
+ struct aws_string *secret_access_key;
+ struct aws_string *session_token;
+
+ /*
+ * A timepoint, in seconds since epoch, at which the credentials should no longer be used because they
+ * will have expired.
+ *
+ * The primary purpose of this value is to allow providers to communicate to the caching provider any
+ * additional constraints on how the sourced credentials should be used (STS). After refreshing the cached
+ * credentials, the caching provider uses the following calculation to determine the next requery time:
+ *
+ * next_requery_time = now + cached_expiration_config;
+ * if (cached_creds->expiration_timepoint_seconds < next_requery_time) {
+ * next_requery_time = cached_creds->expiration_timepoint_seconds;
+ *
+ * The cached provider may, at its discretion, use a smaller requery time to avoid edge-case scenarios where
+ * credential expiration becomes a race condition.
+ *
+ * The following leaf providers always set this value to UINT64_MAX (indefinite):
+ * static
+ * environment
+ * imds
+ * profile_config*
+ *
+ * * - profile_config may invoke sts which will use a non-max value
+ *
+ * The following leaf providers set this value to a sensible timepoint:
+ * sts - value is based on current time + options->duration_seconds
+ *
+ */
+ uint64_t expiration_timepoint_seconds;
+
+ struct aws_ecc_key_pair *ecc_key;
+};
+
+/*
+ * Credentials API implementations
+ */
+struct aws_credentials *aws_credentials_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor access_key_id_cursor,
+ struct aws_byte_cursor secret_access_key_cursor,
+ struct aws_byte_cursor session_token_cursor,
+ uint64_t expiration_timepoint_seconds) {
+
+ if (access_key_id_cursor.ptr == NULL || access_key_id_cursor.len == 0) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (secret_access_key_cursor.ptr == NULL || secret_access_key_cursor.len == 0) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_credentials *credentials = aws_mem_acquire(allocator, sizeof(struct aws_credentials));
+ if (credentials == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*credentials);
+
+ credentials->allocator = allocator;
+ aws_atomic_init_int(&credentials->ref_count, 1);
+
+ credentials->access_key_id =
+ aws_string_new_from_array(allocator, access_key_id_cursor.ptr, access_key_id_cursor.len);
+ if (credentials->access_key_id == NULL) {
+ goto error;
+ }
+
+ credentials->secret_access_key =
+ aws_string_new_from_array(allocator, secret_access_key_cursor.ptr, secret_access_key_cursor.len);
+ if (credentials->secret_access_key == NULL) {
+ goto error;
+ }
+
+ if (session_token_cursor.ptr != NULL && session_token_cursor.len > 0) {
+ credentials->session_token =
+ aws_string_new_from_array(allocator, session_token_cursor.ptr, session_token_cursor.len);
+ if (credentials->session_token == NULL) {
+ goto error;
+ }
+ }
+
+ credentials->expiration_timepoint_seconds = expiration_timepoint_seconds;
+
+ return credentials;
+
+error:
+
+ aws_credentials_release(credentials);
+
+ return NULL;
+}
+
+struct aws_credentials *aws_credentials_new_anonymous(struct aws_allocator *allocator) {
+
+ struct aws_credentials *credentials = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials));
+
+ credentials->allocator = allocator;
+ aws_atomic_init_int(&credentials->ref_count, 1);
+
+ credentials->expiration_timepoint_seconds = UINT64_MAX;
+
+ return credentials;
+}
+
+static void s_aws_credentials_destroy(struct aws_credentials *credentials) {
+ if (credentials == NULL) {
+ return;
+ }
+
+ if (credentials->access_key_id != NULL) {
+ aws_string_destroy(credentials->access_key_id);
+ }
+
+ if (credentials->secret_access_key != NULL) {
+ aws_string_destroy_secure(credentials->secret_access_key);
+ }
+
+ if (credentials->session_token != NULL) {
+ aws_string_destroy_secure(credentials->session_token);
+ }
+
+ aws_ecc_key_pair_release(credentials->ecc_key);
+
+ aws_mem_release(credentials->allocator, credentials);
+}
+
+void aws_credentials_acquire(const struct aws_credentials *credentials) {
+ if (credentials == NULL) {
+ return;
+ }
+
+ aws_atomic_fetch_add((struct aws_atomic_var *)&credentials->ref_count, 1);
+}
+
+void aws_credentials_release(const struct aws_credentials *credentials) {
+ if (credentials == NULL) {
+ return;
+ }
+
+ size_t old_value = aws_atomic_fetch_sub((struct aws_atomic_var *)&credentials->ref_count, 1);
+ if (old_value == 1) {
+ s_aws_credentials_destroy((struct aws_credentials *)credentials);
+ }
+}
+
+static struct aws_byte_cursor s_empty_token_cursor = {
+ .ptr = NULL,
+ .len = 0,
+};
+
+struct aws_byte_cursor aws_credentials_get_access_key_id(const struct aws_credentials *credentials) {
+ if (credentials->access_key_id == NULL) {
+ return s_empty_token_cursor;
+ }
+
+ return aws_byte_cursor_from_string(credentials->access_key_id);
+}
+
+struct aws_byte_cursor aws_credentials_get_secret_access_key(const struct aws_credentials *credentials) {
+ if (credentials->secret_access_key == NULL) {
+ return s_empty_token_cursor;
+ }
+
+ return aws_byte_cursor_from_string(credentials->secret_access_key);
+}
+
+struct aws_byte_cursor aws_credentials_get_session_token(const struct aws_credentials *credentials) {
+ if (credentials->session_token != NULL) {
+ return aws_byte_cursor_from_string(credentials->session_token);
+ }
+
+ return s_empty_token_cursor;
+}
+
+uint64_t aws_credentials_get_expiration_timepoint_seconds(const struct aws_credentials *credentials) {
+ return credentials->expiration_timepoint_seconds;
+}
+
+struct aws_ecc_key_pair *aws_credentials_get_ecc_key_pair(const struct aws_credentials *credentials) {
+ return credentials->ecc_key;
+}
+
+bool aws_credentials_is_anonymous(const struct aws_credentials *credentials) {
+ AWS_PRECONDITION(credentials);
+ return credentials->access_key_id == NULL && credentials->secret_access_key == NULL;
+}
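+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. Typical lifetime of a
+ * credentials object using only APIs defined in this file: construct it from cursors, read the
+ * components back through the getters, and release it when done. The key material below is invented
+ * for illustration and the helper name is hypothetical.
+ */
+static void s_example_credentials_usage(struct aws_allocator *allocator) {
+    struct aws_credentials *credentials = aws_credentials_new(
+        allocator,
+        aws_byte_cursor_from_c_str("AKIDEXAMPLE"),
+        aws_byte_cursor_from_c_str("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"),
+        aws_byte_cursor_from_c_str(""), /* no session token */
+        UINT64_MAX /* never expires */);
+
+    if (credentials == NULL) {
+        return;
+    }
+
+    /* the getters return non-owning cursors into the credentials object */
+    struct aws_byte_cursor access_key_id = aws_credentials_get_access_key_id(credentials);
+    (void)access_key_id;
+
+    /* drop the initial reference; the object is destroyed once the count reaches zero */
+    aws_credentials_release(credentials);
+}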
+
+struct aws_credentials *aws_credentials_new_from_string(
+ struct aws_allocator *allocator,
+ const struct aws_string *access_key_id,
+ const struct aws_string *secret_access_key,
+ const struct aws_string *session_token,
+ uint64_t expiration_timepoint_seconds) {
+ struct aws_byte_cursor access_key_cursor = aws_byte_cursor_from_string(access_key_id);
+ struct aws_byte_cursor secret_access_key_cursor = aws_byte_cursor_from_string(secret_access_key);
+ struct aws_byte_cursor session_token_cursor;
+ AWS_ZERO_STRUCT(session_token_cursor);
+
+ if (session_token) {
+ session_token_cursor = aws_byte_cursor_from_string(session_token);
+ }
+
+ return aws_credentials_new(
+ allocator, access_key_cursor, secret_access_key_cursor, session_token_cursor, expiration_timepoint_seconds);
+}
+
+struct aws_credentials *aws_credentials_new_ecc(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor access_key_id,
+ struct aws_ecc_key_pair *ecc_key,
+ struct aws_byte_cursor session_token,
+ uint64_t expiration_timepoint_in_seconds) {
+
+ if (access_key_id.len == 0 || ecc_key == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_GENERAL, "Provided credentials do not have a valid access_key_id or ecc_key");
+ return NULL;
+ }
+
+ struct aws_credentials *credentials = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials));
+ if (credentials == NULL) {
+ return NULL;
+ }
+
+ credentials->allocator = allocator;
+ credentials->expiration_timepoint_seconds = expiration_timepoint_in_seconds;
+ aws_atomic_init_int(&credentials->ref_count, 1);
+ aws_ecc_key_pair_acquire(ecc_key);
+ credentials->ecc_key = ecc_key;
+
+ credentials->access_key_id = aws_string_new_from_array(allocator, access_key_id.ptr, access_key_id.len);
+ if (credentials->access_key_id == NULL) {
+ goto on_error;
+ }
+
+ if (session_token.ptr != NULL && session_token.len > 0) {
+ credentials->session_token = aws_string_new_from_array(allocator, session_token.ptr, session_token.len);
+ if (credentials->session_token == NULL) {
+ goto on_error;
+ }
+ }
+
+ return credentials;
+
+on_error:
+
+ s_aws_credentials_destroy(credentials);
+
+ return NULL;
+}
+
+struct aws_credentials *aws_credentials_new_ecc_from_aws_credentials(
+ struct aws_allocator *allocator,
+ const struct aws_credentials *credentials) {
+
+ struct aws_ecc_key_pair *ecc_key = aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(allocator, credentials);
+
+ if (ecc_key == NULL) {
+ return NULL;
+ }
+
+ struct aws_credentials *ecc_credentials = aws_credentials_new_ecc(
+ allocator,
+ aws_credentials_get_access_key_id(credentials),
+ ecc_key,
+ aws_credentials_get_session_token(credentials),
+ aws_credentials_get_expiration_timepoint_seconds(credentials));
+
+ aws_ecc_key_pair_release(ecc_key);
+
+ return ecc_credentials;
+}
+
+/*
+ * global credentials provider APIs
+ */
+
+void aws_credentials_provider_destroy(struct aws_credentials_provider *provider) {
+ if (provider != NULL) {
+ provider->vtable->destroy(provider);
+ }
+}
+
+struct aws_credentials_provider *aws_credentials_provider_release(struct aws_credentials_provider *provider) {
+ if (provider == NULL) {
+ return NULL;
+ }
+
+ size_t old_value = aws_atomic_fetch_sub(&provider->ref_count, 1);
+ if (old_value == 1) {
+ aws_credentials_provider_destroy(provider);
+ }
+
+ return NULL;
+}
+
+struct aws_credentials_provider *aws_credentials_provider_acquire(struct aws_credentials_provider *provider) {
+ if (provider == NULL) {
+ return NULL;
+ }
+
+ aws_atomic_fetch_add(&provider->ref_count, 1);
+
+ return provider;
+}
+
+int aws_credentials_provider_get_credentials(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ AWS_ASSERT(provider->vtable->get_credentials);
+
+ return provider->vtable->get_credentials(provider, callback, user_data);
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
new file mode 100644
index 0000000000..a0ac07714f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_anonymous.c
@@ -0,0 +1,60 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+#include <aws/auth/private/credentials_utils.h>
+
+static int s_anonymous_credentials_provider_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials *credentials = provider->impl;
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Anonymous credentials provider successfully sourced credentials",
+ (void *)provider);
+ callback(credentials, AWS_ERROR_SUCCESS, user_data);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_anonymous_credentials_provider_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials *credentials = provider->impl;
+
+ aws_credentials_release(credentials);
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_anonymous_vtable = {
+ .get_credentials = s_anonymous_credentials_provider_get_credentials_async,
+ .destroy = s_anonymous_credentials_provider_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_anonymous(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_shutdown_options *shutdown_options) {
+
+ struct aws_credentials_provider *provider = aws_mem_calloc(allocator, 1, sizeof(struct aws_credentials_provider));
+
+ struct aws_credentials *credentials = aws_credentials_new_anonymous(allocator);
+ if (credentials == NULL) {
+ goto on_new_credentials_failure;
+ }
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_anonymous_vtable, credentials);
+
+ provider->shutdown_options = *shutdown_options;
+
+ return provider;
+
+on_new_credentials_failure:
+
+ aws_mem_release(allocator, provider);
+
+ return NULL;
+}
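+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream sources. Minimal use of the anonymous
+ * provider defined above. Sourcing credentials is asynchronous in general, so the result arrives via
+ * a callback (here it only inspects the error code); the helper names are hypothetical and the caller
+ * supplies the allocator.
+ */
+static void s_example_on_credentials(struct aws_credentials *credentials, int error_code, void *user_data) {
+    (void)user_data;
+    if (error_code == AWS_ERROR_SUCCESS && credentials != NULL) {
+        /* anonymous credentials: the access key and secret cursors come back empty */
+    }
+}
+
+static void s_example_anonymous_provider_usage(struct aws_allocator *allocator) {
+    struct aws_credentials_provider_shutdown_options shutdown_options;
+    AWS_ZERO_STRUCT(shutdown_options);
+
+    struct aws_credentials_provider *provider =
+        aws_credentials_provider_new_anonymous(allocator, &shutdown_options);
+    if (provider == NULL) {
+        return;
+    }
+
+    aws_credentials_provider_get_credentials(provider, s_example_on_credentials, NULL);
+
+    aws_credentials_provider_release(provider);
+}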
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
new file mode 100644
index 0000000000..300794f1d5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cached.c
@@ -0,0 +1,312 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/mutex.h>
+#include <aws/common/time.h>
+
+#include <inttypes.h>
+
+/*
+
+ ToDo: credentials expiration environment overrides
+
+AWS_STATIC_STRING_FROM_LITERAL(s_credential_expiration_env_var, "AWS_CREDENTIAL_EXPIRATION");
+
+*/
+
+#define REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS 10
+
+struct aws_credentials_provider_cached {
+ struct aws_credentials_provider *source;
+ struct aws_credentials *cached_credentials;
+ struct aws_mutex lock;
+ uint64_t refresh_interval_in_ns;
+ uint64_t next_refresh_time;
+ aws_io_clock_fn *high_res_clock_fn;
+ aws_io_clock_fn *system_clock_fn;
+ struct aws_linked_list pending_queries;
+};
+
+static void s_aws_credentials_query_list_notify_and_clean_up(
+ struct aws_linked_list *query_list,
+ struct aws_allocator *allocator,
+ struct aws_credentials *credentials,
+ int error_code) {
+
+ while (!aws_linked_list_empty(query_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(query_list);
+ struct aws_credentials_query *query = AWS_CONTAINER_OF(node, struct aws_credentials_query, node);
+ query->callback(credentials, error_code, query->user_data);
+ aws_credentials_query_clean_up(query);
+ aws_mem_release(allocator, query);
+ }
+}
+
+static void s_swap_cached_credentials(
+ struct aws_credentials_provider *provider,
+ struct aws_credentials *new_credentials) {
+ struct aws_credentials_provider_cached *cached_provider = provider->impl;
+
+ aws_credentials_release(cached_provider->cached_credentials);
+ cached_provider->cached_credentials = new_credentials;
+ if (cached_provider->cached_credentials != NULL) {
+ aws_credentials_acquire(cached_provider->cached_credentials);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider succesfully sourced credentials on refresh",
+ (void *)provider);
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider was unable to source credentials on refresh",
+ (void *)provider);
+ }
+}
+
+static void s_cached_credentials_provider_get_credentials_async_callback(
+ struct aws_credentials *credentials,
+ int error_code,
+ void *user_data) {
+
+ struct aws_credentials_provider *provider = user_data;
+ struct aws_credentials_provider_cached *impl = provider->impl;
+
+ aws_mutex_lock(&impl->lock);
+
+ /*
+ * Move pending queries so that we can do notifications outside the lock
+ */
+ struct aws_linked_list pending_queries;
+ aws_linked_list_init(&pending_queries);
+
+ aws_linked_list_swap_contents(&pending_queries, &impl->pending_queries);
+
+ uint64_t next_refresh_time_in_ns = UINT64_MAX;
+
+ uint64_t high_res_now = 0;
+ if (!impl->high_res_clock_fn(&high_res_now)) {
+ if (impl->refresh_interval_in_ns > 0) {
+ next_refresh_time_in_ns = high_res_now + impl->refresh_interval_in_ns;
+ }
+
+ uint64_t credentials_expiration_timepoint_seconds = UINT64_MAX;
+ if (credentials != NULL) {
+ credentials_expiration_timepoint_seconds = aws_credentials_get_expiration_timepoint_seconds(credentials);
+ }
+
+ /*
+ * If the sourced credentials have an explicit expiration time, we should always use that time
+ * rather than the much cruder, mechanical refresh setting on the caching wrapper.
+ */
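+ /*
+ * For example (illustrative numbers): if the credentials expire at system time 1000s and the
+ * system clock currently reads 400s, the next refresh lands at high-res now plus
+ * (1000 - 400 - 10) = 590 seconds.
+ */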
+ if (credentials_expiration_timepoint_seconds < UINT64_MAX) {
+ uint64_t system_now = 0;
+ if (!impl->system_clock_fn(&system_now)) {
+
+ uint64_t system_now_seconds =
+ aws_timestamp_convert(system_now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
+ if (credentials_expiration_timepoint_seconds >=
+ system_now_seconds + REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS) {
+ next_refresh_time_in_ns = high_res_now;
+ next_refresh_time_in_ns += aws_timestamp_convert(
+ credentials_expiration_timepoint_seconds - system_now_seconds -
+ REFRESH_CREDENTIALS_EARLY_DURATION_SECONDS,
+ AWS_TIMESTAMP_SECS,
+ AWS_TIMESTAMP_NANOS,
+ NULL);
+ }
+ }
+ }
+ }
+
+ impl->next_refresh_time = next_refresh_time_in_ns;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider next refresh time set to %" PRIu64,
+ (void *)provider,
+ impl->next_refresh_time);
+
+ s_swap_cached_credentials(provider, credentials);
+
+ aws_mutex_unlock(&impl->lock);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider notifying pending queries of new credentials",
+ (void *)provider);
+
+ s_aws_credentials_query_list_notify_and_clean_up(&pending_queries, provider->allocator, credentials, error_code);
+}
+
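+/*
+ * Fast path: if cached credentials exist and the refresh time has not yet passed, the callback is
+ * invoked directly with the cached credentials. Slow path: the query is queued, and only the first
+ * queued query triggers a fetch from the source provider; every queued query is notified when that
+ * fetch completes.
+ */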
+static int s_cached_credentials_provider_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_cached *impl = provider->impl;
+
+ uint64_t current_time = 0;
+ impl->high_res_clock_fn(&current_time);
+
+ bool should_submit_query = false;
+ bool perform_callback = false;
+ struct aws_credentials *credentials = NULL;
+
+ aws_mutex_lock(&impl->lock);
+
+ if (impl->cached_credentials != NULL && current_time < impl->next_refresh_time) {
+ perform_callback = true;
+ credentials = impl->cached_credentials;
+ aws_credentials_acquire(credentials);
+ } else {
+ struct aws_credentials_query *query =
+ aws_mem_acquire(provider->allocator, sizeof(struct aws_credentials_query));
+ if (query != NULL) {
+ aws_credentials_query_init(query, provider, callback, user_data);
+ should_submit_query = aws_linked_list_empty(&impl->pending_queries);
+ aws_linked_list_push_back(&impl->pending_queries, &query->node);
+ } else {
+ perform_callback = true;
+ }
+ }
+
+ aws_mutex_unlock(&impl->lock);
+
+ if (should_submit_query) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider has expired credentials. Requerying.",
+ (void *)provider);
+
+ aws_credentials_provider_get_credentials(
+ impl->source, s_cached_credentials_provider_get_credentials_async_callback, provider);
+
+ } else if (!perform_callback) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider has expired credentials. Waiting on existing query.",
+ (void *)provider);
+ }
+
+ if (perform_callback) {
+ if (credentials != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider successfully sourced from cache",
+ (void *)provider);
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Cached credentials provider failed to source credentials while skipping requery",
+ (void *)provider);
+ }
+ callback(credentials, (credentials != NULL) ? AWS_ERROR_SUCCESS : aws_last_error(), user_data);
+ aws_credentials_release(credentials);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_cached_credentials_provider_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_cached *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_credentials_provider_release(impl->source);
+
+ /* Invoke our own shutdown callback */
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ if (impl->cached_credentials != NULL) {
+ aws_credentials_release(impl->cached_credentials);
+ }
+
+ aws_mutex_clean_up(&impl->lock);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_cached_vtable = {
+ .get_credentials = s_cached_credentials_provider_get_credentials_async,
+ .destroy = s_cached_credentials_provider_destroy,
+};
+
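+/*
+ * Illustrative usage sketch (source_provider is a placeholder; the option fields mirror those
+ * consumed below):
+ *
+ * struct aws_credentials_provider_cached_options options = {
+ * .source = source_provider,
+ * .refresh_time_in_milliseconds = 15 * 60 * 1000,
+ * };
+ * struct aws_credentials_provider *cached = aws_credentials_provider_new_cached(allocator, &options);
+ */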
+struct aws_credentials_provider *aws_credentials_provider_new_cached(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_cached_options *options) {
+
+ AWS_ASSERT(options->source != NULL);
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_cached *impl = NULL;
+
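+ /* provider and impl are carved out of a single allocation; releasing provider frees both */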
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_cached));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_cached_vtable, impl);
+
+ if (aws_mutex_init(&impl->lock)) {
+ goto on_error;
+ }
+
+ aws_linked_list_init(&impl->pending_queries);
+
+ impl->source = options->source;
+ aws_credentials_provider_acquire(impl->source);
+
+ if (options->refresh_time_in_milliseconds > 0) {
+ impl->refresh_interval_in_ns = aws_timestamp_convert(
+ options->refresh_time_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+ } else {
+ /*
+ * TODO: query AWS_CREDENTIAL_EXPIRATION for a refresh override.
+ *
+ * That value must be an ISO 8601 time interval, which we do not yet have a parser for (one could be
+ * cobbled together from the existing timestamp parser). It does not seem important enough to get
+ * bogged down in at the moment, so it is deferred for now.
+ */
+ impl->refresh_interval_in_ns = 0;
+ }
+
+ if (options->high_res_clock_fn != NULL) {
+ impl->high_res_clock_fn = options->high_res_clock_fn;
+ } else {
+ impl->high_res_clock_fn = &aws_high_res_clock_get_ticks;
+ }
+
+ if (options->system_clock_fn != NULL) {
+ impl->system_clock_fn = options->system_clock_fn;
+ } else {
+ impl->system_clock_fn = &aws_sys_clock_get_ticks;
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+
+ return provider;
+
+on_error:
+
+ aws_credentials_provider_destroy(provider);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
new file mode 100644
index 0000000000..b93e708c8e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_chain.c
@@ -0,0 +1,195 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+
+struct aws_credentials_provider_chain_impl {
+ struct aws_array_list providers;
+};
+
+struct aws_credentials_provider_chain_user_data {
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *provider_chain;
+ size_t current_provider_index;
+ aws_on_get_credentials_callback_fn *original_callback;
+ void *original_user_data;
+};
+
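+/*
+ * Chain traversal: members are queried in order; the first member to return credentials, or the
+ * final member if none succeed, terminates the traversal and its result is passed to the original
+ * callback.
+ */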
+static void s_aws_provider_chain_member_callback(struct aws_credentials *credentials, int error_code, void *user_data) {
+ struct aws_credentials_provider_chain_user_data *wrapped_user_data = user_data;
+ struct aws_credentials_provider *provider = wrapped_user_data->provider_chain;
+ struct aws_credentials_provider_chain_impl *impl = provider->impl;
+
+ size_t provider_count = aws_array_list_length(&impl->providers);
+
+ if (credentials != NULL || wrapped_user_data->current_provider_index + 1 >= provider_count) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Credentials provider chain callback terminating on index %zu, with %s credentials and error code "
+ "%d",
+ (void *)provider,
+ wrapped_user_data->current_provider_index + 1,
+ (credentials != NULL) ? "valid" : "invalid",
+ error_code);
+
+ goto on_terminate_chain;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Credentials provider chain callback %zu invoked with %s credentials and error code %d",
+ (void *)provider,
+ wrapped_user_data->current_provider_index + 1,
+ (credentials != NULL) ? "valid" : "invalid",
+ error_code);
+
+ wrapped_user_data->current_provider_index++;
+
+ /*
+ * TODO: the provider list is immutable after construction, so this should not need a lock,
+ * but it might need a memory fence, which we do not have at the moment.
+ */
+ struct aws_credentials_provider *next_provider = NULL;
+ if (aws_array_list_get_at(&impl->providers, &next_provider, wrapped_user_data->current_provider_index)) {
+ goto on_terminate_chain;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Credentials provider chain invoking chain member #%zu",
+ (void *)provider,
+ wrapped_user_data->current_provider_index);
+
+ aws_credentials_provider_get_credentials(next_provider, s_aws_provider_chain_member_callback, wrapped_user_data);
+
+ return;
+
+on_terminate_chain:
+
+ wrapped_user_data->original_callback(credentials, error_code, wrapped_user_data->original_user_data);
+ aws_credentials_provider_release(provider);
+ aws_mem_release(wrapped_user_data->allocator, wrapped_user_data);
+}
+
+static int s_credentials_provider_chain_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_chain_impl *impl = provider->impl;
+
+ struct aws_credentials_provider *first_provider = NULL;
+ if (aws_array_list_get_at(&impl->providers, &first_provider, 0)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_credentials_provider_chain_user_data *wrapped_user_data =
+ aws_mem_acquire(provider->allocator, sizeof(struct aws_credentials_provider_chain_user_data));
+ if (wrapped_user_data == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_ZERO_STRUCT(*wrapped_user_data);
+
+ wrapped_user_data->allocator = provider->allocator;
+ wrapped_user_data->provider_chain = provider;
+ wrapped_user_data->current_provider_index = 0;
+ wrapped_user_data->original_user_data = user_data;
+ wrapped_user_data->original_callback = callback;
+
+ aws_credentials_provider_acquire(provider);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Credentials provider chain get credentials dispatch",
+ (void *)provider);
+
+ aws_credentials_provider_get_credentials(first_provider, s_aws_provider_chain_member_callback, wrapped_user_data);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_credentials_provider_chain_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_chain_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ size_t provider_count = aws_array_list_length(&impl->providers);
+ for (size_t i = 0; i < provider_count; ++i) {
+ struct aws_credentials_provider *chain_member = NULL;
+ if (aws_array_list_get_at(&impl->providers, &chain_member, i)) {
+ continue;
+ }
+
+ aws_credentials_provider_release(chain_member);
+ }
+
+ /* Invoke our own shutdown callback */
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ aws_array_list_clean_up(&impl->providers);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_chain_vtable = {
+ .get_credentials = s_credentials_provider_chain_get_credentials_async,
+ .destroy = s_credentials_provider_chain_destroy,
+};
+
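+/*
+ * Illustrative usage sketch (provider_a and provider_b are placeholders for existing providers):
+ *
+ * struct aws_credentials_provider *providers[] = {provider_a, provider_b};
+ * struct aws_credentials_provider_chain_options options = {
+ * .providers = providers,
+ * .provider_count = 2,
+ * };
+ * struct aws_credentials_provider *chain = aws_credentials_provider_new_chain(allocator, &options);
+ */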
+struct aws_credentials_provider *aws_credentials_provider_new_chain(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_chain_options *options) {
+
+ if (options->provider_count == 0) {
+ return NULL;
+ }
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_chain_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_chain_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_chain_vtable, impl);
+
+ if (aws_array_list_init_dynamic(
+ &impl->providers, allocator, options->provider_count, sizeof(struct aws_credentials_provider *))) {
+ goto on_error;
+ }
+
+ for (size_t i = 0; i < options->provider_count; ++i) {
+ struct aws_credentials_provider *sub_provider = options->providers[i];
+ if (aws_array_list_push_back(&impl->providers, &sub_provider)) {
+ goto on_error;
+ }
+
+ aws_credentials_provider_acquire(sub_provider);
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+
+ return provider;
+
+on_error:
+
+ aws_credentials_provider_destroy(provider);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
new file mode 100644
index 0000000000..ece91b8aab
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_cognito.c
@@ -0,0 +1,859 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/io/socket.h>
+#include <aws/io/stream.h>
+#include <aws/io/tls_channel_handler.h>
+
+#include <inttypes.h>
+
+#define COGNITO_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 5
+#define COGNITO_MAX_RETRIES 8
+#define HTTP_REQUEST_BODY_INITIAL_SIZE 1024
+#define HTTP_RESPONSE_BODY_INITIAL_SIZE 4096
+
+static void s_on_connection_manager_shutdown(void *user_data);
+static void s_on_connection_setup_fn(struct aws_http_connection *connection, int error_code, void *user_data);
+
+struct aws_cognito_login {
+ struct aws_byte_cursor identity_provider_name;
+ struct aws_byte_cursor identity_provider_token;
+ struct aws_byte_buf login_buffer;
+};
+
+static int s_aws_cognito_login_init(
+ struct aws_cognito_login *login,
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor identity_provider_name,
+ struct aws_byte_cursor identity_provider_token) {
+ AWS_ZERO_STRUCT(*login);
+
+ login->identity_provider_name = identity_provider_name;
+ login->identity_provider_token = identity_provider_token;
+
+ return aws_byte_buf_init_cache_and_update_cursors(
+ &login->login_buffer, allocator, &login->identity_provider_name, &login->identity_provider_token, NULL);
+}
+
+static void s_aws_cognito_login_clean_up(struct aws_cognito_login *login) {
+ aws_byte_buf_clean_up(&login->login_buffer);
+
+ AWS_ZERO_STRUCT(*login);
+}
+
+struct aws_credentials_provider_cognito_impl {
+ struct aws_http_connection_manager *connection_manager;
+ struct aws_retry_strategy *retry_strategy;
+ const struct aws_auth_http_system_vtable *function_table;
+
+ struct aws_string *endpoint;
+
+ struct aws_string *identity;
+
+ struct aws_array_list logins;
+
+ struct aws_string *custom_role_arn;
+};
+
+struct cognito_user_data {
+ struct aws_allocator *allocator;
+
+ struct aws_credentials_provider *provider;
+
+ aws_on_get_credentials_callback_fn *original_callback;
+ void *original_user_data;
+
+ struct aws_http_connection *connection;
+ struct aws_http_message *get_credentials_request;
+ struct aws_byte_buf request_body_buffer;
+ struct aws_input_stream *request_body_stream;
+
+ struct aws_retry_token *retry_token;
+ struct aws_credentials *credentials;
+ struct aws_byte_buf response_body;
+};
+
+static void s_user_data_reset(struct cognito_user_data *user_data) {
+ aws_byte_buf_clean_up(&user_data->request_body_buffer);
+
+ user_data->request_body_stream = aws_input_stream_release(user_data->request_body_stream);
+ user_data->get_credentials_request = aws_http_message_release(user_data->get_credentials_request);
+
+ struct aws_credentials_provider_cognito_impl *impl = user_data->provider->impl;
+ if (user_data->connection != NULL) {
+ impl->function_table->aws_http_connection_manager_release_connection(
+ impl->connection_manager, user_data->connection);
+ user_data->connection = NULL;
+ }
+
+ aws_byte_buf_reset(&user_data->response_body, false);
+}
+
+static void s_user_data_destroy(struct cognito_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+
+ s_user_data_reset(user_data);
+
+ aws_byte_buf_clean_up(&user_data->response_body);
+ aws_retry_token_release(user_data->retry_token);
+ aws_credentials_provider_release(user_data->provider);
+ aws_credentials_release(user_data->credentials);
+
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct cognito_user_data *s_user_data_new(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_allocator *allocator = provider->allocator;
+ struct cognito_user_data *cognito_user_data = aws_mem_calloc(allocator, 1, sizeof(struct cognito_user_data));
+ cognito_user_data->allocator = allocator;
+
+ aws_byte_buf_init(&cognito_user_data->response_body, cognito_user_data->allocator, HTTP_RESPONSE_BODY_INITIAL_SIZE);
+
+ cognito_user_data->provider = aws_credentials_provider_acquire(provider);
+ cognito_user_data->original_callback = callback;
+ cognito_user_data->original_user_data = user_data;
+
+ return cognito_user_data;
+}
+
+static void s_finalize_credentials_query(struct cognito_user_data *user_data, int error_code) {
+ AWS_FATAL_ASSERT(user_data != NULL);
+
+ if (user_data->credentials == NULL && error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_AUTH_CREDENTIALS_PROVIDER_COGNITO_SOURCE_FAILURE;
+ }
+
+ (user_data->original_callback)(user_data->credentials, error_code, user_data->original_user_data);
+
+ s_user_data_destroy(user_data);
+}
+
+/* Keys per Cognito-Identity service model */
+AWS_STATIC_STRING_FROM_LITERAL(s_credentials_key, "Credentials");
+AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_name, "AccessKeyId");
+AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_name, "SecretKey");
+AWS_STATIC_STRING_FROM_LITERAL(s_session_token_name, "SessionToken");
+AWS_STATIC_STRING_FROM_LITERAL(s_expiration_name, "Expiration");
+
+static int s_parse_credentials_from_response(struct cognito_user_data *user_data) {
+
+ int result = AWS_OP_ERR;
+
+ struct aws_json_value *response_document =
+ aws_json_value_new_from_string(user_data->allocator, aws_byte_cursor_from_buf(&user_data->response_body));
+ if (response_document == NULL) {
+ goto done;
+ }
+
+ struct aws_json_value *credentials_entry =
+ aws_json_value_get_from_object(response_document, aws_byte_cursor_from_string(s_credentials_key));
+ if (credentials_entry == NULL) {
+ goto done;
+ }
+
+ struct aws_parse_credentials_from_json_doc_options credentials_parse_options = {
+ .access_key_id_name = aws_string_c_str(s_access_key_id_name),
+ .secret_access_key_name = aws_string_c_str(s_secret_access_key_name),
+ .token_name = aws_string_c_str(s_session_token_name),
+ .expiration_name = aws_string_c_str(s_expiration_name),
+ .expiration_format = AWS_PCEF_NUMBER_UNIX_EPOCH,
+ .token_required = true,
+ .expiration_required = true,
+ };
+
+ user_data->credentials =
+ aws_parse_credentials_from_aws_json_object(user_data->allocator, credentials_entry, &credentials_parse_options);
+ if (user_data->credentials == NULL) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_json_value_destroy(response_document);
+
+ if (result != AWS_OP_SUCCESS) {
+ aws_raise_error(AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE);
+ }
+
+ return result;
+}
+
+static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
+ (void)token;
+ struct cognito_user_data *provider_user_data = user_data;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider retry task failed: %s",
+ (void *)provider_user_data->provider,
+ aws_error_str(error_code));
+ s_finalize_credentials_query(user_data, error_code);
+ return;
+ }
+
+ s_user_data_reset(provider_user_data);
+
+ struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl;
+
+ impl->function_table->aws_http_connection_manager_acquire_connection(
+ impl->connection_manager, s_on_connection_setup_fn, provider_user_data);
+}
+
+static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) {
+
+ struct cognito_user_data *provider_user_data = user_data;
+ struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl;
+
+ int http_response_code = 0;
+ impl->function_table->aws_http_stream_get_incoming_response_status(stream, &http_response_code);
+
+ if (http_response_code != 200) {
+ error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE;
+ }
+
+ impl->function_table->aws_http_stream_release(stream);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): GetCredentialsForIdentity call completed with http status %d",
+ (void *)provider_user_data->provider,
+ http_response_code);
+
+ if (http_response_code == AWS_HTTP_STATUS_CODE_200_OK) {
+ aws_retry_token_record_success(provider_user_data->retry_token);
+
+ if (s_parse_credentials_from_response(provider_user_data) == AWS_OP_SUCCESS) {
+ s_finalize_credentials_query(user_data, AWS_ERROR_SUCCESS);
+ return;
+ }
+
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to parse GetCredentialsForIdentity response",
+ (void *)provider_user_data->provider);
+
+ error_code = AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE;
+ }
+
+ /* Success path is done, error-only from here on out */
+
+ /* Unsure if this should be unconditional or a function of status code. STS does this unconditionally. */
+ impl->function_table->aws_http_connection_close(provider_user_data->connection);
+
+ enum aws_retry_error_type error_type =
+ aws_credentials_provider_compute_retry_error_type(http_response_code, error_code);
+ bool can_retry = http_response_code == 0 || error_type != AWS_RETRY_ERROR_TYPE_CLIENT_ERROR;
+ if (!can_retry) {
+ s_finalize_credentials_query(user_data, error_code);
+ return;
+ }
+
+ if (aws_retry_strategy_schedule_retry(
+ provider_user_data->retry_token, error_type, s_on_retry_ready, provider_user_data)) {
+ error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to schedule retry: %s",
+ (void *)provider_user_data->provider,
+ aws_error_str(error_code));
+ s_finalize_credentials_query(user_data, error_code);
+ return;
+ }
+}
+
+static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+ (void)stream;
+
+ struct cognito_user_data *provider_user_data = user_data;
+ return aws_byte_buf_append_dynamic(&provider_user_data->response_body, data);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_identity_id_key, "IdentityId");
+AWS_STATIC_STRING_FROM_LITERAL(s_custom_role_arn_key, "CustomRoleArn");
+AWS_STATIC_STRING_FROM_LITERAL(s_logins_key, "Logins");
+
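+/*
+ * The request body is a JSON document shaped like (values are illustrative):
+ * {"IdentityId": "<identity>", "CustomRoleArn": "<arn>", "Logins": {"<provider-name>": "<token>"}}
+ * where CustomRoleArn and Logins are included only when configured.
+ */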
+int s_create_get_credentials_for_identity_body_buffer(
+ struct aws_byte_buf *buffer,
+ struct cognito_user_data *provider_user_data) {
+ struct aws_allocator *allocator = provider_user_data->allocator;
+ struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl;
+
+ int result = AWS_OP_ERR;
+
+ struct aws_json_value *json_body = aws_json_value_new_object(allocator);
+ if (json_body == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_json_value *identity_string =
+ aws_json_value_new_string(allocator, aws_byte_cursor_from_string(impl->identity));
+ if (identity_string == NULL) {
+ goto done;
+ }
+
+ if (aws_json_value_add_to_object(json_body, aws_byte_cursor_from_string(s_identity_id_key), identity_string)) {
+ aws_json_value_destroy(identity_string);
+ goto done;
+ }
+
+ if (impl->custom_role_arn != NULL) {
+ struct aws_json_value *custom_role_arn_string =
+ aws_json_value_new_string(allocator, aws_byte_cursor_from_string(impl->custom_role_arn));
+ if (custom_role_arn_string == NULL) {
+ goto done;
+ }
+
+ if (aws_json_value_add_to_object(
+ json_body, aws_byte_cursor_from_string(s_custom_role_arn_key), custom_role_arn_string)) {
+ aws_json_value_destroy(custom_role_arn_string);
+ goto done;
+ }
+ }
+
+ size_t login_count = aws_array_list_length(&impl->logins);
+ if (login_count > 0) {
+ struct aws_json_value *logins = aws_json_value_new_object(allocator);
+ if (logins == NULL) {
+ goto done;
+ }
+
+ if (aws_json_value_add_to_object(json_body, aws_byte_cursor_from_string(s_logins_key), logins)) {
+ aws_json_value_destroy(logins);
+ goto done;
+ }
+
+ for (size_t i = 0; i < login_count; ++i) {
+ struct aws_cognito_login login;
+ if (aws_array_list_get_at(&impl->logins, &login, i)) {
+ goto done;
+ }
+
+ struct aws_json_value *login_value_string =
+ aws_json_value_new_string(allocator, login.identity_provider_token);
+ if (login_value_string == NULL) {
+ goto done;
+ }
+
+ if (aws_json_value_add_to_object(logins, login.identity_provider_name, login_value_string)) {
+ aws_json_value_destroy(login_value_string);
+ goto done;
+ }
+ }
+ }
+
+ if (aws_byte_buf_append_json_string(json_body, buffer)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_json_value_destroy(json_body);
+
+ return result;
+}
+
+static struct aws_http_header s_content_type_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/x-amz-json-1.1"),
+};
+
+AWS_STATIC_STRING_FROM_LITERAL(s_get_credentials_for_identity_path, "/");
+
+static struct aws_http_header s_x_amz_target_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("X-Amz-Target"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AWSCognitoIdentityService.GetCredentialsForIdentity"),
+};
+
+static int s_create_get_credentials_for_identity_request(struct cognito_user_data *provider_user_data) {
+ struct aws_credentials_provider_cognito_impl *impl = provider_user_data->provider->impl;
+
+ struct aws_byte_buf body_buffer;
+ AWS_ZERO_STRUCT(body_buffer);
+
+ struct aws_input_stream *body_stream = NULL;
+ struct aws_http_message *request = aws_http_message_new_request(provider_user_data->allocator);
+ if (request == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_http_message_set_request_method(request, aws_http_method_post)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_path(request, aws_byte_cursor_from_string(s_get_credentials_for_identity_path))) {
+ goto on_error;
+ }
+
+ struct aws_http_header host_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"),
+ .value = aws_byte_cursor_from_string(impl->endpoint),
+ };
+
+ if (aws_http_message_add_header(request, host_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_content_type_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_x_amz_target_header)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init(&body_buffer, provider_user_data->allocator, HTTP_REQUEST_BODY_INITIAL_SIZE)) {
+ goto on_error;
+ }
+
+ if (s_create_get_credentials_for_identity_body_buffer(&body_buffer, provider_user_data)) {
+ goto on_error;
+ }
+
+ char content_length[21];
+ AWS_ZERO_ARRAY(content_length);
+ snprintf(content_length, sizeof(content_length), "%" PRIu64, (uint64_t)body_buffer.len);
+
+ struct aws_http_header content_length_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ .value = aws_byte_cursor_from_c_str(content_length),
+ };
+
+ if (aws_http_message_add_header(request, content_length_header)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&body_buffer);
+ body_stream = aws_input_stream_new_from_cursor(provider_user_data->allocator, &payload_cur);
+ if (body_stream == NULL) {
+ goto on_error;
+ }
+
+ aws_http_message_set_body_stream(request, body_stream);
+
+ provider_user_data->get_credentials_request = request;
+ provider_user_data->request_body_buffer = body_buffer;
+ provider_user_data->request_body_stream = body_stream;
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_byte_buf_clean_up(&body_buffer);
+ aws_input_stream_release(body_stream);
+ aws_http_message_release(request);
+
+ return AWS_OP_ERR;
+}
+
+static void s_on_connection_setup_fn(struct aws_http_connection *connection, int error_code, void *user_data) {
+ struct cognito_user_data *wrapped_user_data = user_data;
+ struct aws_http_stream *stream = NULL;
+ struct aws_credentials_provider_cognito_impl *impl = wrapped_user_data->provider->impl;
+
+ if (connection == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to acquire http connection: %s",
+ (void *)wrapped_user_data->provider,
+ aws_error_debug_str(error_code));
+ goto on_error;
+ }
+
+ wrapped_user_data->connection = connection;
+ if (s_create_get_credentials_for_identity_request(wrapped_user_data)) {
+ error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to create http request: %s",
+ (void *)wrapped_user_data->provider,
+ aws_error_debug_str(error_code));
+ goto on_error;
+ }
+
+ struct aws_http_make_request_options options = {
+ .user_data = user_data,
+ .request = wrapped_user_data->get_credentials_request,
+ .self_size = sizeof(struct aws_http_make_request_options),
+ .on_response_headers = NULL,
+ .on_response_header_block_done = NULL,
+ .on_response_body = s_on_incoming_body_fn,
+ .on_complete = s_on_stream_complete_fn,
+ };
+
+ stream = impl->function_table->aws_http_connection_make_request(connection, &options);
+ if (!stream) {
+ error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to create http stream: %s",
+ (void *)wrapped_user_data->provider,
+ aws_error_debug_str(error_code));
+ goto on_error;
+ }
+
+ if (impl->function_table->aws_http_stream_activate(stream)) {
+ error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to activate http stream: %s",
+ (void *)wrapped_user_data->provider,
+ aws_error_debug_str(error_code));
+ goto on_error;
+ }
+
+ return;
+
+on_error:
+
+ impl->function_table->aws_http_stream_release(stream);
+ s_finalize_credentials_query(wrapped_user_data, error_code);
+}
+
+static void s_on_retry_token_acquired(
+ struct aws_retry_strategy *strategy,
+ int error_code,
+ struct aws_retry_token *token,
+ void *user_data) {
+ (void)strategy;
+ struct cognito_user_data *wrapped_user_data = user_data;
+
+ if (token == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to acquire retry token: %s",
+ (void *)wrapped_user_data->provider,
+ aws_error_debug_str(error_code));
+ s_finalize_credentials_query(wrapped_user_data, error_code);
+ return;
+ }
+
+ wrapped_user_data->retry_token = token;
+
+ struct aws_credentials_provider_cognito_impl *impl = wrapped_user_data->provider->impl;
+
+ impl->function_table->aws_http_connection_manager_acquire_connection(
+ impl->connection_manager, s_on_connection_setup_fn, wrapped_user_data);
+}
+
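+/*
+ * Request lifecycle: acquire a retry token, acquire an HTTP connection, build and send a
+ * GetCredentialsForIdentity request, buffer the response body, then parse credentials out of the
+ * JSON response. Retryable failures are rescheduled through the standard retry strategy
+ * (configured with COGNITO_MAX_RETRIES).
+ */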
+static int s_credentials_provider_cognito_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_cognito_impl *impl = provider->impl;
+
+ struct cognito_user_data *wrapped_user_data = s_user_data_new(provider, callback, user_data);
+ if (wrapped_user_data == NULL) {
+ goto on_error;
+ }
+
+ if (aws_retry_strategy_acquire_retry_token(
+ impl->retry_strategy, NULL, s_on_retry_token_acquired, wrapped_user_data, 100)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to acquire retry token with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ s_user_data_destroy(wrapped_user_data);
+
+ return AWS_OP_ERR;
+}
+
+static void s_credentials_provider_cognito_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_cognito_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ /* aws_http_connection_manager_release will eventually lead to a call of s_on_connection_manager_shutdown,
+ * which releases the memory for both provider and impl.
+ */
+ if (impl->connection_manager) {
+ impl->function_table->aws_http_connection_manager_release(impl->connection_manager);
+ } else {
+ /* If provider setup failed partway through, the connection_manager might not exist.
+ * In that case, invoke the shutdown completion callback directly to finish cleanup. */
+ s_on_connection_manager_shutdown(provider);
+ }
+
+ /* freeing the provider takes place in the shutdown callback below */
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_cognito_vtable = {
+ .get_credentials = s_credentials_provider_cognito_get_credentials_async,
+ .destroy = s_credentials_provider_cognito_destroy,
+};
+
+static void s_on_connection_manager_shutdown(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ struct aws_credentials_provider_cognito_impl *impl = provider->impl;
+
+ aws_retry_strategy_release(impl->retry_strategy);
+
+ aws_string_destroy(impl->endpoint);
+ aws_string_destroy(impl->identity);
+ aws_string_destroy(impl->custom_role_arn);
+
+ for (size_t i = 0; i < aws_array_list_length(&impl->logins); ++i) {
+ struct aws_cognito_login login;
+ if (aws_array_list_get_at(&impl->logins, &login, i)) {
+ continue;
+ }
+
+ s_aws_cognito_login_clean_up(&login);
+ }
+
+ aws_array_list_clean_up(&impl->logins);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+static int s_validate_options(const struct aws_credentials_provider_cognito_options *options) {
+ if (options == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (options->tls_ctx == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(static) Cognito credentials provider options must include a TLS context");
+ return AWS_OP_ERR;
+ }
+
+ if (options->bootstrap == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(static) Cognito credentials provider options must include a client bootstrap");
+ return AWS_OP_ERR;
+ }
+
+ if (options->endpoint.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(static) Cognito credentials provider options must have a non-empty endpoint");
+ return AWS_OP_ERR;
+ }
+
+ if (options->identity.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(static) Cognito credentials provider options must have a non-empty identity");
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_credentials_provider *aws_credentials_provider_new_cognito(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_cognito_options *options) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_cognito_impl *impl = NULL;
+
+ if (s_validate_options(options)) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_cognito_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_cognito_vtable, impl);
+
+ struct aws_tls_connection_options tls_connection_options;
+ AWS_ZERO_STRUCT(tls_connection_options);
+ aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx);
+ struct aws_byte_cursor host = options->endpoint;
+ if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to create tls connection options with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+
+ struct aws_socket_options socket_options;
+ AWS_ZERO_STRUCT(socket_options);
+ socket_options.type = AWS_SOCKET_STREAM;
+ socket_options.domain = AWS_SOCKET_IPV4;
+ socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert(
+ COGNITO_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+
+ struct aws_http_connection_manager_options manager_options;
+ AWS_ZERO_STRUCT(manager_options);
+ manager_options.bootstrap = options->bootstrap;
+ manager_options.initial_window_size = SIZE_MAX;
+ manager_options.socket_options = &socket_options;
+ manager_options.host = options->endpoint;
+ manager_options.port = 443;
+ manager_options.max_connections = 2;
+ manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown;
+ manager_options.shutdown_complete_user_data = provider;
+ manager_options.tls_connection_options = &tls_connection_options;
+ manager_options.proxy_options = options->http_proxy_options;
+
+ impl->function_table = options->function_table;
+ if (impl->function_table == NULL) {
+ impl->function_table = g_aws_credentials_provider_http_function_table;
+ }
+
+ impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options);
+ if (impl->connection_manager == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to create http connection manager with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+
+ impl->endpoint = aws_string_new_from_cursor(allocator, &options->endpoint);
+ impl->identity = aws_string_new_from_cursor(allocator, &options->identity);
+
+ if (options->custom_role_arn != NULL) {
+ impl->custom_role_arn = aws_string_new_from_cursor(allocator, options->custom_role_arn);
+ }
+
+ aws_array_list_init_dynamic(&impl->logins, allocator, options->login_count, sizeof(struct aws_cognito_login));
+
+ for (size_t i = 0; i < options->login_count; ++i) {
+ struct aws_cognito_identity_provider_token_pair *login_token_pair = &options->logins[i];
+
+ struct aws_cognito_login login;
+ if (s_aws_cognito_login_init(
+ &login,
+ allocator,
+ login_token_pair->identity_provider_name,
+ login_token_pair->identity_provider_token)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to initialize login entry with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+
+ aws_array_list_push_back(&impl->logins, &login);
+ }
+
+ struct aws_standard_retry_options retry_options = {
+ .backoff_retry_options =
+ {
+ .el_group = options->bootstrap->event_loop_group,
+ .max_retries = COGNITO_MAX_RETRIES,
+ },
+ };
+
+ impl->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options);
+ if (!impl->retry_strategy) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Cognito credentials provider failed to create a retry strategy with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+
+ return provider;
+
+on_error:
+
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+ aws_credentials_provider_destroy(provider);
+
+ return NULL;
+}
+
+/*************************************************************************/
+
+#define DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS (15 * 60 * 1000)
+
+/*
+ * Cognito provider with caching implementation
+ */
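+/*
+ * The caching wrapper takes its own reference to the inner Cognito provider, so the local
+ * reference is released before returning and the wrapper becomes the sole owner.
+ */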
+struct aws_credentials_provider *aws_credentials_provider_new_cognito_caching(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_cognito_options *options) {
+
+ struct aws_credentials_provider *cognito_provider = NULL;
+ struct aws_credentials_provider *caching_provider = NULL;
+
+ cognito_provider = aws_credentials_provider_new_cognito(allocator, options);
+ if (cognito_provider == NULL) {
+ goto on_error;
+ }
+
+ struct aws_credentials_provider_cached_options cached_options = {
+ .source = cognito_provider,
+ .refresh_time_in_milliseconds = DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS,
+ };
+
+ caching_provider = aws_credentials_provider_new_cached(allocator, &cached_options);
+ if (caching_provider == NULL) {
+ goto on_error;
+ }
+
+ aws_credentials_provider_release(cognito_provider);
+
+ return caching_provider;
+
+on_error:
+
+ aws_credentials_provider_release(caching_provider);
+ aws_credentials_provider_release(cognito_provider);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
new file mode 100644
index 0000000000..a68241f9e0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_default_chain.c
@@ -0,0 +1,423 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/environment.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#define DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS (15 * 60 * 1000)
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+/*
+ * For designated initialization: .providers = providers,
+ * of aws_credentials_provider_chain_options in function
+ * aws_credentials_provider_new_chain_default
+ */
+# pragma warning(disable : 4221)
+#endif /* _MSC_VER */
+
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_relative_uri, "AWS_CONTAINER_CREDENTIALS_RELATIVE_URI");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_full_uri, "AWS_CONTAINER_CREDENTIALS_FULL_URI");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_creds_env_token, "AWS_CONTAINER_AUTHORIZATION_TOKEN");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_host, "169.254.170.2");
+AWS_STATIC_STRING_FROM_LITERAL(s_ec2_creds_env_disable, "AWS_EC2_METADATA_DISABLED");
+
+/**
+ * ECS and IMDS credentials providers are mutually exclusive;
+ * ECS has higher priority.
+ */
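+/*
+ * Selection order: AWS_CONTAINER_CREDENTIALS_RELATIVE_URI wins (ECS against the well-known
+ * 169.254.170.2 host), then AWS_CONTAINER_CREDENTIALS_FULL_URI (ECS against the parsed URI),
+ * otherwise IMDS unless AWS_EC2_METADATA_DISABLED turns it off.
+ */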
+static struct aws_credentials_provider *s_aws_credentials_provider_new_ecs_or_imds(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_shutdown_options *shutdown_options,
+ struct aws_client_bootstrap *bootstrap,
+ struct aws_tls_ctx *tls_ctx) {
+
+ struct aws_byte_cursor auth_token_cursor;
+ AWS_ZERO_STRUCT(auth_token_cursor);
+
+ struct aws_credentials_provider *ecs_or_imds_provider = NULL;
+ struct aws_string *ecs_relative_uri = NULL;
+ struct aws_string *ecs_full_uri = NULL;
+ struct aws_string *ec2_imds_disable = NULL;
+ struct aws_string *ecs_token = NULL;
+
+ if (aws_get_environment_value(allocator, s_ecs_creds_env_relative_uri, &ecs_relative_uri) != AWS_OP_SUCCESS ||
+ aws_get_environment_value(allocator, s_ecs_creds_env_full_uri, &ecs_full_uri) != AWS_OP_SUCCESS ||
+ aws_get_environment_value(allocator, s_ec2_creds_env_disable, &ec2_imds_disable) != AWS_OP_SUCCESS ||
+ aws_get_environment_value(allocator, s_ecs_creds_env_token, &ecs_token) != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed reading environment variables during default credentials provider chain initialization.");
+ goto clean_up;
+ }
+
+ if (ecs_token && ecs_token->len) {
+ auth_token_cursor = aws_byte_cursor_from_string(ecs_token);
+ }
+
+ /*
+ * ToDo: the URI choice logic should live in the ECS provider's init logic. As it stands, it is
+ * very difficult to use the ECS provider anywhere outside the default chain.
+ */
+ if (ecs_relative_uri && ecs_relative_uri->len) {
+ struct aws_credentials_provider_ecs_options ecs_options = {
+ .shutdown_options = *shutdown_options,
+ .bootstrap = bootstrap,
+ .host = aws_byte_cursor_from_string(s_ecs_host),
+ .path_and_query = aws_byte_cursor_from_string(ecs_relative_uri),
+ .tls_ctx = NULL,
+ .auth_token = auth_token_cursor,
+ };
+ ecs_or_imds_provider = aws_credentials_provider_new_ecs(allocator, &ecs_options);
+
+ } else if (ecs_full_uri && ecs_full_uri->len) {
+ struct aws_uri uri;
+ struct aws_byte_cursor uri_cstr = aws_byte_cursor_from_string(ecs_full_uri);
+ if (AWS_OP_ERR == aws_uri_init_parse(&uri, allocator, &uri_cstr)) {
+ goto clean_up;
+ }
+
+ struct aws_credentials_provider_ecs_options ecs_options = {
+ .shutdown_options = *shutdown_options,
+ .bootstrap = bootstrap,
+ .host = uri.host_name,
+ .path_and_query = uri.path_and_query,
+ .tls_ctx = aws_byte_cursor_eq_c_str_ignore_case(&(uri.scheme), "HTTPS") ? tls_ctx : NULL,
+ .auth_token = auth_token_cursor,
+ .port = uri.port,
+ };
+
+ ecs_or_imds_provider = aws_credentials_provider_new_ecs(allocator, &ecs_options);
+ aws_uri_clean_up(&uri);
+ } else if (ec2_imds_disable == NULL || aws_string_eq_c_str_ignore_case(ec2_imds_disable, "false")) {
+ struct aws_credentials_provider_imds_options imds_options = {
+ .shutdown_options = *shutdown_options,
+ .bootstrap = bootstrap,
+ };
+ ecs_or_imds_provider = aws_credentials_provider_new_imds(allocator, &imds_options);
+ }
+
+clean_up:
+
+ aws_string_destroy(ecs_relative_uri);
+ aws_string_destroy(ecs_full_uri);
+ aws_string_destroy(ec2_imds_disable);
+ aws_string_destroy(ecs_token);
+
+ return ecs_or_imds_provider;
+}
+
+struct default_chain_callback_data {
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *default_chain_provider;
+ aws_on_get_credentials_callback_fn *original_callback;
+ void *original_user_data;
+};
+
+static struct default_chain_callback_data *s_create_callback_data(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn *callback,
+ void *user_data) {
+ struct default_chain_callback_data *callback_data =
+ aws_mem_calloc(provider->allocator, 1, sizeof(struct default_chain_callback_data));
+ if (callback_data == NULL) {
+ return NULL;
+ }
+ callback_data->allocator = provider->allocator;
+ callback_data->default_chain_provider = provider;
+ callback_data->original_callback = callback;
+ callback_data->original_user_data = user_data;
+
+ aws_credentials_provider_acquire(provider);
+
+ return callback_data;
+}
+
+static void s_destroy_callback_data(struct default_chain_callback_data *callback_data) {
+ aws_credentials_provider_release(callback_data->default_chain_provider);
+ aws_mem_release(callback_data->allocator, callback_data);
+}
+
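+/*
+ * shutdowns_remaining starts at 1 (for the chain provider itself) and is incremented once per
+ * sub-provider that registers s_on_sub_provider_shutdown_completed as its shutdown callback;
+ * the provider's memory is released only after the last outstanding shutdown completes.
+ */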
+struct aws_credentials_provider_default_chain_impl {
+ struct aws_atomic_var shutdowns_remaining;
+ struct aws_credentials_provider *cached_provider;
+};
+
+static void s_aws_provider_default_chain_callback(
+ struct aws_credentials *credentials,
+ int error_code,
+ void *user_data) {
+ struct default_chain_callback_data *callback_data = user_data;
+ struct aws_credentials_provider *provider = callback_data->default_chain_provider;
+
+ if (credentials != NULL) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Default chain credentials provider successfully sourced credentials",
+ (void *)provider);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Default chain credentials provider failed to source credentials with error %d(%s)",
+ (void *)provider,
+ error_code,
+ aws_error_debug_str(error_code));
+ }
+
+ callback_data->original_callback(credentials, error_code, callback_data->original_user_data);
+ s_destroy_callback_data(callback_data);
+}
+
+static int s_credentials_provider_default_chain_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_default_chain_impl *impl = provider->impl;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Credentials provider chain get credentials dispatch",
+ (void *)provider);
+
+ struct default_chain_callback_data *callback_data = s_create_callback_data(provider, callback, user_data);
+ if (callback_data == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ int result = aws_credentials_provider_get_credentials(
+ impl->cached_provider, s_aws_provider_default_chain_callback, callback_data);
+ if (result != AWS_OP_SUCCESS) {
+ s_destroy_callback_data(callback_data);
+ }
+
+ return result;
+}
+
+static void s_on_sub_provider_shutdown_completed(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+ struct aws_credentials_provider_default_chain_impl *impl = provider->impl;
+
+ size_t remaining = aws_atomic_fetch_sub(&impl->shutdowns_remaining, 1);
+ if (remaining != 1) {
+ return;
+ }
+
+ /* Invoke our own shutdown callback */
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+static void s_credentials_provider_default_chain_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_default_chain_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_credentials_provider_release(impl->cached_provider);
+
+ s_on_sub_provider_shutdown_completed(provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_default_chain_vtable = {
+ .get_credentials = s_credentials_provider_default_chain_get_credentials_async,
+ .destroy = s_credentials_provider_default_chain_destroy,
+};
+
+/*
+ * Default provider chain implementation
+ */
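+/*
+ * Composition: environment -> profile -> STS web identity -> ECS or IMDS, all wrapped in a
+ * caching provider that refreshes at most every DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS
+ * (15 minutes), or sooner if the sourced credentials expire earlier.
+ */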
+struct aws_credentials_provider *aws_credentials_provider_new_chain_default(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_chain_default_options *options) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_default_chain_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_default_chain_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_default_chain_vtable, impl);
+ provider->shutdown_options = options->shutdown_options;
+
+ /* 1 shutdown call comes from the provider's own destroy */
+ aws_atomic_init_int(&impl->shutdowns_remaining, 1);
+
+ struct aws_credentials_provider_shutdown_options sub_provider_shutdown_options;
+ AWS_ZERO_STRUCT(sub_provider_shutdown_options);
+ sub_provider_shutdown_options.shutdown_callback = s_on_sub_provider_shutdown_completed;
+ sub_provider_shutdown_options.shutdown_user_data = provider;
+
+ struct aws_tls_ctx *tls_ctx = NULL;
+ struct aws_credentials_provider *environment_provider = NULL;
+ struct aws_credentials_provider *profile_provider = NULL;
+ struct aws_credentials_provider *sts_provider = NULL;
+ struct aws_credentials_provider *ecs_or_imds_provider = NULL;
+ struct aws_credentials_provider *chain_provider = NULL;
+ struct aws_credentials_provider *cached_provider = NULL;
+
+ if (options->tls_ctx) {
+ tls_ctx = aws_tls_ctx_acquire(options->tls_ctx);
+ } else {
+#ifdef BYO_CRYPTO
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "TLS context must be provided to credentials provider.");
+ goto on_error;
+#else
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): TLS context not provided, initializing a new one for credentials provider.",
+ (void *)provider);
+ struct aws_tls_ctx_options tls_options;
+ aws_tls_ctx_options_init_default_client(&tls_options, allocator);
+ tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options);
+ aws_tls_ctx_options_clean_up(&tls_options);
+ if (!tls_ctx) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to create a TLS context with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+#endif /* BYO_CRYPTO */
+ }
+
+ enum { providers_size = 4 };
+ struct aws_credentials_provider *providers[providers_size];
+ AWS_ZERO_ARRAY(providers);
+ size_t index = 0;
+
+ struct aws_credentials_provider_environment_options environment_options;
+ AWS_ZERO_STRUCT(environment_options);
+ environment_provider = aws_credentials_provider_new_environment(allocator, &environment_options);
+ if (environment_provider == NULL) {
+ goto on_error;
+ }
+
+ providers[index++] = environment_provider;
+
+ struct aws_credentials_provider_profile_options profile_options;
+ AWS_ZERO_STRUCT(profile_options);
+ profile_options.bootstrap = options->bootstrap;
+ profile_options.tls_ctx = tls_ctx;
+ profile_options.shutdown_options = sub_provider_shutdown_options;
+ profile_options.profile_collection_cached = options->profile_collection_cached;
+ profile_provider = aws_credentials_provider_new_profile(allocator, &profile_options);
+ if (profile_provider != NULL) {
+ providers[index++] = profile_provider;
+ /* 1 shutdown call from the profile provider's shutdown */
+ aws_atomic_fetch_add(&impl->shutdowns_remaining, 1);
+ }
+
+ struct aws_credentials_provider_sts_web_identity_options sts_options;
+ AWS_ZERO_STRUCT(sts_options);
+ sts_options.bootstrap = options->bootstrap;
+ sts_options.tls_ctx = tls_ctx;
+ sts_options.shutdown_options = sub_provider_shutdown_options;
+ sts_options.config_profile_collection_cached = options->profile_collection_cached;
+ sts_provider = aws_credentials_provider_new_sts_web_identity(allocator, &sts_options);
+ if (sts_provider != NULL) {
+ providers[index++] = sts_provider;
+ /* 1 shutdown call from the web identity provider's shutdown */
+ aws_atomic_fetch_add(&impl->shutdowns_remaining, 1);
+ }
+
+ ecs_or_imds_provider = s_aws_credentials_provider_new_ecs_or_imds(
+ allocator, &sub_provider_shutdown_options, options->bootstrap, tls_ctx);
+ if (ecs_or_imds_provider != NULL) {
+ providers[index++] = ecs_or_imds_provider;
+ /* 1 shutdown call from the imds or ecs provider's shutdown */
+ aws_atomic_fetch_add(&impl->shutdowns_remaining, 1);
+ }
+
+ AWS_FATAL_ASSERT(index <= providers_size);
+
+ struct aws_credentials_provider_chain_options chain_options = {
+ .provider_count = index,
+ .providers = providers,
+ };
+
+ chain_provider = aws_credentials_provider_new_chain(allocator, &chain_options);
+ if (chain_provider == NULL) {
+ goto on_error;
+ }
+
+ /*
+ * Transfer ownership
+ */
+ aws_credentials_provider_release(environment_provider);
+ aws_credentials_provider_release(profile_provider);
+ aws_credentials_provider_release(sts_provider);
+ aws_credentials_provider_release(ecs_or_imds_provider);
+
+ struct aws_credentials_provider_cached_options cached_options = {
+ .source = chain_provider,
+ .refresh_time_in_milliseconds = DEFAULT_CREDENTIAL_PROVIDER_REFRESH_MS,
+ };
+
+ cached_provider = aws_credentials_provider_new_cached(allocator, &cached_options);
+ if (cached_provider == NULL) {
+ goto on_error;
+ }
+
+ /*
+ * Transfer ownership
+ */
+ aws_credentials_provider_release(chain_provider);
+
+ impl->cached_provider = cached_provider;
+
+ /* Subproviders have their own reference to the tls_ctx now */
+ aws_tls_ctx_release(tls_ctx);
+
+ return provider;
+
+on_error:
+
+ /*
+ * We have to be more careful than usual with this cleanup pattern, since the chain/cache will
+ * recursively destroy the other providers via reference release.
+ *
+ * Technically, cached_provider can never be non-null here, but handle it anyway in case the
+ * flow changes in the future.
+ */
+ if (cached_provider) {
+ aws_credentials_provider_release(cached_provider);
+ } else if (chain_provider) {
+ aws_credentials_provider_release(chain_provider);
+ } else {
+ aws_credentials_provider_release(ecs_or_imds_provider);
+ aws_credentials_provider_release(profile_provider);
+ aws_credentials_provider_release(sts_provider);
+ aws_credentials_provider_release(environment_provider);
+ }
+
+ aws_tls_ctx_release(tls_ctx);
+
+ aws_mem_release(allocator, provider);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
new file mode 100644
index 0000000000..63cd66962f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_delegate.c
@@ -0,0 +1,64 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/private/credentials_utils.h>
+
+struct aws_credentials_provider_delegate_impl {
+ aws_credentials_provider_delegate_get_credentials_fn *get_credentials;
+ void *user_data;
+};
+
+static int s_credentials_provider_delegate_get_credentials(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *callback_user_data) {
+
+ struct aws_credentials_provider_delegate_impl *impl = provider->impl;
+ return impl->get_credentials(impl->user_data, callback, callback_user_data);
+}
+
+static void s_credentials_provider_delegate_destroy(struct aws_credentials_provider *provider) {
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_credentials_provider_delegate_vtable = {
+ .get_credentials = s_credentials_provider_delegate_get_credentials,
+ .destroy = s_credentials_provider_delegate_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_delegate(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_delegate_options *options) {
+
+ AWS_ASSERT(options);
+ AWS_ASSERT(options->get_credentials);
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_delegate_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_delegate_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_credentials_provider_delegate_vtable, impl);
+ provider->shutdown_options = options->shutdown_options;
+
+ impl->get_credentials = options->get_credentials;
+ impl->user_data = options->delegate_user_data;
+
+ return provider;
+}
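For context, here is a minimal usage sketch of the delegate provider defined above (an editorial addition, not part of the upstream diff): the application supplies a callback matching the signature that impl->get_credentials is invoked with, and the provider forwards every query to it. The helper names and the example key material are illustrative assumptions.

#include <aws/auth/credentials.h>
#include <aws/common/byte_buf.h>

#include <stdint.h>

/* Illustrative sketch only -- not part of the vendored sources. */
static int s_my_get_credentials(
    void *delegate_user_data,
    aws_on_get_credentials_callback_fn callback,
    void *callback_user_data) {

    struct aws_allocator *allocator = delegate_user_data;

    /* Source credentials from anywhere (a vault, an HSM, a test fixture, ...). */
    struct aws_credentials *credentials = aws_credentials_new(
        allocator,
        aws_byte_cursor_from_c_str("AKIDEXAMPLE"),
        aws_byte_cursor_from_c_str("secret-example"),
        aws_byte_cursor_from_c_str(""),
        UINT64_MAX);
    if (credentials == NULL) {
        return AWS_OP_ERR;
    }

    callback(credentials, AWS_ERROR_SUCCESS, callback_user_data);
    aws_credentials_release(credentials);
    return AWS_OP_SUCCESS;
}

static struct aws_credentials_provider *s_make_delegate_provider(struct aws_allocator *allocator) {
    struct aws_credentials_provider_delegate_options options = {
        .get_credentials = s_my_get_credentials,
        .delegate_user_data = allocator,
    };
    return aws_credentials_provider_new_delegate(allocator, &options);
}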
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
new file mode 100644
index 0000000000..91c74f0852
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_ecs.c
@@ -0,0 +1,590 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/date_time.h>
+#include <aws/common/string.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+# pragma warning(disable : 4232)
+#endif /* _MSC_VER */
+
+/* ecs task role credentials body response is currently ~ 1300 characters + name length */
+#define ECS_RESPONSE_SIZE_INITIAL 2048
+#define ECS_RESPONSE_SIZE_LIMIT 10000
+#define ECS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2
+
+static void s_on_connection_manager_shutdown(void *user_data);
+
+struct aws_credentials_provider_ecs_impl {
+ struct aws_http_connection_manager *connection_manager;
+ const struct aws_auth_http_system_vtable *function_table;
+ struct aws_string *host;
+ struct aws_string *path_and_query;
+ struct aws_string *auth_token;
+};
+
+/*
+ * Tracking structure for each outstanding async query to an ecs provider
+ */
+struct aws_credentials_provider_ecs_user_data {
+ /* immutable post-creation */
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *ecs_provider;
+ aws_on_get_credentials_callback_fn *original_callback;
+ void *original_user_data;
+
+ /* mutable */
+ struct aws_http_connection *connection;
+ struct aws_http_message *request;
+ struct aws_byte_buf current_result;
+ int status_code;
+ int error_code;
+};
+
+static void s_aws_credentials_provider_ecs_user_data_destroy(struct aws_credentials_provider_ecs_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+
+ struct aws_credentials_provider_ecs_impl *impl = user_data->ecs_provider->impl;
+
+ if (user_data->connection) {
+ impl->function_table->aws_http_connection_manager_release_connection(
+ impl->connection_manager, user_data->connection);
+ }
+
+ aws_byte_buf_clean_up(&user_data->current_result);
+
+ if (user_data->request) {
+ aws_http_message_destroy(user_data->request);
+ }
+ aws_credentials_provider_release(user_data->ecs_provider);
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct aws_credentials_provider_ecs_user_data *s_aws_credentials_provider_ecs_user_data_new(
+ struct aws_credentials_provider *ecs_provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_ecs_user_data *wrapped_user_data =
+ aws_mem_calloc(ecs_provider->allocator, 1, sizeof(struct aws_credentials_provider_ecs_user_data));
+ if (wrapped_user_data == NULL) {
+ goto on_error;
+ }
+
+ wrapped_user_data->allocator = ecs_provider->allocator;
+ wrapped_user_data->ecs_provider = ecs_provider;
+ aws_credentials_provider_acquire(ecs_provider);
+ wrapped_user_data->original_user_data = user_data;
+ wrapped_user_data->original_callback = callback;
+
+ if (aws_byte_buf_init(&wrapped_user_data->current_result, ecs_provider->allocator, ECS_RESPONSE_SIZE_INITIAL)) {
+ goto on_error;
+ }
+
+ return wrapped_user_data;
+
+on_error:
+
+ s_aws_credentials_provider_ecs_user_data_destroy(wrapped_user_data);
+
+ return NULL;
+}
+
+static void s_aws_credentials_provider_ecs_user_data_reset_response(
+ struct aws_credentials_provider_ecs_user_data *ecs_user_data) {
+ ecs_user_data->current_result.len = 0;
+ ecs_user_data->status_code = 0;
+
+ if (ecs_user_data->request) {
+ aws_http_message_destroy(ecs_user_data->request);
+ ecs_user_data->request = NULL;
+ }
+}
+
+/*
+ * In general, the ECS document looks something like:
+ {
+ "Code" : "Success",
+ "LastUpdated" : "2019-05-28T18:03:09Z",
+ "Type" : "AWS-HMAC",
+ "AccessKeyId" : "...",
+ "SecretAccessKey" : "...",
+ "Token" : "...",
+ "Expiration" : "2019-05-29T00:21:43Z"
+ }
+ *
+ * No matter the result, this always gets called, assuming that ecs_user_data was successfully allocated.
+ */
+static void s_ecs_finalize_get_credentials_query(struct aws_credentials_provider_ecs_user_data *ecs_user_data) {
+ /* Try to build credentials from whatever, if anything, was in the result */
+ struct aws_credentials *credentials = NULL;
+ struct aws_parse_credentials_from_json_doc_options parse_options = {
+ .access_key_id_name = "AccessKeyId",
+ .secret_access_key_name = "SecretAccessKey",
+ .token_name = "Token",
+ .expiration_name = "Expiration",
+ .token_required = true,
+ .expiration_required = true,
+ };
+ if (aws_byte_buf_append_null_terminator(&ecs_user_data->current_result) == AWS_OP_SUCCESS) {
+ credentials = aws_parse_credentials_from_json_document(
+ ecs_user_data->allocator, (const char *)ecs_user_data->current_result.buffer, &parse_options);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider failed to add null terminating char to resulting buffer.",
+ (void *)ecs_user_data->ecs_provider);
+ }
+
+ if (credentials != NULL) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider successfully queried instance role credentials",
+ (void *)ecs_user_data->ecs_provider);
+ } else {
+ /* no credentials, make sure we have a valid error to report */
+ if (ecs_user_data->error_code == AWS_ERROR_SUCCESS) {
+ ecs_user_data->error_code = aws_last_error();
+ if (ecs_user_data->error_code == AWS_ERROR_SUCCESS) {
+ ecs_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_ECS_SOURCE_FAILURE;
+ }
+ }
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider failed to query instance role credentials with error %d(%s)",
+ (void *)ecs_user_data->ecs_provider,
+ ecs_user_data->error_code,
+ aws_error_str(ecs_user_data->error_code));
+ }
+
+ /* pass the credentials back */
+ ecs_user_data->original_callback(credentials, ecs_user_data->error_code, ecs_user_data->original_user_data);
+
+ /* clean up */
+ s_aws_credentials_provider_ecs_user_data_destroy(ecs_user_data);
+ aws_credentials_release(credentials);
+}
+
+static int s_ecs_on_incoming_body_fn(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data) {
+
+ (void)stream;
+
+ struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data;
+ struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider received %zu response bytes",
+ (void *)ecs_user_data->ecs_provider,
+ data->len);
+
+ if (data->len + ecs_user_data->current_result.len > ECS_RESPONSE_SIZE_LIMIT) {
+ impl->function_table->aws_http_connection_close(ecs_user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider query response exceeded maximum allowed length",
+ (void *)ecs_user_data->ecs_provider);
+
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ if (aws_byte_buf_append_dynamic(&ecs_user_data->current_result, data)) {
+ impl->function_table->aws_http_connection_close(ecs_user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider query error appending response",
+ (void *)ecs_user_data->ecs_provider);
+
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_ecs_on_incoming_headers_fn(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data) {
+
+ (void)header_array;
+ (void)num_headers;
+
+ if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data;
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ if (ecs_user_data->status_code == 0) {
+ struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl;
+ if (impl->function_table->aws_http_stream_get_incoming_response_status(
+ stream, &ecs_user_data->status_code)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider failed to get http status code",
+ (void *)ecs_user_data->ecs_provider);
+
+ return AWS_OP_ERR;
+ }
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) ECS credentials provider query received http status code %d",
+ (void *)ecs_user_data->ecs_provider,
+ ecs_user_data->status_code);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_ecs_query_task_role_credentials(struct aws_credentials_provider_ecs_user_data *ecs_user_data);
+
+static void s_ecs_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) {
+ struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data;
+
+ aws_http_message_destroy(ecs_user_data->request);
+ ecs_user_data->request = NULL;
+
+ struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl;
+ impl->function_table->aws_http_stream_release(stream);
+
+ /*
+ * On anything other than a 200, nullify the response and pretend there was
+ * an error
+ */
+ if (ecs_user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || error_code != AWS_OP_SUCCESS) {
+ ecs_user_data->current_result.len = 0;
+
+ if (error_code != AWS_OP_SUCCESS) {
+ ecs_user_data->error_code = error_code;
+ } else {
+ ecs_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE;
+ }
+ }
+
+ s_ecs_finalize_get_credentials_query(ecs_user_data);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_header, "Accept");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_header_value, "application/json");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_user_agent_header, "User-Agent");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_user_agent_header_value, "aws-sdk-crt/ecs-credentials-provider");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_authorization_header, "Authorization");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_encoding_header, "Accept-Encoding");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_accept_encoding_header_value, "identity");
+AWS_STATIC_STRING_FROM_LITERAL(s_ecs_host_header, "Host");
+
+static int s_make_ecs_http_query(
+ struct aws_credentials_provider_ecs_user_data *ecs_user_data,
+ struct aws_byte_cursor *uri) {
+ AWS_FATAL_ASSERT(ecs_user_data->connection);
+
+ struct aws_http_stream *stream = NULL;
+ struct aws_http_message *request = aws_http_message_new_request(ecs_user_data->allocator);
+ if (request == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl;
+
+ struct aws_http_header host_header = {
+ .name = aws_byte_cursor_from_string(s_ecs_host_header),
+ .value = aws_byte_cursor_from_string(impl->host),
+ };
+ if (aws_http_message_add_header(request, host_header)) {
+ goto on_error;
+ }
+
+ if (impl->auth_token != NULL) {
+ struct aws_http_header auth_header = {
+ .name = aws_byte_cursor_from_string(s_ecs_authorization_header),
+ .value = aws_byte_cursor_from_string(impl->auth_token),
+ };
+ if (aws_http_message_add_header(request, auth_header)) {
+ goto on_error;
+ }
+ }
+
+ struct aws_http_header accept_header = {
+ .name = aws_byte_cursor_from_string(s_ecs_accept_header),
+ .value = aws_byte_cursor_from_string(s_ecs_accept_header_value),
+ };
+ if (aws_http_message_add_header(request, accept_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header accept_encoding_header = {
+ .name = aws_byte_cursor_from_string(s_ecs_accept_encoding_header),
+ .value = aws_byte_cursor_from_string(s_ecs_accept_encoding_header_value),
+ };
+ if (aws_http_message_add_header(request, accept_encoding_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header user_agent_header = {
+ .name = aws_byte_cursor_from_string(s_ecs_user_agent_header),
+ .value = aws_byte_cursor_from_string(s_ecs_user_agent_header_value),
+ };
+ if (aws_http_message_add_header(request, user_agent_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_path(request, *uri)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"))) {
+ goto on_error;
+ }
+
+ ecs_user_data->request = request;
+
+ struct aws_http_make_request_options request_options = {
+ .self_size = sizeof(request_options),
+ .on_response_headers = s_ecs_on_incoming_headers_fn,
+ .on_response_header_block_done = NULL,
+ .on_response_body = s_ecs_on_incoming_body_fn,
+ .on_complete = s_ecs_on_stream_complete_fn,
+ .user_data = ecs_user_data,
+ .request = request,
+ };
+
+ stream = impl->function_table->aws_http_connection_make_request(ecs_user_data->connection, &request_options);
+
+ if (!stream) {
+ goto on_error;
+ }
+
+ if (impl->function_table->aws_http_stream_activate(stream)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ impl->function_table->aws_http_stream_release(stream);
+ aws_http_message_destroy(request);
+ ecs_user_data->request = NULL;
+ return AWS_OP_ERR;
+}
+
+static void s_ecs_query_task_role_credentials(struct aws_credentials_provider_ecs_user_data *ecs_user_data) {
+ AWS_FATAL_ASSERT(ecs_user_data->connection);
+
+ struct aws_credentials_provider_ecs_impl *impl = ecs_user_data->ecs_provider->impl;
+
+ /* "Clear" the result */
+ s_aws_credentials_provider_ecs_user_data_reset_response(ecs_user_data);
+
+ struct aws_byte_cursor uri_cursor = aws_byte_cursor_from_string(impl->path_and_query);
+ if (s_make_ecs_http_query(ecs_user_data, &uri_cursor) == AWS_OP_ERR) {
+ s_ecs_finalize_get_credentials_query(ecs_user_data);
+ }
+}
+
+static void s_ecs_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) {
+ struct aws_credentials_provider_ecs_user_data *ecs_user_data = user_data;
+
+ if (connection == NULL) {
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "id=%p: ECS provider failed to acquire a connection, error code %d(%s)",
+ (void *)ecs_user_data->ecs_provider,
+ error_code,
+ aws_error_str(error_code));
+
+ ecs_user_data->error_code = error_code;
+ s_ecs_finalize_get_credentials_query(ecs_user_data);
+ return;
+ }
+
+ ecs_user_data->connection = connection;
+
+ s_ecs_query_task_role_credentials(ecs_user_data);
+}
+
+static int s_credentials_provider_ecs_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_ecs_impl *impl = provider->impl;
+
+ struct aws_credentials_provider_ecs_user_data *wrapped_user_data =
+ s_aws_credentials_provider_ecs_user_data_new(provider, callback, user_data);
+ if (wrapped_user_data == NULL) {
+ goto error;
+ }
+
+ impl->function_table->aws_http_connection_manager_acquire_connection(
+ impl->connection_manager, s_ecs_on_acquire_connection, wrapped_user_data);
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ s_aws_credentials_provider_ecs_user_data_destroy(wrapped_user_data);
+
+ return AWS_OP_ERR;
+}
+
+static void s_credentials_provider_ecs_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_ecs_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_string_destroy(impl->path_and_query);
+ aws_string_destroy(impl->auth_token);
+ aws_string_destroy(impl->host);
+
+ /* aws_http_connection_manager_release() eventually leads to a call of s_on_connection_manager_shutdown,
+ * which releases the memory for provider and impl. So we should free the impl-related
+ * memory first, then call aws_http_connection_manager_release().
+ */
+ if (impl->connection_manager) {
+ impl->function_table->aws_http_connection_manager_release(impl->connection_manager);
+ } else {
+ /* If provider setup failed halfway through, connection_manager might not exist.
+ * In this case invoke shutdown completion callback directly to finish cleanup */
+ s_on_connection_manager_shutdown(provider);
+ }
+
+ /* freeing the provider takes place in the shutdown callback below */
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_ecs_vtable = {
+ .get_credentials = s_credentials_provider_ecs_get_credentials_async,
+ .destroy = s_credentials_provider_ecs_destroy,
+};
+
+static void s_on_connection_manager_shutdown(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+struct aws_credentials_provider *aws_credentials_provider_new_ecs(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_ecs_options *options) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_ecs_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_ecs_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_ecs_vtable, impl);
+
+ struct aws_tls_connection_options tls_connection_options;
+ AWS_ZERO_STRUCT(tls_connection_options);
+ if (options->tls_ctx) {
+ aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx);
+ struct aws_byte_cursor host = options->host;
+ if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to create a tls connection options with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+ }
+
+ struct aws_socket_options socket_options;
+ AWS_ZERO_STRUCT(socket_options);
+ socket_options.type = AWS_SOCKET_STREAM;
+ socket_options.domain = AWS_SOCKET_IPV4;
+ socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert(
+ ECS_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+
+ struct aws_http_connection_manager_options manager_options;
+ AWS_ZERO_STRUCT(manager_options);
+ manager_options.bootstrap = options->bootstrap;
+ manager_options.initial_window_size = ECS_RESPONSE_SIZE_LIMIT;
+ manager_options.socket_options = &socket_options;
+ manager_options.host = options->host;
+ if (options->port == 0) {
+ manager_options.port = options->tls_ctx ? 443 : 80;
+ } else {
+ manager_options.port = options->port;
+ }
+ manager_options.max_connections = 2;
+ manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown;
+ manager_options.shutdown_complete_user_data = provider;
+ manager_options.tls_connection_options = options->tls_ctx ? &tls_connection_options : NULL;
+
+ impl->function_table = options->function_table;
+ if (impl->function_table == NULL) {
+ impl->function_table = g_aws_credentials_provider_http_function_table;
+ }
+
+ impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options);
+ if (impl->connection_manager == NULL) {
+ goto on_error;
+ }
+ if (options->auth_token.len != 0) {
+ impl->auth_token = aws_string_new_from_cursor(allocator, &options->auth_token);
+ if (impl->auth_token == NULL) {
+ goto on_error;
+ }
+ }
+ impl->path_and_query = aws_string_new_from_cursor(allocator, &options->path_and_query);
+ if (impl->path_and_query == NULL) {
+ goto on_error;
+ }
+
+ impl->host = aws_string_new_from_cursor(allocator, &options->host);
+ if (impl->host == NULL) {
+ goto on_error;
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+
+ return provider;
+
+on_error:
+
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+ aws_credentials_provider_destroy(provider);
+
+ return NULL;
+}
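As a point of reference, a hedged construction sketch for the ECS provider above (editorial, not part of the diff). The host 169.254.170.2 and the relative-URI parameter follow the common ECS container-credentials convention and are assumptions for illustration; port selection falls back to 80/443 exactly as the manager_options logic above shows.

#include <aws/auth/credentials.h>
#include <aws/common/byte_buf.h>
#include <aws/common/zero.h>

/* Illustrative sketch only -- host/path values are assumptions, not mandated by this file. */
static struct aws_credentials_provider *s_make_ecs_provider(
    struct aws_allocator *allocator,
    struct aws_client_bootstrap *bootstrap,
    struct aws_byte_cursor relative_uri /* e.g. the value of AWS_CONTAINER_CREDENTIALS_RELATIVE_URI */) {

    struct aws_credentials_provider_ecs_options options;
    AWS_ZERO_STRUCT(options);
    options.bootstrap = bootstrap;
    options.host = aws_byte_cursor_from_c_str("169.254.170.2");
    options.path_and_query = relative_uri;
    /* With no tls_ctx and port left at 0, the provider defaults to plain HTTP on port 80. */

    return aws_credentials_provider_new_ecs(allocator, &options);
}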
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
new file mode 100644
index 0000000000..0aeac48411
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_environment.c
@@ -0,0 +1,79 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/environment.h>
+#include <aws/common/string.h>
+
+AWS_STATIC_STRING_FROM_LITERAL(s_access_key_id_env_var, "AWS_ACCESS_KEY_ID");
+AWS_STATIC_STRING_FROM_LITERAL(s_secret_access_key_env_var, "AWS_SECRET_ACCESS_KEY");
+AWS_STATIC_STRING_FROM_LITERAL(s_session_token_env_var, "AWS_SESSION_TOKEN");
+
+static int s_credentials_provider_environment_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_allocator *allocator = provider->allocator;
+
+ struct aws_string *access_key_id = NULL;
+ struct aws_string *secret_access_key = NULL;
+ struct aws_string *session_token = NULL;
+ struct aws_credentials *credentials = NULL;
+ int error_code = AWS_ERROR_SUCCESS;
+
+ aws_get_environment_value(allocator, s_access_key_id_env_var, &access_key_id);
+ aws_get_environment_value(allocator, s_secret_access_key_env_var, &secret_access_key);
+ aws_get_environment_value(allocator, s_session_token_env_var, &session_token);
+
+ if (access_key_id != NULL && secret_access_key != NULL) {
+ credentials =
+ aws_credentials_new_from_string(allocator, access_key_id, secret_access_key, session_token, UINT64_MAX);
+ if (credentials == NULL) {
+ error_code = aws_last_error();
+ }
+ } else {
+ error_code = AWS_AUTH_CREDENTIALS_PROVIDER_INVALID_ENVIRONMENT;
+ }
+
+ callback(credentials, error_code, user_data);
+
+ aws_credentials_release(credentials);
+ aws_string_destroy(session_token);
+ aws_string_destroy(secret_access_key);
+ aws_string_destroy(access_key_id);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_credentials_provider_environment_destroy(struct aws_credentials_provider *provider) {
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_environment_vtable = {
+ .get_credentials = s_credentials_provider_environment_get_credentials_async,
+ .destroy = s_credentials_provider_environment_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_environment(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_environment_options *options) {
+ struct aws_credentials_provider *provider = aws_mem_acquire(allocator, sizeof(struct aws_credentials_provider));
+ if (provider == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_environment_vtable, NULL);
+
+ provider->shutdown_options = options->shutdown_options;
+
+ return provider;
+}
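A minimal, self-contained sketch of using the environment provider above (editorial addition, not part of the diff). It assumes the usual aws_auth_library_init()/aws_auth_library_clean_up() entry points from aws/auth/auth.h; since the provider reads AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY and optionally AWS_SESSION_TOKEN synchronously, the callback is invoked inline, as the implementation above shows.

#include <aws/auth/auth.h>
#include <aws/auth/credentials.h>

#include <stdio.h>

/* Illustrative sketch only -- not part of the vendored sources. */
static void s_on_credentials(struct aws_credentials *credentials, int error_code, void *user_data) {
    (void)user_data;
    if (credentials != NULL) {
        printf("sourced credentials from the environment\n");
    } else {
        printf("no environment credentials: %s\n", aws_error_str(error_code));
    }
}

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();
    aws_auth_library_init(allocator);

    struct aws_credentials_provider_environment_options options;
    AWS_ZERO_STRUCT(options);

    struct aws_credentials_provider *provider = aws_credentials_provider_new_environment(allocator, &options);
    if (provider != NULL) {
        /* The environment provider invokes the callback inline, before this call returns. */
        aws_credentials_provider_get_credentials(provider, s_on_credentials, NULL);
        aws_credentials_provider_release(provider);
    }

    aws_auth_library_clean_up();
    return 0;
}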
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
new file mode 100644
index 0000000000..e7801ab26e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_imds.c
@@ -0,0 +1,208 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/aws_imds_client.h>
+#include <aws/auth/credentials.h>
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/string.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+#endif /* _MSC_VER */
+
+struct aws_credentials_provider_imds_impl {
+ struct aws_imds_client *client;
+};
+
+static int s_credentials_provider_imds_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data);
+
+static void s_on_imds_client_shutdown(void *user_data);
+
+static void s_credentials_provider_imds_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_imds_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ if (impl->client) {
+ /* release IMDS client, cleanup will finish when its shutdown callback fires */
+ aws_imds_client_release(impl->client);
+ } else {
+ /* If provider setup failed halfway through, IMDS client might not exist.
+ * In this case invoke shutdown completion callback directly to finish cleanup */
+ s_on_imds_client_shutdown(provider);
+ }
+}
+
+static void s_on_imds_client_shutdown(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_imds_vtable = {
+ .get_credentials = s_credentials_provider_imds_get_credentials_async,
+ .destroy = s_credentials_provider_imds_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_imds(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_imds_options *options) {
+
+ if (!options->bootstrap) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Client bootstrap is required for querying IMDS");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_imds_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_imds_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_imds_vtable, impl);
+
+ struct aws_imds_client_options client_options = {
+ .bootstrap = options->bootstrap,
+ .function_table = options->function_table,
+ .imds_version = options->imds_version,
+ .shutdown_options =
+ {
+ .shutdown_callback = s_on_imds_client_shutdown,
+ .shutdown_user_data = provider,
+ },
+ };
+
+ impl->client = aws_imds_client_new(allocator, &client_options);
+ if (!impl->client) {
+ goto on_error;
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+ return provider;
+
+on_error:
+ aws_credentials_provider_destroy(provider);
+ return NULL;
+}
+
+/*
+ * Tracking structure for each outstanding async query to an imds provider
+ */
+struct imds_provider_user_data {
+ /* immutable post-creation */
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *imds_provider;
+ aws_on_get_credentials_callback_fn *original_callback;
+ struct aws_byte_buf role;
+ void *original_user_data;
+};
+
+static void s_imds_provider_user_data_destroy(struct imds_provider_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+ aws_byte_buf_clean_up(&user_data->role);
+ aws_credentials_provider_release(user_data->imds_provider);
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct imds_provider_user_data *s_imds_provider_user_data_new(
+ struct aws_credentials_provider *imds_provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct imds_provider_user_data *wrapped_user_data =
+ aws_mem_calloc(imds_provider->allocator, 1, sizeof(struct imds_provider_user_data));
+ if (wrapped_user_data == NULL) {
+ goto on_error;
+ }
+ if (aws_byte_buf_init(&wrapped_user_data->role, imds_provider->allocator, 100)) {
+ goto on_error;
+ }
+ wrapped_user_data->allocator = imds_provider->allocator;
+ wrapped_user_data->imds_provider = imds_provider;
+ aws_credentials_provider_acquire(imds_provider);
+ wrapped_user_data->original_user_data = user_data;
+ wrapped_user_data->original_callback = callback;
+
+ return wrapped_user_data;
+
+on_error:
+ s_imds_provider_user_data_destroy(wrapped_user_data);
+ return NULL;
+}
+
+static void s_on_get_credentials(const struct aws_credentials *credentials, int error_code, void *user_data) {
+ (void)error_code;
+ struct imds_provider_user_data *wrapped_user_data = user_data;
+ wrapped_user_data->original_callback(
+ (struct aws_credentials *)credentials, error_code, wrapped_user_data->original_user_data);
+ s_imds_provider_user_data_destroy(wrapped_user_data);
+}
+
+static void s_on_get_role(const struct aws_byte_buf *role, int error_code, void *user_data) {
+ struct imds_provider_user_data *wrapped_user_data = user_data;
+ if (!role || error_code || role->len == 0) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor role_cursor = aws_byte_cursor_from_buf(role);
+ if (aws_byte_buf_append_dynamic(&wrapped_user_data->role, &role_cursor)) {
+ goto on_error;
+ }
+
+ struct aws_credentials_provider_imds_impl *impl = wrapped_user_data->imds_provider->impl;
+ if (aws_imds_client_get_credentials(
+ impl->client, aws_byte_cursor_from_buf(&wrapped_user_data->role), s_on_get_credentials, user_data)) {
+ goto on_error;
+ }
+
+ return;
+
+on_error:
+ wrapped_user_data->original_callback(
+ NULL, AWS_AUTH_CREDENTIALS_PROVIDER_IMDS_SOURCE_FAILURE, wrapped_user_data->original_user_data);
+ s_imds_provider_user_data_destroy(wrapped_user_data);
+}
+
+static int s_credentials_provider_imds_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_imds_impl *impl = provider->impl;
+
+ struct imds_provider_user_data *wrapped_user_data = s_imds_provider_user_data_new(provider, callback, user_data);
+ if (wrapped_user_data == NULL) {
+ goto error;
+ }
+
+ if (aws_imds_client_get_attached_iam_role(impl->client, s_on_get_role, wrapped_user_data)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ s_imds_provider_user_data_destroy(wrapped_user_data);
+ return AWS_OP_ERR;
+}
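For reference, a hedged construction sketch for the IMDS provider above (editorial, not part of the diff): options->bootstrap is required, as the guard at the top of aws_credentials_provider_new_imds shows, and everything else can stay zeroed.

#include <aws/auth/credentials.h>
#include <aws/common/zero.h>

/* Illustrative sketch only -- not part of the vendored sources. */
static struct aws_credentials_provider *s_make_imds_provider(
    struct aws_allocator *allocator,
    struct aws_client_bootstrap *bootstrap) {

    struct aws_credentials_provider_imds_options options;
    AWS_ZERO_STRUCT(options);
    options.bootstrap = bootstrap; /* required; creation fails with AWS_ERROR_INVALID_ARGUMENT otherwise */

    return aws_credentials_provider_new_imds(allocator, &options);
}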
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
new file mode 100644
index 0000000000..29f99d0928
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_process.c
@@ -0,0 +1,258 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+#include <aws/auth/private/aws_profile.h>
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/date_time.h>
+#include <aws/common/environment.h>
+#include <aws/common/process.h>
+#include <aws/common/string.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+#endif /* _MSC_VER */
+
+struct aws_credentials_provider_process_impl {
+ struct aws_string *command;
+};
+
+static int s_get_credentials_from_process(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_process_impl *impl = provider->impl;
+ struct aws_credentials *credentials = NULL;
+ struct aws_run_command_options options = {
+ .command = aws_string_c_str(impl->command),
+ };
+
+ struct aws_run_command_result result;
+ int ret = AWS_OP_ERR;
+ if (aws_run_command_result_init(provider->allocator, &result)) {
+ goto on_finish;
+ }
+
+ if (aws_run_command(provider->allocator, &options, &result) || result.ret_code || !result.std_out) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Failed to source credentials from running process credentials provider with command: %s, err:%s",
+ (void *)provider,
+ aws_string_c_str(impl->command),
+ aws_error_str(aws_last_error()));
+ goto on_finish;
+ }
+
+ struct aws_parse_credentials_from_json_doc_options parse_options = {
+ .access_key_id_name = "AccessKeyId",
+ .secret_access_key_name = "SecretAccessKey",
+ .token_name = "Token",
+ .expiration_name = "Expiration",
+ .token_required = false,
+ .expiration_required = false,
+ };
+
+ credentials =
+ aws_parse_credentials_from_json_document(provider->allocator, aws_string_c_str(result.std_out), &parse_options);
+ if (!credentials) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Process credentials provider failed to parse credentials from command output (output is not "
+ "logged in case sensitive information).",
+ (void *)provider);
+ goto on_finish;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Process credentials provider successfully sourced credentials.",
+ (void *)provider);
+ ret = AWS_OP_SUCCESS;
+
+on_finish:
+
+ ;
+ int error_code = AWS_ERROR_SUCCESS;
+ if (credentials == NULL) {
+ error_code = aws_last_error();
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_AUTH_CREDENTIALS_PROVIDER_PROCESS_SOURCE_FAILURE;
+ }
+ }
+
+ callback(credentials, error_code, user_data);
+ aws_run_command_result_cleanup(&result);
+ aws_credentials_release(credentials);
+ return ret;
+}
+
+static void s_credentials_provider_process_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_process_impl *impl = provider->impl;
+ if (impl) {
+ aws_string_destroy_secure(impl->command);
+ }
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_credentials_process, "credential_process");
+static struct aws_byte_cursor s_default_profile_name_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("default");
+
+static struct aws_profile_collection *s_load_profile(struct aws_allocator *allocator) {
+
+ struct aws_profile_collection *config_profiles = NULL;
+ struct aws_string *config_file_path = NULL;
+
+ config_file_path = aws_get_config_file_path(allocator, NULL);
+ if (!config_file_path) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve config file path during process credentials provider initialization: %s",
+ aws_error_str(aws_last_error()));
+ goto on_done;
+ }
+
+ config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG);
+ if (config_profiles != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Successfully built config profile collection from file at (%s)",
+ aws_string_c_str(config_file_path));
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to build config profile collection from file at (%s) : %s",
+ aws_string_c_str(config_file_path),
+ aws_error_str(aws_last_error()));
+ goto on_done;
+ }
+
+on_done:
+ aws_string_destroy(config_file_path);
+ return config_profiles;
+}
+
+static void s_check_or_get_with_profile_config(
+ struct aws_allocator *allocator,
+ const struct aws_profile *profile,
+ const struct aws_string *config_key,
+ struct aws_byte_buf *target) {
+
+ if (!allocator || !profile || !config_key || !target) {
+ return;
+ }
+ if (!target->len) {
+ aws_byte_buf_clean_up(target);
+ const struct aws_profile_property *property = aws_profile_get_property(profile, config_key);
+ if (property) {
+ aws_byte_buf_init_copy_from_cursor(
+ target, allocator, aws_byte_cursor_from_string(aws_profile_property_get_value(property)));
+ }
+ }
+}
+
+static struct aws_byte_cursor s_stderr_redirect_to_stdout = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" 2>&1");
+static struct aws_string *s_get_command(struct aws_allocator *allocator, struct aws_byte_cursor profile_cursor) {
+
+ struct aws_byte_buf command_buf;
+ AWS_ZERO_STRUCT(command_buf);
+ struct aws_string *command = NULL;
+ struct aws_profile_collection *config_profiles = NULL;
+ struct aws_string *profile_name = NULL;
+ const struct aws_profile *profile = NULL;
+
+ config_profiles = s_load_profile(allocator);
+ if (profile_cursor.len == 0) {
+ profile_name = aws_get_profile_name(allocator, &s_default_profile_name_cursor);
+ } else {
+ profile_name = aws_string_new_from_array(allocator, profile_cursor.ptr, profile_cursor.len);
+ }
+ if (config_profiles && profile_name) {
+ profile = aws_profile_collection_get_profile(config_profiles, profile_name);
+ }
+
+ if (!profile) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve config profile during process credentials provider initialization.");
+ goto on_finish;
+ }
+
+ s_check_or_get_with_profile_config(allocator, profile, s_credentials_process, &command_buf);
+
+ if (!command_buf.len) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve credentials_process command during process credentials provider initialization.");
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_append_dynamic(&command_buf, &s_stderr_redirect_to_stdout)) {
+ goto on_finish;
+ }
+
+ command = aws_string_new_from_array(allocator, command_buf.buffer, command_buf.len);
+ if (!command) {
+ goto on_finish;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Successfully loaded credentials_process command for process credentials provider.");
+
+on_finish:
+ aws_string_destroy(profile_name);
+ aws_profile_collection_destroy(config_profiles);
+ aws_byte_buf_clean_up_secure(&command_buf);
+ return command;
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_process_vtable = {
+ .get_credentials = s_get_credentials_from_process,
+ .destroy = s_credentials_provider_process_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_process(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_process_options *options) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_process_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_process_impl));
+
+ if (!provider) {
+ goto on_error;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ impl->command = s_get_command(allocator, options->profile_to_use);
+ if (!impl->command) {
+ goto on_error;
+ }
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_process_vtable, impl);
+ provider->shutdown_options = options->shutdown_options;
+ AWS_LOGF_TRACE(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Successfully initializing a process credentials provider.",
+ (void *)provider);
+
+ return provider;
+
+on_error:
+ aws_mem_release(allocator, provider);
+ return NULL;
+}
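A short, hedged usage sketch for the process provider above (editorial addition). The profile name "build-ci" is an illustrative placeholder; when profile_to_use is left zeroed, s_get_command falls back to the default profile, as shown above.

#include <aws/auth/credentials.h>
#include <aws/common/byte_buf.h>
#include <aws/common/zero.h>

/* Illustrative sketch only -- the profile name is a placeholder. */
static struct aws_credentials_provider *s_make_process_provider(struct aws_allocator *allocator) {
    struct aws_credentials_provider_process_options options;
    AWS_ZERO_STRUCT(options);

    /* The named profile's config entry is expected to define credential_process. */
    options.profile_to_use = aws_byte_cursor_from_c_str("build-ci");

    return aws_credentials_provider_new_process(allocator, &options);
}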
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
new file mode 100644
index 0000000000..7b90b1b1b4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_profile.c
@@ -0,0 +1,454 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/aws_profile.h>
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/process.h>
+#include <aws/common/string.h>
+#include <aws/io/tls_channel_handler.h>
+
+#ifdef _MSC_VER
+/* allow non-constant declared initializers. */
+# pragma warning(disable : 4204)
+#endif
+
+/*
+ * Profile provider implementation
+ */
+
+AWS_STRING_FROM_LITERAL(s_role_arn_name, "role_arn");
+AWS_STRING_FROM_LITERAL(s_role_session_name_name, "role_session_name");
+AWS_STRING_FROM_LITERAL(s_credential_source_name, "credential_source");
+AWS_STRING_FROM_LITERAL(s_source_profile_name, "source_profile");
+
+static struct aws_byte_cursor s_default_session_name_pfx =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-common-runtime-profile-config");
+static struct aws_byte_cursor s_ec2_imds_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Ec2InstanceMetadata");
+static struct aws_byte_cursor s_environment_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Environment");
+
+#define MAX_SESSION_NAME_LEN ((size_t)64)
+
+struct aws_credentials_provider_profile_file_impl {
+ struct aws_string *config_file_path;
+ struct aws_string *credentials_file_path;
+ struct aws_string *profile_name;
+ struct aws_profile_collection *profile_collection_cached;
+};
+
+static int s_profile_file_credentials_provider_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_profile_file_impl *impl = provider->impl;
+ struct aws_credentials *credentials = NULL;
+ struct aws_profile_collection *merged_profiles = NULL;
+
+ if (impl->profile_collection_cached) {
+ /* Use cached profile collection */
+ merged_profiles = aws_profile_collection_acquire(impl->profile_collection_cached);
+ } else {
+ /*
+ * Parse config file from file, if it exists
+ */
+ struct aws_profile_collection *config_profiles =
+ aws_profile_collection_new_from_file(provider->allocator, impl->config_file_path, AWS_PST_CONFIG);
+
+ if (config_profiles != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider successfully built config profile collection from file at (%s)",
+ (void *)provider,
+ aws_string_c_str(impl->config_file_path));
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider failed to build config profile collection from file at (%s)",
+ (void *)provider,
+ aws_string_c_str(impl->config_file_path));
+ }
+
+ /*
+ * Parse credentials file, if it exists
+ */
+ struct aws_profile_collection *credentials_profiles =
+ aws_profile_collection_new_from_file(provider->allocator, impl->credentials_file_path, AWS_PST_CREDENTIALS);
+
+ if (credentials_profiles != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider successfully built credentials profile collection from file at "
+ "(%s)",
+ (void *)provider,
+ aws_string_c_str(impl->credentials_file_path));
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider failed to build credentials profile collection from file at (%s)",
+ (void *)provider,
+ aws_string_c_str(impl->credentials_file_path));
+ }
+
+ /*
+ * Merge the (up to) two sources into a single unified profile
+ */
+ merged_profiles =
+ aws_profile_collection_new_from_merge(provider->allocator, config_profiles, credentials_profiles);
+
+ aws_profile_collection_release(config_profiles);
+ aws_profile_collection_release(credentials_profiles);
+ }
+
+ if (merged_profiles != NULL) {
+ const struct aws_profile *profile = aws_profile_collection_get_profile(merged_profiles, impl->profile_name);
+ if (profile != NULL) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider attempting to pull credentials from profile \"%s\"",
+ (void *)provider,
+ aws_string_c_str(impl->profile_name));
+ credentials = aws_credentials_new_from_profile(provider->allocator, profile);
+ } else {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider could not find a profile named \"%s\"",
+ (void *)provider,
+ aws_string_c_str(impl->profile_name));
+ }
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Profile credentials provider failed to merge config and credentials profile collections",
+ (void *)provider);
+ }
+
+ int error_code = AWS_ERROR_SUCCESS;
+ if (credentials == NULL) {
+ error_code = aws_last_error();
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_AUTH_CREDENTIALS_PROVIDER_PROFILE_SOURCE_FAILURE;
+ }
+ }
+
+ callback(credentials, error_code, user_data);
+
+ /*
+ * clean up
+ */
+ aws_credentials_release(credentials);
+ aws_profile_collection_release(merged_profiles);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_profile_file_credentials_provider_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_profile_file_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_string_destroy(impl->config_file_path);
+ aws_string_destroy(impl->credentials_file_path);
+ aws_string_destroy(impl->profile_name);
+ aws_profile_collection_release(impl->profile_collection_cached);
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_profile_file_vtable = {
+ .get_credentials = s_profile_file_credentials_provider_get_credentials_async,
+ .destroy = s_profile_file_credentials_provider_destroy,
+};
+
+/* load a purely config/credentials file based provider. */
+static struct aws_credentials_provider *s_create_profile_based_provider(
+ struct aws_allocator *allocator,
+ struct aws_string *credentials_file_path,
+ struct aws_string *config_file_path,
+ const struct aws_string *profile_name,
+ struct aws_profile_collection *profile_collection_cached) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_profile_file_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_profile_file_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_profile_file_vtable, impl);
+ if (credentials_file_path) {
+ impl->credentials_file_path = aws_string_clone_or_reuse(allocator, credentials_file_path);
+ }
+ if (config_file_path) {
+ impl->config_file_path = aws_string_clone_or_reuse(allocator, config_file_path);
+ }
+ impl->profile_name = aws_string_clone_or_reuse(allocator, profile_name);
+ impl->profile_collection_cached = aws_profile_collection_acquire(profile_collection_cached);
+ return provider;
+}
+
+/* use the selected property that specifies a role_arn to load an STS based provider. */
+static struct aws_credentials_provider *s_create_sts_based_provider(
+ struct aws_allocator *allocator,
+ const struct aws_profile_property *role_arn_property,
+ const struct aws_profile *profile,
+ struct aws_string *credentials_file_path,
+ struct aws_string *config_file_path,
+ const struct aws_credentials_provider_profile_options *options) {
+ struct aws_credentials_provider *provider = NULL;
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: profile %s has role_arn property is set to %s, attempting to "
+ "create an STS credentials provider.",
+ aws_string_c_str(aws_profile_get_name(profile)),
+ aws_string_c_str(aws_profile_property_get_value(role_arn_property)));
+
+ const struct aws_profile_property *source_profile_property =
+ aws_profile_get_property(profile, s_source_profile_name);
+ const struct aws_profile_property *credential_source_property =
+ aws_profile_get_property(profile, s_credential_source_name);
+
+ const struct aws_profile_property *role_session_name = aws_profile_get_property(profile, s_role_session_name_name);
+ char session_name_array[MAX_SESSION_NAME_LEN + 1];
+ AWS_ZERO_ARRAY(session_name_array);
+
+ if (role_session_name) {
+ size_t to_write = aws_profile_property_get_value(role_session_name)->len;
+ if (to_write > MAX_SESSION_NAME_LEN) {
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: session_name property is %d bytes long, "
+ "but the max is %d. Truncating",
+ (int)aws_profile_property_get_value(role_session_name)->len,
+ (int)MAX_SESSION_NAME_LEN);
+ to_write = MAX_SESSION_NAME_LEN;
+ }
+ memcpy(session_name_array, aws_string_bytes(aws_profile_property_get_value(role_session_name)), to_write);
+ } else {
+ memcpy(session_name_array, s_default_session_name_pfx.ptr, s_default_session_name_pfx.len);
+ snprintf(
+ session_name_array + s_default_session_name_pfx.len,
+ sizeof(session_name_array) - s_default_session_name_pfx.len,
+ "-%d",
+ aws_get_pid());
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: computed session_name as %s", session_name_array);
+
+ /* Automatically create a TLS context if necessary. We'd prefer that users pass one in, but can't force
+ * them to because aws_credentials_provider_profile_options didn't always have a tls_ctx member. */
+ struct aws_tls_ctx *tls_ctx = NULL;
+ if (options->tls_ctx) {
+ tls_ctx = aws_tls_ctx_acquire(options->tls_ctx);
+ } else {
+#ifdef BYO_CRYPTO
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a TLS context must be provided to query STS");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto done;
+#else
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "TLS context not provided, initializing a new one for querying STS");
+ struct aws_tls_ctx_options tls_options;
+ aws_tls_ctx_options_init_default_client(&tls_options, allocator);
+ tls_ctx = aws_tls_client_ctx_new(allocator, &tls_options);
+ aws_tls_ctx_options_clean_up(&tls_options);
+ if (!tls_ctx) {
+ goto done;
+ }
+#endif
+ }
+
+ struct aws_credentials_provider_sts_options sts_options = {
+ .bootstrap = options->bootstrap,
+ .tls_ctx = tls_ctx,
+ .role_arn = aws_byte_cursor_from_string(aws_profile_property_get_value(role_arn_property)),
+ .session_name = aws_byte_cursor_from_c_str(session_name_array),
+ .duration_seconds = 0,
+ .function_table = options->function_table,
+ };
+
+ if (source_profile_property) {
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: source_profile set to %s",
+ aws_string_c_str(aws_profile_property_get_value(source_profile_property)));
+
+ sts_options.creds_provider = s_create_profile_based_provider(
+ allocator,
+ credentials_file_path,
+ config_file_path,
+ aws_profile_property_get_value(source_profile_property),
+ options->profile_collection_cached);
+
+ if (!sts_options.creds_provider) {
+ goto done;
+ }
+
+ provider = aws_credentials_provider_new_sts(allocator, &sts_options);
+
+ aws_credentials_provider_release(sts_options.creds_provider);
+
+ if (!provider) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: failed to load STS credentials provider");
+ }
+ } else if (credential_source_property) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: credential_source property set to %s",
+ aws_string_c_str(aws_profile_property_get_value(credential_source_property)));
+
+ if (aws_string_eq_byte_cursor_ignore_case(
+ aws_profile_property_get_value(credential_source_property), &s_ec2_imds_name)) {
+ struct aws_credentials_provider_imds_options imds_options = {
+ .bootstrap = options->bootstrap,
+ .function_table = options->function_table,
+ };
+
+ struct aws_credentials_provider *imds_provider =
+ aws_credentials_provider_new_imds(allocator, &imds_options);
+
+ if (!imds_provider) {
+ goto done;
+ }
+
+ sts_options.creds_provider = imds_provider;
+ provider = aws_credentials_provider_new_sts(allocator, &sts_options);
+
+ aws_credentials_provider_release(imds_provider);
+
+ } else if (aws_string_eq_byte_cursor_ignore_case(
+ aws_profile_property_get_value(credential_source_property), &s_environment_name)) {
+ struct aws_credentials_provider_environment_options env_options;
+ AWS_ZERO_STRUCT(env_options);
+
+ struct aws_credentials_provider *env_provider =
+ aws_credentials_provider_new_environment(allocator, &env_options);
+
+ if (!env_provider) {
+ goto done;
+ }
+
+ sts_options.creds_provider = env_provider;
+ provider = aws_credentials_provider_new_sts(allocator, &sts_options);
+
+ aws_credentials_provider_release(env_provider);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: invalid credential_source property: %s",
+ aws_string_c_str(aws_profile_property_get_value(credential_source_property)));
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ }
+done:
+ aws_tls_ctx_release(tls_ctx);
+ return provider;
+}
+
+struct aws_credentials_provider *aws_credentials_provider_new_profile(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_profile_options *options) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_profile_collection *config_profiles = NULL;
+ struct aws_profile_collection *credentials_profiles = NULL;
+ struct aws_profile_collection *merged_profiles = NULL;
+ struct aws_string *credentials_file_path = NULL;
+ struct aws_string *config_file_path = NULL;
+ struct aws_string *profile_name = NULL;
+
+ profile_name = aws_get_profile_name(allocator, &options->profile_name_override);
+ if (!profile_name) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials parser failed to resolve profile name");
+ goto on_finished;
+ }
+
+ if (options->profile_collection_cached) {
+ /* Use cached profile collection */
+ merged_profiles = aws_profile_collection_acquire(options->profile_collection_cached);
+ } else {
+ /* Load profile collection from files */
+
+ credentials_file_path = aws_get_credentials_file_path(allocator, &options->credentials_file_name_override);
+ if (!credentials_file_path) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: Profile credentials parser failed resolve credentials file path");
+ goto on_finished;
+ }
+
+ config_file_path = aws_get_config_file_path(allocator, &options->config_file_name_override);
+ if (!config_file_path) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: Profile credentials parser failed resolve config file path");
+ goto on_finished;
+ }
+
+ config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG);
+ credentials_profiles =
+ aws_profile_collection_new_from_file(allocator, credentials_file_path, AWS_PST_CREDENTIALS);
+
+ if (!(config_profiles || credentials_profiles)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: Profile credentials parser could not load or parse"
+ " a credentials or config file.");
+ goto on_finished;
+ }
+
+ merged_profiles = aws_profile_collection_new_from_merge(allocator, config_profiles, credentials_profiles);
+ }
+ const struct aws_profile *profile = aws_profile_collection_get_profile(merged_profiles, profile_name);
+
+ if (!profile) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "static: Profile credentials provider could not load"
+ " a profile at %s.",
+ aws_string_c_str(profile_name));
+ goto on_finished;
+ }
+ const struct aws_profile_property *role_arn_property = aws_profile_get_property(profile, s_role_arn_name);
+
+ if (role_arn_property) {
+ provider = s_create_sts_based_provider(
+ allocator, role_arn_property, profile, credentials_file_path, config_file_path, options);
+ } else {
+ provider = s_create_profile_based_provider(
+ allocator, credentials_file_path, config_file_path, profile_name, options->profile_collection_cached);
+ }
+
+on_finished:
+ aws_profile_collection_release(config_profiles);
+ aws_profile_collection_release(credentials_profiles);
+ aws_profile_collection_release(merged_profiles);
+
+ aws_string_destroy(credentials_file_path);
+ aws_string_destroy(config_file_path);
+ aws_string_destroy(profile_name);
+
+ if (provider) {
+ provider->shutdown_options = options->shutdown_options;
+ }
+
+ return provider;
+}
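
For orientation, a minimal usage sketch of the profile provider constructed above. It is illustrative only: `allocator` and `bootstrap` are assumed to already exist, `s_my_on_credentials` is a caller-defined callback, and only option fields referenced by the code in this diff are set.

    static void s_my_on_credentials(struct aws_credentials *credentials, int error_code, void *user_data) {
        (void)user_data;
        /* credentials may be NULL when error_code is non-zero */
    }

    struct aws_credentials_provider_profile_options profile_options = {
        .bootstrap = bootstrap,
        .profile_name_override = aws_byte_cursor_from_c_str("my-profile"),
    };

    struct aws_credentials_provider *profile_provider =
        aws_credentials_provider_new_profile(allocator, &profile_options);
    if (profile_provider != NULL) {
        aws_credentials_provider_get_credentials(profile_provider, s_my_on_credentials, NULL);
        /* drop our reference; in-flight queries hold their own reference on the provider */
        aws_credentials_provider_release(profile_provider);
    }
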
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
new file mode 100644
index 0000000000..55154ee386
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_static.c
@@ -0,0 +1,71 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+
+static int s_static_credentials_provider_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials *credentials = provider->impl;
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) Static credentials provider successfully sourced credentials",
+ (void *)provider);
+ callback(credentials, AWS_ERROR_SUCCESS, user_data);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_static_credentials_provider_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials *credentials = provider->impl;
+
+ aws_credentials_release(credentials);
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+/*
+ * shared across all providers that do not need to do anything special on shutdown
+ */
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_static_vtable = {
+ .get_credentials = s_static_credentials_provider_get_credentials_async,
+ .destroy = s_static_credentials_provider_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_static(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_static_options *options) {
+
+ struct aws_credentials_provider *provider = aws_mem_acquire(allocator, sizeof(struct aws_credentials_provider));
+ if (provider == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+
+ struct aws_credentials *credentials = aws_credentials_new(
+ allocator, options->access_key_id, options->secret_access_key, options->session_token, UINT64_MAX);
+ if (credentials == NULL) {
+ goto on_new_credentials_failure;
+ }
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_static_vtable, credentials);
+
+ provider->shutdown_options = options->shutdown_options;
+
+ return provider;
+
+on_new_credentials_failure:
+
+ aws_mem_release(allocator, provider);
+
+ return NULL;
+}
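
Similarly, a hedged sketch of instantiating the static provider above; the key and secret are placeholder values and `allocator` is assumed to exist. Only the option fields that aws_credentials_provider_new_static actually reads are shown.

    struct aws_credentials_provider_static_options static_options = {
        .access_key_id = aws_byte_cursor_from_c_str("AKIDEXAMPLE"),
        .secret_access_key = aws_byte_cursor_from_c_str("wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY"),
        /* .session_token is optional and may be left zeroed */
    };

    struct aws_credentials_provider *static_provider =
        aws_credentials_provider_new_static(allocator, &static_options);
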
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
new file mode 100644
index 0000000000..33d68f2ce8
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts.c
@@ -0,0 +1,848 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/auth/credentials.h>
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/auth/signable.h>
+#include <aws/auth/signing.h>
+#include <aws/auth/signing_config.h>
+#include <aws/auth/signing_result.h>
+#include <aws/common/xml_parser.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/string.h>
+
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/io/socket.h>
+#include <aws/io/stream.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+/* allow non-constant declared initializers. */
+# pragma warning(disable : 4204)
+/* allow passing of address of automatic variable */
+# pragma warning(disable : 4221)
+/* function pointer to dll symbol */
+# pragma warning(disable : 4232)
+#endif
+
+static struct aws_http_header s_host_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts.amazonaws.com"),
+};
+
+static struct aws_http_header s_content_type_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/x-www-form-urlencoded"),
+};
+
+static struct aws_byte_cursor s_content_length = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-length");
+static struct aws_byte_cursor s_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/");
+static struct aws_byte_cursor s_signing_region = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("us-east-1");
+static struct aws_byte_cursor s_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts");
+static struct aws_byte_cursor s_assume_role_root_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AssumeRoleResponse");
+static struct aws_byte_cursor s_assume_role_result_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AssumeRoleResult");
+static struct aws_byte_cursor s_assume_role_credentials_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Credentials");
+static struct aws_byte_cursor s_assume_role_session_token_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SessionToken");
+static struct aws_byte_cursor s_assume_role_secret_key_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SecretAccessKey");
+static struct aws_byte_cursor s_assume_role_access_key_id_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("AccessKeyId");
+static const int s_max_retries = 8;
+
+const uint16_t aws_sts_assume_role_default_duration_secs = 900;
+
+struct aws_credentials_provider_sts_impl {
+ struct aws_http_connection_manager *connection_manager;
+ struct aws_string *assume_role_profile;
+ struct aws_string *role_session_name;
+ uint16_t duration_seconds;
+ struct aws_credentials_provider *provider;
+ struct aws_credentials_provider_shutdown_options source_shutdown_options;
+ const struct aws_auth_http_system_vtable *function_table;
+ struct aws_retry_strategy *retry_strategy;
+ aws_io_clock_fn *system_clock_fn;
+};
+
+struct sts_creds_provider_user_data {
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *provider;
+ struct aws_credentials *credentials;
+ struct aws_string *access_key_id;
+ struct aws_string *secret_access_key;
+ struct aws_string *session_token;
+ aws_on_get_credentials_callback_fn *callback;
+ struct aws_http_connection *connection;
+ struct aws_byte_buf payload_body;
+ struct aws_input_stream *input_stream;
+ struct aws_signable *signable;
+ struct aws_signing_config_aws signing_config;
+ struct aws_http_message *message;
+ struct aws_byte_buf output_buf;
+
+ struct aws_retry_token *retry_token;
+ int error_code;
+ void *user_data;
+};
+
+static void s_reset_request_specific_data(struct sts_creds_provider_user_data *user_data) {
+ if (user_data->connection) {
+ struct aws_credentials_provider_sts_impl *provider_impl = user_data->provider->impl;
+ provider_impl->function_table->aws_http_connection_manager_release_connection(
+ provider_impl->connection_manager, user_data->connection);
+ user_data->connection = NULL;
+ }
+
+ if (user_data->signable) {
+ aws_signable_destroy(user_data->signable);
+ user_data->signable = NULL;
+ }
+
+ if (user_data->input_stream) {
+ aws_input_stream_destroy(user_data->input_stream);
+ user_data->input_stream = NULL;
+ }
+
+ aws_byte_buf_clean_up(&user_data->payload_body);
+
+ if (user_data->message) {
+ aws_http_message_destroy(user_data->message);
+ user_data->message = NULL;
+ }
+
+ aws_byte_buf_clean_up(&user_data->output_buf);
+
+ aws_string_destroy(user_data->access_key_id);
+ user_data->access_key_id = NULL;
+
+ aws_string_destroy_secure(user_data->secret_access_key);
+ user_data->secret_access_key = NULL;
+
+ aws_string_destroy(user_data->session_token);
+ user_data->session_token = NULL;
+}
+static void s_clean_up_user_data(struct sts_creds_provider_user_data *user_data) {
+ user_data->callback(user_data->credentials, user_data->error_code, user_data->user_data);
+
+ aws_credentials_release(user_data->credentials);
+
+ s_reset_request_specific_data(user_data);
+ aws_credentials_provider_release(user_data->provider);
+
+ aws_retry_token_release(user_data->retry_token);
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static int s_write_body_to_buffer(struct aws_credentials_provider *provider, struct aws_byte_buf *body) {
+ struct aws_credentials_provider_sts_impl *provider_impl = provider->impl;
+
+ struct aws_byte_cursor working_cur = aws_byte_cursor_from_c_str("Version=2011-06-15&Action=AssumeRole&RoleArn=");
+ if (aws_byte_buf_append_dynamic(body, &working_cur)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_cursor role_cur = aws_byte_cursor_from_string(provider_impl->assume_role_profile);
+ if (aws_byte_buf_append_encoding_uri_param(body, &role_cur)) {
+ return AWS_OP_ERR;
+ }
+ working_cur = aws_byte_cursor_from_c_str("&RoleSessionName=");
+ if (aws_byte_buf_append_dynamic(body, &working_cur)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor session_cur = aws_byte_cursor_from_string(provider_impl->role_session_name);
+ if (aws_byte_buf_append_encoding_uri_param(body, &session_cur)) {
+ return AWS_OP_ERR;
+ }
+
+ working_cur = aws_byte_cursor_from_c_str("&DurationSeconds=");
+ if (aws_byte_buf_append_dynamic(body, &working_cur)) {
+ return AWS_OP_ERR;
+ }
+
+ char duration_seconds[6];
+ AWS_ZERO_ARRAY(duration_seconds);
+ snprintf(duration_seconds, sizeof(duration_seconds), "%" PRIu16, provider_impl->duration_seconds);
+ working_cur = aws_byte_cursor_from_c_str(duration_seconds);
+ if (aws_byte_buf_append_dynamic(body, &working_cur)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
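+
+/*
+ * For illustration only (hypothetical values), the body assembled above looks like
+ *   Version=2011-06-15&Action=AssumeRole&RoleArn=arn%3Aaws%3Aiam%3A%3A123456789012%3Arole%2FExample&RoleSessionName=my-session&DurationSeconds=900
+ * where the role ARN and session name have been URI-encoded by aws_byte_buf_append_encoding_uri_param().
+ */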
+
+static int s_on_incoming_body_fn(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+ (void)stream;
+
+ struct sts_creds_provider_user_data *provider_user_data = user_data;
+ return aws_byte_buf_append_dynamic(&provider_user_data->output_buf, data);
+}
+
+/* parse doc of form
+<AssumeRoleResponse>
+ <AssumeRoleResult>
+ <Credentials>
+ <AccessKeyId>accessKeyId</AccessKeyId>
+      <SecretAccessKey>secretAccessKey</SecretAccessKey>
+ <SessionToken>sessionToken</SessionToken>
+ </Credentials>
+ <AssumedRoleUser>
+ ... more stuff we don't care about.
+ </AssumedRoleUser>
+ ... more stuff we don't care about
+ </AssumeRoleResult>
+</AssumeRoleResponse>
+ */
+static bool s_on_node_encountered_fn(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+
+ struct aws_byte_cursor node_name;
+ AWS_ZERO_STRUCT(node_name);
+
+ if (aws_xml_node_get_name(node, &node_name)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): While parsing credentials xml response for sts credentials provider, could not get xml node name "
+ "for function s_on_node_encountered_fn.",
+ user_data);
+ return false;
+ }
+
+ if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_root_name) ||
+ aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_result_name) ||
+ aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_credentials_name)) {
+ return aws_xml_node_traverse(parser, node, s_on_node_encountered_fn, user_data);
+ }
+
+ struct sts_creds_provider_user_data *provider_user_data = user_data;
+ struct aws_byte_cursor credential_data;
+ AWS_ZERO_STRUCT(credential_data);
+ if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_access_key_id_name)) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ provider_user_data->access_key_id =
+ aws_string_new_from_array(provider_user_data->allocator, credential_data.ptr, credential_data.len);
+
+ if (provider_user_data->access_key_id) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): Read AccessKeyId %s",
+ (void *)provider_user_data->provider,
+ aws_string_c_str(provider_user_data->access_key_id));
+ }
+ }
+
+ if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_secret_key_name)) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ provider_user_data->secret_access_key =
+ aws_string_new_from_array(provider_user_data->allocator, credential_data.ptr, credential_data.len);
+ }
+
+ if (aws_byte_cursor_eq_ignore_case(&node_name, &s_assume_role_session_token_name)) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ provider_user_data->session_token =
+ aws_string_new_from_array(provider_user_data->allocator, credential_data.ptr, credential_data.len);
+ }
+
+ return true;
+}
+
+static void s_start_make_request(
+ struct aws_credentials_provider *provider,
+ struct sts_creds_provider_user_data *provider_user_data);
+
+static void s_on_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
+ (void)token;
+ struct sts_creds_provider_user_data *provider_user_data = user_data;
+
+ if (!error_code) {
+ s_start_make_request(provider_user_data->provider, provider_user_data);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): retry task failed: %s",
+ (void *)provider_user_data->provider,
+ aws_error_str(aws_last_error()));
+ s_clean_up_user_data(provider_user_data);
+ }
+}
+
+/* called upon completion of http request */
+static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) {
+ int http_response_code = 0;
+ struct sts_creds_provider_user_data *provider_user_data = user_data;
+ struct aws_credentials_provider_sts_impl *provider_impl = provider_user_data->provider->impl;
+ struct aws_xml_parser *xml_parser = NULL;
+
+ provider_user_data->error_code = error_code;
+
+ if (provider_impl->function_table->aws_http_stream_get_incoming_response_status(stream, &http_response_code)) {
+ goto finish;
+ }
+
+ if (http_response_code != 200) {
+ provider_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE;
+ }
+
+ provider_impl->function_table->aws_http_stream_release(stream);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): AssumeRole call completed with http status %d",
+ (void *)provider_user_data->provider,
+ http_response_code);
+
+ if (error_code || http_response_code != AWS_HTTP_STATUS_CODE_200_OK) {
+ /* prevent connection reuse. */
+ provider_impl->function_table->aws_http_connection_close(provider_user_data->connection);
+
+ enum aws_retry_error_type error_type =
+ aws_credentials_provider_compute_retry_error_type(http_response_code, error_code);
+
+ s_reset_request_specific_data(provider_user_data);
+
+ /* don't retry client errors at all. */
+ if (error_type != AWS_RETRY_ERROR_TYPE_CLIENT_ERROR) {
+ if (aws_retry_strategy_schedule_retry(
+ provider_user_data->retry_token, error_type, s_on_retry_ready, provider_user_data)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to schedule retry: %s",
+ (void *)provider_user_data->provider,
+ aws_error_str(aws_last_error()));
+ goto finish;
+ }
+ return;
+ }
+ }
+
+ if (!error_code && http_response_code == AWS_HTTP_STATUS_CODE_200_OK) {
+        /* update the bookkeeping so the retry strategy can determine when the
+         * service is healthy after an outage. */
+ if (aws_retry_token_record_success(provider_user_data->retry_token)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to register operation success: %s",
+ (void *)provider_user_data->provider,
+ aws_error_str(aws_last_error()));
+ goto finish;
+ }
+
+ struct aws_xml_parser_options options;
+ AWS_ZERO_STRUCT(options);
+ options.doc = aws_byte_cursor_from_buf(&provider_user_data->output_buf);
+
+ xml_parser = aws_xml_parser_new(provider_user_data->provider->allocator, &options);
+
+ if (xml_parser == NULL) {
+ goto finish;
+ }
+
+ uint64_t now = UINT64_MAX;
+ if (provider_impl->system_clock_fn(&now) != AWS_OP_SUCCESS) {
+ goto finish;
+ }
+
+ uint64_t now_seconds = aws_timestamp_convert(now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
+
+ if (aws_xml_parser_parse(xml_parser, s_on_node_encountered_fn, provider_user_data)) {
+ provider_user_data->error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): credentials parsing failed with error %s",
+                (void *)provider_user_data->provider,
+ aws_error_debug_str(provider_user_data->error_code));
+ goto finish;
+ }
+
+ if (provider_user_data->access_key_id && provider_user_data->secret_access_key &&
+ provider_user_data->session_token) {
+
+ provider_user_data->credentials = aws_credentials_new_from_string(
+ provider_user_data->allocator,
+ provider_user_data->access_key_id,
+ provider_user_data->secret_access_key,
+ provider_user_data->session_token,
+ now_seconds + provider_impl->duration_seconds);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): credentials document was corrupted, treating as an error.",
+ (void *)provider_user_data->provider);
+ }
+ }
+
+finish:
+
+ if (xml_parser != NULL) {
+ aws_xml_parser_destroy(xml_parser);
+ xml_parser = NULL;
+ }
+
+ s_clean_up_user_data(provider_user_data);
+}
+
+/* called upon acquiring a connection from the pool */
+static void s_on_connection_setup_fn(struct aws_http_connection *connection, int error_code, void *user_data) {
+ struct sts_creds_provider_user_data *provider_user_data = user_data;
+ struct aws_credentials_provider_sts_impl *provider_impl = provider_user_data->provider->impl;
+ struct aws_http_stream *stream = NULL;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): connection returned with error code %d",
+ (void *)provider_user_data->provider,
+ error_code);
+
+ if (error_code) {
+ aws_raise_error(error_code);
+ goto error;
+ }
+ provider_user_data->connection = connection;
+
+ if (aws_byte_buf_init(&provider_user_data->output_buf, provider_impl->provider->allocator, 2048)) {
+ goto error;
+ }
+
+ struct aws_http_make_request_options options = {
+ .user_data = user_data,
+ .request = provider_user_data->message,
+ .self_size = sizeof(struct aws_http_make_request_options),
+ .on_response_headers = NULL,
+ .on_response_header_block_done = NULL,
+ .on_response_body = s_on_incoming_body_fn,
+ .on_complete = s_on_stream_complete_fn,
+ };
+
+ stream = provider_impl->function_table->aws_http_connection_make_request(connection, &options);
+
+ if (!stream) {
+ goto error;
+ }
+
+ if (provider_impl->function_table->aws_http_stream_activate(stream)) {
+ goto error;
+ }
+
+ return;
+error:
+ provider_impl->function_table->aws_http_stream_release(stream);
+ s_clean_up_user_data(provider_user_data);
+}
+
+/* called once sigv4 signing is complete. */
+void s_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) {
+ struct sts_creds_provider_user_data *provider_user_data = userdata;
+ struct aws_credentials_provider_sts_impl *sts_impl = provider_user_data->provider->impl;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): signing completed with error code %d",
+ (void *)provider_user_data->provider,
+ error_code);
+
+ if (error_code) {
+ aws_raise_error(error_code);
+ goto error;
+ }
+
+ if (aws_apply_signing_result_to_http_request(
+ provider_user_data->message, provider_user_data->provider->allocator, result)) {
+ goto error;
+ }
+
+ sts_impl->function_table->aws_http_connection_manager_acquire_connection(
+ sts_impl->connection_manager, s_on_connection_setup_fn, provider_user_data);
+ return;
+
+error:
+ s_clean_up_user_data(provider_user_data);
+}
+
+static void s_start_make_request(
+ struct aws_credentials_provider *provider,
+ struct sts_creds_provider_user_data *provider_user_data) {
+ provider_user_data->message = aws_http_message_new_request(provider->allocator);
+
+ if (!provider_user_data->message) {
+ goto error;
+ }
+
+ if (aws_http_message_add_header(provider_user_data->message, s_host_header)) {
+ goto error;
+ }
+
+ if (aws_http_message_add_header(provider_user_data->message, s_content_type_header)) {
+ goto error;
+ }
+
+ if (aws_byte_buf_init(&provider_user_data->payload_body, provider->allocator, 256)) {
+ goto error;
+ }
+
+ if (s_write_body_to_buffer(provider, &provider_user_data->payload_body)) {
+ goto error;
+ }
+
+ char content_length[21];
+ AWS_ZERO_ARRAY(content_length);
+ snprintf(content_length, sizeof(content_length), "%" PRIu64, (uint64_t)provider_user_data->payload_body.len);
+
+ struct aws_http_header content_len_header = {
+ .name = s_content_length,
+ .value = aws_byte_cursor_from_c_str(content_length),
+ };
+
+ if (aws_http_message_add_header(provider_user_data->message, content_len_header)) {
+ goto error;
+ }
+
+ struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&provider_user_data->payload_body);
+ provider_user_data->input_stream =
+ aws_input_stream_new_from_cursor(provider_user_data->provider->allocator, &payload_cur);
+
+ if (!provider_user_data->input_stream) {
+ goto error;
+ }
+
+ aws_http_message_set_body_stream(provider_user_data->message, provider_user_data->input_stream);
+
+ if (aws_http_message_set_request_method(provider_user_data->message, aws_http_method_post)) {
+ goto error;
+ }
+
+ if (aws_http_message_set_request_path(provider_user_data->message, s_path)) {
+ goto error;
+ }
+
+ provider_user_data->signable = aws_signable_new_http_request(provider->allocator, provider_user_data->message);
+
+ if (!provider_user_data->signable) {
+ goto error;
+ }
+
+ struct aws_credentials_provider_sts_impl *impl = provider->impl;
+
+ provider_user_data->signing_config.algorithm = AWS_SIGNING_ALGORITHM_V4;
+ provider_user_data->signing_config.signature_type = AWS_ST_HTTP_REQUEST_HEADERS;
+ provider_user_data->signing_config.signed_body_header = AWS_SBHT_NONE;
+ provider_user_data->signing_config.config_type = AWS_SIGNING_CONFIG_AWS;
+ provider_user_data->signing_config.credentials_provider = impl->provider;
+ aws_date_time_init_now(&provider_user_data->signing_config.date);
+ provider_user_data->signing_config.region = s_signing_region;
+ provider_user_data->signing_config.service = s_service_name;
+ provider_user_data->signing_config.flags.use_double_uri_encode = false;
+
+ if (aws_sign_request_aws(
+ provider->allocator,
+ provider_user_data->signable,
+ (struct aws_signing_config_base *)&provider_user_data->signing_config,
+ s_on_signing_complete,
+ provider_user_data)) {
+ goto error;
+ }
+
+ return;
+
+error:
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): error occurred while creating an http request for signing: %s",
+ (void *)provider_user_data->provider,
+ aws_error_debug_str(aws_last_error()));
+    if (provider_user_data) {
+        s_clean_up_user_data(provider_user_data);
+    }
+}
+
+static void s_on_retry_token_acquired(
+ struct aws_retry_strategy *strategy,
+ int error_code,
+ struct aws_retry_token *token,
+ void *user_data) {
+ (void)strategy;
+ struct sts_creds_provider_user_data *provider_user_data = user_data;
+
+ if (!error_code) {
+ provider_user_data->retry_token = token;
+ s_start_make_request(provider_user_data->provider, provider_user_data);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to acquire retry token: %s",
+ (void *)provider_user_data->provider,
+ aws_error_debug_str(error_code));
+ s_clean_up_user_data(provider_user_data);
+ }
+}
+
+static int s_sts_get_creds(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_sts_impl *impl = provider->impl;
+
+ AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): fetching credentials", (void *)provider);
+
+ struct sts_creds_provider_user_data *provider_user_data =
+ aws_mem_calloc(provider->allocator, 1, sizeof(struct sts_creds_provider_user_data));
+
+ if (!provider_user_data) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): error occurred while allocating memory: %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ callback(NULL, aws_last_error(), user_data);
+ return AWS_OP_ERR;
+ }
+
+ provider_user_data->allocator = provider->allocator;
+ provider_user_data->provider = provider;
+ aws_credentials_provider_acquire(provider);
+ provider_user_data->callback = callback;
+ provider_user_data->user_data = user_data;
+
+ if (aws_retry_strategy_acquire_retry_token(
+ impl->retry_strategy, NULL, s_on_retry_token_acquired, provider_user_data, 100)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to acquire retry token: %s",
+ (void *)provider_user_data->provider,
+ aws_error_debug_str(aws_last_error()));
+        provider_user_data->error_code = aws_last_error();
+        s_clean_up_user_data(provider_user_data);
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_on_credentials_provider_shutdown(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+ if (provider == NULL) {
+ return;
+ }
+
+ struct aws_credentials_provider_sts_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+    /* The wrapped provider has shut down; invoke its shutdown callback if there was one. */
+ if (impl->source_shutdown_options.shutdown_callback != NULL) {
+ impl->source_shutdown_options.shutdown_callback(impl->source_shutdown_options.shutdown_user_data);
+ }
+
+ /* Invoke our own shutdown callback */
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ aws_string_destroy(impl->role_session_name);
+ aws_string_destroy(impl->assume_role_profile);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+void s_destroy(struct aws_credentials_provider *provider) {
+ AWS_LOGF_TRACE(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): cleaning up credentials provider", (void *)provider);
+
+ struct aws_credentials_provider_sts_impl *sts_impl = provider->impl;
+
+ if (sts_impl->connection_manager) {
+ sts_impl->function_table->aws_http_connection_manager_release(sts_impl->connection_manager);
+ }
+
+ aws_retry_strategy_release(sts_impl->retry_strategy);
+ aws_credentials_provider_release(sts_impl->provider);
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_sts_vtable = {
+ .get_credentials = s_sts_get_creds,
+ .destroy = s_destroy,
+};
+
+struct aws_credentials_provider *aws_credentials_provider_new_sts(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_sts_options *options) {
+
+ if (!options->bootstrap) {
+        AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a client bootstrap is necessary for querying STS");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (!options->tls_ctx) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "a TLS context is necessary for querying STS");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_sts_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_sts_impl));
+
+ AWS_LOGF_DEBUG(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "static: creating STS credentials provider");
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_sts_vtable, impl);
+
+ impl->function_table = g_aws_credentials_provider_http_function_table;
+
+ if (options->function_table) {
+ impl->function_table = options->function_table;
+ }
+
+ struct aws_tls_connection_options tls_connection_options;
+ AWS_ZERO_STRUCT(tls_connection_options);
+
+ if (!options->creds_provider) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "(id=%p): A credentials provider must be specified", (void *)provider);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto cleanup_provider;
+ }
+
+ impl->role_session_name =
+ aws_string_new_from_array(allocator, options->session_name.ptr, options->session_name.len);
+
+ if (!impl->role_session_name) {
+ goto cleanup_provider;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): using session_name %s",
+ (void *)provider,
+ aws_string_c_str(impl->role_session_name));
+
+ impl->assume_role_profile = aws_string_new_from_array(allocator, options->role_arn.ptr, options->role_arn.len);
+
+ if (!impl->assume_role_profile) {
+ goto cleanup_provider;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): using assume_role_arn %s",
+ (void *)provider,
+ aws_string_c_str(impl->assume_role_profile));
+
+ impl->duration_seconds = options->duration_seconds;
+
+ if (options->system_clock_fn != NULL) {
+ impl->system_clock_fn = options->system_clock_fn;
+ } else {
+ impl->system_clock_fn = aws_sys_clock_get_ticks;
+ }
+
+    /* minimum for STS is 900 seconds */
+ if (impl->duration_seconds < aws_sts_assume_role_default_duration_secs) {
+ impl->duration_seconds = aws_sts_assume_role_default_duration_secs;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): using credentials duration %" PRIu16,
+ (void *)provider,
+ impl->duration_seconds);
+
+ impl->provider = options->creds_provider;
+ aws_credentials_provider_acquire(impl->provider);
+
+ aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx);
+
+ if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &s_host_header.value)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to create a tls connection options with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto cleanup_provider;
+ }
+
+ struct aws_socket_options socket_options = {
+ .type = AWS_SOCKET_STREAM,
+ .domain = AWS_SOCKET_IPV6,
+ .connect_timeout_ms = 3000,
+ };
+
+ struct aws_http_connection_manager_options connection_manager_options = {
+ .bootstrap = options->bootstrap,
+ .host = s_host_header.value,
+ .initial_window_size = SIZE_MAX,
+ .max_connections = 2,
+ .port = 443,
+ .socket_options = &socket_options,
+ .tls_connection_options = &tls_connection_options,
+ .proxy_options = options->http_proxy_options,
+ };
+
+ impl->connection_manager =
+ impl->function_table->aws_http_connection_manager_new(allocator, &connection_manager_options);
+
+ if (!impl->connection_manager) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to create a connection manager with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto cleanup_provider;
+ }
+
+ /*
+ * Save the wrapped provider's shutdown callback and then swap it with our own.
+ */
+ impl->source_shutdown_options = impl->provider->shutdown_options;
+ impl->provider->shutdown_options.shutdown_callback = s_on_credentials_provider_shutdown;
+ impl->provider->shutdown_options.shutdown_user_data = provider;
+
+ provider->shutdown_options = options->shutdown_options;
+
+ struct aws_standard_retry_options retry_options = {
+ .backoff_retry_options =
+ {
+ .el_group = options->bootstrap->event_loop_group,
+ .max_retries = s_max_retries,
+ },
+ };
+
+ impl->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options);
+
+ if (!impl->retry_strategy) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to create a retry strategy with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto cleanup_provider;
+ }
+
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+ return provider;
+
+cleanup_provider:
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+ aws_credentials_provider_release(provider);
+
+ return NULL;
+}
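
Finally, an illustrative sketch of wiring up the STS provider defined above. It wraps an existing source provider (a hypothetical `base_provider`) whose credentials sign the AssumeRole call; `allocator`, `bootstrap`, and `tls_ctx` are assumed to have been created elsewhere, and only fields read by this function are set.

    struct aws_credentials_provider_sts_options sts_options = {
        .bootstrap = bootstrap,
        .tls_ctx = tls_ctx,
        .creds_provider = base_provider, /* source credentials used to sign the AssumeRole request */
        .role_arn = aws_byte_cursor_from_c_str("arn:aws:iam::123456789012:role/Example"),
        .session_name = aws_byte_cursor_from_c_str("my-session"),
        .duration_seconds = 900, /* requests below 900 are raised to the STS minimum */
    };

    struct aws_credentials_provider *sts_provider =
        aws_credentials_provider_new_sts(allocator, &sts_options);
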
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
new file mode 100644
index 0000000000..66d84d006c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_sts_web_identity.c
@@ -0,0 +1,1188 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/aws_profile.h>
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/date_time.h>
+#include <aws/common/environment.h>
+#include <aws/common/string.h>
+#include <aws/common/uuid.h>
+#include <aws/common/xml_parser.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/file_utils.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/stream.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+#include <inttypes.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+# pragma warning(disable : 4232)
+#endif /* _MSC_VER */
+
+#define STS_WEB_IDENTITY_RESPONSE_SIZE_INITIAL 2048
+#define STS_WEB_IDENTITY_RESPONSE_SIZE_LIMIT 10000
+#define STS_WEB_IDENTITY_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2
+#define STS_WEB_IDENTITY_CREDS_DEFAULT_DURATION_SECONDS 900
+#define STS_WEB_IDENTITY_MAX_ATTEMPTS 3
+
+static void s_on_connection_manager_shutdown(void *user_data);
+
+struct aws_credentials_provider_sts_web_identity_impl {
+ struct aws_http_connection_manager *connection_manager;
+ const struct aws_auth_http_system_vtable *function_table;
+ struct aws_string *role_arn;
+ struct aws_string *role_session_name;
+ struct aws_string *token_file_path;
+};
+
+/*
+ * Tracking structure for each outstanding async query to an sts_web_identity provider
+ */
+struct sts_web_identity_user_data {
+ /* immutable post-creation */
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *sts_web_identity_provider;
+ aws_on_get_credentials_callback_fn *original_callback;
+ void *original_user_data;
+
+ /* mutable */
+ struct aws_http_connection *connection;
+ struct aws_http_message *request;
+ struct aws_byte_buf response;
+
+ struct aws_string *access_key_id;
+ struct aws_string *secret_access_key;
+ struct aws_string *session_token;
+ uint64_t expiration_timepoint_in_seconds;
+
+ struct aws_byte_buf payload_buf;
+
+ int status_code;
+ int error_code;
+ int attempt_count;
+};
+
+static void s_user_data_reset_request_and_response(struct sts_web_identity_user_data *user_data) {
+ aws_byte_buf_reset(&user_data->response, true /*zero out*/);
+ aws_byte_buf_reset(&user_data->payload_buf, true /*zero out*/);
+ user_data->status_code = 0;
+ if (user_data->request) {
+ aws_input_stream_destroy(aws_http_message_get_body_stream(user_data->request));
+ }
+ aws_http_message_destroy(user_data->request);
+ user_data->request = NULL;
+
+ aws_string_destroy(user_data->access_key_id);
+ user_data->access_key_id = NULL;
+
+ aws_string_destroy_secure(user_data->secret_access_key);
+ user_data->secret_access_key = NULL;
+
+ aws_string_destroy_secure(user_data->session_token);
+ user_data->session_token = NULL;
+}
+
+static void s_user_data_destroy(struct sts_web_identity_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+
+ struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl;
+
+ if (user_data->connection) {
+ impl->function_table->aws_http_connection_manager_release_connection(
+ impl->connection_manager, user_data->connection);
+ }
+ s_user_data_reset_request_and_response(user_data);
+ aws_byte_buf_clean_up(&user_data->response);
+
+ aws_string_destroy(user_data->access_key_id);
+ aws_string_destroy_secure(user_data->secret_access_key);
+ aws_string_destroy_secure(user_data->session_token);
+
+ aws_byte_buf_clean_up(&user_data->payload_buf);
+
+ aws_credentials_provider_release(user_data->sts_web_identity_provider);
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct sts_web_identity_user_data *s_user_data_new(
+ struct aws_credentials_provider *sts_web_identity_provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct sts_web_identity_user_data *wrapped_user_data =
+ aws_mem_calloc(sts_web_identity_provider->allocator, 1, sizeof(struct sts_web_identity_user_data));
+ if (wrapped_user_data == NULL) {
+ goto on_error;
+ }
+
+ wrapped_user_data->allocator = sts_web_identity_provider->allocator;
+ wrapped_user_data->sts_web_identity_provider = sts_web_identity_provider;
+ aws_credentials_provider_acquire(sts_web_identity_provider);
+ wrapped_user_data->original_user_data = user_data;
+ wrapped_user_data->original_callback = callback;
+
+ if (aws_byte_buf_init(
+ &wrapped_user_data->response,
+ sts_web_identity_provider->allocator,
+ STS_WEB_IDENTITY_RESPONSE_SIZE_INITIAL)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init(&wrapped_user_data->payload_buf, sts_web_identity_provider->allocator, 1024)) {
+ goto on_error;
+ }
+
+ return wrapped_user_data;
+
+on_error:
+
+ s_user_data_destroy(wrapped_user_data);
+
+ return NULL;
+}
+
+/*
+ * In general, the STS_WEB_IDENTITY response document looks something like:
+<AssumeRoleWithWebIdentityResponse xmlns="https://sts.amazonaws.com/doc/2011-06-15/">
+ <AssumeRoleWithWebIdentityResult>
+ <SubjectFromWebIdentityToken>amzn1.account.AF6RHO7KZU5XRVQJGXK6HB56KR2A</SubjectFromWebIdentityToken>
+ <Audience>client.5498841531868486423.1548@apps.example.com</Audience>
+ <AssumedRoleUser>
+ <Arn>arn:aws:sts::123456789012:assumed-role/FederatedWebIdentityRole/app1</Arn>
+ <AssumedRoleId>AROACLKWSDQRAOEXAMPLE:app1</AssumedRoleId>
+ </AssumedRoleUser>
+ <Credentials>
+ <SessionToken>AQoDYXdzEE0a8ANXXXXXXXXNO1ewxE5TijQyp+IEXAMPLE</SessionToken>
+ <SecretAccessKey>wJalrXUtnFEMI/K7MDENG/bPxRfiCYzEXAMPLEKEY</SecretAccessKey>
+ <Expiration>2014-10-24T23:00:23Z</Expiration>
+ <AccessKeyId>ASgeIAIOSFODNN7EXAMPLE</AccessKeyId>
+ </Credentials>
+ <Provider>www.amazon.com</Provider>
+ </AssumeRoleWithWebIdentityResult>
+ <ResponseMetadata>
+ <RequestId>ad4156e9-bce1-11e2-82e6-6b6efEXAMPLE</RequestId>
+ </ResponseMetadata>
+</AssumeRoleWithWebIdentityResponse>
+
+Error Response looks like:
+<?xml version="1.0" encoding="UTF-8"?>
+<Error>
+ <Code>ExceptionName</Code>
+ <Message>XXX</Message>
+ <Resource>YYY</Resource>
+ <RequestId>4442587FB7D0A2F9</RequestId>
+</Error>
+*/
+
+static bool s_on_error_node_encountered_fn(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+
+ struct aws_byte_cursor node_name;
+ AWS_ZERO_STRUCT(node_name);
+
+ if (aws_xml_node_get_name(node, &node_name)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): While parsing xml error response for sts web identity credentials provider, could not get xml "
+ "node name for function s_on_error_node_encountered_fn.",
+ user_data);
+ return false;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Error")) {
+ return aws_xml_node_traverse(parser, node, s_on_error_node_encountered_fn, user_data);
+ }
+
+ bool *get_retryable_error = user_data;
+ struct aws_byte_cursor data_cursor;
+ AWS_ZERO_STRUCT(data_cursor);
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Code")) {
+ aws_xml_node_as_body(parser, node, &data_cursor);
+ if (aws_byte_cursor_eq_c_str_ignore_case(&data_cursor, "IDPCommunicationError") ||
+ aws_byte_cursor_eq_c_str_ignore_case(&data_cursor, "InvalidIdentityToken")) {
+ *get_retryable_error = true;
+ }
+ }
+
+ return true;
+}
+
+static bool s_parse_retryable_error_from_response(struct aws_allocator *allocator, struct aws_byte_buf *response) {
+
+ struct aws_xml_parser_options options;
+ AWS_ZERO_STRUCT(options);
+ options.doc = aws_byte_cursor_from_buf(response);
+
+ struct aws_xml_parser *xml_parser = aws_xml_parser_new(allocator, &options);
+
+ if (xml_parser == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to init xml parser for sts web identity credentials provider to parse error information.");
+ return false;
+ }
+ bool get_retryable_error = false;
+ if (aws_xml_parser_parse(xml_parser, s_on_error_node_encountered_fn, &get_retryable_error)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to parse xml error response for sts web identity with error %s",
+ aws_error_str(aws_last_error()));
+ aws_xml_parser_destroy(xml_parser);
+ return false;
+ }
+
+ aws_xml_parser_destroy(xml_parser);
+ return get_retryable_error;
+}
+
+static bool s_on_creds_node_encountered_fn(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+
+ struct aws_byte_cursor node_name;
+ AWS_ZERO_STRUCT(node_name);
+
+ if (aws_xml_node_get_name(node, &node_name)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): While parsing credentials xml response for sts web identity credentials provider, could not get "
+ "xml node name for function s_on_creds_node_encountered_fn.",
+ user_data);
+ return false;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResponse") ||
+ aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AssumeRoleWithWebIdentityResult") ||
+ aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Credentials")) {
+ return aws_xml_node_traverse(parser, node, s_on_creds_node_encountered_fn, user_data);
+ }
+
+ struct sts_web_identity_user_data *query_user_data = user_data;
+ struct aws_byte_cursor credential_data;
+ AWS_ZERO_STRUCT(credential_data);
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "AccessKeyId")) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ query_user_data->access_key_id =
+ aws_string_new_from_array(query_user_data->allocator, credential_data.ptr, credential_data.len);
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SecretAccessKey")) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ query_user_data->secret_access_key =
+ aws_string_new_from_array(query_user_data->allocator, credential_data.ptr, credential_data.len);
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "SessionToken")) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ query_user_data->session_token =
+ aws_string_new_from_array(query_user_data->allocator, credential_data.ptr, credential_data.len);
+ }
+
+    /* As long as we parsed a usable expiration, use it; otherwise keep
+     * the default one (now + 900s) initialized before parsing.
+ */
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Expiration")) {
+ aws_xml_node_as_body(parser, node, &credential_data);
+ if (credential_data.len != 0) {
+ struct aws_date_time expiration;
+ if (aws_date_time_init_from_str_cursor(&expiration, &credential_data, AWS_DATE_FORMAT_ISO_8601) ==
+ AWS_OP_SUCCESS) {
+ query_user_data->expiration_timepoint_in_seconds = (uint64_t)aws_date_time_as_epoch_secs(&expiration);
+ } else {
+ query_user_data->error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to parse time string from sts web identity xml response: %s",
+ aws_error_str(query_user_data->error_code));
+ }
+ }
+ }
+ return true;
+}
+
+static struct aws_credentials *s_parse_credentials_from_response(
+ struct sts_web_identity_user_data *query_user_data,
+ struct aws_byte_buf *response) {
+
+ if (!response || response->len == 0) {
+ return NULL;
+ }
+
+ struct aws_credentials *credentials = NULL;
+
+ struct aws_xml_parser_options options;
+ AWS_ZERO_STRUCT(options);
+ options.doc = aws_byte_cursor_from_buf(response);
+
+ struct aws_xml_parser *xml_parser = aws_xml_parser_new(query_user_data->allocator, &options);
+
+ if (xml_parser == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+            "Failed to init xml parser for sts web identity credentials provider to parse credentials.");
+ return NULL;
+ }
+ uint64_t now = UINT64_MAX;
+ if (aws_sys_clock_get_ticks(&now) != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+            "Failed to get sys clock for sts web identity credentials provider while parsing credentials.");
+ goto on_finish;
+ }
+ uint64_t now_seconds = aws_timestamp_convert(now, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, NULL);
+ query_user_data->expiration_timepoint_in_seconds = now_seconds + STS_WEB_IDENTITY_CREDS_DEFAULT_DURATION_SECONDS;
+
+ if (aws_xml_parser_parse(xml_parser, s_on_creds_node_encountered_fn, query_user_data)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to parse xml response for sts web identity with error: %s",
+ aws_error_str(aws_last_error()));
+ goto on_finish;
+ }
+
+ if (!query_user_data->access_key_id || !query_user_data->secret_access_key) {
+ goto on_finish;
+ }
+
+ credentials = aws_credentials_new(
+ query_user_data->allocator,
+ aws_byte_cursor_from_string(query_user_data->access_key_id),
+ aws_byte_cursor_from_string(query_user_data->secret_access_key),
+ aws_byte_cursor_from_string(query_user_data->session_token),
+ query_user_data->expiration_timepoint_in_seconds);
+
+on_finish:
+
+ if (credentials == NULL) {
+ query_user_data->error_code = aws_last_error();
+ }
+
+ if (xml_parser != NULL) {
+ aws_xml_parser_destroy(xml_parser);
+ xml_parser = NULL;
+ }
+
+ return credentials;
+}
+
+/*
+ * No matter the result, this always gets called assuming that user_data is successfully allocated
+ */
+static void s_finalize_get_credentials_query(struct sts_web_identity_user_data *user_data) {
+ /* Try to build credentials from whatever, if anything, was in the result */
+ struct aws_credentials *credentials = NULL;
+ if (user_data->status_code == AWS_HTTP_STATUS_CODE_200_OK) {
+ credentials = s_parse_credentials_from_response(user_data, &user_data->response);
+ }
+
+ if (credentials != NULL) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider successfully queried credentials",
+ (void *)user_data->sts_web_identity_provider);
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider failed to query credentials",
+ (void *)user_data->sts_web_identity_provider);
+
+ if (user_data->error_code == AWS_ERROR_SUCCESS) {
+ user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_STS_WEB_IDENTITY_SOURCE_FAILURE;
+ }
+ }
+
+ /* pass the credentials back */
+ user_data->original_callback(credentials, user_data->error_code, user_data->original_user_data);
+
+ /* clean up */
+ s_user_data_destroy(user_data);
+ aws_credentials_release(credentials);
+}
+
+static int s_on_incoming_body_fn(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *body,
+ void *wrapped_user_data) {
+
+ (void)stream;
+
+ struct sts_web_identity_user_data *user_data = wrapped_user_data;
+ struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider received %zu response bytes",
+ (void *)user_data->sts_web_identity_provider,
+ body->len);
+
+ if (body->len + user_data->response.len > STS_WEB_IDENTITY_RESPONSE_SIZE_LIMIT) {
+ impl->function_table->aws_http_connection_close(user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider query response exceeded maximum allowed length",
+ (void *)user_data->sts_web_identity_provider);
+
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ if (aws_byte_buf_append_dynamic(&user_data->response, body)) {
+ impl->function_table->aws_http_connection_close(user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider query error appending response: %s",
+ (void *)user_data->sts_web_identity_provider,
+ aws_error_str(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_incoming_headers_fn(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *wrapped_user_data) {
+
+ (void)header_array;
+ (void)num_headers;
+
+ if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct sts_web_identity_user_data *user_data = wrapped_user_data;
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ if (user_data->status_code == 0) {
+ struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl;
+ if (impl->function_table->aws_http_stream_get_incoming_response_status(stream, &user_data->status_code)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider failed to get http status code: %s",
+ (void *)user_data->sts_web_identity_provider,
+ aws_error_str(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) STS_WEB_IDENTITY credentials provider query received http status code %d",
+ (void *)user_data->sts_web_identity_provider,
+ user_data->status_code);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_query_credentials(struct sts_web_identity_user_data *user_data);
+
+static void s_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *data) {
+ struct sts_web_identity_user_data *user_data = data;
+
+ struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl;
+ struct aws_http_connection *connection = impl->function_table->aws_http_stream_get_connection(stream);
+ impl->function_table->aws_http_stream_release(stream);
+ impl->function_table->aws_http_connection_manager_release_connection(impl->connection_manager, connection);
+
+ /*
+     * On anything other than a 200, retry the request if the error response
+     * indicates a retryable error; otherwise, call the finalize function.
+ */
+ if (user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || error_code != AWS_OP_SUCCESS) {
+ if (++user_data->attempt_count < STS_WEB_IDENTITY_MAX_ATTEMPTS && user_data->response.len) {
+ if (s_parse_retryable_error_from_response(user_data->allocator, &user_data->response)) {
+ s_query_credentials(user_data);
+ return;
+ }
+ }
+ }
+
+ s_finalize_get_credentials_query(user_data);
+}
+
+static struct aws_http_header s_host_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("host"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("sts.amazonaws.com"),
+};
+
+static struct aws_http_header s_content_type_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-type"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/x-www-form-urlencoded"),
+};
+
+static struct aws_http_header s_api_version_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-api-version"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("2011-06-15"),
+};
+static struct aws_http_header s_accept_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Accept"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("*/*"),
+};
+
+static struct aws_http_header s_user_agent_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("User-Agent"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-sdk-crt/sts-web-identity-credentials-provider"),
+};
+
+static struct aws_http_header s_keep_alive_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("keep-alive"),
+};
+
+static struct aws_byte_cursor s_content_length = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("content-length");
+static struct aws_byte_cursor s_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/");
+
+static int s_make_sts_web_identity_http_query(
+ struct sts_web_identity_user_data *user_data,
+ struct aws_byte_cursor *body_cursor) {
+ AWS_FATAL_ASSERT(user_data->connection);
+
+ struct aws_http_stream *stream = NULL;
+ struct aws_input_stream *input_stream = NULL;
+ struct aws_http_message *request = aws_http_message_new_request(user_data->allocator);
+ if (request == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl;
+
+ char content_length[21];
+ AWS_ZERO_ARRAY(content_length);
+ snprintf(content_length, sizeof(content_length), "%" PRIu64, (uint64_t)body_cursor->len);
+
+ struct aws_http_header content_len_header = {
+ .name = s_content_length,
+ .value = aws_byte_cursor_from_c_str(content_length),
+ };
+
+ if (aws_http_message_add_header(request, content_len_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_content_type_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_host_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_api_version_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_accept_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_user_agent_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_add_header(request, s_keep_alive_header)) {
+ goto on_error;
+ }
+
+ input_stream = aws_input_stream_new_from_cursor(user_data->allocator, body_cursor);
+ if (!input_stream) {
+ goto on_error;
+ }
+
+ aws_http_message_set_body_stream(request, input_stream);
+
+ if (aws_http_message_set_request_path(request, s_path)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_method(request, aws_http_method_post)) {
+ goto on_error;
+ }
+
+ user_data->request = request;
+
+ struct aws_http_make_request_options request_options = {
+ .self_size = sizeof(request_options),
+ .on_response_headers = s_on_incoming_headers_fn,
+ .on_response_header_block_done = NULL,
+ .on_response_body = s_on_incoming_body_fn,
+ .on_complete = s_on_stream_complete_fn,
+ .user_data = user_data,
+ .request = request,
+ };
+
+ stream = impl->function_table->aws_http_connection_make_request(user_data->connection, &request_options);
+
+ if (!stream) {
+ goto on_error;
+ }
+
+ if (impl->function_table->aws_http_stream_activate(stream)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ impl->function_table->aws_http_stream_release(stream);
+ aws_input_stream_destroy(input_stream);
+ aws_http_message_destroy(request);
+ user_data->request = NULL;
+ return AWS_OP_ERR;
+}
+
+static void s_query_credentials(struct sts_web_identity_user_data *user_data) {
+ AWS_FATAL_ASSERT(user_data->connection);
+
+ struct aws_credentials_provider_sts_web_identity_impl *impl = user_data->sts_web_identity_provider->impl;
+
+ /* "Clear" the result */
+ s_user_data_reset_request_and_response(user_data);
+
+ /*
+ * Calculate body message:
+ * "Action=AssumeRoleWithWebIdentity"
+ * + "&Version=2011-06-15"
+ * + "&RoleSessionName=" + url_encode(role_session_name)
+ * + "&RoleArn=" + url_encode(role_arn)
+ * + "&WebIdentityToken=" + url_encode(token);
+ */
+ struct aws_byte_buf token_buf;
+ bool success = false;
+
+ AWS_ZERO_STRUCT(token_buf);
+
+ struct aws_byte_cursor work_cursor =
+ aws_byte_cursor_from_c_str("Action=AssumeRoleWithWebIdentity&Version=2011-06-15&RoleArn=");
+ if (aws_byte_buf_append_dynamic(&user_data->payload_buf, &work_cursor)) {
+ goto on_finish;
+ }
+
+ work_cursor = aws_byte_cursor_from_string(impl->role_arn);
+ if (aws_byte_buf_append_encoding_uri_param(&user_data->payload_buf, &work_cursor)) {
+ goto on_finish;
+ }
+
+ work_cursor = aws_byte_cursor_from_c_str("&RoleSessionName=");
+ if (aws_byte_buf_append_dynamic(&user_data->payload_buf, &work_cursor)) {
+ goto on_finish;
+ }
+
+ work_cursor = aws_byte_cursor_from_string(impl->role_session_name);
+ if (aws_byte_buf_append_encoding_uri_param(&user_data->payload_buf, &work_cursor)) {
+ goto on_finish;
+ }
+
+ work_cursor = aws_byte_cursor_from_c_str("&WebIdentityToken=");
+ if (aws_byte_buf_append_dynamic(&user_data->payload_buf, &work_cursor)) {
+ goto on_finish;
+ }
+
+ if (aws_byte_buf_init_from_file(&token_buf, user_data->allocator, aws_string_c_str(impl->token_file_path))) {
+ goto on_finish;
+ }
+ work_cursor = aws_byte_cursor_from_buf(&token_buf);
+ if (aws_byte_buf_append_encoding_uri_param(&user_data->payload_buf, &work_cursor)) {
+ goto on_finish;
+ }
+ struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&user_data->payload_buf);
+
+ if (s_make_sts_web_identity_http_query(user_data, &body_cursor) == AWS_OP_ERR) {
+ goto on_finish;
+ }
+ success = true;
+
+on_finish:
+ aws_byte_buf_clean_up(&token_buf);
+ if (!success) {
+ s_finalize_get_credentials_query(user_data);
+ }
+}
+
+static void s_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *data) {
+ struct sts_web_identity_user_data *user_data = data;
+
+ if (connection == NULL) {
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "id=%p: STS_WEB_IDENTITY provider failed to acquire a connection, error code %d(%s)",
+ (void *)user_data->sts_web_identity_provider,
+ error_code,
+ aws_error_str(error_code));
+
+ s_finalize_get_credentials_query(user_data);
+ return;
+ }
+
+ user_data->connection = connection;
+
+ s_query_credentials(user_data);
+}
+
+static int s_credentials_provider_sts_web_identity_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_sts_web_identity_impl *impl = provider->impl;
+
+ struct sts_web_identity_user_data *wrapped_user_data = s_user_data_new(provider, callback, user_data);
+ if (wrapped_user_data == NULL) {
+ goto error;
+ }
+
+ impl->function_table->aws_http_connection_manager_acquire_connection(
+ impl->connection_manager, s_on_acquire_connection, wrapped_user_data);
+
+ return AWS_OP_SUCCESS;
+
+error:
+ s_user_data_destroy(wrapped_user_data);
+ return AWS_OP_ERR;
+}
+
+static void s_credentials_provider_sts_web_identity_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_sts_web_identity_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_string_destroy(impl->role_arn);
+ aws_string_destroy(impl->role_session_name);
+ aws_string_destroy(impl->token_file_path);
+ /* aws_http_connection_manager_release will eventually lead to a call of s_on_connection_manager_shutdown,
+ * which releases the memory for both provider and impl. So we must free the impl-related
+ * memory first and only then call aws_http_connection_manager_release.
+ */
+ if (impl->connection_manager) {
+ impl->function_table->aws_http_connection_manager_release(impl->connection_manager);
+ } else {
+ /* If provider setup failed halfway through, connection_manager might not exist.
+ * In that case, invoke the shutdown completion callback directly to finish the cleanup. */
+ s_on_connection_manager_shutdown(provider);
+ }
+
+ /* freeing the provider takes place in the shutdown callback below */
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_sts_web_identity_vtable = {
+ .get_credentials = s_credentials_provider_sts_web_identity_get_credentials_async,
+ .destroy = s_credentials_provider_sts_web_identity_destroy,
+};
+
+static void s_on_connection_manager_shutdown(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+ aws_mem_release(provider->allocator, provider);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_region_config, "region");
+AWS_STATIC_STRING_FROM_LITERAL(s_region_env, "AWS_DEFAULT_REGION");
+AWS_STATIC_STRING_FROM_LITERAL(s_role_arn_config, "role_arn");
+AWS_STATIC_STRING_FROM_LITERAL(s_role_arn_env, "AWS_ROLE_ARN");
+AWS_STATIC_STRING_FROM_LITERAL(s_role_session_name_config, "role_session_name");
+AWS_STATIC_STRING_FROM_LITERAL(s_role_session_name_env, "AWS_ROLE_SESSION_NAME");
+AWS_STATIC_STRING_FROM_LITERAL(s_token_file_path_config, "web_identity_token_file");
+AWS_STATIC_STRING_FROM_LITERAL(s_token_file_path_env, "AWS_WEB_IDENTITY_TOKEN_FILE");
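+
+/*
+ * For illustration only (all values hypothetical), these keys could be satisfied by a config profile such as:
+ *
+ *   [default]
+ *   region = us-east-1
+ *   role_arn = arn:aws:iam::123456789012:role/my-web-identity-role
+ *   role_session_name = my-session
+ *   web_identity_token_file = /var/run/secrets/eks.amazonaws.com/serviceaccount/token
+ */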
+
+struct sts_web_identity_parameters {
+ struct aws_allocator *allocator;
+ /* region is actually used to construct endpoint */
+ struct aws_byte_buf endpoint;
+ struct aws_byte_buf role_arn;
+ struct aws_byte_buf role_session_name;
+ struct aws_byte_buf token_file_path;
+};
+
+struct aws_profile_collection *s_load_profile(struct aws_allocator *allocator) {
+
+ struct aws_profile_collection *config_profiles = NULL;
+ struct aws_string *config_file_path = NULL;
+
+ config_file_path = aws_get_config_file_path(allocator, NULL);
+ if (!config_file_path) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve config file path during sts web identity provider initialization: %s",
+ aws_error_str(aws_last_error()));
+ goto on_error;
+ }
+
+ config_profiles = aws_profile_collection_new_from_file(allocator, config_file_path, AWS_PST_CONFIG);
+ if (config_profiles != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Successfully built config profile collection from file at (%s)",
+ aws_string_c_str(config_file_path));
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to build config profile collection from file at (%s) : %s",
+ aws_string_c_str(config_file_path),
+ aws_error_str(aws_last_error()));
+ goto on_error;
+ }
+
+ aws_string_destroy(config_file_path);
+ return config_profiles;
+
+on_error:
+ aws_string_destroy(config_file_path);
+ aws_profile_collection_destroy(config_profiles);
+ return NULL;
+}
+
+static struct aws_byte_cursor s_default_profile_name_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("default");
+static struct aws_byte_cursor s_dot_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".");
+static struct aws_byte_cursor s_amazonaws_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".amazonaws.com");
+static struct aws_byte_cursor s_cn_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(".cn");
+AWS_STATIC_STRING_FROM_LITERAL(s_sts_service_name, "sts");
+
+static int s_construct_endpoint(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *endpoint,
+ const struct aws_string *region,
+ const struct aws_string *service_name) {
+
+ if (!allocator || !endpoint || !region || !service_name) {
+ return AWS_ERROR_INVALID_ARGUMENT;
+ }
+ aws_byte_buf_clean_up(endpoint);
+
+ struct aws_byte_cursor service_cursor = aws_byte_cursor_from_string(service_name);
+ if (aws_byte_buf_init_copy_from_cursor(endpoint, allocator, service_cursor)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_append_dynamic(endpoint, &s_dot_cursor)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor region_cursor;
+ region_cursor = aws_byte_cursor_from_array(region->bytes, region->len);
+ if (aws_byte_buf_append_dynamic(endpoint, &region_cursor)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_append_dynamic(endpoint, &s_amazonaws_cursor)) {
+ goto on_error;
+ }
+
+ if (aws_string_eq_c_str_ignore_case(region, "cn-north-1") ||
+ aws_string_eq_c_str_ignore_case(region, "cn-northwest-1")) {
+ if (aws_byte_buf_append_dynamic(endpoint, &s_cn_cursor)) {
+ goto on_error;
+ }
+ }
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_byte_buf_clean_up(endpoint);
+ return AWS_OP_ERR;
+}
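+
+/*
+ * For illustration (regions hypothetical): "us-east-1" yields the endpoint "sts.us-east-1.amazonaws.com",
+ * while "cn-north-1" yields "sts.cn-north-1.amazonaws.com.cn".
+ */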
+
+static int s_generate_uuid_to_buf(struct aws_allocator *allocator, struct aws_byte_buf *dst) {
+
+ if (!allocator || !dst) {
+ return AWS_ERROR_INVALID_ARGUMENT;
+ }
+
+ struct aws_uuid uuid;
+ if (aws_uuid_init(&uuid)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to initiate an uuid struct: %s", aws_error_str(aws_last_error()));
+ return aws_last_error();
+ }
+
+ char uuid_str[AWS_UUID_STR_LEN] = {0};
+ struct aws_byte_buf uuid_buf = aws_byte_buf_from_array(uuid_str, sizeof(uuid_str));
+ uuid_buf.len = 0;
+ if (aws_uuid_to_str(&uuid, &uuid_buf)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to stringify uuid: %s", aws_error_str(aws_last_error()));
+ return aws_last_error();
+ }
+ if (aws_byte_buf_init_copy(dst, allocator, &uuid_buf)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to generate role session name during sts web identity provider initialization: %s",
+ aws_error_str(aws_last_error()));
+ return aws_last_error();
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static void s_check_or_get_with_profile_config(
+ struct aws_allocator *allocator,
+ const struct aws_profile *profile,
+ struct aws_string **target,
+ const struct aws_string *config_key) {
+
+ if (!allocator || !profile || !config_key) {
+ return;
+ }
+ if ((!(*target) || !(*target)->len)) {
+ if (*target) {
+ aws_string_destroy(*target);
+ }
+ const struct aws_profile_property *property = aws_profile_get_property(profile, config_key);
+ if (property) {
+ *target = aws_string_new_from_string(allocator, aws_profile_property_get_value(property));
+ }
+ }
+}
+
+static void s_parameters_destroy(struct sts_web_identity_parameters *parameters) {
+ if (!parameters) {
+ return;
+ }
+ aws_byte_buf_clean_up(&parameters->endpoint);
+ aws_byte_buf_clean_up(&parameters->role_arn);
+ aws_byte_buf_clean_up(&parameters->role_session_name);
+ aws_byte_buf_clean_up(&parameters->token_file_path);
+ aws_mem_release(parameters->allocator, parameters);
+}
+
+static struct sts_web_identity_parameters *s_parameters_new(
+ struct aws_allocator *allocator,
+ struct aws_profile_collection *config_profile_collection_cached) {
+
+ struct sts_web_identity_parameters *parameters =
+ aws_mem_calloc(allocator, 1, sizeof(struct sts_web_identity_parameters));
+ if (parameters == NULL) {
+ return NULL;
+ }
+ parameters->allocator = allocator;
+
+ bool success = false;
+ struct aws_string *region = NULL;
+ struct aws_string *role_arn = NULL;
+ struct aws_string *role_session_name = NULL;
+ struct aws_string *token_file_path = NULL;
+
+ /* check environment variables */
+ aws_get_environment_value(allocator, s_region_env, &region);
+ aws_get_environment_value(allocator, s_role_arn_env, &role_arn);
+ aws_get_environment_value(allocator, s_role_session_name_env, &role_session_name);
+ aws_get_environment_value(allocator, s_token_file_path_env, &token_file_path);
+
+ /**
+ * check the config profile if any of region, role_arn, token_file_path or role_session_name was not resolved from
+ * an environment variable. The role session name can also be generated from a uuid if it is found in neither source.
+ */
+ struct aws_profile_collection *config_profile = NULL;
+ struct aws_string *profile_name = NULL;
+ const struct aws_profile *profile = NULL;
+ bool get_all_parameters =
+ (region && region->len && role_arn && role_arn->len && token_file_path && token_file_path->len);
+ if (!get_all_parameters) {
+ if (config_profile_collection_cached) {
+ /* Use cached profile collection */
+ config_profile = aws_profile_collection_acquire(config_profile_collection_cached);
+ } else {
+ /* Load profile collection from files */
+ config_profile = s_load_profile(allocator);
+ if (!config_profile) {
+ goto on_finish;
+ }
+ }
+
+ profile_name = aws_get_profile_name(allocator, &s_default_profile_name_cursor);
+ if (profile_name) {
+ profile = aws_profile_collection_get_profile(config_profile, profile_name);
+ }
+
+ if (!profile) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve either region, role arn or token file path during sts web identity provider "
+ "initialization.");
+ goto on_finish;
+
+ } else {
+ s_check_or_get_with_profile_config(allocator, profile, &region, s_region_config);
+ s_check_or_get_with_profile_config(allocator, profile, &role_arn, s_role_arn_config);
+ s_check_or_get_with_profile_config(allocator, profile, &role_session_name, s_role_session_name_config);
+ s_check_or_get_with_profile_config(allocator, profile, &token_file_path, s_token_file_path_config);
+ }
+ }
+
+ /* determine endpoint */
+ if (s_construct_endpoint(allocator, &parameters->endpoint, region, s_sts_service_name)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to construct sts endpoint with, probably region is missing.");
+ goto on_finish;
+ }
+
+ /* determine role_arn */
+ if (!role_arn || !role_arn->len ||
+ aws_byte_buf_init_copy_from_cursor(&parameters->role_arn, allocator, aws_byte_cursor_from_string(role_arn))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve role arn during sts web identity provider initialization.");
+ goto on_finish;
+ }
+
+ /* determine token_file_path */
+ if (!token_file_path || !token_file_path->len ||
+ aws_byte_buf_init_copy_from_cursor(
+ &parameters->token_file_path, allocator, aws_byte_cursor_from_string(token_file_path))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to resolve token file path during sts web identity provider initialization.");
+ goto on_finish;
+ }
+
+ /* determine role_session_name */
+ if (role_session_name && role_session_name->len) {
+ if (aws_byte_buf_init_copy_from_cursor(
+ &parameters->role_session_name, allocator, aws_byte_cursor_from_string(role_session_name))) {
+ goto on_finish;
+ }
+ } else if (s_generate_uuid_to_buf(allocator, &parameters->role_session_name)) {
+ goto on_finish;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Successfully loaded all required parameters for sts web identity credentials provider.");
+ success = true;
+
+on_finish:
+ aws_string_destroy(region);
+ aws_string_destroy(role_arn);
+ aws_string_destroy(role_session_name);
+ aws_string_destroy(token_file_path);
+ aws_string_destroy(profile_name);
+ aws_profile_collection_release(config_profile);
+ if (!success) {
+ s_parameters_destroy(parameters);
+ parameters = NULL;
+ }
+ return parameters;
+}
+
+struct aws_credentials_provider *aws_credentials_provider_new_sts_web_identity(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_sts_web_identity_options *options) {
+
+ struct sts_web_identity_parameters *parameters =
+ s_parameters_new(allocator, options->config_profile_collection_cached);
+ if (!parameters) {
+ return NULL;
+ }
+
+ struct aws_tls_connection_options tls_connection_options;
+ AWS_ZERO_STRUCT(tls_connection_options);
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_sts_web_identity_impl *impl = NULL;
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_sts_web_identity_impl));
+
+ if (!provider) {
+ goto on_error;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_sts_web_identity_vtable, impl);
+
+ if (!options->tls_ctx) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "a TLS context must be provided to the STS web identity credentials provider");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ aws_tls_connection_options_init_from_ctx(&tls_connection_options, options->tls_ctx);
+ struct aws_byte_cursor host = aws_byte_cursor_from_buf(&parameters->endpoint);
+ if (aws_tls_connection_options_set_server_name(&tls_connection_options, allocator, &host)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to create a tls connection options with error %s",
+ (void *)provider,
+ aws_error_str(aws_last_error()));
+ goto on_error;
+ }
+
+ struct aws_socket_options socket_options;
+ AWS_ZERO_STRUCT(socket_options);
+ socket_options.type = AWS_SOCKET_STREAM;
+ socket_options.domain = AWS_SOCKET_IPV4;
+ socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert(
+ STS_WEB_IDENTITY_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+
+ struct aws_http_connection_manager_options manager_options;
+ AWS_ZERO_STRUCT(manager_options);
+ manager_options.bootstrap = options->bootstrap;
+ manager_options.initial_window_size = STS_WEB_IDENTITY_RESPONSE_SIZE_LIMIT;
+ manager_options.socket_options = &socket_options;
+ manager_options.host = host;
+ manager_options.port = 443;
+ manager_options.max_connections = 2;
+ manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown;
+ manager_options.shutdown_complete_user_data = provider;
+ manager_options.tls_connection_options = &tls_connection_options;
+
+ impl->function_table = options->function_table;
+ if (impl->function_table == NULL) {
+ impl->function_table = g_aws_credentials_provider_http_function_table;
+ }
+
+ impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options);
+ if (impl->connection_manager == NULL) {
+ goto on_error;
+ }
+
+ impl->role_arn = aws_string_new_from_array(allocator, parameters->role_arn.buffer, parameters->role_arn.len);
+ if (impl->role_arn == NULL) {
+ goto on_error;
+ }
+
+ impl->role_session_name =
+ aws_string_new_from_array(allocator, parameters->role_session_name.buffer, parameters->role_session_name.len);
+ if (impl->role_session_name == NULL) {
+ goto on_error;
+ }
+
+ impl->token_file_path =
+ aws_string_new_from_array(allocator, parameters->token_file_path.buffer, parameters->token_file_path.len);
+ if (impl->token_file_path == NULL) {
+ goto on_error;
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+ s_parameters_destroy(parameters);
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+ return provider;
+
+on_error:
+
+ aws_credentials_provider_destroy(provider);
+ s_parameters_destroy(parameters);
+ aws_tls_connection_options_clean_up(&tls_connection_options);
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
new file mode 100644
index 0000000000..8917636a26
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_provider_x509.c
@@ -0,0 +1,629 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/credentials.h>
+
+#include <aws/auth/private/credentials_utils.h>
+#include <aws/common/clock.h>
+#include <aws/common/date_time.h>
+#include <aws/common/string.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <aws/common/json.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+# pragma warning(disable : 4232)
+#endif /* _MSC_VER */
+
+/* IoT Core credentials body response is currently ~1100 bytes */
+#define X509_RESPONSE_SIZE_INITIAL 1024
+#define X509_RESPONSE_SIZE_LIMIT 2048
+#define X509_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS 2
+
+struct aws_credentials_provider_x509_impl {
+ struct aws_http_connection_manager *connection_manager;
+ const struct aws_auth_http_system_vtable *function_table;
+ struct aws_byte_buf thing_name;
+ struct aws_byte_buf role_alias_path;
+ struct aws_byte_buf endpoint;
+ struct aws_tls_connection_options tls_connection_options;
+};
+
+/*
+ * Tracking structure for each outstanding async query to an x509 provider
+ */
+struct aws_credentials_provider_x509_user_data {
+ /* immutable post-creation */
+ struct aws_allocator *allocator;
+ struct aws_credentials_provider *x509_provider;
+ aws_on_get_credentials_callback_fn *original_callback;
+ void *original_user_data;
+
+ /* mutable */
+ struct aws_http_connection *connection;
+ struct aws_http_message *request;
+ struct aws_byte_buf response;
+ int status_code;
+ int error_code;
+};
+
+static void s_aws_credentials_provider_x509_user_data_destroy(
+ struct aws_credentials_provider_x509_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+
+ struct aws_credentials_provider_x509_impl *impl = user_data->x509_provider->impl;
+
+ if (user_data->connection) {
+ impl->function_table->aws_http_connection_manager_release_connection(
+ impl->connection_manager, user_data->connection);
+ }
+
+ aws_byte_buf_clean_up(&user_data->response);
+
+ if (user_data->request) {
+ aws_http_message_destroy(user_data->request);
+ }
+ aws_credentials_provider_release(user_data->x509_provider);
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct aws_credentials_provider_x509_user_data *s_aws_credentials_provider_x509_user_data_new(
+ struct aws_credentials_provider *x509_provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_x509_user_data *wrapped_user_data =
+ aws_mem_calloc(x509_provider->allocator, 1, sizeof(struct aws_credentials_provider_x509_user_data));
+ if (wrapped_user_data == NULL) {
+ goto on_error;
+ }
+
+ wrapped_user_data->allocator = x509_provider->allocator;
+ wrapped_user_data->x509_provider = x509_provider;
+ aws_credentials_provider_acquire(x509_provider);
+ wrapped_user_data->original_user_data = user_data;
+ wrapped_user_data->original_callback = callback;
+
+ if (aws_byte_buf_init(&wrapped_user_data->response, x509_provider->allocator, X509_RESPONSE_SIZE_INITIAL)) {
+ goto on_error;
+ }
+
+ return wrapped_user_data;
+
+on_error:
+
+ s_aws_credentials_provider_x509_user_data_destroy(wrapped_user_data);
+
+ return NULL;
+}
+
+static void s_aws_credentials_provider_x509_user_data_reset_response(
+ struct aws_credentials_provider_x509_user_data *x509_user_data) {
+ x509_user_data->response.len = 0;
+ x509_user_data->status_code = 0;
+
+ if (x509_user_data->request) {
+ aws_http_message_destroy(x509_user_data->request);
+ x509_user_data->request = NULL;
+ }
+}
+
+/*
+ * In general, the returned json document looks something like:
+{
+ "credentials": {
+ "accessKeyId" : "...",
+ "secretAccessKey" : "...",
+ "sessionToken" : "...",
+ "expiration" : "2019-05-29T00:21:43Z"
+ }
+}
+ */
+static struct aws_credentials *s_parse_credentials_from_iot_core_document(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *document) {
+
+ struct aws_credentials *credentials = NULL;
+ struct aws_json_value *document_root = NULL;
+
+ if (aws_byte_buf_append_null_terminator(document)) {
+ goto done;
+ }
+
+ struct aws_byte_cursor document_cursor = aws_byte_cursor_from_buf(document);
+ document_root = aws_json_value_new_from_string(allocator, document_cursor);
+ if (document_root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse IoT Core response as Json document.");
+ goto done;
+ }
+
+ /*
+ * pull out the root "Credentials" components
+ */
+ struct aws_json_value *creds =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str("credentials"));
+ if (!aws_json_value_is_object(creds)) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse credentials from IoT Core response.");
+ goto done;
+ }
+
+ struct aws_parse_credentials_from_json_doc_options parse_options = {
+ .access_key_id_name = "accessKeyId",
+ .secret_access_key_name = "secretAccessKey",
+ .token_name = "sessionToken",
+ .expiration_name = "expiration",
+ .expiration_format = AWS_PCEF_STRING_ISO_8601_DATE,
+ .token_required = true,
+ .expiration_required = false,
+ };
+
+ credentials = aws_parse_credentials_from_aws_json_object(allocator, creds, &parse_options);
+ if (!credentials) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "X509 credentials provider failed to parse credentials");
+ }
+
+done:
+
+ if (document_root != NULL) {
+ aws_json_value_destroy(document_root);
+ }
+
+ return credentials;
+}
+
+/*
+ * No matter the result, this always gets called assuming that x509_user_data is successfully allocated
+ */
+static void s_x509_finalize_get_credentials_query(struct aws_credentials_provider_x509_user_data *x509_user_data) {
+ /* Try to build credentials from whatever, if anything, was in the result */
+ struct aws_credentials *credentials =
+ s_parse_credentials_from_iot_core_document(x509_user_data->allocator, &x509_user_data->response);
+
+ if (credentials != NULL) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider successfully queried credentials",
+ (void *)x509_user_data->x509_provider);
+ } else {
+ if (x509_user_data->error_code == AWS_ERROR_SUCCESS) {
+ x509_user_data->error_code = aws_last_error();
+ if (x509_user_data->error_code == AWS_ERROR_SUCCESS) {
+ x509_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_X509_SOURCE_FAILURE;
+ }
+ }
+
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider failed to query credentials with error %d(%s)",
+ (void *)x509_user_data->x509_provider,
+ x509_user_data->error_code,
+ aws_error_str(x509_user_data->error_code));
+ }
+
+ /* pass the credentials back */
+ x509_user_data->original_callback(credentials, x509_user_data->error_code, x509_user_data->original_user_data);
+
+ /* clean up */
+ s_aws_credentials_provider_x509_user_data_destroy(x509_user_data);
+ aws_credentials_release(credentials);
+}
+
+static int s_x509_on_incoming_body_fn(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data) {
+
+ (void)stream;
+
+ struct aws_credentials_provider_x509_user_data *x509_user_data = user_data;
+ struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider received %zu response bytes",
+ (void *)x509_user_data->x509_provider,
+ data->len);
+
+ if (data->len + x509_user_data->response.len > X509_RESPONSE_SIZE_LIMIT) {
+ impl->function_table->aws_http_connection_close(x509_user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider query response exceeded maximum allowed length",
+ (void *)x509_user_data->x509_provider);
+
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ if (aws_byte_buf_append_dynamic(&x509_user_data->response, data)) {
+ impl->function_table->aws_http_connection_close(x509_user_data->connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider query error appending response",
+ (void *)x509_user_data->x509_provider);
+
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_x509_on_incoming_headers_fn(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data) {
+
+ (void)header_array;
+ (void)num_headers;
+
+ if (header_block != AWS_HTTP_HEADER_BLOCK_MAIN) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_credentials_provider_x509_user_data *x509_user_data = user_data;
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ if (x509_user_data->status_code == 0) {
+ struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl;
+ if (impl->function_table->aws_http_stream_get_incoming_response_status(
+ stream, &x509_user_data->status_code)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider failed to get http status code",
+ (void *)x509_user_data->x509_provider);
+
+ return AWS_OP_ERR;
+ }
+ AWS_LOGF_DEBUG(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p) X509 credentials provider query received http status code %d",
+ (void *)x509_user_data->x509_provider,
+ x509_user_data->status_code);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_x509_on_stream_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data) {
+ struct aws_credentials_provider_x509_user_data *x509_user_data = user_data;
+
+ aws_http_message_destroy(x509_user_data->request);
+ x509_user_data->request = NULL;
+
+ struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl;
+ impl->function_table->aws_http_stream_release(stream);
+
+ /*
+ * On anything other than a 200, nullify the response and pretend there was
+ * an error
+ */
+ if (x509_user_data->status_code != AWS_HTTP_STATUS_CODE_200_OK || error_code != AWS_OP_SUCCESS) {
+ x509_user_data->response.len = 0;
+
+ if (error_code != AWS_OP_SUCCESS) {
+ x509_user_data->error_code = error_code;
+ } else {
+ x509_user_data->error_code = AWS_AUTH_CREDENTIALS_PROVIDER_HTTP_STATUS_FAILURE;
+ }
+ }
+
+ s_x509_finalize_get_credentials_query(x509_user_data);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_accept_header, "Accept");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_accept_header_value, "*/*");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_user_agent_header, "User-Agent");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_user_agent_header_value, "aws-sdk-crt/x509-credentials-provider");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_h1_0_keep_alive_header, "Connection");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_h1_0_keep_alive_header_value, "keep-alive");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_thing_name_header, "x-amzn-iot-thingname");
+AWS_STATIC_STRING_FROM_LITERAL(s_x509_host_header, "Host");
+
+static int s_make_x509_http_query(
+ struct aws_credentials_provider_x509_user_data *x509_user_data,
+ struct aws_byte_cursor *request_path) {
+ AWS_FATAL_ASSERT(x509_user_data->connection);
+
+ struct aws_http_stream *stream = NULL;
+ struct aws_http_message *request = aws_http_message_new_request(x509_user_data->allocator);
+ if (request == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl;
+
+ struct aws_http_header thing_name_header = {
+ .name = aws_byte_cursor_from_string(s_x509_thing_name_header),
+ .value = aws_byte_cursor_from_buf(&impl->thing_name),
+ };
+ if (aws_http_message_add_header(request, thing_name_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header accept_header = {
+ .name = aws_byte_cursor_from_string(s_x509_accept_header),
+ .value = aws_byte_cursor_from_string(s_x509_accept_header_value),
+ };
+ if (aws_http_message_add_header(request, accept_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header user_agent_header = {
+ .name = aws_byte_cursor_from_string(s_x509_user_agent_header),
+ .value = aws_byte_cursor_from_string(s_x509_user_agent_header_value),
+ };
+ if (aws_http_message_add_header(request, user_agent_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header keep_alive_header = {
+ .name = aws_byte_cursor_from_string(s_x509_h1_0_keep_alive_header),
+ .value = aws_byte_cursor_from_string(s_x509_h1_0_keep_alive_header_value),
+ };
+ if (aws_http_message_add_header(request, keep_alive_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header host_header = {
+ .name = aws_byte_cursor_from_string(s_x509_host_header),
+ .value = aws_byte_cursor_from_buf(&impl->endpoint),
+ };
+ if (aws_http_message_add_header(request, host_header)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_path(request, *request_path)) {
+ goto on_error;
+ }
+
+ if (aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"))) {
+ goto on_error;
+ }
+
+ x509_user_data->request = request;
+
+ struct aws_http_make_request_options request_options = {
+ .self_size = sizeof(request_options),
+ .on_response_headers = s_x509_on_incoming_headers_fn,
+ .on_response_header_block_done = NULL,
+ .on_response_body = s_x509_on_incoming_body_fn,
+ .on_complete = s_x509_on_stream_complete_fn,
+ .user_data = x509_user_data,
+ .request = request,
+ };
+
+ stream = impl->function_table->aws_http_connection_make_request(x509_user_data->connection, &request_options);
+
+ if (!stream) {
+ goto on_error;
+ }
+
+ if (impl->function_table->aws_http_stream_activate(stream)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ impl->function_table->aws_http_stream_release(stream);
+ aws_http_message_destroy(request);
+ x509_user_data->request = NULL;
+ return AWS_OP_ERR;
+}
+
+static void s_x509_query_credentials(struct aws_credentials_provider_x509_user_data *x509_user_data) {
+ AWS_FATAL_ASSERT(x509_user_data->connection);
+
+ struct aws_credentials_provider_x509_impl *impl = x509_user_data->x509_provider->impl;
+
+ /* "Clear" the result */
+ s_aws_credentials_provider_x509_user_data_reset_response(x509_user_data);
+
+ struct aws_byte_cursor request_path_cursor = aws_byte_cursor_from_buf(&impl->role_alias_path);
+ if (s_make_x509_http_query(x509_user_data, &request_path_cursor) == AWS_OP_ERR) {
+ s_x509_finalize_get_credentials_query(x509_user_data);
+ }
+}
+
+static void s_x509_on_acquire_connection(struct aws_http_connection *connection, int error_code, void *user_data) {
+ struct aws_credentials_provider_x509_user_data *x509_user_data = user_data;
+
+ if (connection == NULL) {
+ AWS_LOGF_WARN(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "id=%p: X509 provider failed to acquire a connection, error code %d(%s)",
+ (void *)x509_user_data->x509_provider,
+ error_code,
+ aws_error_str(error_code));
+
+ x509_user_data->error_code = error_code;
+
+ s_x509_finalize_get_credentials_query(x509_user_data);
+ return;
+ }
+
+ x509_user_data->connection = connection;
+
+ s_x509_query_credentials(x509_user_data);
+}
+
+static int s_credentials_provider_x509_get_credentials_async(
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn callback,
+ void *user_data) {
+
+ struct aws_credentials_provider_x509_impl *impl = provider->impl;
+
+ struct aws_credentials_provider_x509_user_data *wrapped_user_data =
+ s_aws_credentials_provider_x509_user_data_new(provider, callback, user_data);
+ if (wrapped_user_data == NULL) {
+ goto error;
+ }
+
+ impl->function_table->aws_http_connection_manager_acquire_connection(
+ impl->connection_manager, s_x509_on_acquire_connection, wrapped_user_data);
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ s_aws_credentials_provider_x509_user_data_destroy(wrapped_user_data);
+
+ return AWS_OP_ERR;
+}
+
+static void s_credentials_provider_x509_destroy(struct aws_credentials_provider *provider) {
+ struct aws_credentials_provider_x509_impl *impl = provider->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_byte_buf_clean_up(&impl->thing_name);
+ aws_byte_buf_clean_up(&impl->role_alias_path);
+ aws_byte_buf_clean_up(&impl->endpoint);
+ aws_tls_connection_options_clean_up(&impl->tls_connection_options);
+ /* aws_http_connection_manager_release will eventually lead to a call of s_on_connection_manager_shutdown,
+ * which releases the memory for both provider and impl. So we must free the impl-related
+ * memory first and only then call aws_http_connection_manager_release.
+ */
+ impl->function_table->aws_http_connection_manager_release(impl->connection_manager);
+
+ /* freeing the provider takes place in the shutdown callback below */
+}
+
+static struct aws_credentials_provider_vtable s_aws_credentials_provider_x509_vtable = {
+ .get_credentials = s_credentials_provider_x509_get_credentials_async,
+ .destroy = s_credentials_provider_x509_destroy,
+};
+
+static void s_on_connection_manager_shutdown(void *user_data) {
+ struct aws_credentials_provider *provider = user_data;
+
+ aws_credentials_provider_invoke_shutdown_callback(provider);
+
+ aws_mem_release(provider->allocator, provider);
+}
+
+struct aws_credentials_provider *aws_credentials_provider_new_x509(
+ struct aws_allocator *allocator,
+ const struct aws_credentials_provider_x509_options *options) {
+
+ struct aws_credentials_provider *provider = NULL;
+ struct aws_credentials_provider_x509_impl *impl = NULL;
+
+ if (options->tls_connection_options == NULL || options->thing_name.len == 0 || options->role_alias.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "To create an X.509 creds provider, a tls_connection_options, an IoT thing name and an IAM role alias are "
+ "required.");
+ goto on_error;
+ }
+
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &provider,
+ sizeof(struct aws_credentials_provider),
+ &impl,
+ sizeof(struct aws_credentials_provider_x509_impl));
+
+ if (!provider) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*provider);
+ AWS_ZERO_STRUCT(*impl);
+
+ aws_credentials_provider_init_base(provider, allocator, &s_aws_credentials_provider_x509_vtable, impl);
+
+ if (aws_tls_connection_options_copy(&impl->tls_connection_options, options->tls_connection_options)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor server_name = options->endpoint;
+ if (aws_tls_connection_options_set_server_name(&impl->tls_connection_options, allocator, &(server_name))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "(id=%p): failed to set tls connection options's server name with error %s",
+ (void *)provider,
+ aws_error_debug_str(aws_last_error()));
+ goto on_error;
+ }
+
+ struct aws_socket_options socket_options;
+ AWS_ZERO_STRUCT(socket_options);
+ socket_options.type = AWS_SOCKET_STREAM;
+ socket_options.domain = AWS_SOCKET_IPV4;
+ socket_options.connect_timeout_ms = (uint32_t)aws_timestamp_convert(
+ X509_CONNECT_TIMEOUT_DEFAULT_IN_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+
+ struct aws_http_connection_manager_options manager_options;
+ AWS_ZERO_STRUCT(manager_options);
+ manager_options.bootstrap = options->bootstrap;
+ manager_options.initial_window_size = X509_RESPONSE_SIZE_LIMIT;
+ manager_options.socket_options = &socket_options;
+ manager_options.host = options->endpoint;
+ manager_options.port = 443;
+ manager_options.max_connections = 2;
+ manager_options.shutdown_complete_callback = s_on_connection_manager_shutdown;
+ manager_options.shutdown_complete_user_data = provider;
+ manager_options.tls_connection_options = &impl->tls_connection_options;
+ manager_options.proxy_options = options->proxy_options;
+
+ impl->function_table = options->function_table;
+ if (impl->function_table == NULL) {
+ impl->function_table = g_aws_credentials_provider_http_function_table;
+ }
+
+ impl->connection_manager = impl->function_table->aws_http_connection_manager_new(allocator, &manager_options);
+ if (impl->connection_manager == NULL) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init_copy_from_cursor(&impl->thing_name, allocator, options->thing_name)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init_copy_from_cursor(&impl->endpoint, allocator, options->endpoint)) {
+ goto on_error;
+ }
+
+ /* the expected path is "/role-aliases/<your role alias>/credentials" */
+ struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_c_str("/role-aliases/");
+ if (aws_byte_buf_init_copy_from_cursor(&impl->role_alias_path, allocator, prefix_cursor)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_append_dynamic(&impl->role_alias_path, &options->role_alias)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor creds_cursor = aws_byte_cursor_from_c_str("/credentials");
+ if (aws_byte_buf_append_dynamic(&impl->role_alias_path, &creds_cursor)) {
+ goto on_error;
+ }
+
+ provider->shutdown_options = options->shutdown_options;
+
+ return provider;
+
+on_error:
+
+ aws_credentials_provider_destroy(provider);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c b/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
new file mode 100644
index 0000000000..2cb61d6547
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/credentials_utils.c
@@ -0,0 +1,294 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/private/credentials_utils.h>
+
+#include <aws/common/date_time.h>
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/common/uuid.h>
+#include <aws/http/connection.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4232)
+#endif /* _MSC_VER */
+
+static struct aws_auth_http_system_vtable s_default_function_table = {
+ .aws_http_connection_manager_new = aws_http_connection_manager_new,
+ .aws_http_connection_manager_release = aws_http_connection_manager_release,
+ .aws_http_connection_manager_acquire_connection = aws_http_connection_manager_acquire_connection,
+ .aws_http_connection_manager_release_connection = aws_http_connection_manager_release_connection,
+ .aws_http_connection_make_request = aws_http_connection_make_request,
+ .aws_http_stream_activate = aws_http_stream_activate,
+ .aws_http_stream_get_connection = aws_http_stream_get_connection,
+ .aws_http_stream_get_incoming_response_status = aws_http_stream_get_incoming_response_status,
+ .aws_http_stream_release = aws_http_stream_release,
+ .aws_http_connection_close = aws_http_connection_close,
+};
+
+const struct aws_auth_http_system_vtable *g_aws_credentials_provider_http_function_table = &s_default_function_table;
+
+void aws_credentials_query_init(
+ struct aws_credentials_query *query,
+ struct aws_credentials_provider *provider,
+ aws_on_get_credentials_callback_fn *callback,
+ void *user_data) {
+ AWS_ZERO_STRUCT(*query);
+
+ query->provider = provider;
+ query->user_data = user_data;
+ query->callback = callback;
+
+ aws_credentials_provider_acquire(provider);
+}
+
+void aws_credentials_query_clean_up(struct aws_credentials_query *query) {
+ if (query != NULL) {
+ aws_credentials_provider_release(query->provider);
+ }
+}
+
+void aws_credentials_provider_init_base(
+ struct aws_credentials_provider *provider,
+ struct aws_allocator *allocator,
+ struct aws_credentials_provider_vtable *vtable,
+ void *impl) {
+
+ provider->allocator = allocator;
+ provider->vtable = vtable;
+ provider->impl = impl;
+
+ aws_atomic_init_int(&provider->ref_count, 1);
+}
+
+void aws_credentials_provider_invoke_shutdown_callback(struct aws_credentials_provider *provider) {
+ if (provider && provider->shutdown_options.shutdown_callback) {
+ provider->shutdown_options.shutdown_callback(provider->shutdown_options.shutdown_user_data);
+ }
+}
+
+static bool s_parse_expiration_value_from_json_object(
+ struct aws_json_value *value,
+ const struct aws_parse_credentials_from_json_doc_options *options,
+ uint64_t *expiration_timepoint_in_seconds) {
+
+ if (value == NULL) {
+ AWS_LOGF_INFO(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "No credentials Expiration field in Json document.");
+ return false;
+ }
+
+ struct aws_byte_cursor expiration_cursor = {
+ .ptr = NULL,
+ .len = 0,
+ };
+
+ switch (options->expiration_format) {
+ case AWS_PCEF_STRING_ISO_8601_DATE: {
+
+ if (aws_json_value_get_string(value, &expiration_cursor)) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Unabled to extract credentials Expiration field from Json document.");
+ return false;
+ }
+
+ if (expiration_cursor.len == 0) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Parsed a credentials json document with empty expiration.");
+ return false;
+ }
+
+ struct aws_date_time expiration;
+ if (aws_date_time_init_from_str_cursor(&expiration, &expiration_cursor, AWS_DATE_FORMAT_ISO_8601)) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "credentials Expiration in Json document is not a valid ISO_8601 date string.");
+ return false;
+ }
+
+ *expiration_timepoint_in_seconds = (uint64_t)aws_date_time_as_epoch_secs(&expiration);
+ return true;
+ }
+
+ case AWS_PCEF_NUMBER_UNIX_EPOCH: {
+ double expiration_value = 0;
+ if (aws_json_value_get_number(value, &expiration_value)) {
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Unabled to extract credentials Expiration field from Json document.");
+ return false;
+ }
+
+ *expiration_timepoint_in_seconds = (uint64_t)expiration_value;
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
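+
+/*
+ * For illustration: with AWS_PCEF_STRING_ISO_8601_DATE the Expiration field is expected to be a string such as
+ * "2019-05-29T00:21:43Z", while with AWS_PCEF_NUMBER_UNIX_EPOCH it is expected to be a numeric
+ * epoch-seconds value.
+ */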
+
+struct aws_credentials *aws_parse_credentials_from_aws_json_object(
+ struct aws_allocator *allocator,
+ struct aws_json_value *document_root,
+ const struct aws_parse_credentials_from_json_doc_options *options) {
+
+ AWS_FATAL_ASSERT(allocator);
+ AWS_FATAL_ASSERT(document_root);
+ AWS_FATAL_ASSERT(options);
+ AWS_FATAL_ASSERT(options->access_key_id_name);
+ AWS_FATAL_ASSERT(options->secret_access_key_name);
+
+ if (options->token_required) {
+ AWS_FATAL_ASSERT(options->token_name);
+ }
+
+ if (options->expiration_required) {
+ AWS_FATAL_ASSERT(options->expiration_name);
+ }
+
+ struct aws_credentials *credentials = NULL;
+ struct aws_json_value *access_key_id = NULL;
+ struct aws_json_value *secrete_access_key = NULL;
+ struct aws_json_value *token = NULL;
+ struct aws_json_value *creds_expiration = NULL;
+
+ bool parse_error = true;
+
+ /*
+ * Pull out the credentials components
+ */
+ struct aws_byte_cursor access_key_id_cursor;
+ access_key_id =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str((char *)options->access_key_id_name));
+ if (!aws_json_value_is_string(access_key_id) ||
+ aws_json_value_get_string(access_key_id, &access_key_id_cursor) == AWS_OP_ERR) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse AccessKeyId from Json document.");
+ goto done;
+ }
+
+ struct aws_byte_cursor secrete_access_key_cursor;
+ secrete_access_key = aws_json_value_get_from_object(
+ document_root, aws_byte_cursor_from_c_str((char *)options->secret_access_key_name));
+ if (!aws_json_value_is_string(secrete_access_key) ||
+ aws_json_value_get_string(secrete_access_key, &secrete_access_key_cursor) == AWS_OP_ERR) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse SecretAccessKey from Json document.");
+ goto done;
+ }
+
+ struct aws_byte_cursor token_cursor;
+ if (options->token_name) {
+ token = aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str((char *)options->token_name));
+ if (!aws_json_value_is_string(token) || aws_json_value_get_string(token, &token_cursor) == AWS_OP_ERR) {
+ if (options->token_required) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse Token from Json document.");
+ goto done;
+ }
+ }
+ }
+
+ // needed to avoid uninitialized local variable error
+ uint64_t expiration_timepoint_in_seconds = UINT64_MAX;
+ if (options->expiration_name) {
+ creds_expiration =
+ aws_json_value_get_from_object(document_root, aws_byte_cursor_from_c_str((char *)options->expiration_name));
+
+ if (!s_parse_expiration_value_from_json_object(creds_expiration, options, &expiration_timepoint_in_seconds)) {
+ if (options->expiration_required) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse Expiration from Json document.");
+ goto done;
+ }
+ }
+ }
+
+ /*
+ * Build the credentials
+ */
+ if (access_key_id_cursor.len == 0 || secrete_access_key_cursor.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Parsed an unexpected credentials json document, either access key, secret key is empty.");
+ goto done;
+ }
+
+ struct aws_byte_cursor session_token_cursor;
+ AWS_ZERO_STRUCT(session_token_cursor);
+
+ if (token) {
+ aws_json_value_get_string(token, &session_token_cursor);
+ if (options->token_required && session_token_cursor.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Parsed an unexpected credentials json document with empty token.");
+ goto done;
+ }
+ }
+
+ credentials = aws_credentials_new(
+ allocator,
+ access_key_id_cursor,
+ secrete_access_key_cursor,
+ session_token_cursor,
+ expiration_timepoint_in_seconds);
+
+ if (credentials == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to allocate memory for credentials.");
+ parse_error = false;
+ goto done;
+ }
+
+done:
+
+ if (parse_error) {
+ aws_raise_error(AWS_AUTH_PROVIDER_PARSER_UNEXPECTED_RESPONSE);
+ }
+
+ return credentials;
+}
+
+struct aws_credentials *aws_parse_credentials_from_json_document(
+ struct aws_allocator *allocator,
+ const char *document,
+ const struct aws_parse_credentials_from_json_doc_options *options) {
+
+ struct aws_json_value *document_root =
+ aws_json_value_new_from_string(allocator, aws_byte_cursor_from_c_str(document));
+ if (document_root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_CREDENTIALS_PROVIDER, "Failed to parse document as Json document.");
+ return NULL;
+ }
+ struct aws_credentials *credentials = aws_parse_credentials_from_aws_json_object(allocator, document_root, options);
+ aws_json_value_destroy(document_root);
+ return credentials;
+}
+
+static bool s_is_transient_network_error(int error_code) {
+ return error_code == AWS_ERROR_HTTP_CONNECTION_CLOSED || error_code == AWS_ERROR_HTTP_SERVER_CLOSED ||
+ error_code == AWS_IO_SOCKET_CLOSED || error_code == AWS_IO_SOCKET_CONNECT_ABORTED ||
+ error_code == AWS_IO_SOCKET_CONNECTION_REFUSED || error_code == AWS_IO_SOCKET_NETWORK_DOWN ||
+ error_code == AWS_IO_DNS_QUERY_FAILED || error_code == AWS_IO_DNS_NO_ADDRESS_FOR_HOST ||
+ error_code == AWS_IO_SOCKET_TIMEOUT || error_code == AWS_IO_TLS_NEGOTIATION_TIMEOUT ||
+ error_code == AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT;
+}
+
+enum aws_retry_error_type aws_credentials_provider_compute_retry_error_type(int response_code, int error_code) {
+
+ enum aws_retry_error_type error_type = response_code >= 400 && response_code < 500
+ ? AWS_RETRY_ERROR_TYPE_CLIENT_ERROR
+ : AWS_RETRY_ERROR_TYPE_SERVER_ERROR;
+
+ if (s_is_transient_network_error(error_code)) {
+ error_type = AWS_RETRY_ERROR_TYPE_TRANSIENT;
+ }
+
+ /* server throttling us is retryable */
+ if (response_code == AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS) {
+ /* force a new connection on this. */
+ error_type = AWS_RETRY_ERROR_TYPE_THROTTLING;
+ }
+
+ return error_type;
+}
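+
+/*
+ * For illustration (status/error combinations hypothetical): with no transient network error, a 403 response
+ * maps to AWS_RETRY_ERROR_TYPE_CLIENT_ERROR and a 503 response to AWS_RETRY_ERROR_TYPE_SERVER_ERROR; an
+ * AWS_IO_SOCKET_TIMEOUT error code forces AWS_RETRY_ERROR_TYPE_TRANSIENT; and a 429 response always resolves
+ * to AWS_RETRY_ERROR_TYPE_THROTTLING.
+ */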
diff --git a/contrib/restricted/aws/aws-c-auth/source/key_derivation.c b/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
new file mode 100644
index 0000000000..662ff24021
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/key_derivation.c
@@ -0,0 +1,370 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/private/key_derivation.h>
+
+#include <aws/auth/credentials.h>
+#include <aws/cal/ecc.h>
+#include <aws/cal/hash.h>
+#include <aws/cal/hmac.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/string.h>
+
+/*
+ * The maximum number of iterations we will attempt when deriving a valid ecc key. The probability that this counter
+ * value ever gets reached is vanishingly low -- with reasonable uniformity/independence assumptions, it's
+ * approximately
+ *
+ * 2 ^ (-32 * 254)
+ */
+#define MAX_KEY_DERIVATION_COUNTER_VALUE 254
+
+/*
+ * The encoding (32-bit, big-endian) of the prefix to the FixedInputString when fed to the hmac function, per
+ * the sigv4a key derivation specification.
+ */
+AWS_STATIC_STRING_FROM_LITERAL(s_1_as_four_bytes_be, "\x00\x00\x00\x01");
+
+/*
+ * The encoding (32-bit, big-endian) of the "Length" component of the sigv4a key derivation specification
+ */
+AWS_STATIC_STRING_FROM_LITERAL(s_256_as_four_bytes_be, "\x00\x00\x01\x00");
+
+AWS_STRING_FROM_LITERAL(g_signature_type_sigv4a_http_request, "AWS4-ECDSA-P256-SHA256");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_secret_buffer_prefix, "AWS4A");
+
+/*
+ * This constructs the fixed input byte sequence of the Sigv4a key derivation specification. It also includes the
+ * value (0x01 as a 32-bit big endian value) that is pre-pended to the fixed input before invoking the hmac to
+ * generate the candidate key value.
+ *
+ * The final output looks like
+ *
+ * 0x00000001 || "AWS4-ECDSA-P256-SHA256" || 0x00 || AccessKeyId || CounterValue as uint8_t || 0x00000100 (Length)
+ *
+ * From this, we can determine the necessary buffer capacity when setting up the fixed input buffer:
+ *
+ * 4 + 22 + 1 + len(AccessKeyId) + 1 + 4 = 32 + len(AccessKeyId)
+ */
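+/*
+ * A worked example (access key length hypothetical): with a 20-byte AccessKeyId the required capacity is
+ * 4 + 22 + 1 + 20 + 1 + 4 = 52 bytes, i.e. 32 + 20.
+ */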
+static int s_aws_build_fixed_input_buffer(
+ struct aws_byte_buf *fixed_input,
+ const struct aws_credentials *credentials,
+ const uint8_t counter) {
+
+ if (counter == 0 || counter > MAX_KEY_DERIVATION_COUNTER_VALUE) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ if (!aws_byte_buf_is_valid(fixed_input)) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ aws_byte_buf_reset(fixed_input, false);
+
+ /*
+ * A placeholder value that's not actually part of the fixed input string in the spec, but is always this value
+ * and is always the first byte of the hmac-ed string.
+ */
+ struct aws_byte_cursor one_cursor = aws_byte_cursor_from_string(s_1_as_four_bytes_be);
+ if (aws_byte_buf_append_dynamic(fixed_input, &one_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor sigv4a_algorithm_cursor = aws_byte_cursor_from_string(g_signature_type_sigv4a_http_request);
+ if (aws_byte_buf_append(fixed_input, &sigv4a_algorithm_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(fixed_input, 0)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor access_key_cursor = aws_credentials_get_access_key_id(credentials);
+ if (aws_byte_buf_append(fixed_input, &access_key_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(fixed_input, counter)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor encoded_bit_length_cursor = aws_byte_cursor_from_string(s_256_as_four_bytes_be);
+ if (aws_byte_buf_append_dynamic(fixed_input, &encoded_bit_length_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * aws_be_bytes_compare_constant_time() and aws_be_bytes_add_one_constant_time() are constant-time arithmetic functions
+ * that operate on raw bytes as if they were unbounded integers in a big-endian base 255 format.
+ */
+
+/*
+ * In the following function gt and eq are updated together. After each update, the variables will be
+ * in one of the following states:
+ *
+ * (1) gt is 0, eq is 1, and from an ordering perspective, lhs == rhs, as checked "so far"
+ * (2) gt is 1, eq is 0, (lhs > rhs)
+ * (3) gt is 0, eq is 0, (lhs < rhs)
+ *
+ * States (2) and (3) are terminal states that cannot be exited since eq is 0 and is the and-wise mask of all
+ * subsequent gt updates. Similarly, once eq is zero it cannot ever become non-zero.
+ *
+ * Intuitively these ideas match the standard way of comparing magnitude equality by considering digit count and
+ * digits from most significant to least significant.
+ *
+ * Let l and r be the two digits that we are
+ * comparing between lhs and rhs. Assume 0 <= l, r <= 255, each held in a 32-bit integer.
+ *
+ * gt is maintained by the following bit trick:
+ *
+ * l > r <=>
+ * (r - l) < 0 <=>
+ * (r - l) as an int32 has the high bit set <=>
+ * ((r - l) >> 31) & 0x01 == 1
+ *
+ * eq is maintained by the following bit trick:
+ *
+ * l == r <=>
+ * l ^ r == 0 <=>
+ * (l ^ r) - 1 == -1 <=>
+ * (((l ^ r) - 1) >> 31) & 0x01 == 1
+ *
+ * We apply the volatile type modifier to attempt to prevent all early-out optimizations that a compiler might
+ * apply if it performed constraint-based reasoning on the logic. This is based on treating volatile
+ * semantically as "this value can change underneath you at any time so you always have to re-read it and cannot
+ * reason statically about program behavior when it reaches a certain value (like 0)"
+ */
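+
+/*
+ * A small worked example (digit values hypothetical): with l = 3 and r = 5, (r - l) = 2 has a clear sign bit,
+ * so gt stays 0, while (l ^ r) - 1 = 5 also has a clear sign bit, so eq drops to 0 and we land in state (3),
+ * i.e. lhs < rhs. Later digits cannot change the outcome because eq masks every subsequent gt update.
+ */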
+
+/**
+ * Compares two large unsigned integers in a raw byte format.
+ * The two operands *must* be the same size (simplifies the problem significantly).
+ *
+ * The output parameter comparison_result is set to:
+ * -1 if lhs_raw_be_bigint < rhs_raw_be_bigint
+ * 0 if lhs_raw_be_bigint == rhs_raw_be_bigint
+ * 1 if lhs_raw_be_bigint > rhs_raw_be_bigint
+ */
+int aws_be_bytes_compare_constant_time(
+ const struct aws_byte_buf *lhs_raw_be_bigint,
+ const struct aws_byte_buf *rhs_raw_be_bigint,
+ int *comparison_result) {
+
+ AWS_FATAL_PRECONDITION(aws_byte_buf_is_valid(lhs_raw_be_bigint));
+ AWS_FATAL_PRECONDITION(aws_byte_buf_is_valid(rhs_raw_be_bigint));
+
+ /*
+ * We only need to support comparing byte sequences of the same length here
+ */
+ const size_t lhs_len = lhs_raw_be_bigint->len;
+ if (lhs_len != rhs_raw_be_bigint->len) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ volatile uint8_t gt = 0;
+ volatile uint8_t eq = 1;
+
+ const uint8_t *lhs_raw_bytes = lhs_raw_be_bigint->buffer;
+ const uint8_t *rhs_raw_bytes = rhs_raw_be_bigint->buffer;
+ for (size_t i = 0; i < lhs_len; ++i) {
+ volatile int32_t lhs_digit = (int32_t)lhs_raw_bytes[i];
+ volatile int32_t rhs_digit = (int32_t)rhs_raw_bytes[i];
+
+ /*
+ * For each digit, check for a state (1) => (2) ie lhs > rhs, or (1) => (3) ie lhs < rhs transition
+ * based on comparing the two digits in constant time using the ideas explained in the giant comment
+ * block above this function.
+ */
+ gt |= ((rhs_digit - lhs_digit) >> 31) & eq;
+ eq &= (((lhs_digit ^ rhs_digit) - 1) >> 31) & 0x01;
+ }
+
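+ /* Collapse the two flags into -1/0/1: (gt, eq) of (1, 0) yields 1, (0, 1) yields 0, and (0, 0) yields -1. */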
+ *comparison_result = gt + gt + eq - 1;
+
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Adds one to a large unsigned integer represented by a sequence of bytes.
+ *
+ * A maximal value will roll over to zero. This does not affect the correctness of the users
+ * of this function.
+ */
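+/* For illustration (values hypothetical): {0x00, 0xFF} becomes {0x01, 0x00}, while {0xFF, 0xFF} rolls over to {0x00, 0x00}. */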
+void aws_be_bytes_add_one_constant_time(struct aws_byte_buf *raw_be_bigint) {
+ AWS_FATAL_PRECONDITION(aws_byte_buf_is_valid(raw_be_bigint));
+
+ const size_t byte_count = raw_be_bigint->len;
+
+ volatile uint32_t carry = 1;
+ uint8_t *raw_bytes = raw_be_bigint->buffer;
+
+ for (size_t i = 0; i < byte_count; ++i) {
+ const size_t index = byte_count - i - 1;
+
+ volatile uint32_t current_digit = raw_bytes[index];
+ current_digit += carry;
+
+ carry = (current_digit >> 8) & 0x01;
+
+ raw_bytes[index] = (uint8_t)(current_digit & 0xFF);
+ }
+}
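+
+/*
+ * Illustrative example of the carry propagation above: adding one to the big-endian value {0x01, 0xFF, 0xFF}
+ * yields {0x02, 0x00, 0x00} (the trailing 0xFF bytes become 0x00 and the carry finally bumps 0x01 to 0x02),
+ * while the all-ones value {0xFF, 0xFF, 0xFF} wraps around to {0x00, 0x00, 0x00}, matching the roll-over
+ * behavior documented above.
+ */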
+
+/* clang-format off */
+
+/* In the spec, this is N-2, where N is the order of the NIST P-256 curve */
+static uint8_t s_n_minus_2[32] = {
+ 0xFF, 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00, 0x00,
+ 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
+ 0xBC, 0xE6, 0xFA, 0xAD, 0xA7, 0x17, 0x9E, 0x84,
+ 0xF3, 0xB9, 0xCA, 0xC2, 0xFC, 0x63, 0x25, 0x4F,
+};
+
+/* clang-format on */
+
+enum aws_key_derivation_result {
+ AKDR_SUCCESS,
+ AKDR_NEXT_COUNTER,
+ AKDR_FAILURE,
+};
+
+static enum aws_key_derivation_result s_aws_derive_ecc_private_key(
+ struct aws_byte_buf *private_key_value,
+ const struct aws_byte_buf *k0) {
+ AWS_FATAL_ASSERT(k0->len == aws_ecc_key_coordinate_byte_size_from_curve_name(AWS_CAL_ECDSA_P256));
+
+ aws_byte_buf_reset(private_key_value, false);
+
+ struct aws_byte_buf s_n_minus_2_buf = {
+ .allocator = NULL,
+ .buffer = s_n_minus_2,
+ .capacity = AWS_ARRAY_SIZE(s_n_minus_2),
+ .len = AWS_ARRAY_SIZE(s_n_minus_2),
+ };
+
+ int comparison_result = 0;
+ if (aws_be_bytes_compare_constant_time(k0, &s_n_minus_2_buf, &comparison_result)) {
+ return AKDR_FAILURE;
+ }
+
+ if (comparison_result > 0) {
+ return AKDR_NEXT_COUNTER;
+ }
+
+ struct aws_byte_cursor k0_cursor = aws_byte_cursor_from_buf(k0);
+ if (aws_byte_buf_append(private_key_value, &k0_cursor)) {
+ return AKDR_FAILURE;
+ }
+
+ aws_be_bytes_add_one_constant_time(private_key_value);
+
+ return AKDR_SUCCESS;
+}
+
+static int s_init_secret_buf(
+ struct aws_byte_buf *secret_buf,
+ struct aws_allocator *allocator,
+ const struct aws_credentials *credentials) {
+
+ struct aws_byte_cursor secret_access_key_cursor = aws_credentials_get_secret_access_key(credentials);
+ size_t secret_buffer_length = secret_access_key_cursor.len + s_secret_buffer_prefix->len;
+ if (aws_byte_buf_init(secret_buf, allocator, secret_buffer_length)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor prefix_cursor = aws_byte_cursor_from_string(s_secret_buffer_prefix);
+ if (aws_byte_buf_append(secret_buf, &prefix_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append(secret_buf, &secret_access_key_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_ecc_key_pair *aws_ecc_key_pair_new_ecdsa_p256_key_from_aws_credentials(
+ struct aws_allocator *allocator,
+ const struct aws_credentials *credentials) {
+
+ if (allocator == NULL || credentials == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_ecc_key_pair *ecc_key_pair = NULL;
+
+ struct aws_byte_buf fixed_input;
+ AWS_ZERO_STRUCT(fixed_input);
+
+ struct aws_byte_buf fixed_input_hmac_digest;
+ AWS_ZERO_STRUCT(fixed_input_hmac_digest);
+
+ struct aws_byte_buf private_key_buf;
+ AWS_ZERO_STRUCT(private_key_buf);
+
+ struct aws_byte_buf secret_buf;
+ AWS_ZERO_STRUCT(secret_buf);
+
+ size_t access_key_length = aws_credentials_get_access_key_id(credentials).len;
+
+ /*
+ * This value is calculated based on the format of the fixed input string as described above at
+ * the definition of s_aws_build_fixed_input_buffer()
+ */
+ size_t required_fixed_input_capacity = 32 + access_key_length;
+ if (aws_byte_buf_init(&fixed_input, allocator, required_fixed_input_capacity)) {
+ goto done;
+ }
+
+ if (aws_byte_buf_init(&fixed_input_hmac_digest, allocator, AWS_SHA256_LEN)) {
+ goto done;
+ }
+
+ size_t key_length = aws_ecc_key_coordinate_byte_size_from_curve_name(AWS_CAL_ECDSA_P256);
+ AWS_FATAL_ASSERT(key_length == AWS_SHA256_LEN);
+ if (aws_byte_buf_init(&private_key_buf, allocator, key_length)) {
+ goto done;
+ }
+
+ if (s_init_secret_buf(&secret_buf, allocator, credentials)) {
+ goto done;
+ }
+ struct aws_byte_cursor secret_cursor = aws_byte_cursor_from_buf(&secret_buf);
+
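+    /*
+     * Candidate private keys are derived in a loop: for each counter value, rebuild the fixed input string,
+     * HMAC it with the secret, and accept the digest (plus one) as the private key only if the digest is not
+     * greater than N - 2. Out-of-range candidates bump the counter and retry, up to
+     * MAX_KEY_DERIVATION_COUNTER_VALUE attempts.
+     */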
+ uint8_t counter = 1;
+ enum aws_key_derivation_result result = AKDR_NEXT_COUNTER;
+ while ((result == AKDR_NEXT_COUNTER) && (counter <= MAX_KEY_DERIVATION_COUNTER_VALUE)) {
+ if (s_aws_build_fixed_input_buffer(&fixed_input, credentials, counter++)) {
+ break;
+ }
+
+ aws_byte_buf_reset(&fixed_input_hmac_digest, true);
+
+ struct aws_byte_cursor fixed_input_cursor = aws_byte_cursor_from_buf(&fixed_input);
+ if (aws_sha256_hmac_compute(allocator, &secret_cursor, &fixed_input_cursor, &fixed_input_hmac_digest, 0)) {
+ break;
+ }
+
+ result = s_aws_derive_ecc_private_key(&private_key_buf, &fixed_input_hmac_digest);
+ }
+
+ if (result == AKDR_SUCCESS) {
+ struct aws_byte_cursor private_key_cursor = aws_byte_cursor_from_buf(&private_key_buf);
+ ecc_key_pair = aws_ecc_key_pair_new_from_private_key(allocator, AWS_CAL_ECDSA_P256, &private_key_cursor);
+ }
+
+done:
+
+ aws_byte_buf_clean_up_secure(&secret_buf);
+ aws_byte_buf_clean_up_secure(&private_key_buf);
+ aws_byte_buf_clean_up_secure(&fixed_input_hmac_digest);
+ aws_byte_buf_clean_up(&fixed_input);
+
+ return ecc_key_pair;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/signable.c b/contrib/restricted/aws/aws-c-auth/source/signable.c
new file mode 100644
index 0000000000..9b1d526f0f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signable.c
@@ -0,0 +1,165 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signable.h>
+
+#include <aws/common/string.h>
+
+void aws_signable_destroy(struct aws_signable *signable) {
+ if (signable == NULL) {
+ return;
+ }
+
+ if (signable->vtable != NULL) {
+ signable->vtable->destroy(signable);
+ }
+}
+
+int aws_signable_get_property(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value) {
+
+ AWS_ASSERT(signable && signable->vtable && signable->vtable->get_property);
+
+ return signable->vtable->get_property(signable, name, out_value);
+}
+
+int aws_signable_get_property_list(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_property_list) {
+
+ AWS_ASSERT(signable && signable->vtable && signable->vtable->get_property_list);
+
+ return signable->vtable->get_property_list(signable, name, out_property_list);
+}
+
+int aws_signable_get_payload_stream(const struct aws_signable *signable, struct aws_input_stream **out_input_stream) {
+
+ AWS_ASSERT(signable && signable->vtable && signable->vtable->get_payload_stream);
+
+ return signable->vtable->get_payload_stream(signable, out_input_stream);
+}
+
+AWS_STRING_FROM_LITERAL(g_aws_http_headers_property_list_name, "headers");
+AWS_STRING_FROM_LITERAL(g_aws_http_query_params_property_list_name, "params");
+AWS_STRING_FROM_LITERAL(g_aws_http_method_property_name, "method");
+AWS_STRING_FROM_LITERAL(g_aws_http_uri_property_name, "uri");
+AWS_STRING_FROM_LITERAL(g_aws_signature_property_name, "signature");
+AWS_STRING_FROM_LITERAL(g_aws_previous_signature_property_name, "previous-signature");
+AWS_STRING_FROM_LITERAL(g_aws_canonical_request_property_name, "canonical-request");
+
+/*
+ * This is a simple aws_signable wrapper implementation for AWS's canonical representation of an http request
+ */
+struct aws_signable_canonical_request_impl {
+ struct aws_string *canonical_request;
+};
+
+static int s_aws_signable_canonical_request_get_property(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value) {
+
+ struct aws_signable_canonical_request_impl *impl = signable->impl;
+
+ AWS_ZERO_STRUCT(*out_value);
+
+    /*
+     * The canonical request text is the only property exposed by this wrapper
+     */
+ if (aws_string_eq(name, g_aws_canonical_request_property_name)) {
+ *out_value = aws_byte_cursor_from_string(impl->canonical_request);
+ } else {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_signable_canonical_request_get_property_list(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_list) {
+ (void)signable;
+ (void)name;
+ (void)out_list;
+
+ *out_list = NULL;
+
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+}
+
+static int s_aws_signable_canonical_request_get_payload_stream(
+ const struct aws_signable *signable,
+ struct aws_input_stream **out_input_stream) {
+ (void)signable;
+
+ *out_input_stream = NULL;
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_signable_canonical_request_destroy(struct aws_signable *signable) {
+ if (signable == NULL) {
+ return;
+ }
+
+ struct aws_signable_canonical_request_impl *impl = signable->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_string_destroy(impl->canonical_request);
+
+ aws_mem_release(signable->allocator, signable);
+}
+
+static struct aws_signable_vtable s_signable_canonical_request_vtable = {
+ .get_property = s_aws_signable_canonical_request_get_property,
+ .get_property_list = s_aws_signable_canonical_request_get_property_list,
+ .get_payload_stream = s_aws_signable_canonical_request_get_payload_stream,
+ .destroy = s_aws_signable_canonical_request_destroy,
+};
+
+struct aws_signable *aws_signable_new_canonical_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor canonical_request) {
+
+ struct aws_signable *signable = NULL;
+ struct aws_signable_canonical_request_impl *impl = NULL;
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &signable,
+ sizeof(struct aws_signable),
+ &impl,
+ sizeof(struct aws_signable_canonical_request_impl));
+
+ if (signable == NULL || impl == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*signable);
+ AWS_ZERO_STRUCT(*impl);
+
+ signable->allocator = allocator;
+ signable->vtable = &s_signable_canonical_request_vtable;
+ signable->impl = impl;
+
+ impl->canonical_request = aws_string_new_from_array(allocator, canonical_request.ptr, canonical_request.len);
+ if (impl->canonical_request == NULL) {
+ goto on_error;
+ }
+
+ return signable;
+
+on_error:
+
+ aws_signable_destroy(signable);
+
+ return NULL;
+}
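+
+/*
+ * Minimal usage sketch for the canonical-request wrapper above (illustrative; error handling omitted, and
+ * canonical_request_text stands in for the caller's canonical request string):
+ *
+ *   struct aws_byte_cursor canonical = aws_byte_cursor_from_c_str(canonical_request_text);
+ *   struct aws_signable *signable = aws_signable_new_canonical_request(allocator, canonical);
+ *
+ *   struct aws_byte_cursor value;
+ *   aws_signable_get_property(signable, g_aws_canonical_request_property_name, &value);
+ *
+ *   aws_signable_destroy(signable);
+ */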
diff --git a/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c b/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
new file mode 100644
index 0000000000..302ce9d3a1
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signable_chunk.c
@@ -0,0 +1,116 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signable.h>
+#include <aws/common/string.h>
+#include <aws/io/stream.h>
+
+/*
+ * This is a simple aws_signable wrapper implementation for an s3 chunk
+ */
+struct aws_signable_chunk_impl {
+ struct aws_input_stream *chunk_data;
+ struct aws_string *previous_signature;
+};
+
+static int s_aws_signable_chunk_get_property(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value) {
+
+ struct aws_signable_chunk_impl *impl = signable->impl;
+
+ AWS_ZERO_STRUCT(*out_value);
+
+    /*
+     * The previous signature is the only property exposed by this wrapper
+     */
+ if (aws_string_eq(name, g_aws_previous_signature_property_name)) {
+ *out_value = aws_byte_cursor_from_string(impl->previous_signature);
+ } else {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_signable_chunk_get_property_list(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_list) {
+ (void)signable;
+ (void)name;
+ (void)out_list;
+
+ return AWS_OP_ERR;
+}
+
+static int s_aws_signable_chunk_get_payload_stream(
+ const struct aws_signable *signable,
+ struct aws_input_stream **out_input_stream) {
+
+ struct aws_signable_chunk_impl *impl = signable->impl;
+ *out_input_stream = impl->chunk_data;
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_signable_chunk_destroy(struct aws_signable *signable) {
+ if (signable == NULL) {
+ return;
+ }
+
+ struct aws_signable_chunk_impl *impl = signable->impl;
+ if (impl == NULL) {
+ return;
+ }
+ aws_input_stream_release(impl->chunk_data);
+ aws_string_destroy(impl->previous_signature);
+
+ aws_mem_release(signable->allocator, signable);
+}
+
+static struct aws_signable_vtable s_signable_chunk_vtable = {
+ .get_property = s_aws_signable_chunk_get_property,
+ .get_property_list = s_aws_signable_chunk_get_property_list,
+ .get_payload_stream = s_aws_signable_chunk_get_payload_stream,
+ .destroy = s_aws_signable_chunk_destroy,
+};
+
+struct aws_signable *aws_signable_new_chunk(
+ struct aws_allocator *allocator,
+ struct aws_input_stream *chunk_data,
+ struct aws_byte_cursor previous_signature) {
+
+ struct aws_signable *signable = NULL;
+ struct aws_signable_chunk_impl *impl = NULL;
+ aws_mem_acquire_many(
+ allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_chunk_impl));
+
+ if (signable == NULL || impl == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*signable);
+ AWS_ZERO_STRUCT(*impl);
+
+ signable->allocator = allocator;
+ signable->vtable = &s_signable_chunk_vtable;
+ signable->impl = impl;
+
+ impl->chunk_data = aws_input_stream_acquire(chunk_data);
+ impl->previous_signature = aws_string_new_from_array(allocator, previous_signature.ptr, previous_signature.len);
+ if (impl->previous_signature == NULL) {
+ goto on_error;
+ }
+
+ return signable;
+
+on_error:
+
+ aws_signable_destroy(signable);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c b/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
new file mode 100644
index 0000000000..1236026052
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signable_http_request.c
@@ -0,0 +1,130 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signable.h>
+#include <aws/common/string.h>
+#include <aws/http/request_response.h>
+
+/*
+ * This is a simple aws_signable wrapper implementation for the aws_http_message struct
+ */
+struct aws_signable_http_request_impl {
+ struct aws_http_message *request;
+ struct aws_array_list headers;
+};
+
+static int s_aws_signable_http_request_get_property(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value) {
+
+ struct aws_signable_http_request_impl *impl = signable->impl;
+
+ AWS_ZERO_STRUCT(*out_value);
+
+ /*
+ * uri and method can be queried directly from the wrapper request
+ */
+ if (aws_string_eq(name, g_aws_http_uri_property_name)) {
+ aws_http_message_get_request_path(impl->request, out_value);
+ } else if (aws_string_eq(name, g_aws_http_method_property_name)) {
+ aws_http_message_get_request_method(impl->request, out_value);
+ } else {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_signable_http_request_get_property_list(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_list) {
+
+ struct aws_signable_http_request_impl *impl = signable->impl;
+
+ *out_list = NULL;
+
+ if (aws_string_eq(name, g_aws_http_headers_property_list_name)) {
+ *out_list = &impl->headers;
+ } else {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_signable_http_request_get_payload_stream(
+ const struct aws_signable *signable,
+ struct aws_input_stream **out_input_stream) {
+
+ struct aws_signable_http_request_impl *impl = signable->impl;
+ *out_input_stream = aws_http_message_get_body_stream(impl->request);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_signable_http_request_destroy(struct aws_signable *signable) {
+ if (signable == NULL) {
+ return;
+ }
+
+ struct aws_signable_http_request_impl *impl = signable->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_array_list_clean_up(&impl->headers);
+ aws_mem_release(signable->allocator, signable);
+}
+
+static struct aws_signable_vtable s_signable_http_request_vtable = {
+ .get_property = s_aws_signable_http_request_get_property,
+ .get_property_list = s_aws_signable_http_request_get_property_list,
+ .get_payload_stream = s_aws_signable_http_request_get_payload_stream,
+ .destroy = s_aws_signable_http_request_destroy,
+};
+
+struct aws_signable *aws_signable_new_http_request(struct aws_allocator *allocator, struct aws_http_message *request) {
+
+ struct aws_signable *signable = NULL;
+ struct aws_signable_http_request_impl *impl = NULL;
+ aws_mem_acquire_many(
+ allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_http_request_impl));
+
+ AWS_ZERO_STRUCT(*signable);
+ AWS_ZERO_STRUCT(*impl);
+
+ signable->allocator = allocator;
+ signable->vtable = &s_signable_http_request_vtable;
+ signable->impl = impl;
+
+    /*
+     * Copy the headers into the signable's property list; aws_http_header and aws_signable_property_list_pair
+     * are different types, so each entry is converted explicitly
+     */
+ size_t header_count = aws_http_message_get_header_count(request);
+ if (aws_array_list_init_dynamic(
+ &impl->headers, allocator, header_count, sizeof(struct aws_signable_property_list_pair))) {
+ goto on_error;
+ }
+
+ for (size_t i = 0; i < header_count; ++i) {
+ struct aws_http_header header;
+ aws_http_message_get_header(request, &header, i);
+
+ struct aws_signable_property_list_pair property = {.name = header.name, .value = header.value};
+ aws_array_list_push_back(&impl->headers, &property);
+ }
+
+ impl->request = request;
+
+ return signable;
+
+on_error:
+
+ aws_signable_destroy(signable);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c b/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
new file mode 100644
index 0000000000..b5742643de
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signable_trailer.c
@@ -0,0 +1,138 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signable.h>
+#include <aws/common/string.h>
+#include <aws/http/request_response.h>
+
+struct aws_signable_trailing_headers_impl {
+ struct aws_http_headers *trailing_headers;
+ struct aws_array_list headers;
+ struct aws_string *previous_signature;
+};
+
+static int s_aws_signable_trailing_headers_get_property(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_byte_cursor *out_value) {
+
+ struct aws_signable_trailing_headers_impl *impl = signable->impl;
+
+ AWS_ZERO_STRUCT(*out_value);
+
+    /*
+     * The previous signature is the only property exposed by this wrapper
+     */
+ if (aws_string_eq(name, g_aws_previous_signature_property_name)) {
+ *out_value = aws_byte_cursor_from_string(impl->previous_signature);
+ } else {
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_signable_trailing_headers_get_property_list(
+ const struct aws_signable *signable,
+ const struct aws_string *name,
+ struct aws_array_list **out_list) {
+
+ struct aws_signable_trailing_headers_impl *impl = signable->impl;
+
+ *out_list = NULL;
+
+ if (aws_string_eq(name, g_aws_http_headers_property_list_name)) {
+ *out_list = &impl->headers;
+ } else {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_signable_trailing_headers_get_payload_stream(
+ const struct aws_signable *signable,
+ struct aws_input_stream **out_input_stream) {
+ (void)signable;
+ *out_input_stream = NULL;
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_signable_trailing_headers_destroy(struct aws_signable *signable) {
+ if (signable == NULL) {
+ return;
+ }
+
+ struct aws_signable_trailing_headers_impl *impl = signable->impl;
+ if (impl == NULL) {
+ return;
+ }
+
+ aws_http_headers_release(impl->trailing_headers);
+ aws_string_destroy(impl->previous_signature);
+ aws_array_list_clean_up(&impl->headers);
+ aws_mem_release(signable->allocator, signable);
+}
+
+static struct aws_signable_vtable s_signable_trailing_headers_vtable = {
+ .get_property = s_aws_signable_trailing_headers_get_property,
+ .get_property_list = s_aws_signable_trailing_headers_get_property_list,
+ .get_payload_stream = s_aws_signable_trailing_headers_get_payload_stream,
+ .destroy = s_aws_signable_trailing_headers_destroy,
+};
+
+struct aws_signable *aws_signable_new_trailing_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *trailing_headers,
+ struct aws_byte_cursor previous_signature) {
+
+ struct aws_signable *signable = NULL;
+ struct aws_signable_trailing_headers_impl *impl = NULL;
+ aws_mem_acquire_many(
+ allocator, 2, &signable, sizeof(struct aws_signable), &impl, sizeof(struct aws_signable_trailing_headers_impl));
+
+ AWS_ZERO_STRUCT(*signable);
+ AWS_ZERO_STRUCT(*impl);
+
+ /* Keep the headers alive. We're referencing the underlying strings. */
+ aws_http_headers_acquire(trailing_headers);
+ impl->trailing_headers = trailing_headers;
+ signable->allocator = allocator;
+ signable->vtable = &s_signable_trailing_headers_vtable;
+ signable->impl = impl;
+
+    /*
+     * Convert the trailing headers into an aws_signable_property_list_pair array list; the two header
+     * types differ, so each entry is converted explicitly.
+     */
+ size_t header_count = aws_http_headers_count(trailing_headers);
+ if (aws_array_list_init_dynamic(
+ &impl->headers, allocator, header_count, sizeof(struct aws_signable_property_list_pair))) {
+ goto on_error;
+ }
+
+ for (size_t i = 0; i < header_count; ++i) {
+ struct aws_http_header header;
+ aws_http_headers_get_index(trailing_headers, i, &header);
+
+ struct aws_signable_property_list_pair property = {.name = header.name, .value = header.value};
+ aws_array_list_push_back(&impl->headers, &property);
+ }
+
+ impl->previous_signature = aws_string_new_from_array(allocator, previous_signature.ptr, previous_signature.len);
+ if (impl->previous_signature == NULL) {
+ goto on_error;
+ }
+
+ return signable;
+
+on_error:
+
+ aws_signable_destroy(signable);
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/signing.c b/contrib/restricted/aws/aws-c-auth/source/signing.c
new file mode 100644
index 0000000000..1182de0fb5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signing.c
@@ -0,0 +1,183 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signing.h>
+
+#include <aws/auth/credentials.h>
+#include <aws/auth/private/aws_signing.h>
+#include <aws/io/uri.h>
+
+/*
+ * Aws signing implementation
+ */
+
+static int s_aws_last_error_or_unknown(void) {
+ int last_error = aws_last_error();
+ if (last_error == AWS_ERROR_SUCCESS) {
+ last_error = AWS_ERROR_UNKNOWN;
+ }
+
+ return last_error;
+}
+
+static void s_perform_signing(struct aws_signing_state_aws *state) {
+ struct aws_signing_result *result = NULL;
+
+ if (state->error_code != AWS_ERROR_SUCCESS) {
+ goto done;
+ }
+
+ if (aws_credentials_is_anonymous(state->config.credentials)) {
+ result = &state->result;
+ goto done;
+ }
+
+ if (aws_signing_build_canonical_request(state)) {
+ state->error_code = s_aws_last_error_or_unknown();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Signing failed to build canonical request via algorithm %s, error %d(%s)",
+ (void *)state->signable,
+ aws_signing_algorithm_to_string(state->config.algorithm),
+ state->error_code,
+ aws_error_debug_str(state->error_code));
+ goto done;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Signing successfully built canonical request for algorithm %s, with contents \n" PRInSTR "\n",
+ (void *)state->signable,
+ aws_signing_algorithm_to_string(state->config.algorithm),
+ AWS_BYTE_BUF_PRI(state->canonical_request));
+
+ if (aws_signing_build_string_to_sign(state)) {
+ state->error_code = s_aws_last_error_or_unknown();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Signing failed to build string-to-sign via algorithm %s, error %d(%s)",
+ (void *)state->signable,
+ aws_signing_algorithm_to_string(state->config.algorithm),
+ state->error_code,
+ aws_error_debug_str(state->error_code));
+ goto done;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Signing successfully built string-to-sign via algorithm %s, with contents \n" PRInSTR "\n",
+ (void *)state->signable,
+ aws_signing_algorithm_to_string(state->config.algorithm),
+ AWS_BYTE_BUF_PRI(state->string_to_sign));
+
+ if (aws_signing_build_authorization_value(state)) {
+ state->error_code = s_aws_last_error_or_unknown();
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Signing failed to build final authorization value via algorithm %s",
+ (void *)state->signable,
+ aws_signing_algorithm_to_string(state->config.algorithm));
+ goto done;
+ }
+
+ result = &state->result;
+
+done:
+
+ state->on_complete(result, state->error_code, state->userdata);
+ aws_signing_state_destroy(state);
+}
+
+static void s_aws_signing_on_get_credentials(struct aws_credentials *credentials, int error_code, void *user_data) {
+ struct aws_signing_state_aws *state = user_data;
+
+ if (!credentials) {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ /* Log the credentials sourcing error */
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Credentials Provider failed to source credentials with error %d(%s)",
+ (void *)state->signable,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ state->error_code = AWS_AUTH_SIGNING_NO_CREDENTIALS;
+ } else {
+ if (state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC &&
+ !aws_credentials_is_anonymous(credentials)) {
+
+ state->config.credentials = aws_credentials_new_ecc_from_aws_credentials(state->allocator, credentials);
+ if (state->config.credentials == NULL) {
+ state->error_code = AWS_AUTH_SIGNING_NO_CREDENTIALS;
+ }
+ } else {
+ state->config.credentials = credentials;
+ aws_credentials_acquire(credentials);
+ }
+ }
+
+ s_perform_signing(state);
+}
+
+int aws_sign_request_aws(
+ struct aws_allocator *allocator,
+ const struct aws_signable *signable,
+ const struct aws_signing_config_base *base_config,
+ aws_signing_complete_fn *on_complete,
+ void *userdata) {
+
+ AWS_PRECONDITION(base_config);
+
+ if (base_config->config_type != AWS_SIGNING_CONFIG_AWS) {
+ return aws_raise_error(AWS_AUTH_SIGNING_MISMATCHED_CONFIGURATION);
+ }
+
+ const struct aws_signing_config_aws *config = (void *)base_config;
+
+ struct aws_signing_state_aws *signing_state =
+ aws_signing_state_new(allocator, config, signable, on_complete, userdata);
+ if (!signing_state) {
+ return AWS_OP_ERR;
+ }
+
+ if (signing_state->config.algorithm == AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC) {
+ if (signing_state->config.credentials != NULL &&
+ !aws_credentials_is_anonymous(signing_state->config.credentials)) {
+ /*
+ * If these are regular credentials, try to derive ecc-based ones
+ */
+ if (aws_credentials_get_ecc_key_pair(signing_state->config.credentials) == NULL) {
+ struct aws_credentials *ecc_credentials =
+ aws_credentials_new_ecc_from_aws_credentials(allocator, signing_state->config.credentials);
+ aws_credentials_release(signing_state->config.credentials);
+ signing_state->config.credentials = ecc_credentials;
+ if (signing_state->config.credentials == NULL) {
+ goto on_error;
+ }
+ }
+ }
+ }
+
+ bool can_sign_immediately = signing_state->config.credentials != NULL;
+
+ if (can_sign_immediately) {
+ s_perform_signing(signing_state);
+ } else {
+ if (aws_credentials_provider_get_credentials(
+ signing_state->config.credentials_provider, s_aws_signing_on_get_credentials, signing_state)) {
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_signing_state_destroy(signing_state);
+ return AWS_OP_ERR;
+}
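+
+/*
+ * Minimal calling sketch (illustrative; assumes the caller already has a signable and a populated
+ * struct aws_signing_config_aws named "config" whose config_type is AWS_SIGNING_CONFIG_AWS):
+ *
+ *   static void s_on_signing_complete(struct aws_signing_result *result, int error_code, void *userdata) {
+ *       // on success, apply the result to the request, e.g. via aws_apply_signing_result_to_http_request()
+ *   }
+ *
+ *   aws_sign_request_aws(
+ *       allocator, signable, (struct aws_signing_config_base *)&config, s_on_signing_complete, userdata);
+ */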
diff --git a/contrib/restricted/aws/aws-c-auth/source/signing_config.c b/contrib/restricted/aws/aws-c-auth/source/signing_config.c
new file mode 100644
index 0000000000..c0b6b0f2dd
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signing_config.c
@@ -0,0 +1,133 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signing_config.h>
+
+#include <aws/auth/credentials.h>
+
+const struct aws_byte_cursor g_aws_signed_body_value_empty_sha256 =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855");
+
+const struct aws_byte_cursor g_aws_signed_body_value_unsigned_payload =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("UNSIGNED-PAYLOAD");
+
+const struct aws_byte_cursor g_aws_signed_body_value_streaming_unsigned_payload_trailer =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-UNSIGNED-PAYLOAD-TRAILER");
+
+const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-HMAC-SHA256-PAYLOAD");
+
+const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_payload_trailer =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER");
+
+const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD");
+
+const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_ecdsa_p256_sha256_payload_trailer =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-ECDSA-P256-SHA256-PAYLOAD-TRAILER");
+
+const struct aws_byte_cursor g_aws_signed_body_value_streaming_aws4_hmac_sha256_events =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("STREAMING-AWS4-HMAC-SHA256-EVENTS");
+
+const char *aws_signing_algorithm_to_string(enum aws_signing_algorithm algorithm) {
+ switch (algorithm) {
+ case AWS_SIGNING_ALGORITHM_V4:
+ return "SigV4";
+
+ case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC:
+ return "SigV4Asymmetric";
+
+ default:
+ break;
+ }
+
+ return "Unknown";
+}
+
+int aws_validate_aws_signing_config_aws(const struct aws_signing_config_aws *config) {
+ if (config == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "AWS signing config is null");
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ if (config->signature_type == AWS_ST_HTTP_REQUEST_EVENT && config->algorithm != AWS_SIGNING_ALGORITHM_V4) {
+ /*
+ * Not supported yet.
+ *
+ * Need to determine if the Transcribe service supports Sigv4a and how to test it.
+ * Transcribe's examples are insufficient.
+ */
+        AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "(id=%p) Event signing is currently only supported for Sigv4", (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ if (config->signature_type != AWS_ST_HTTP_REQUEST_HEADERS &&
+ config->signature_type != AWS_ST_HTTP_REQUEST_QUERY_PARAMS) {
+ /*
+ * If we're not signing the full request then it's critical that the credentials we're using are the same
+ * credentials used on the original request. If we're using a provider to fetch credentials then that is
+ * not guaranteed. For now, force users to always pass in credentials when signing events or chunks.
+ *
+ * The correct long-term solution would be to add a way to pass the credentials used in the initial
+ * signing back to the user in the completion callback. Then the user could supply those credentials
+ * to all subsequent chunk/event signings. The fact that we don't do that yet doesn't invalidate this check.
+ */
+ if (config->credentials == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Chunk/event signing config must contain explicit credentials",
+ (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+ }
+
+ if (config->region.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "(id=%p) Signing config is missing a region identifier", (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ if (config->service.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_AUTH_SIGNING, "(id=%p) Signing config is missing a service identifier", (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ switch (config->algorithm) {
+ case AWS_SIGNING_ALGORITHM_V4:
+ if (config->credentials == NULL && config->credentials_provider == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Sigv4 signing config is missing a credentials provider or credentials",
+ (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ if (config->credentials != NULL && !aws_credentials_is_anonymous(config->credentials)) {
+ if (aws_credentials_get_access_key_id(config->credentials).len == 0 ||
+ aws_credentials_get_secret_access_key(config->credentials).len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Sigv4 signing configured with invalid credentials",
+ (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CREDENTIALS);
+ }
+ }
+ break;
+
+ case AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC:
+ if (config->credentials == NULL && config->credentials_provider == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_SIGNING,
+ "(id=%p) Sigv4 asymmetric signing config is missing a credentials provider or credentials",
+ (void *)config);
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+ break;
+
+ default:
+ return aws_raise_error(AWS_AUTH_SIGNING_INVALID_CONFIGURATION);
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/source/signing_result.c b/contrib/restricted/aws/aws-c-auth/source/signing_result.c
new file mode 100644
index 0000000000..b0f8eb909f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/signing_result.c
@@ -0,0 +1,247 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signing_result.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/string.h>
+
+#define INITIAL_SIGNING_RESULT_PROPERTIES_SIZE 10
+#define INITIAL_SIGNING_RESULT_PROPERTY_LISTS_TABLE_SIZE 10
+#define INITIAL_SIGNING_RESULT_PROPERTY_LIST_SIZE 10
+
+static void s_aws_signing_result_property_clean_up(struct aws_signing_result_property *pair) {
+ aws_string_destroy(pair->name);
+ aws_string_destroy(pair->value);
+}
+
+static void s_aws_hash_callback_property_list_destroy(void *value) {
+ struct aws_array_list *property_list = value;
+
+ size_t property_count = aws_array_list_length(property_list);
+ for (size_t i = 0; i < property_count; ++i) {
+ struct aws_signing_result_property property;
+ AWS_ZERO_STRUCT(property);
+
+ if (aws_array_list_get_at(property_list, &property, i)) {
+ continue;
+ }
+
+ s_aws_signing_result_property_clean_up(&property);
+ }
+
+ struct aws_allocator *allocator = property_list->alloc;
+ aws_array_list_clean_up(property_list);
+
+ aws_mem_release(allocator, property_list);
+}
+
+int aws_signing_result_init(struct aws_signing_result *result, struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*result);
+
+ result->allocator = allocator;
+ if (aws_hash_table_init(
+ &result->properties,
+ allocator,
+ INITIAL_SIGNING_RESULT_PROPERTIES_SIZE,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ aws_hash_callback_string_destroy) ||
+ aws_hash_table_init(
+ &result->property_lists,
+ allocator,
+ INITIAL_SIGNING_RESULT_PROPERTY_LISTS_TABLE_SIZE,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ s_aws_hash_callback_property_list_destroy)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_signing_result_clean_up(result);
+
+ return AWS_OP_ERR;
+}
+
+void aws_signing_result_clean_up(struct aws_signing_result *result) {
+ aws_hash_table_clean_up(&result->properties);
+ aws_hash_table_clean_up(&result->property_lists);
+}
+
+int aws_signing_result_set_property(
+ struct aws_signing_result *result,
+ const struct aws_string *property_name,
+ const struct aws_byte_cursor *property_value) {
+
+ struct aws_string *name = NULL;
+ struct aws_string *value = NULL;
+
+ name = aws_string_new_from_string(result->allocator, property_name);
+ value = aws_string_new_from_array(result->allocator, property_value->ptr, property_value->len);
+ if (name == NULL || value == NULL) {
+ goto on_error;
+ }
+
+ if (aws_hash_table_put(&result->properties, name, value, NULL)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_string_destroy(name);
+ aws_string_destroy(value);
+
+ return AWS_OP_ERR;
+}
+
+int aws_signing_result_get_property(
+ const struct aws_signing_result *result,
+ const struct aws_string *property_name,
+ struct aws_string **out_property_value) {
+
+ struct aws_hash_element *element = NULL;
+ aws_hash_table_find(&result->properties, property_name, &element);
+
+ *out_property_value = NULL;
+ if (element != NULL) {
+ *out_property_value = element->value;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_array_list *s_get_or_create_property_list(
+ struct aws_signing_result *result,
+ const struct aws_string *list_name) {
+ struct aws_hash_element *element = NULL;
+ aws_hash_table_find(&result->property_lists, list_name, &element);
+
+ if (element != NULL) {
+ return element->value;
+ }
+
+ struct aws_array_list *properties = aws_mem_acquire(result->allocator, sizeof(struct aws_array_list));
+ if (properties == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*properties);
+ struct aws_string *name_copy = aws_string_new_from_string(result->allocator, list_name);
+ if (name_copy == NULL) {
+ goto on_error;
+ }
+
+ if (aws_array_list_init_dynamic(
+ properties,
+ result->allocator,
+ INITIAL_SIGNING_RESULT_PROPERTY_LIST_SIZE,
+ sizeof(struct aws_signing_result_property))) {
+ goto on_error;
+ }
+
+ if (aws_hash_table_put(&result->property_lists, name_copy, properties, NULL)) {
+ goto on_error;
+ }
+
+ return properties;
+
+on_error:
+
+ aws_string_destroy(name_copy);
+ aws_array_list_clean_up(properties);
+ aws_mem_release(result->allocator, properties);
+
+ return NULL;
+}
+
+int aws_signing_result_append_property_list(
+ struct aws_signing_result *result,
+ const struct aws_string *list_name,
+ const struct aws_byte_cursor *property_name,
+ const struct aws_byte_cursor *property_value) {
+
+ struct aws_array_list *properties = s_get_or_create_property_list(result, list_name);
+ if (properties == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_string *name = NULL;
+ struct aws_string *value = NULL;
+
+    name = aws_string_new_from_array(result->allocator, property_name->ptr, property_name->len);
+    value = aws_string_new_from_array(result->allocator, property_value->ptr, property_value->len);
+    if (name == NULL || value == NULL) {
+        goto on_error;
+    }
+
+ struct aws_signing_result_property property;
+ property.name = name;
+ property.value = value;
+
+ if (aws_array_list_push_back(properties, &property)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_string_destroy(name);
+ aws_string_destroy(value);
+
+ return AWS_OP_ERR;
+}
+
+void aws_signing_result_get_property_list(
+ const struct aws_signing_result *result,
+ const struct aws_string *list_name,
+ struct aws_array_list **out_list) {
+
+ *out_list = NULL;
+
+ struct aws_hash_element *element = NULL;
+ aws_hash_table_find(&result->property_lists, list_name, &element);
+
+ if (element != NULL) {
+ *out_list = element->value;
+ }
+}
+
+void aws_signing_result_get_property_value_in_property_list(
+ const struct aws_signing_result *result,
+ const struct aws_string *list_name,
+ const struct aws_string *property_name,
+ struct aws_string **out_value) {
+
+ *out_value = NULL;
+
+ struct aws_array_list *property_list = NULL;
+ aws_signing_result_get_property_list(result, list_name, &property_list);
+ if (property_list == NULL) {
+ return;
+ }
+
+ size_t pair_count = aws_array_list_length(property_list);
+ for (size_t i = 0; i < pair_count; ++i) {
+ struct aws_signing_result_property pair;
+ AWS_ZERO_STRUCT(pair);
+ if (aws_array_list_get_at(property_list, &pair, i)) {
+ continue;
+ }
+
+ if (pair.name == NULL) {
+ continue;
+ }
+
+ if (aws_string_eq_ignore_case(property_name, pair.name)) {
+ *out_value = pair.value;
+ break;
+ }
+ }
+}
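+
+/*
+ * Typical lifecycle of an aws_signing_result (illustrative; error handling omitted, and the *_cursor
+ * variables are byte cursors supplied by the caller):
+ *
+ *   struct aws_signing_result result;
+ *   aws_signing_result_init(&result, allocator);
+ *
+ *   aws_signing_result_set_property(&result, g_aws_signature_property_name, &signature_cursor);
+ *   aws_signing_result_append_property_list(
+ *       &result, g_aws_http_headers_property_list_name, &header_name_cursor, &header_value_cursor);
+ *
+ *   struct aws_string *signature = NULL;
+ *   aws_signing_result_get_property(&result, g_aws_signature_property_name, &signature);
+ *
+ *   aws_signing_result_clean_up(&result);
+ */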
diff --git a/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c b/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
new file mode 100644
index 0000000000..6e73f2e7f9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/source/sigv4_http_request.c
@@ -0,0 +1,168 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/private/sigv4_http_request.h>
+
+#include <aws/auth/credentials.h>
+#include <aws/auth/signable.h>
+#include <aws/auth/signing.h>
+#include <aws/auth/signing_result.h>
+#include <aws/common/condition_variable.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/request_response.h>
+#include <aws/io/uri.h>
+
+#if defined(_MSC_VER)
+# pragma warning(disable : 4204)
+#endif /* _MSC_VER */
+
+#define DEFAULT_QUERY_PARAM_COUNT 10
+
+/*
+ * Uses the signing result to rebuild the request's URI. If the signing was not done via
+ * query params, then this ends up doing nothing.
+ */
+static int s_build_request_uri(
+ struct aws_allocator *allocator,
+ struct aws_http_message *request,
+ const struct aws_signing_result *signing_result) {
+
+ /* first let's see if we need to do anything at all */
+ struct aws_array_list *result_param_list = NULL;
+ aws_signing_result_get_property_list(
+ signing_result, g_aws_http_query_params_property_list_name, &result_param_list);
+ if (result_param_list == NULL) {
+ return AWS_OP_SUCCESS;
+ }
+
+ /*
+ * There are query params to apply. Use the following algorithm:
+ *
+ * (1) Take the old uri and parse it into a URI structure
+ * (2) Make a new URI builder and add the old URI's components to it
+ * (3) Add the signing query params to the builder
+ * (4) Use the builder to make a new URI
+ */
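+    /*
+     * For example (illustrative values): an original path of "/object?versionId=1" combined with a signing
+     * result containing the query param "X-Amz-Signature=abcd" is rebuilt as
+     * "/object?versionId=1&X-Amz-Signature=abcd".
+     */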
+ int result = AWS_OP_ERR;
+ size_t signed_query_param_count = aws_array_list_length(result_param_list);
+
+ struct aws_uri old_uri;
+ AWS_ZERO_STRUCT(old_uri);
+
+ struct aws_uri new_uri;
+ AWS_ZERO_STRUCT(new_uri);
+
+ struct aws_uri_builder_options new_uri_builder;
+ AWS_ZERO_STRUCT(new_uri_builder);
+
+ struct aws_array_list query_params;
+ AWS_ZERO_STRUCT(query_params);
+
+ struct aws_byte_cursor old_path;
+ aws_http_message_get_request_path(request, &old_path);
+
+ /* start with the old uri and parse it */
+ if (aws_uri_init_parse(&old_uri, allocator, &old_path)) {
+ goto done;
+ }
+
+ /* pull out the old query params */
+ if (aws_array_list_init_dynamic(
+ &query_params, allocator, DEFAULT_QUERY_PARAM_COUNT, sizeof(struct aws_uri_param))) {
+ goto done;
+ }
+
+ if (aws_uri_query_string_params(&old_uri, &query_params)) {
+ goto done;
+ }
+
+ /* initialize a builder for the new uri matching the old uri */
+ new_uri_builder.host_name = old_uri.host_name;
+ new_uri_builder.path = old_uri.path;
+ new_uri_builder.port = old_uri.port;
+ new_uri_builder.scheme = old_uri.scheme;
+ new_uri_builder.query_params = &query_params;
+
+ /* and now add any signing query params */
+ for (size_t i = 0; i < signed_query_param_count; ++i) {
+ struct aws_signing_result_property source_param;
+ if (aws_array_list_get_at(result_param_list, &source_param, i)) {
+ goto done;
+ }
+
+ struct aws_uri_param signed_param;
+ signed_param.key = aws_byte_cursor_from_string(source_param.name);
+ signed_param.value = aws_byte_cursor_from_string(source_param.value);
+
+ aws_array_list_push_back(&query_params, &signed_param);
+ }
+
+ /* create the new uri */
+ if (aws_uri_init_from_builder_options(&new_uri, allocator, &new_uri_builder)) {
+ goto done;
+ }
+
+ /* copy the full string */
+ struct aws_byte_cursor new_uri_cursor = aws_byte_cursor_from_buf(&new_uri.uri_str);
+ if (aws_http_message_set_request_path(request, new_uri_cursor)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_array_list_clean_up(&query_params);
+
+ aws_uri_clean_up(&new_uri);
+ aws_uri_clean_up(&old_uri);
+
+ return result;
+}
+
+/*
+ * Takes a mutable http request and adds all the additional query params and/or headers generated by the
+ * signing process.
+ */
+int aws_apply_signing_result_to_http_request(
+ struct aws_http_message *request,
+ struct aws_allocator *allocator,
+ const struct aws_signing_result *result) {
+
+ /* uri/query params */
+ if (s_build_request_uri(allocator, request, result)) {
+ return AWS_OP_ERR;
+ }
+
+ /* headers */
+ size_t signing_header_count = 0;
+ struct aws_array_list *result_header_list = NULL;
+ aws_signing_result_get_property_list(result, g_aws_http_headers_property_list_name, &result_header_list);
+ if (result_header_list != NULL) {
+ signing_header_count = aws_array_list_length(result_header_list);
+ }
+
+ for (size_t i = 0; i < signing_header_count; ++i) {
+ struct aws_signing_result_property source_header;
+ AWS_ZERO_STRUCT(source_header);
+
+ if (aws_array_list_get_at(result_header_list, &source_header, i)) {
+ return AWS_OP_ERR;
+ }
+
+ if (source_header.name == NULL || source_header.value == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_http_header dest_header = {
+ .name = aws_byte_cursor_from_string(source_header.name),
+ .value = aws_byte_cursor_from_string(source_header.value),
+ };
+ aws_http_message_add_header(request, dest_header);
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-auth/ya.make b/contrib/restricted/aws/aws-c-auth/ya.make
new file mode 100644
index 0000000000..a0f2151cee
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-auth/ya.make
@@ -0,0 +1,87 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.6.26)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-auth/archive/v0.6.26.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-cal
+ contrib/restricted/aws/aws-c-common
+ contrib/restricted/aws/aws-c-http
+ contrib/restricted/aws/aws-c-io
+ contrib/restricted/aws/aws-c-sdkutils
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-auth/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+)
+
+SRCS(
+ source/auth.c
+ source/aws_imds_client.c
+ source/aws_profile.c
+ source/aws_signing.c
+ source/credentials.c
+ source/credentials_provider_anonymous.c
+ source/credentials_provider_cached.c
+ source/credentials_provider_chain.c
+ source/credentials_provider_cognito.c
+ source/credentials_provider_default_chain.c
+ source/credentials_provider_delegate.c
+ source/credentials_provider_ecs.c
+ source/credentials_provider_environment.c
+ source/credentials_provider_imds.c
+ source/credentials_provider_process.c
+ source/credentials_provider_profile.c
+ source/credentials_provider_static.c
+ source/credentials_provider_sts.c
+ source/credentials_provider_sts_web_identity.c
+ source/credentials_provider_x509.c
+ source/credentials_utils.c
+ source/key_derivation.c
+ source/signable.c
+ source/signable_chunk.c
+ source/signable_http_request.c
+ source/signable_trailer.c
+ source/signing.c
+ source/signing_config.c
+ source/signing_result.c
+ source/sigv4_http_request.c
+)
+
+END()
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-arm64.txt
index 673acade92..4132ce81b1 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-arm64.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-arm64.txt
@@ -10,6 +10,7 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_PTHREAD_GETNAME_TAKES_3ARGS
-DAWS_PTHREAD_SETNAME_TAKES_2ARGS
-DCJSON_HIDE_SYMBOLS
-DHAVE_SYSCONF
@@ -29,7 +30,6 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-x86_64.txt
index c6d3f58fef..a5385e5d75 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.darwin-x86_64.txt
@@ -10,6 +10,7 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_PTHREAD_GETNAME_TAKES_3ARGS
-DAWS_PTHREAD_SETNAME_TAKES_2ARGS
-DCJSON_HIDE_SYMBOLS
-DHAVE_SYSCONF
@@ -32,7 +33,6 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.linux-aarch64.txt
index a146f74288..3bdc0bff17 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.linux-aarch64.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.linux-aarch64.txt
@@ -10,6 +10,7 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_PTHREAD_GETNAME_TAKES_3ARGS
-DAWS_PTHREAD_SETNAME_TAKES_2ARGS
-DCJSON_HIDE_SYMBOLS
-DHAVE_SYSCONF
@@ -28,7 +29,6 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.linux-x86_64.txt
index 2e36a7692b..d33211d955 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.linux-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.linux-x86_64.txt
@@ -10,6 +10,7 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_PTHREAD_GETNAME_TAKES_3ARGS
-DAWS_PTHREAD_SETNAME_TAKES_2ARGS
-DCJSON_HIDE_SYMBOLS
-DHAVE_SYSCONF
@@ -31,7 +32,6 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
diff --git a/contrib/restricted/aws/aws-c-common/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-common/CMakeLists.windows-x86_64.txt
index 4477e48d94..b137a4a3b8 100644
--- a/contrib/restricted/aws/aws-c-common/CMakeLists.windows-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-common/CMakeLists.windows-x86_64.txt
@@ -10,6 +10,7 @@
add_library(restricted-aws-aws-c-common)
target_compile_options(restricted-aws-aws-c-common PRIVATE
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_PTHREAD_GETNAME_TAKES_3ARGS
-DAWS_PTHREAD_SETNAME_TAKES_2ARGS
-DCJSON_HIDE_SYMBOLS
-DHAVE_SYSCONF
@@ -28,7 +29,6 @@ target_sources(restricted-aws-aws-c-common PRIVATE
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/allocator_sba.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/array_list.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/assert.c
- ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/bus.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/byte_buf.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/cache.c
${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-common/source/codegen.c
diff --git a/contrib/restricted/aws/aws-c-common/README.md b/contrib/restricted/aws/aws-c-common/README.md
index be3db7e620..fcedee6076 100644
--- a/contrib/restricted/aws/aws-c-common/README.md
+++ b/contrib/restricted/aws/aws-c-common/README.md
@@ -2,8 +2,6 @@
[![GitHub](https://img.shields.io/github/license/awslabs/aws-c-common.svg)](https://github.com/awslabs/aws-c-common/blob/main/LICENSE)
-[![Language grade: C/C++](https://img.shields.io/lgtm/grade/cpp/g/awslabs/aws-c-common.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/awslabs/aws-c-common/context:cpp)
-[![Total alerts](https://img.shields.io/lgtm/alerts/g/awslabs/aws-c-common.svg?logo=lgtm&logoWidth=18)](https://lgtm.com/projects/g/awslabs/aws-c-common/alerts/)
Core c99 package for AWS SDK for C. Includes cross-platform primitives, configuration, data structures, and error handling.
@@ -192,14 +190,19 @@ Example:
* Don't typedef enums. It breaks forward declaration ability.
* typedef function definitions for use as function pointers as values and suffixed with _fn.
-Example:
-
- typedef int(fn_name_fn)(void *);
+ Do this:
-Not:
+ typedef int(fn_name_fn)(void *);
- typedef int(*fn_name_fn)(void *);
+ Not this:
+ typedef int(*fn_name_fn)(void *);
+
+* If a callback may be async, then always have it be async.
+ Callbacks that are sometimes async and sometimes sync are hard to code around and lead to bugs
+ (see [this blog post](https://blog.ometer.com/2011/07/24/callbacks-synchronous-and-asynchronous/)).
+ Unfortunately many callbacks in this codebase currently violate this rule,
+ so be careful. But do not add any more.
* Every source and header file must have a copyright header (The standard AWS one for apache 2).
* Use standard include guards (e.g. #IFNDEF HEADER_NAME #define HEADER_NAME etc...).
* Include order should be:
@@ -235,24 +238,24 @@ definition. This mainly applies to header files. Obviously, if you are writing a
platform, you have more liberty on this.
* When checking more than one error condition, check and log each condition separately with a unique message.
-Example:
-
- if (options->callback == NULL) {
- AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - callback is null");
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- if (options->allocator == NULL) {
- AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - allocator is null");
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
-Not:
-
- if (options->callback == NULL || options->allocator == NULL) {
- AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - something is null");
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
+ Do this:
+
+ if (options->callback == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - callback is null");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (options->allocator == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - allocator is null");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ Not this:
+
+ if (options->callback == NULL || options->allocator == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SOME_SUBJECT, "Invalid options - something is null");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
## CBMC
diff --git a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
index f152531c17..69939cc4cb 100644
--- a/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
+++ b/contrib/restricted/aws/aws-c-common/generated/include/aws/common/config.h
@@ -14,7 +14,7 @@
*/
#define AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS
#define AWS_HAVE_GCC_INLINE_ASM
-/* #undef AWS_HAVE_MSVC_MULX */
+/* #undef AWS_HAVE_MSVC_INTRINSICS_X64 */
#define AWS_HAVE_POSIX_LARGE_FILE_SUPPORT
/* #undef AWS_HAVE_EXECINFO */
/* #undef AWS_HAVE_WINAPI_DESKTOP */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
index 4e64a96a66..8bd104c095 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/array_list.inl
@@ -27,7 +27,7 @@ int aws_array_list_init_dynamic(
AWS_ZERO_STRUCT(*list);
- size_t allocation_size;
+ size_t allocation_size = 0;
if (aws_mul_size_checked(initial_item_allocation, item_size, &allocation_size)) {
goto error;
}
@@ -67,10 +67,13 @@ void aws_array_list_init_static(
AWS_FATAL_PRECONDITION(item_count > 0);
AWS_FATAL_PRECONDITION(item_size > 0);
+ AWS_ZERO_STRUCT(*list);
list->alloc = NULL;
- int no_overflow = !aws_mul_size_checked(item_count, item_size, &list->current_size);
+ size_t current_size = 0;
+ int no_overflow = !aws_mul_size_checked(item_count, item_size, &current_size);
AWS_FATAL_PRECONDITION(no_overflow);
+ list->current_size = current_size;
list->item_size = item_size;
list->length = 0;
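
A hedged usage sketch of aws_array_list_init_static() as touched above: the backing storage is caller-owned, so no allocator is involved and clean-up frees nothing. The helper name example_static_list is hypothetical:

    #include <aws/common/array_list.h>

    int example_static_list(void) {
        int backing[8];
        struct aws_array_list list;

        /* item_count (8) and item_size describe the caller-provided buffer */
        aws_array_list_init_static(&list, backing, 8, sizeof(int));

        int value = 42;
        if (aws_array_list_push_back(&list, &value)) {
            return AWS_OP_ERR; /* push failed; an error has already been raised */
        }

        aws_array_list_clean_up(&list); /* releases nothing for static lists */
        return AWS_OP_SUCCESS;
    }
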
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h b/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
index e7ce341ce0..9bd614c7b8 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/assert.h
@@ -82,9 +82,11 @@ AWS_EXTERN_C_END
__pragma(warning(pop))
# else
# define AWS_FATAL_ASSERT(cond) \
- if (!(cond)) { \
- aws_fatal_assert(#cond, __FILE__, __LINE__); \
- }
+ do { \
+ if (!(cond)) { \
+ aws_fatal_assert(#cond, __FILE__, __LINE__); \
+ } \
+ } while (0)
# endif /* defined(_MSC_VER) */
#endif /* defined(CBMC) */
@@ -96,16 +98,20 @@ AWS_EXTERN_C_END
* Violations of the function contracts are undefined behaviour.
*/
#ifdef CBMC
+// clang-format off
+// disable clang format, since it likes to break formatting of stringize macro.
+// seems to be fixed in v15 plus, but we are not ready to update to it yet
# define AWS_PRECONDITION2(cond, explanation) __CPROVER_precondition((cond), (explanation))
-# define AWS_PRECONDITION1(cond) __CPROVER_precondition((cond), # cond " check failed")
+# define AWS_PRECONDITION1(cond) __CPROVER_precondition((cond), #cond " check failed")
# define AWS_FATAL_PRECONDITION2(cond, explanation) __CPROVER_precondition((cond), (explanation))
-# define AWS_FATAL_PRECONDITION1(cond) __CPROVER_precondition((cond), # cond " check failed")
+# define AWS_FATAL_PRECONDITION1(cond) __CPROVER_precondition((cond), #cond " check failed")
# define AWS_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation))
-# define AWS_POSTCONDITION1(cond) __CPROVER_assert((cond), # cond " check failed")
+# define AWS_POSTCONDITION1(cond) __CPROVER_assert((cond), #cond " check failed")
# define AWS_FATAL_POSTCONDITION2(cond, explanation) __CPROVER_assert((cond), (explanation))
-# define AWS_FATAL_POSTCONDITION1(cond) __CPROVER_assert((cond), # cond " check failed")
+# define AWS_FATAL_POSTCONDITION1(cond) __CPROVER_assert((cond), #cond " check failed")
# define AWS_MEM_IS_READABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
# define AWS_MEM_IS_WRITABLE_CHECK(base, len) (((len) == 0) || (__CPROVER_r_ok((base), (len))))
+// clang-format on
#else
# define AWS_PRECONDITION2(cond, expl) AWS_ASSERT(cond)
# define AWS_PRECONDITION1(cond) AWS_ASSERT(cond)
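
The do { ... } while (0) wrapper added above matters when the macro is used as a single statement in an unbraced if/else. A small illustration (should_check, ptr, and handle_other_case are hypothetical):

    #include <stdbool.h>
    #include <aws/common/assert.h>

    static void handle_other_case(void) { /* hypothetical fallback path */ }

    /* With the old plain-if expansion, the ';' after AWS_FATAL_ASSERT(...) became a
     * stray empty statement, leaving the following 'else' without a matching 'if'.
     * The do/while(0) form expands to exactly one statement, so this parses as intended. */
    static void check_example(bool should_check, const void *ptr) {
        if (should_check)
            AWS_FATAL_ASSERT(ptr != NULL);
        else
            handle_other_case();
    }
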
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h b/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h
deleted file mode 100644
index fe5127e6f7..0000000000
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/bus.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#ifndef AWS_COMMON_BUS_H
-#define AWS_COMMON_BUS_H
-
-/**
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- * SPDX-License-Identifier: Apache-2.0.
- */
-
-#include <aws/common/common.h>
-
-/*
- * A message bus is a mapping of integer message addresses/types -> listeners/callbacks.
- * A listener can listen to a single message, or to all messages on a bus
- * Message addresses/types can be any 64-bit integer, starting at 1.
- * AWS_BUS_ADDRESS_ALL (0xffffffffffffffff) is reserved for broadcast to all listeners.
- * AWS_BUS_ADDRESS_CLOSE (0) is reserved for notifying listeners to clean up
- * Listeners will be sent a message of type AWS_BUS_ADDRESS_CLOSE when it is time to clean any state up.
- * Listeners are owned by the subscriber, and are no longer referenced by the bus once unsubscribed.
- * Under the AWS_BUS_ASYNC policy, message delivery happens in a separate thread from sending, so listeners are
- * responsible for their own thread safety.
- */
-struct aws_bus;
-
-enum aws_bus_policy {
- /**
- * Messages will be delivered, even if dynamic allocation is required. Default.
- */
- AWS_BUS_ASYNC_RELIABLE = 0x0,
- /**
- * Only memory from the bus's internal buffer will be used (if a buffer size is supplied at bus creation time).
- * If the buffer is full, older buffered messages will be discarded to make room for newer messages.
- */
- AWS_BUS_ASYNC_UNRELIABLE = 0x1,
- /**
- * Message delivery is immediate, and therefore reliable by definition
- */
- AWS_BUS_SYNC_RELIABLE = 0x2,
-};
-
-/**
- * Subscribing to AWS_BUS_ADDRESS_ALL will cause the listener to be invoked for every message sent to the bus
- * It is possible to send to AWS_BUS_ADDRESS_ALL, just be aware that this will only send to listeners subscribed
- * to AWS_BUS_ADDRESS_ALL.
- */
-#define AWS_BUS_ADDRESS_ALL ((uint64_t)-1)
-#define AWS_BUS_ADDRESS_CLOSE 0
-
-struct aws_bus_options {
- enum aws_bus_policy policy;
- /**
- * Size of buffer for unreliable message delivery queue.
- * Unused if policy is AWS_BUS_ASYNC_RELIABNLE or AWS_BUS_SYNC_RELIABLE
- * Messages are 40 bytes. Default buffer_size is 4K. The bus will not allocate memory beyond this size.
- */
- size_t buffer_size;
- /* Not supported yet, but event loop group for delivery */
- struct aws_event_loop_group *event_loop_group;
-};
-
-/* Signature for listener callbacks */
-typedef void(aws_bus_listener_fn)(uint64_t address, const void *payload, void *user_data);
-
-/**
- * Allocates and initializes a message bus
- */
-AWS_COMMON_API
-struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options);
-
-/**
- * Cleans up a message bus, including notifying all remaining listeners to close
- */
-AWS_COMMON_API
-void aws_bus_destroy(struct aws_bus *bus);
-
-/**
- * Subscribes a listener to a message type. user_data's lifetime is the responsibility of the subscriber.
- */
-AWS_COMMON_API
-int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data);
-
-/**
- * Unsubscribe a listener from a specific message. This is only necessary if the listener has lifetime concerns.
- * Otherwise, the listener will be called with an address of AWS_BUS_ADDRESS_CLOSE, which indicates that user_data
- * can be cleaned up if necessary and the listener will never be called again.
- */
-AWS_COMMON_API
-void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data);
-
-/**
- * Sends a message to any listeners. payload will live until delivered, and then the destructor (if
- * provided) will be called. Note that anything payload references must also live at least until it is destroyed.
- * Will return AWS_OP_ERR if the bus is closing/has been closed
- */
-AWS_COMMON_API
-int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *));
-
-#endif /* AWS_COMMON_BUS_H */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h b/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
index 7184dcd68a..4e7454057b 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/command_line_parser.h
@@ -13,7 +13,7 @@ enum aws_cli_options_has_arg {
};
/**
- * Invoked when a subcommand is encountered. argc and argv[] begins at the command encounterd.
+ * Invoked when a subcommand is encountered. argc and argv[] begins at the command encountered.
* command_name is the name of the command being handled.
*/
typedef int(aws_cli_options_subcommand_fn)(int argc, char *const argv[], const char *command_name, void *user_data);
@@ -56,7 +56,7 @@ AWS_COMMON_API extern const char *aws_cli_positional_arg;
/**
* A mostly compliant implementation of posix getopt_long(). Parses command-line arguments. argc is the number of
* command line arguments passed in argv. optstring contains the legitimate option characters. The option characters
- * coorespond to aws_cli_option::val. If the character is followed by a :, the option requires an argument. If it is
+ * correspond to aws_cli_option::val. If the character is followed by a :, the option requires an argument. If it is
* followed by '::', the argument is optional (not implemented yet).
*
* longopts, is an array of struct aws_cli_option. These are the allowed options for the program.
@@ -91,7 +91,7 @@ AWS_COMMON_API void aws_cli_reset_state(void);
* @param parse_cb, optional, specify NULL if you don't want to handle this. This argument is for parsing "meta"
* commands from the command line options prior to dispatch occurring.
* @param dispatch_table table containing functions and command name to dispatch on.
- * @param table_length numnber of entries in dispatch_table.
+ * @param table_length number of entries in dispatch_table.
* @return AWS_OP_SUCCESS(0) on success, AWS_OP_ERR(-1) on failure
*/
AWS_COMMON_API int aws_cli_dispatch_on_subcommand(
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/common.h b/contrib/restricted/aws/aws-c-common/include/aws/common/common.h
index 7968a5e009..ce09ef226c 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/common.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/common.h
@@ -26,14 +26,14 @@
AWS_EXTERN_C_BEGIN
/**
- * Initializes internal datastructures used by aws-c-common.
+ * Initializes internal data structures used by aws-c-common.
* Must be called before using any functionality in aws-c-common.
*/
AWS_COMMON_API
void aws_common_library_init(struct aws_allocator *allocator);
/**
- * Shuts down the internal datastructures used by aws-c-common.
+ * Shuts down the internal data structures used by aws-c-common.
*/
AWS_COMMON_API
void aws_common_library_clean_up(void);
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h b/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h
index 30f8c0350f..8b3ec883b9 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/cpuid.h
@@ -13,6 +13,7 @@ enum aws_cpu_feature_name {
AWS_CPU_FEATURE_SSE_4_2,
AWS_CPU_FEATURE_AVX2,
AWS_CPU_FEATURE_ARM_CRC,
+ AWS_CPU_FEATURE_BMI2,
AWS_CPU_FEATURE_COUNT,
};
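
The new AWS_CPU_FEATURE_BMI2 entry is queried at runtime through aws_cpu_has_feature(); later in this patch, math.msvc.inl uses exactly this check to choose between a _mulx_u32-based path and an __emulu fallback. A minimal sketch of the pattern (pick_multiply_impl and the branch bodies are placeholders):

    #include <aws/common/cpuid.h>

    void pick_multiply_impl(void) {
        if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) {
            /* BMI2 available: _mulx_u32/_mulx_u64-based code paths are safe */
        } else {
            /* no BMI2: fall back to __emulu or portable arithmetic */
        }
    }
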
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/encoding.h b/contrib/restricted/aws/aws-c-common/include/aws/common/encoding.h
index 4e02c30b29..b707c70be4 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/encoding.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/encoding.h
@@ -146,40 +146,82 @@ AWS_STATIC_IMPL enum aws_text_encoding aws_text_detect_encoding(const uint8_t *b
/*
* Returns true if aws_text_detect_encoding() determines the text is UTF8 or ASCII.
* Note that this immediately returns true if the UTF8 BOM is seen.
- * To fully validate every byte, use aws_text_is_valid_utf8().
+ * To fully validate every byte, use aws_decode_utf8().
*/
AWS_STATIC_IMPL bool aws_text_is_utf8(const uint8_t *bytes, size_t size);
+struct aws_utf8_decoder_options {
+ /**
+ * Optional.
+ * Callback invoked for each Unicode codepoint.
+ * Use this callback to store codepoints as they're decoded,
+ * or to perform additional validation. RFC-3629 is already enforced,
+ * which forbids codepoints between U+D800 and U+DFFF,
+ * but you may wish to forbid codepoints like U+0000.
+ *
+ * @return AWS_OP_SUCCESS to continue processing the string, otherwise
+ * return AWS_OP_ERR and raise an error (e.g. AWS_ERROR_INVALID_UTF8)
+ * to stop processing the string and report failure.
+ */
+ int (*on_codepoint)(uint32_t codepoint, void *user_data);
+
+ /* Optional. Pointer passed to on_codepoint callback. */
+ void *user_data;
+};
+
/**
- * Scans every byte, and returns true if it is valid UTF8/ASCII as defined in RFC-3629.
+ * Decode a complete string of UTF8/ASCII text.
+ * Text is always validated according to RFC-3629 (you may perform additional
+ * validation in the on_codepoint callback).
* The text does not need to begin with a UTF8 BOM.
+ * If you need to decode text incrementally as you receive it, use aws_utf8_decoder_new() instead.
+ *
+ * @param bytes Text to decode.
+ * @param options Options for decoding. If NULL is passed, the text is simply validated.
+ *
+ * @return AWS_OP_SUCCESS if successful.
+ * An error is raised if the text is not valid, or the on_codepoint callback raises an error.
*/
-AWS_COMMON_API bool aws_text_is_valid_utf8(struct aws_byte_cursor bytes);
+AWS_COMMON_API int aws_decode_utf8(struct aws_byte_cursor bytes, const struct aws_utf8_decoder_options *options);
+
+struct aws_utf8_decoder;
/**
- * A UTF8 validator scans every byte of text, incrementally,
- * and raises AWS_ERROR_INVALID_UTF8 if isn't valid UTF8/ASCII as defined in RFC-3629.
+ * Create a UTF8/ASCII decoder, which can process text incrementally as you receive it.
+ * Text is always validated according to RFC-3629 (you may perform additional
+ * validation in the on_codepoint callback).
* The text does not need to begin with a UTF8 BOM.
- * To validate text all at once, simply use aws_text_is_valid_utf8().
+ * To decode text all at once, simply use aws_decode_utf8().
+ *
+ * Feed bytes into the decoder with aws_utf8_decoder_update(),
+ * and call aws_utf8_decoder_finalize() when the text is complete.
+ *
+ * @param allocator Allocator
+ * @param options Options for decoder. If NULL is passed, the text is simply validated.
*/
-struct aws_utf8_validator;
+AWS_COMMON_API struct aws_utf8_decoder *aws_utf8_decoder_new(
+ struct aws_allocator *allocator,
+ const struct aws_utf8_decoder_options *options);
-AWS_COMMON_API struct aws_utf8_validator *aws_utf8_validator_new(struct aws_allocator *allocator);
-AWS_COMMON_API void aws_utf8_validator_destroy(struct aws_utf8_validator *validator);
-AWS_COMMON_API void aws_utf8_validator_reset(struct aws_utf8_validator *validator);
+AWS_COMMON_API void aws_utf8_decoder_destroy(struct aws_utf8_decoder *decoder);
+AWS_COMMON_API void aws_utf8_decoder_reset(struct aws_utf8_decoder *decoder);
/**
- * Update the validator with more bytes of text.
- * Raises AWS_ERROR_INVALID_UTF8 if invalid UTF8 is encountered.
+ * Update the decoder with more bytes of text.
+ * The on_codepoint callback will be invoked for each codepoint encountered.
+ * Raises an error if invalid UTF8 is encountered or the on_codepoint callback reports an error.
+ *
+ * Note: You must call aws_utf8_decoder_finalize() when the text is 100% complete,
+ * to ensure the input was completely valid.
*/
-AWS_COMMON_API int aws_utf8_validator_update(struct aws_utf8_validator *validator, struct aws_byte_cursor bytes);
+AWS_COMMON_API int aws_utf8_decoder_update(struct aws_utf8_decoder *decoder, struct aws_byte_cursor bytes);
/**
- * Tell the validator that you've reached the end of your text.
+ * Tell the decoder that you've reached the end of your text.
* Raises AWS_ERROR_INVALID_UTF8 if the text did not end with a complete UTF8 codepoint.
- * This also resets the validator.
+ * This also resets the decoder.
*/
-AWS_COMMON_API int aws_utf8_validator_finalize(struct aws_utf8_validator *validator);
+AWS_COMMON_API int aws_utf8_decoder_finalize(struct aws_utf8_decoder *decoder);
#ifndef AWS_NO_STATIC_IMPL
# include <aws/common/encoding.inl>
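
A hedged usage sketch of the new one-shot API declared above: aws_decode_utf8() validates per RFC-3629 and hands each codepoint to the optional callback, which can impose extra rules. s_reject_nul and validate_no_nul are hypothetical names:

    #include <aws/common/byte_buf.h>
    #include <aws/common/encoding.h>
    #include <aws/common/error.h>

    /* Reject U+0000, the extra validation the comment above mentions */
    static int s_reject_nul(uint32_t codepoint, void *user_data) {
        (void)user_data;
        if (codepoint == 0) {
            return aws_raise_error(AWS_ERROR_INVALID_UTF8);
        }
        return AWS_OP_SUCCESS;
    }

    static int validate_no_nul(const char *text) {
        struct aws_utf8_decoder_options options = {
            .on_codepoint = s_reject_nul,
            .user_data = NULL,
        };
        /* returns AWS_OP_SUCCESS, or raises AWS_ERROR_INVALID_UTF8 (or the callback's error) */
        return aws_decode_utf8(aws_byte_cursor_from_c_str(text), &options);
    }
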
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
index 42ebb6eb5f..00aa5f8bca 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/error.h
@@ -129,7 +129,9 @@ AWS_COMMON_API
void aws_unregister_error_info(const struct aws_error_info_list *error_info);
/**
- * Convert a c library io error into an aws error.
+ * Convert a c library io error into an aws error, and raise it.
+ * If no conversion is found, AWS_ERROR_SYS_CALL_FAILURE is raised.
+ * Always returns AWS_OP_ERR.
*/
AWS_COMMON_API
int aws_translate_and_raise_io_error(int error_no);
@@ -196,6 +198,7 @@ enum aws_common_error {
AWS_ERROR_DIRECTORY_NOT_EMPTY,
AWS_ERROR_PLATFORM_NOT_SUPPORTED,
AWS_ERROR_INVALID_UTF8,
+ AWS_ERROR_GET_HOME_DIRECTORY_FAILED,
AWS_ERROR_END_COMMON_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMMON_PACKAGE_ID)
};
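
A short sketch of the clarified contract: translate errno from a failed C library call and propagate AWS_OP_ERR in one step. read_file_size is a hypothetical helper:

    #include <errno.h>
    #include <stdio.h>
    #include <aws/common/error.h>

    static int read_file_size(FILE *fp, long *out_size) {
        if (fseek(fp, 0, SEEK_END) != 0) {
            /* raises the translated error (or AWS_ERROR_SYS_CALL_FAILURE) and returns AWS_OP_ERR */
            return aws_translate_and_raise_io_error(errno);
        }
        *out_size = ftell(fp);
        return AWS_OP_SUCCESS;
    }
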
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h b/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
index 3210e8ab37..5695914770 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/external/cJSON.h
@@ -25,6 +25,7 @@ THE SOFTWARE.
* (1) Address clang-tidy errors by renaming function parameters in a number of places
* to match their .c counterparts.
* (2) Misc tweaks to unchecked writes to make security static analysis happier
+ * (3) Remove cJSON_GetErrorPtr and global_error as they are not thread-safe.
*/
/* clang-format off */
@@ -181,8 +182,6 @@ CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string);
CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string);
-/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
-CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
/* Check item type and return its value */
CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item);
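
Since cJSON_GetErrorPtr() is removed here because it relies on non-thread-safe global state, a caller that still needs the parse-error location can use cJSON_ParseWithOpts() and its return_parse_end out-parameter, assuming that standard cJSON entry point remains in this vendored copy. parse_or_report is a hypothetical helper:

    #include <aws/common/external/cJSON.h>

    static cJSON *parse_or_report(const char *json_text) {
        const char *parse_end = NULL;
        cJSON *root = cJSON_ParseWithOpts(json_text, &parse_end, 0 /* allow trailing data */);
        if (root == NULL && parse_end != NULL) {
            /* parse_end points at the position where parsing stopped,
             * giving the same information the removed global accessor exposed */
        }
        return root;
    }
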
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/file.h b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
index 4bbc1540db..6a47825173 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/file.h
@@ -56,12 +56,15 @@ typedef bool(aws_on_directory_entry)(const struct aws_directory_entry *entry, vo
AWS_EXTERN_C_BEGIN
/**
- * Don't use this. It never should have been added in the first place. It's now deprecated.
+ * Deprecated - Use aws_fopen_safe() instead; avoid const char * in public APIs.
+ * Opens file at file_path using mode. Returns the FILE pointer if successful.
+ * Otherwise, aws_last_error() will contain the error that occurred.
*/
AWS_COMMON_API FILE *aws_fopen(const char *file_path, const char *mode);
/**
* Opens file at file_path using mode. Returns the FILE pointer if successful.
+ * Otherwise, aws_last_error() will contain the error that occurred.
*/
AWS_COMMON_API FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode);
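
A hedged sketch of the preferred aws_fopen_safe() path described above; open_config is a hypothetical helper:

    #include <aws/common/file.h>
    #include <aws/common/string.h>

    static FILE *open_config(struct aws_allocator *allocator, const char *path) {
        struct aws_string *file_path = aws_string_new_from_c_str(allocator, path);
        struct aws_string *mode = aws_string_new_from_c_str(allocator, "rb");

        FILE *fp = aws_fopen_safe(file_path, mode); /* NULL on failure, see aws_last_error() */

        aws_string_destroy(mode);
        aws_string_destroy(file_path);
        return fp;
    }
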
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/json.h b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
index 2061db173b..e8a5f476dd 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/json.h
@@ -11,6 +11,8 @@
struct aws_json_value;
+AWS_EXTERN_C_BEGIN
+
// ====================
// Create and pass type
@@ -412,4 +414,6 @@ AWS_COMMON_API
struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string);
// ====================
+AWS_EXTERN_C_END
+
#endif // AWS_COMMON_JSON_H
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
index 9a5bc8fad4..e5fbe88586 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/logging.h
@@ -126,17 +126,15 @@ struct aws_logger {
/**
* The base formatted logging macro that all other formatted logging macros resolve to.
* Checks for a logger and filters based on log level.
- *
*/
#define AWS_LOGF(log_level, subject, ...) \
- { \
+ do { \
AWS_ASSERT(log_level > 0); \
struct aws_logger *logger = aws_logger_get(); \
if (logger != NULL && logger->vtable->get_log_level(logger, (subject)) >= (log_level)) { \
logger->vtable->log(logger, log_level, subject, __VA_ARGS__); \
} \
- }
-
+ } while (0)
/**
* Unconditional logging macro that takes a logger and does not do a level check or a null check. Intended for
* situations when you need to log many things and do a single manual level check before beginning.
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
index 48f90ad501..aacfb94167 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/macros.h
@@ -43,7 +43,7 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3);
/**
* Format macro for strings of a specified length.
* Allows non null-terminated strings to be used with the printf family of functions.
- * Ex: printf("scheme is " PRInSTR, 4, "http://example.org"); // ouputs: "scheme is http"
+ * Ex: printf("scheme is " PRInSTR, 4, "http://example.org"); // outputs: "scheme is http"
*/
#define PRInSTR "%.*s"
@@ -70,7 +70,7 @@ AWS_STATIC_ASSERT(CALL_OVERLOAD_TEST(1, 2, 3) == 3);
# if defined(__cplusplus)
# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type *name = alloca(sizeof(type) * (length))
# else
-# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type name[length];
+# define AWS_VARIABLE_LENGTH_ARRAY(type, name, length) type name[length]
# endif /* defined(__cplusplus) */
# endif /* defined(__GNUC__) || defined(__clang__) */
#endif /* defined(_MSC_VER) */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/math.gcc_builtin.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/math.gcc_builtin.inl
index 7be7126aef..b834c5dc8b 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/math.gcc_builtin.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/math.gcc_builtin.inl
@@ -21,18 +21,30 @@ AWS_EXTERN_C_BEGIN
* Search from the MSB to LSB, looking for a 1
*/
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) {
- return __builtin_clzl(n);
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
+ return __builtin_clz(n);
}
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_clz(n);
}
AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_clzll(n);
}
AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_clzll(n);
}
@@ -48,18 +60,30 @@ AWS_STATIC_IMPL size_t aws_clz_size(size_t n) {
* Search from the LSB to MSB, looking for a 1
*/
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_ctzl(n);
}
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_ctz(n);
}
AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_ctzll(n);
}
AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) {
+ if (n == 0) {
+ return sizeof(n) * 8;
+ }
return __builtin_ctzll(n);
}
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/math.h b/contrib/restricted/aws/aws-c-common/include/aws/common/math.h
index 108e983639..2473dcf5e8 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/math.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/math.h
@@ -28,7 +28,7 @@ AWS_EXTERN_C_BEGIN
#if defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS) && (defined(__clang__) || !defined(__cplusplus)) || \
(defined(__x86_64__) || defined(__aarch64__)) && defined(AWS_HAVE_GCC_INLINE_ASM) || \
- defined(AWS_HAVE_MSVC_MULX) || defined(CBMC) || !defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
+ defined(AWS_HAVE_MSVC_INTRINSICS_X64) || defined(CBMC) || !defined(AWS_HAVE_GCC_OVERFLOW_MATH_EXTENSIONS)
/* In all these cases, we can use fast static inline versions of this code */
# define AWS_COMMON_MATH_API AWS_STATIC_IMPL
#else
@@ -156,7 +156,7 @@ AWS_STATIC_IMPL bool aws_is_power_of_two(const size_t x);
AWS_STATIC_IMPL int aws_round_up_to_power_of_two(size_t n, size_t *result);
/**
- * Counts the number of leading 0 bits in an integer
+ * Counts the number of leading 0 bits in an integer. 0 will return the size of the integer in bits.
*/
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n);
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n);
@@ -165,7 +165,7 @@ AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n);
AWS_STATIC_IMPL size_t aws_clz_size(size_t n);
/**
- * Counts the number of trailing 0 bits in an integer
+ * Counts the number of trailing 0 bits in an integer. 0 will return the size of the integer in bits.
*/
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n);
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n);
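
Illustrative expectations that follow directly from the updated comments, including the now well-defined behavior for a zero input:

    #include <aws/common/assert.h>
    #include <aws/common/math.h>

    void clz_ctz_examples(void) {
        AWS_ASSERT(aws_clz_u32(1u) == 31);          /* 31 leading zeros above the lone bit */
        AWS_ASSERT(aws_ctz_u32(0x80000000u) == 31); /* 31 trailing zeros below the top bit */
        AWS_ASSERT(aws_clz_u32(0) == 32);           /* zero returns the bit width */
        AWS_ASSERT(aws_ctz_u64(0) == 64);
    }
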
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/math.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/math.inl
index 9dbacedac2..2081fbccaf 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/math.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/math.inl
@@ -27,7 +27,7 @@ AWS_EXTERN_C_BEGIN
# include <aws/common/math.gcc_x64_asm.inl>
#elif defined(__aarch64__) && defined(AWS_HAVE_GCC_INLINE_ASM)
# include <aws/common/math.gcc_arm64_asm.inl>
-#elif defined(AWS_HAVE_MSVC_MULX)
+#elif defined(AWS_HAVE_MSVC_INTRINSICS_X64)
# include <aws/common/math.msvc.inl>
#elif defined(CBMC)
# include <aws/common/math.cbmc.inl>
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/math.msvc.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/math.msvc.inl
index 1faabe64aa..0aad29e8ef 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/math.msvc.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/math.msvc.inl
@@ -11,6 +11,7 @@
* highlighting happier.
*/
#include <aws/common/common.h>
+#include <aws/common/cpuid.h>
#include <aws/common/math.h>
#include <immintrin.h>
@@ -40,51 +41,106 @@ AWS_STATIC_IMPL int aws_mul_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
return AWS_OP_SUCCESS;
}
+static uint32_t (*s_mul_u32_saturating_fn_ptr)(uint32_t a, uint32_t b) = NULL;
+
+static uint32_t s_mulx_u32_saturating(uint32_t a, uint32_t b) {
+ uint32_t high_32;
+ uint32_t ret_val = _mulx_u32(a, b, &high_32);
+ return (high_32 == 0) ? ret_val : UINT32_MAX;
+}
+
+static uint32_t s_emulu_saturating(uint32_t a, uint32_t b) {
+ uint64_t result = __emulu(a, b);
+ return (result > UINT32_MAX) ? UINT32_MAX : (uint32_t)result;
+}
/**
* Multiplies a * b. If the result overflows, returns 2^32 - 1.
*/
AWS_STATIC_IMPL uint32_t aws_mul_u32_saturating(uint32_t a, uint32_t b) {
- uint32_t out;
- uint32_t ret_val = _mulx_u32(a, b, &out);
- return (out == 0) ? ret_val : UINT32_MAX;
+ if (AWS_UNLIKELY(!s_mul_u32_saturating_fn_ptr)) {
+ if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) {
+ s_mul_u32_saturating_fn_ptr = s_mulx_u32_saturating;
+ } else {
+ /* If BMI2 unavailable, use __emulu instead */
+ s_mul_u32_saturating_fn_ptr = s_emulu_saturating;
+ }
+ }
+ return s_mul_u32_saturating_fn_ptr(a, b);
+}
+
+static int (*s_mul_u32_checked_fn_ptr)(uint32_t a, uint32_t b, uint32_t *r) = NULL;
+
+static int s_mulx_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
+ uint32_t high_32;
+ *r = _mulx_u32(a, b, &high_32);
+
+ if (high_32 != 0) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ return AWS_OP_SUCCESS;
}
+static int s_emulu_checked(uint32_t a, uint32_t b, uint32_t *r) {
+ uint64_t result = __emulu(a, b);
+ if (result > UINT32_MAX) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ *r = (uint32_t)result;
+ return AWS_OP_SUCCESS;
+}
/**
* If a * b overflows, returns AWS_OP_ERR; otherwise multiplies
* a * b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_mul_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
- uint32_t out;
- *r = _mulx_u32(a, b, &out);
-
- if (out != 0) {
- return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ if (AWS_UNLIKELY(!s_mul_u32_checked_fn_ptr)) {
+ if (aws_cpu_has_feature(AWS_CPU_FEATURE_BMI2)) {
+ s_mul_u32_checked_fn_ptr = s_mulx_u32_checked;
+ } else {
+ /* If BMI2 unavailable, use __emulu instead */
+ s_mul_u32_checked_fn_ptr = s_emulu_checked;
+ }
}
- return AWS_OP_SUCCESS;
+ return s_mul_u32_checked_fn_ptr(a, b, r);
}
/**
* If a + b overflows, returns AWS_OP_ERR; otherwise adds
* a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
-AWS_STATIC_IMPL int aws_add_u64_checked(uint32_t a, uint32_t b, uint32_t *r) {
- if (_addcarry_u64(0, a, b, *r)) {
+AWS_STATIC_IMPL int aws_add_u64_checked(uint64_t a, uint64_t b, uint64_t *r) {
+#if !defined(_MSC_VER) || _MSC_VER < 1920
+ /* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
+ if ((b > 0) && (a > (UINT64_MAX - b))) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ *r = a + b;
+ return AWS_OP_SUCCESS;
+#else
+ if (_addcarry_u64((uint8_t)0, a, b, r)) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
return AWS_OP_SUCCESS;
+#endif
}
/**
* Adds a + b. If the result overflows, returns 2^64 - 1.
*/
-AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint32_t a, uint32_t b) {
- uint32_t res;
-
- if (_addcarry_u64(0, a, b, &res)) {
+AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint64_t a, uint64_t b) {
+#if !defined(_MSC_VER) || _MSC_VER < 1920
+ /* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
+ if ((b > 0) && (a > (UINT64_MAX - b))) {
+ return UINT64_MAX;
+ }
+ return a + b;
+#else
+ uint64_t res = 0;
+ if (_addcarry_u64((uint8_t)0, a, b, &res)) {
res = UINT64_MAX;
}
-
return res;
+#endif
}
/**
@@ -92,23 +148,37 @@ AWS_STATIC_IMPL uint64_t aws_add_u64_saturating(uint32_t a, uint32_t b) {
* a + b, returns the result in *r, and returns AWS_OP_SUCCESS.
*/
AWS_STATIC_IMPL int aws_add_u32_checked(uint32_t a, uint32_t b, uint32_t *r) {
- if(_addcarry_u32(0, a, b, *r){
+#if !defined(_MSC_VER) || _MSC_VER < 1920
+ /* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
+ if ((b > 0) && (a > (UINT32_MAX - b))) {
return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
}
+ *r = a + b;
return AWS_OP_SUCCESS;
+#else
+ if (_addcarry_u32((uint8_t)0, a, b, r)) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ return AWS_OP_SUCCESS;
+#endif
}
/**
* Adds a + b. If the result overflows, returns 2^32 - 1.
*/
-AWS_STATIC_IMPL uint64_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
- uint32_t res;
-
- if (_addcarry_u32(0, a, b, &res)) {
+AWS_STATIC_IMPL uint32_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
+#if !defined(_MSC_VER) || _MSC_VER < 1920
+ /* Fallback for MSVC 2017 and older; _addcarry doesn't work correctly on those compilers */
+ if ((b > 0) && (a > (UINT32_MAX - b)))
+ return UINT32_MAX;
+ return a + b;
+#else
+ uint32_t res = 0;
+ if (_addcarry_u32((uint8_t)0, a, b, &res)) {
res = UINT32_MAX;
}
-
return res;
+#endif
}
/**
@@ -116,26 +186,34 @@ AWS_STATIC_IMPL uint64_t aws_add_u32_saturating(uint32_t a, uint32_t b) {
*/
AWS_STATIC_IMPL size_t aws_clz_u32(uint32_t n) {
unsigned long idx = 0;
- _BitScanReverse(&idx, n);
- return idx;
+ if (_BitScanReverse(&idx, n)) {
+ return 31 - idx;
+ }
+ return 32;
}
AWS_STATIC_IMPL size_t aws_clz_i32(int32_t n) {
unsigned long idx = 0;
- _BitScanReverse(&idx, n);
- return idx;
+ if (_BitScanReverse(&idx, n)) {
+ return 31 - idx;
+ }
+ return 32;
}
AWS_STATIC_IMPL size_t aws_clz_u64(uint64_t n) {
unsigned long idx = 0;
- _BitScanReverse64(&idx, n);
- return idx;
+ if (_BitScanReverse64(&idx, n)) {
+ return 63 - idx;
+ }
+ return 64;
}
AWS_STATIC_IMPL size_t aws_clz_i64(int64_t n) {
unsigned long idx = 0;
- _BitScanReverse64(&idx, n);
- return idx;
+ if (_BitScanReverse64(&idx, n)) {
+ return 63 - idx;
+ }
+ return 64;
}
AWS_STATIC_IMPL size_t aws_clz_size(size_t n) {
@@ -151,26 +229,34 @@ AWS_STATIC_IMPL size_t aws_clz_size(size_t n) {
*/
AWS_STATIC_IMPL size_t aws_ctz_u32(uint32_t n) {
unsigned long idx = 0;
- _BitScanForward(&idx, n);
- return idx;
+ if (_BitScanForward(&idx, n)) {
+ return idx;
+ }
+ return 32;
}
AWS_STATIC_IMPL size_t aws_ctz_i32(int32_t n) {
unsigned long idx = 0;
- _BitScanForward(&idx, n);
- return idx;
+ if (_BitScanForward(&idx, n)) {
+ return idx;
+ }
+ return 32;
}
AWS_STATIC_IMPL size_t aws_ctz_u64(uint64_t n) {
unsigned long idx = 0;
- _BitScanForward64(&idx, n);
- return idx;
+ if (_BitScanForward64(&idx, n)) {
+ return idx;
+ }
+ return 64;
}
AWS_STATIC_IMPL size_t aws_ctz_i64(int64_t n) {
unsigned long idx = 0;
- _BitScanForward64(&idx, n);
- return idx;
+ if (_BitScanForward64(&idx, n)) {
+ return idx;
+ }
+ return 64;
}
AWS_STATIC_IMPL size_t aws_ctz_size(size_t n) {
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/hash_table_impl.h b/contrib/restricted/aws/aws-c-common/include/aws/common/private/hash_table_impl.h
index 86ffb1401f..98169a7eeb 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/hash_table_impl.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/hash_table_impl.h
@@ -16,7 +16,7 @@ struct hash_table_entry {
};
/* Using a flexible array member is the C99 compliant way to have the hash_table_entries
- * immediatly follow the struct.
+ * immediately follow the struct.
*
* MSVC doesn't know this for some reason so we need to use a pragma to make
* it happy.
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl b/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
index 50b269fc7b..5695861c8f 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/private/lookup3.inl
@@ -60,7 +60,7 @@ on 1 byte), but shoehorning those bytes into integers efficiently is messy.
# include <endian.h> /* attempt to define endianness */
#endif
-#if _MSC_VER
+#ifdef _MSC_VER
#pragma warning(push)
#pragma warning(disable:4127) /*Disable "conditional expression is constant" */
#endif /* _MSC_VER */
@@ -1055,7 +1055,7 @@ int main()
#endif /* SELF_TEST */
-#if _MSC_VER
+#ifdef _MSC_VER
#pragma warning(pop)
#endif /* _MSC_VER */
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/string.h b/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
index c73a24ad4a..fbc513911f 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/string.h
@@ -291,7 +291,10 @@ int aws_array_list_comparator_string(const void *a, const void *b);
const size_t len; \
const uint8_t bytes[sizeof(literal)]; \
} name##_s = {NULL, sizeof(literal) - 1, literal}; \
- static const struct aws_string *(name) = (struct aws_string *)(&name##_s)
+ static const struct aws_string *name = (struct aws_string *)(&name##_s) /* NOLINT(bugprone-macro-parentheses) */
+
+/* NOLINT above is because clang-tidy complains that (name) isn't in parentheses,
+ * but gcc8-c++ complains that the parentheses are unnecessary */
/*
* A related macro that declares the string pointer without static, allowing it to be externed as a global constant
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h b/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
index 53aa6a6eef..5b3014288a 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/thread.h
@@ -6,6 +6,7 @@
* SPDX-License-Identifier: Apache-2.0.
*/
#include <aws/common/byte_buf.h>
+#include <aws/common/string.h>
#ifndef _WIN32
# include <pthread.h>
@@ -23,7 +24,7 @@ enum aws_thread_detach_state {
* in the managed thread system. The managed thread system provides logic to guarantee a join on all participating
* threads at the cost of laziness (the user cannot control when joins happen).
*
- * Manual - thread does not particpate in the managed thread system; any joins must be done by the user. This
+ * Manual - thread does not participate in the managed thread system; any joins must be done by the user. This
* is the default. The user must call aws_thread_clean_up(), but only after any desired join operation has completed.
* Not doing so will cause the windows handle to leak.
*
@@ -237,6 +238,31 @@ AWS_COMMON_API void aws_thread_increment_unjoined_count(void);
*/
AWS_COMMON_API void aws_thread_decrement_unjoined_count(void);
+/**
+ * Gets the name of the current thread.
+ * The caller is responsible for destroying the returned string.
+ * If the thread does not have a name, AWS_OP_SUCCESS is returned and out_name is
+ * set to NULL.
+ * If the underlying OS call fails, AWS_ERROR_SYS_CALL_FAILURE will be raised.
+ * If the OS does not support getting the thread name, AWS_ERROR_PLATFORM_NOT_SUPPORTED
+ * will be raised.
+ */
+AWS_COMMON_API int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name);
+
+/**
+ * Gets the name of the specified thread.
+ * The caller is responsible for destroying the returned string.
+ * If the thread does not have a name, AWS_OP_SUCCESS is returned and out_name is
+ * set to NULL.
+ * If the underlying OS call fails, AWS_ERROR_SYS_CALL_FAILURE will be raised.
+ * If the OS does not support getting the thread name, AWS_ERROR_PLATFORM_NOT_SUPPORTED
+ * will be raised.
+ */
+AWS_COMMON_API int aws_thread_name(
+ struct aws_allocator *allocator,
+ aws_thread_id_t thread_id,
+ struct aws_string **out_name);
+
AWS_EXTERN_C_END
#endif /* AWS_COMMON_THREAD_H */
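
A hedged sketch of the new thread-name query: on success the out-parameter may still be NULL for an unnamed thread, and the caller owns the returned string. log_current_thread_name is a hypothetical helper:

    #include <stdio.h>
    #include <aws/common/string.h>
    #include <aws/common/thread.h>

    static void log_current_thread_name(struct aws_allocator *allocator) {
        struct aws_string *name = NULL;
        if (aws_thread_current_name(allocator, &name) != AWS_OP_SUCCESS) {
            /* AWS_ERROR_SYS_CALL_FAILURE or AWS_ERROR_PLATFORM_NOT_SUPPORTED was raised */
            return;
        }
        if (name != NULL) { /* NULL simply means the thread has no name */
            printf("thread name: %s\n", aws_string_c_str(name));
            aws_string_destroy(name);
        }
    }
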
diff --git a/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h b/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h
index c2a55372cf..9677276b87 100644
--- a/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h
+++ b/contrib/restricted/aws/aws-c-common/include/aws/common/uri.h
@@ -87,7 +87,7 @@ AWS_COMMON_API const struct aws_byte_cursor *aws_uri_scheme(const struct aws_uri
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_authority(const struct aws_uri *uri);
/**
- * Returns the path portion of the uri. If the original value was empty, this value will be "/".
+ * Returns the path portion of the uri, including any leading '/'. If not present, this value will be empty.
*/
AWS_COMMON_API const struct aws_byte_cursor *aws_uri_path(const struct aws_uri *uri);
diff --git a/contrib/restricted/aws/aws-c-common/source/allocator.c b/contrib/restricted/aws/aws-c-common/source/allocator.c
index a672662470..67e8695996 100644
--- a/contrib/restricted/aws/aws-c-common/source/allocator.c
+++ b/contrib/restricted/aws/aws-c-common/source/allocator.c
@@ -37,7 +37,7 @@ bool aws_allocator_is_valid(const struct aws_allocator *alloc) {
static void *s_default_malloc(struct aws_allocator *allocator, size_t size) {
(void)allocator;
/* larger allocations should be aligned so that AVX and friends can avoid
- * the extra preable during unaligned versions of memcpy/memset on big buffers
+ * the extra preamble during unaligned versions of memcpy/memset on big buffers
* This will also accelerate hardware CRC and SHA on ARM chips
*
* 64 byte alignment for > page allocations on 64 bit systems
diff --git a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
index ffc6e0d4c9..98c51b88d1 100644
--- a/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
+++ b/contrib/restricted/aws/aws-c-common/source/arch/intel/cpuid.c
@@ -85,11 +85,26 @@ static bool s_has_avx2(void) {
return true;
}
+static bool s_has_bmi2(void) {
+ uint32_t abcd[4];
+
+ /* Check BMI2:
+ * CPUID.(EAX=07H, ECX=0H):EBX.BMI2[bit 8]==1 */
+ uint32_t bmi2_mask = (1 << 8);
+ aws_run_cpuid(7, 0, abcd);
+ if ((abcd[1] & bmi2_mask) != bmi2_mask) {
+ return false;
+ }
+
+ return true;
+}
+
has_feature_fn *s_check_cpu_feature[AWS_CPU_FEATURE_COUNT] = {
[AWS_CPU_FEATURE_CLMUL] = s_has_clmul,
[AWS_CPU_FEATURE_SSE_4_1] = s_has_sse41,
[AWS_CPU_FEATURE_SSE_4_2] = s_has_sse42,
[AWS_CPU_FEATURE_AVX2] = s_has_avx2,
+ [AWS_CPU_FEATURE_BMI2] = s_has_bmi2,
};
bool aws_cpu_has_feature(enum aws_cpu_feature_name feature_name) {
diff --git a/contrib/restricted/aws/aws-c-common/source/array_list.c b/contrib/restricted/aws/aws-c-common/source/array_list.c
index 7e05636a75..45c8a3cc76 100644
--- a/contrib/restricted/aws/aws-c-common/source/array_list.c
+++ b/contrib/restricted/aws/aws-c-common/source/array_list.c
@@ -10,7 +10,7 @@
int aws_array_list_calc_necessary_size(struct aws_array_list *AWS_RESTRICT list, size_t index, size_t *necessary_size) {
AWS_PRECONDITION(aws_array_list_is_valid(list));
- size_t index_inc;
+ size_t index_inc = 0;
if (aws_add_size_checked(index, 1, &index_inc)) {
AWS_POSTCONDITION(aws_array_list_is_valid(list));
return AWS_OP_ERR;
@@ -199,7 +199,8 @@ void aws_array_list_swap(struct aws_array_list *AWS_RESTRICT list, size_t a, siz
return;
}
- void *item1 = NULL, *item2 = NULL;
+ void *item1 = NULL;
+ void *item2 = NULL;
aws_array_list_get_at_ptr(list, &item1, a);
aws_array_list_get_at_ptr(list, &item2, b);
aws_array_list_mem_swap(item1, item2, list->item_size);
diff --git a/contrib/restricted/aws/aws-c-common/source/bus.c b/contrib/restricted/aws/aws-c-common/source/bus.c
deleted file mode 100644
index 68bb29deda..0000000000
--- a/contrib/restricted/aws/aws-c-common/source/bus.c
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
- *
- * Licensed under the Apache License, Version 2.0 (the "License").
- * You may not use this file except in compliance with the License.
- * A copy of the License is located at
- *
- * http://aws.amazon.com/apache2.0
- *
- * or in the "license" file accompanying this file. This file is distributed
- * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
- * express or implied. See the License for the specific language governing
- * permissions and limitations under the License.
- */
-
-#include <aws/common/bus.h>
-
-#include <aws/common/allocator.h>
-#include <aws/common/atomics.h>
-#include <aws/common/byte_buf.h>
-#include <aws/common/condition_variable.h>
-#include <aws/common/hash_table.h>
-#include <aws/common/linked_list.h>
-#include <aws/common/logging.h>
-#include <aws/common/mutex.h>
-#include <aws/common/thread.h>
-
-#include <inttypes.h>
-
-#ifdef _MSC_VER
-# pragma warning(push)
-# pragma warning(disable : 4204) /* nonstandard extension used: non-constant aggregate initializer */
-#endif
-
-struct aws_bus {
- struct aws_allocator *allocator;
-
- /* vtable and additional data structures for delivery policy */
- void *impl;
-};
-
-/* MUST be the first member of any impl to allow blind casting */
-struct bus_vtable {
- void (*clean_up)(struct aws_bus *bus);
-
- int (*send)(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *));
-
- int (*subscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data);
-
- void (*unsubscribe)(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data);
-};
-
-/* each bound callback is stored as a bus_listener in the slots table */
-struct bus_listener {
- struct aws_linked_list_node list_node;
- void *user_data;
- aws_bus_listener_fn *deliver;
-};
-
-/* value type stored in each slot in the slots table in a bus */
-struct listener_list {
- struct aws_allocator *allocator;
- struct aws_linked_list listeners;
-};
-
-/* find a listener list (or NULL) by address */
-static struct listener_list *bus_find_listeners(struct aws_hash_table *slots, uint64_t address) {
- struct aws_hash_element *elem = NULL;
- if (aws_hash_table_find(slots, (void *)(uintptr_t)address, &elem)) {
- return NULL;
- }
-
- if (!elem) {
- return NULL;
- }
-
- struct listener_list *list = elem->value;
- return list;
-}
-
-/* find a listener list by address, or create/insert/return a new one */
-static struct listener_list *bus_find_or_create_listeners(
- struct aws_allocator *allocator,
- struct aws_hash_table *slots,
- uint64_t address) {
- struct listener_list *list = bus_find_listeners(slots, address);
- if (list) {
- return list;
- }
-
- list = aws_mem_calloc(allocator, 1, sizeof(struct listener_list));
- list->allocator = allocator;
- aws_linked_list_init(&list->listeners);
- aws_hash_table_put(slots, (void *)(uintptr_t)address, list, NULL);
- return list;
-}
-
-static void s_bus_deliver_msg_to_slot(
- struct aws_bus *bus,
- uint64_t slot,
- uint64_t address,
- struct aws_hash_table *slots,
- const void *payload) {
- (void)bus;
- struct listener_list *list = bus_find_listeners(slots, slot);
- if (!list) {
- return;
- }
- struct aws_linked_list_node *node = aws_linked_list_begin(&list->listeners);
- for (; node != aws_linked_list_end(&list->listeners); node = aws_linked_list_next(node)) {
- struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node);
- listener->deliver(address, payload, listener->user_data);
- }
-}
-
-/* common delivery logic */
-static void s_bus_deliver_msg(
- struct aws_bus *bus,
- uint64_t address,
- struct aws_hash_table *slots,
- const void *payload) {
- s_bus_deliver_msg_to_slot(bus, AWS_BUS_ADDRESS_ALL, address, slots, payload);
- s_bus_deliver_msg_to_slot(bus, address, address, slots, payload);
-}
-
-/* common subscribe logic */
-static int s_bus_subscribe(
- struct aws_bus *bus,
- uint64_t address,
- struct aws_hash_table *slots,
- aws_bus_listener_fn *callback,
- void *user_data) {
-
- if (address == AWS_BUS_ADDRESS_CLOSE) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot directly subscribe to AWS_BUS_ADDRESS_CLOSE(0)");
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- struct listener_list *list = bus_find_or_create_listeners(bus->allocator, slots, address);
- struct bus_listener *listener = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_listener));
- listener->deliver = callback;
- listener->user_data = user_data;
- aws_linked_list_push_back(&list->listeners, &listener->list_node);
-
- return AWS_OP_SUCCESS;
-}
-
-/* common unsubscribe logic */
-static void s_bus_unsubscribe(
- struct aws_bus *bus,
- uint64_t address,
- struct aws_hash_table *slots,
- aws_bus_listener_fn *callback,
- void *user_data) {
- (void)bus;
-
- if (address == AWS_BUS_ADDRESS_CLOSE) {
- AWS_LOGF_WARN(AWS_LS_COMMON_BUS, "Attempted to unsubscribe from invalid address AWS_BUS_ADDRESS_CLOSE")
- return;
- }
-
- struct listener_list *list = bus_find_listeners(slots, address);
- if (!list) {
- return;
- }
-
- struct aws_linked_list_node *node;
- for (node = aws_linked_list_begin(&list->listeners); node != aws_linked_list_end(&list->listeners);
- node = aws_linked_list_next(node)) {
-
- struct bus_listener *listener = AWS_CONTAINER_OF(node, struct bus_listener, list_node);
- if (listener->deliver == callback && listener->user_data == user_data) {
- aws_linked_list_remove(node);
- aws_mem_release(list->allocator, listener);
- return;
- }
- }
-}
-
-/* destructor for listener lists in the slots tables */
-void s_bus_destroy_listener_list(void *data) {
- struct listener_list *list = data;
- AWS_PRECONDITION(list->allocator);
- /* call all listeners with an AWS_BUS_ADDRESS_CLOSE message type to clean up */
- while (!aws_linked_list_empty(&list->listeners)) {
- struct aws_linked_list_node *back = aws_linked_list_back(&list->listeners);
- struct bus_listener *listener = AWS_CONTAINER_OF(back, struct bus_listener, list_node);
- listener->deliver(AWS_BUS_ADDRESS_CLOSE, NULL, listener->user_data);
- aws_linked_list_pop_back(&list->listeners);
- aws_mem_release(list->allocator, listener);
- }
- aws_mem_release(list->allocator, list);
-}
-
-/*
- * AWS_BUS_SYNC implementation
- */
-struct bus_sync_impl {
- struct bus_vtable vtable;
- struct {
- /* Map of address -> list of listeners */
- struct aws_hash_table table;
- } slots;
-};
-
-static void s_bus_sync_clean_up(struct aws_bus *bus) {
- struct bus_sync_impl *impl = bus->impl;
- aws_hash_table_clean_up(&impl->slots.table);
- aws_mem_release(bus->allocator, impl);
-}
-
-static int s_bus_sync_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
- struct bus_sync_impl *impl = bus->impl;
- s_bus_deliver_msg(bus, address, &impl->slots.table, payload);
- if (destructor) {
- destructor(payload);
- }
- return AWS_OP_SUCCESS;
-}
-
-static int s_bus_sync_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *callback, void *user_data) {
- struct bus_sync_impl *impl = bus->impl;
- return s_bus_subscribe(bus, address, &impl->slots.table, callback, user_data);
-}
-
-static void s_bus_sync_unsubscribe(
- struct aws_bus *bus,
- uint64_t address,
- aws_bus_listener_fn *callback,
- void *user_data) {
- struct bus_sync_impl *impl = bus->impl;
- s_bus_unsubscribe(bus, address, &impl->slots.table, callback, user_data);
-}
-
-static struct bus_vtable bus_sync_vtable = {
- .clean_up = s_bus_sync_clean_up,
- .send = s_bus_sync_send,
- .subscribe = s_bus_sync_subscribe,
- .unsubscribe = s_bus_sync_unsubscribe,
-};
-
-static void s_bus_sync_init(struct aws_bus *bus, const struct aws_bus_options *options) {
- (void)options;
-
- struct bus_sync_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_sync_impl));
- impl->vtable = bus_sync_vtable;
-
- if (aws_hash_table_init(
- &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) {
- goto error;
- }
-
- return;
-
-error:
- aws_mem_release(bus->allocator, impl);
-}
-
-/*
- * AWS_BUS_ASYNC implementation
- */
-struct bus_async_impl {
- struct bus_vtable vtable;
- struct {
- /* Map of address -> list of listeners */
- struct aws_hash_table table;
- } slots;
-
- /* Queue of bus_messages to deliver */
- struct {
- struct aws_mutex mutex;
- /* backing memory for the message free list */
- void *buffer;
- void *buffer_end; /* 1 past the end of buffer */
- /* message free list */
- struct aws_linked_list free; /* struct bus_message */
- /* message delivery queue */
- struct aws_linked_list msgs; /* struct bus_message */
- /* list of pending adds/removes of listeners */
- struct aws_linked_list subs; /* struct pending_listener */
- } queue;
-
- /* dispatch thread */
- struct {
- struct aws_thread thread;
- struct aws_condition_variable notify;
- bool running;
- struct aws_atomic_var started;
- struct aws_atomic_var exited;
- } dispatch;
-
- bool reliable;
-};
-
-/* represents a message in the queue on impls that queue */
-struct bus_message {
- struct aws_linked_list_node list_node;
- uint64_t address;
- void *payload;
-
- void (*destructor)(void *);
-};
-
-struct pending_listener {
- struct aws_linked_list_node list_node;
- uint64_t address;
- aws_bus_listener_fn *listener;
- void *user_data;
- uint32_t add : 1;
- uint32_t remove : 1;
-};
-
-static void s_bus_message_clean_up(struct bus_message *msg) {
- if (msg->destructor) {
- msg->destructor(msg->payload);
- }
- msg->destructor = NULL;
- msg->payload = NULL;
-}
-
-/* Assumes the caller holds the lock */
-static void s_bus_async_free_message(struct aws_bus *bus, struct bus_message *msg) {
- struct bus_async_impl *impl = bus->impl;
- s_bus_message_clean_up(msg);
- if ((void *)msg >= impl->queue.buffer && (void *)msg < impl->queue.buffer_end) {
- AWS_ZERO_STRUCT(*msg);
- aws_linked_list_push_back(&impl->queue.free, &msg->list_node);
- return;
- }
- aws_mem_release(bus->allocator, msg);
-}
-
-/* Assumes the caller holds the lock */
-struct bus_message *s_bus_async_alloc_message(struct aws_bus *bus) {
- struct bus_async_impl *impl = bus->impl;
-
- /* try the free list first */
- if (!aws_linked_list_empty(&impl->queue.free)) {
- struct aws_linked_list_node *msg_node = aws_linked_list_pop_back(&impl->queue.free);
- struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
- return msg;
- }
-
- /* unreliable will re-use the oldest message */
- if (!impl->reliable) {
- struct aws_linked_list_node *msg_node = aws_linked_list_pop_front(&impl->queue.msgs);
- struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
- s_bus_async_free_message(bus, msg);
- return s_bus_async_alloc_message(bus);
- }
-
- return aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_message));
-}
-
-/*
- * resolve all adds and removes of listeners, in FIFO order
- * NOTE: expects mutex to be held by caller
- */
-static void s_bus_apply_listeners(struct aws_bus *bus, struct aws_linked_list *pending_subs) {
- struct bus_async_impl *impl = bus->impl;
- while (!aws_linked_list_empty(pending_subs)) {
- struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_subs);
- struct pending_listener *listener = AWS_CONTAINER_OF(node, struct pending_listener, list_node);
- if (listener->add) {
- s_bus_subscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data);
- } else if (listener->remove) {
- s_bus_unsubscribe(bus, listener->address, &impl->slots.table, listener->listener, listener->user_data);
- }
- aws_mem_release(bus->allocator, listener);
- }
-}
-
-static void s_bus_async_deliver_messages(struct aws_bus *bus, struct aws_linked_list *pending_msgs) {
- struct bus_async_impl *impl = bus->impl;
- struct aws_linked_list_node *msg_node = aws_linked_list_begin(pending_msgs);
- for (; msg_node != aws_linked_list_end(pending_msgs); msg_node = aws_linked_list_next(msg_node)) {
- struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
- s_bus_deliver_msg(bus, msg->address, &impl->slots.table, msg->payload);
- s_bus_message_clean_up(msg);
- }
-
- /* push all pending messages back on the free list */
- aws_mutex_lock(&impl->queue.mutex);
- {
- while (!aws_linked_list_empty(pending_msgs)) {
- msg_node = aws_linked_list_pop_front(pending_msgs);
- struct bus_message *msg = AWS_CONTAINER_OF(msg_node, struct bus_message, list_node);
- s_bus_async_free_message(bus, msg);
- }
- }
- aws_mutex_unlock(&impl->queue.mutex);
-}
-
-static void s_bus_async_clean_up(struct aws_bus *bus) {
- struct bus_async_impl *impl = bus->impl;
-
- /* shut down delivery thread, clean up dispatch */
- AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: starting final drain", (void *)bus);
- aws_mutex_lock(&impl->queue.mutex);
- impl->dispatch.running = false;
- aws_mutex_unlock(&impl->queue.mutex);
- aws_condition_variable_notify_one(&impl->dispatch.notify);
- /* Spin wait for the final drain and dispatch thread to complete */
- while (!aws_atomic_load_int(&impl->dispatch.exited)) {
- aws_thread_current_sleep(1000 * 1000); /* 1 microsecond */
- }
- AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus: %p clean_up: finished final drain", (void *)bus);
- aws_thread_join(&impl->dispatch.thread);
- aws_thread_clean_up(&impl->dispatch.thread);
- aws_condition_variable_clean_up(&impl->dispatch.notify);
-
- /* should be impossible for subs or msgs to remain after final drain */
- AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.msgs));
- AWS_FATAL_ASSERT(aws_linked_list_empty(&impl->queue.subs));
-
- /* this frees everything that the free/msgs lists point to */
- if (impl->queue.buffer) {
- aws_mem_release(bus->allocator, impl->queue.buffer);
- }
-
- aws_mutex_clean_up(&impl->queue.mutex);
-
- aws_hash_table_clean_up(&impl->slots.table);
- aws_mem_release(bus->allocator, impl);
-}
-
-static bool s_bus_async_should_wake_up(void *user_data) {
- struct bus_async_impl *impl = user_data;
- return !impl->dispatch.running || !aws_linked_list_empty(&impl->queue.subs) ||
- !aws_linked_list_empty(&impl->queue.msgs);
-}
-
-static bool s_bus_async_is_running(struct bus_async_impl *impl) {
- aws_mutex_lock(&impl->queue.mutex);
- bool running = impl->dispatch.running;
- aws_mutex_unlock(&impl->queue.mutex);
- return running;
-}
-
-/* Async bus delivery thread loop */
-static void s_bus_async_deliver(void *user_data) {
- struct aws_bus *bus = user_data;
- struct bus_async_impl *impl = bus->impl;
-
- aws_atomic_store_int(&impl->dispatch.started, 1);
- AWS_LOGF_DEBUG(AWS_LS_COMMON_BUS, "bus %p: delivery thread loop started", (void *)bus);
-
- /* once shutdown has been triggered, need to drain one more time to ensure all queues are empty */
- int pending_drains = 1;
- do {
- struct aws_linked_list pending_msgs;
- aws_linked_list_init(&pending_msgs);
-
- struct aws_linked_list pending_subs;
- aws_linked_list_init(&pending_subs);
-
- aws_mutex_lock(&impl->queue.mutex);
- {
- aws_condition_variable_wait_pred(
- &impl->dispatch.notify, &impl->queue.mutex, s_bus_async_should_wake_up, impl);
-
- /* copy out any queued subs/unsubs */
- aws_linked_list_swap_contents(&impl->queue.subs, &pending_subs);
- /* copy out any queued messages */
- aws_linked_list_swap_contents(&impl->queue.msgs, &pending_msgs);
- }
- aws_mutex_unlock(&impl->queue.mutex);
-
- /* first resolve subs/unsubs */
- if (!aws_linked_list_empty(&pending_subs)) {
- s_bus_apply_listeners(bus, &pending_subs);
- }
-
- /* Then deliver queued messages */
- if (!aws_linked_list_empty(&pending_msgs)) {
- s_bus_async_deliver_messages(bus, &pending_msgs);
- }
- } while (s_bus_async_is_running(impl) || pending_drains--);
-
- /* record that the dispatch thread is done */
- aws_atomic_store_int(&impl->dispatch.exited, 1);
-}
-
-int s_bus_async_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
- struct bus_async_impl *impl = bus->impl;
-
- aws_mutex_lock(&impl->queue.mutex);
- {
- if (!impl->dispatch.running) {
- AWS_LOGF_WARN(
- AWS_LS_COMMON_BUS, "bus %p: message sent after clean_up: address: %" PRIu64 "", (void *)bus, address);
- aws_mutex_unlock(&impl->queue.mutex);
- return aws_raise_error(AWS_ERROR_INVALID_STATE);
- }
-
- struct bus_message *msg = s_bus_async_alloc_message(bus);
- msg->address = address;
- msg->payload = payload;
- msg->destructor = destructor;
-
- /* push the message onto the delivery queue */
- aws_linked_list_push_back(&impl->queue.msgs, &msg->list_node);
- }
- aws_mutex_unlock(&impl->queue.mutex);
-
- /* notify the delivery thread to wake up */
- aws_condition_variable_notify_one(&impl->dispatch.notify);
-
- return AWS_OP_SUCCESS;
-}
-
-int s_bus_async_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
- struct bus_async_impl *impl = bus->impl;
-
- if (address == AWS_BUS_ADDRESS_CLOSE) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot subscribe to AWS_BUS_ADDRESS_CLOSE");
- return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
- }
-
- aws_mutex_lock(&impl->queue.mutex);
- {
- if (!impl->dispatch.running) {
- AWS_LOGF_WARN(
- AWS_LS_COMMON_BUS,
- "bus %p: subscribe requested after clean_up: address: %" PRIu64 "",
- (void *)bus,
- address);
- aws_mutex_unlock(&impl->queue.mutex);
- return aws_raise_error(AWS_ERROR_INVALID_STATE);
- }
-
- struct pending_listener *sub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener));
- sub->address = address;
- sub->listener = listener;
- sub->user_data = user_data;
- sub->add = true;
- aws_linked_list_push_back(&impl->queue.subs, &sub->list_node);
- }
- aws_mutex_unlock(&impl->queue.mutex);
-
- /* notify the delivery thread to wake up */
- aws_condition_variable_notify_one(&impl->dispatch.notify);
- return AWS_OP_SUCCESS;
-}
-
-void s_bus_async_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
- struct bus_async_impl *impl = bus->impl;
-
- if (address == AWS_BUS_ADDRESS_CLOSE) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_BUS, "Cannot unsubscribe from AWS_BUS_ADDRESS_CLOSE");
- return;
- }
-
- aws_mutex_lock(&impl->queue.mutex);
- {
- if (!impl->dispatch.running) {
- AWS_LOGF_WARN(
- AWS_LS_COMMON_BUS,
- "bus %p: unsubscribe requested after clean_up: address: %" PRIu64 "",
- (void *)bus,
- address);
- aws_mutex_unlock(&impl->queue.mutex);
- return;
- }
-
- struct pending_listener *unsub = aws_mem_calloc(bus->allocator, 1, sizeof(struct pending_listener));
- unsub->address = address;
- unsub->listener = listener;
- unsub->user_data = user_data;
- unsub->remove = true;
- aws_linked_list_push_back(&impl->queue.subs, &unsub->list_node);
- }
- aws_mutex_unlock(&impl->queue.mutex);
-
- /* notify the delivery thread to wake up */
- aws_condition_variable_notify_one(&impl->dispatch.notify);
-}
-
-static struct bus_vtable bus_async_vtable = {
- .clean_up = s_bus_async_clean_up,
- .send = s_bus_async_send,
- .subscribe = s_bus_async_subscribe,
- .unsubscribe = s_bus_async_unsubscribe,
-};
-
-static void s_bus_async_init(struct aws_bus *bus, const struct aws_bus_options *options) {
- struct bus_async_impl *impl = bus->impl = aws_mem_calloc(bus->allocator, 1, sizeof(struct bus_async_impl));
- impl->vtable = bus_async_vtable;
- impl->reliable = (options->policy == AWS_BUS_ASYNC_RELIABLE);
-
- /* init msg queue */
- if (aws_mutex_init(&impl->queue.mutex)) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_BUS,
- "bus %p: Unable to initialize queue synchronization: %s",
- (void *)bus,
- aws_error_name(aws_last_error()));
- goto error;
- }
- aws_linked_list_init(&impl->queue.msgs);
- aws_linked_list_init(&impl->queue.free);
- aws_linked_list_init(&impl->queue.subs);
-
- /* push as many bus_messages as we can into the free list from the buffer */
- if (options->buffer_size) {
- impl->queue.buffer = aws_mem_calloc(bus->allocator, 1, options->buffer_size);
- impl->queue.buffer_end = ((uint8_t *)impl->queue.buffer) + options->buffer_size;
- const int msg_count = (int)(options->buffer_size / sizeof(struct bus_message));
- for (int msg_idx = 0; msg_idx < msg_count; ++msg_idx) {
- struct bus_message *msg = (void *)&((char *)impl->queue.buffer)[msg_idx * sizeof(struct bus_message)];
- aws_linked_list_push_back(&impl->queue.free, &msg->list_node);
- }
- }
-
- /* init subscription table */
- if (aws_hash_table_init(
- &impl->slots.table, bus->allocator, 8, aws_hash_ptr, aws_ptr_eq, NULL, s_bus_destroy_listener_list)) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_BUS,
- "bus %p: Unable to initialize bus addressing table: %s",
- (void *)bus,
- aws_error_name(aws_last_error()));
- goto error;
- }
-
- /* Setup dispatch thread */
- if (aws_condition_variable_init(&impl->dispatch.notify)) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_BUS,
- "bus %p: Unable to initialize async notify: %s",
- (void *)bus,
- aws_error_name(aws_last_error()));
- goto error;
- }
-
- if (aws_thread_init(&impl->dispatch.thread, bus->allocator)) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_BUS,
- "bus %p: Unable to initialize background thread: %s",
- (void *)bus,
- aws_error_name(aws_last_error()));
- goto error;
- }
-
- impl->dispatch.running = true;
- aws_atomic_init_int(&impl->dispatch.started, 0);
- aws_atomic_init_int(&impl->dispatch.exited, 0);
- if (aws_thread_launch(&impl->dispatch.thread, s_bus_async_deliver, bus, aws_default_thread_options())) {
- AWS_LOGF_ERROR(
- AWS_LS_COMMON_BUS,
- "bus %p: Unable to launch delivery thread: %s",
- (void *)bus,
- aws_error_name(aws_last_error()));
- goto error;
- }
-
- /* wait for dispatch thread to start before returning control */
- AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Waiting for delivery thread to start", (void *)bus);
- while (!aws_atomic_load_int(&impl->dispatch.started)) {
- aws_thread_current_sleep(1000 * 1000);
- }
- AWS_LOGF_TRACE(AWS_LS_COMMON_BUS, "bus %p: Delivery thread started", (void *)bus);
-
- return;
-
-error:
- aws_thread_clean_up(&impl->dispatch.thread);
- aws_condition_variable_clean_up(&impl->dispatch.notify);
- aws_hash_table_clean_up(&impl->slots.table);
- aws_mem_release(bus->allocator, &impl->queue.buffer);
- aws_mutex_clean_up(&impl->queue.mutex);
- aws_mem_release(bus->allocator, impl);
- bus->impl = NULL;
-}
-
-/*
- * Public API
- */
-struct aws_bus *aws_bus_new(struct aws_allocator *allocator, const struct aws_bus_options *options) {
- struct aws_bus *bus = aws_mem_calloc(allocator, 1, sizeof(struct aws_bus));
- bus->allocator = allocator;
-
- switch (options->policy) {
- case AWS_BUS_ASYNC_RELIABLE:
- case AWS_BUS_ASYNC_UNRELIABLE:
- s_bus_async_init(bus, options);
- break;
- case AWS_BUS_SYNC_RELIABLE:
- s_bus_sync_init(bus, options);
- break;
- }
-
- if (!bus->impl) {
- aws_mem_release(allocator, bus);
- return NULL;
- }
-
- return bus;
-}
-
-void aws_bus_destroy(struct aws_bus *bus) {
- struct bus_vtable *vtable = bus->impl;
- vtable->clean_up(bus);
- aws_mem_release(bus->allocator, bus);
-}
-
-int aws_bus_subscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
- struct bus_vtable *vtable = bus->impl;
- return vtable->subscribe(bus, address, listener, user_data);
-}
-
-void aws_bus_unsubscribe(struct aws_bus *bus, uint64_t address, aws_bus_listener_fn *listener, void *user_data) {
- struct bus_vtable *vtable = bus->impl;
- vtable->unsubscribe(bus, address, listener, user_data);
-}
-
-int aws_bus_send(struct aws_bus *bus, uint64_t address, void *payload, void (*destructor)(void *)) {
- struct bus_vtable *vtable = bus->impl;
- return vtable->send(bus, address, payload, destructor);
-}
-
-#ifdef _MSC_VER
-# pragma warning(pop)
-#endif
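
The hunks above delete the experimental aws_bus message bus from aws-c-common. For context, here is a minimal, hypothetical usage sketch of the API being removed; the aws_bus_listener_fn signature and the <aws/common/bus.h> header path are assumptions, since the matching header is not part of this hunk.

#include <aws/common/bus.h>
#include <stdint.h>
#include <stdio.h>

static void s_on_message(uint64_t address, const void *payload, void *user_data) {
    (void)address;
    (void)user_data;
    printf("received: %s\n", (const char *)payload);
}

int demo_bus(struct aws_allocator *allocator) {
    struct aws_bus_options options = {
        .policy = AWS_BUS_ASYNC_RELIABLE, /* background delivery thread, messages are never dropped */
        .buffer_size = 16 * 1024,         /* pre-allocates bus_message slots for the free list */
    };
    struct aws_bus *bus = aws_bus_new(allocator, &options);
    if (!bus) {
        return AWS_OP_ERR;
    }
    aws_bus_subscribe(bus, 42 /* address */, s_on_message, NULL);
    aws_bus_send(bus, 42, "hello", NULL /* static payload, no destructor needed */);
    aws_bus_unsubscribe(bus, 42, s_on_message, NULL);
    aws_bus_destroy(bus); /* performs the final drain, then joins the delivery thread */
    return AWS_OP_SUCCESS;
}
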
diff --git a/contrib/restricted/aws/aws-c-common/source/byte_buf.c b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
index f52aa16b45..b815b6bfb7 100644
--- a/contrib/restricted/aws/aws-c-common/source/byte_buf.c
+++ b/contrib/restricted/aws/aws-c-common/source/byte_buf.c
@@ -1631,7 +1631,7 @@ int aws_byte_buf_append_and_update(struct aws_byte_buf *to, struct aws_byte_curs
return AWS_OP_ERR;
}
- from_and_update->ptr = to->buffer + (to->len - from_and_update->len);
+ from_and_update->ptr = to->buffer == NULL ? NULL : to->buffer + (to->len - from_and_update->len);
return AWS_OP_SUCCESS;
}
diff --git a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
index bf2db81e0a..0699e7fbbd 100644
--- a/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
+++ b/contrib/restricted/aws/aws-c-common/source/command_line_parser.c
@@ -54,13 +54,11 @@ static const struct aws_cli_option *s_find_option_from_c_str(
const struct aws_cli_option *option = &longopts[index];
while (option->name || option->val != 0) {
- if (option->name) {
- if (option->name && !strcmp(search_for, option->name)) {
- if (longindex) {
- *longindex = index;
- }
- return option;
+ if (option->name && !strcmp(search_for, option->name)) {
+ if (longindex) {
+ *longindex = index;
}
+ return option;
}
option = &longopts[++index];
diff --git a/contrib/restricted/aws/aws-c-common/source/common.c b/contrib/restricted/aws/aws-c-common/source/common.c
index a845e22acf..062d23228d 100644
--- a/contrib/restricted/aws/aws-c-common/source/common.c
+++ b/contrib/restricted/aws/aws-c-common/source/common.c
@@ -256,6 +256,12 @@ static struct aws_error_info errors[] = {
AWS_DEFINE_ERROR_INFO_COMMON(
AWS_ERROR_PLATFORM_NOT_SUPPORTED,
"Feature not supported on this platform"),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_INVALID_UTF8,
+ "Invalid UTF-8"),
+ AWS_DEFINE_ERROR_INFO_COMMON(
+ AWS_ERROR_GET_HOME_DIRECTORY_FAILED,
+ "Failed to get home directory"),
};
/* clang-format on */
@@ -304,7 +310,7 @@ void aws_common_library_init(struct aws_allocator *allocator) {
/* libnuma defines set_mempolicy() as a WEAK symbol. Loading into the global symbol table overwrites symbols and
assumptions due to the way loaders and dlload are often implemented and those symbols are defined by things
like libpthread.so on some unix distros. Sorry about the memory usage here, but it's our only safe choice.
- Also, please don't do numa configurations if memory is your economic bottlneck. */
+ Also, please don't do numa configurations if memory is your economic bottleneck. */
g_libnuma_handle = dlopen("libnuma.so", RTLD_LOCAL);
/* turns out so versioning is really inconsistent these days */
diff --git a/contrib/restricted/aws/aws-c-common/source/date_time.c b/contrib/restricted/aws/aws-c-common/source/date_time.c
index 77ec6ae0c1..cee4a90d88 100644
--- a/contrib/restricted/aws/aws-c-common/source/date_time.c
+++ b/contrib/restricted/aws/aws-c-common/source/date_time.c
@@ -61,7 +61,7 @@ static void s_check_init_str_to_int(void) {
}
}
-/* Get the 0-11 monthy number from a string representing Month. Case insensitive and will stop on abbreviation*/
+/* Get the 0-11 monthly number from a string representing Month. Case insensitive and will stop on abbreviation*/
static int get_month_number_from_str(const char *time_string, size_t start_index, size_t stop_index) {
s_check_init_str_to_int();
diff --git a/contrib/restricted/aws/aws-c-common/source/encoding.c b/contrib/restricted/aws/aws-c-common/source/encoding.c
index 038a7d74e9..9ca5ca4fba 100644
--- a/contrib/restricted/aws/aws-c-common/source/encoding.c
+++ b/contrib/restricted/aws/aws-c-common/source/encoding.c
@@ -38,7 +38,7 @@ static inline bool aws_common_private_has_avx2(void) {
static const uint8_t *HEX_CHARS = (const uint8_t *)"0123456789abcdef";
-static const uint8_t BASE64_SENTIANAL_VALUE = 0xff;
+static const uint8_t BASE64_SENTINEL_VALUE = 0xff;
static const uint8_t BASE64_ENCODING_TABLE[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";
/* in this table, 0xDD is an invalid decoded value, if you have to do byte counting for any reason, there's 16 bytes
@@ -337,10 +337,10 @@ int aws_base64_encode(const struct aws_byte_cursor *AWS_RESTRICT to_encode, stru
return AWS_OP_SUCCESS;
}
-static inline int s_base64_get_decoded_value(unsigned char to_decode, uint8_t *value, int8_t allow_sentinal) {
+static inline int s_base64_get_decoded_value(unsigned char to_decode, uint8_t *value, int8_t allow_sentinel) {
uint8_t decode_value = BASE64_DECODING_TABLE[(size_t)to_decode];
- if (decode_value != 0xDD && (decode_value != BASE64_SENTIANAL_VALUE || allow_sentinal)) {
+ if (decode_value != 0xDD && (decode_value != BASE64_SENTINEL_VALUE || allow_sentinel)) {
*value = decode_value;
return AWS_OP_SUCCESS;
}
@@ -401,9 +401,9 @@ int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, stru
output->buffer[buffer_index++] = (uint8_t)((value1 << 2) | ((value2 >> 4) & 0x03));
- if (value3 != BASE64_SENTIANAL_VALUE) {
+ if (value3 != BASE64_SENTINEL_VALUE) {
output->buffer[buffer_index++] = (uint8_t)(((value2 << 4) & 0xF0) | ((value3 >> 2) & 0x0F));
- if (value4 != BASE64_SENTIANAL_VALUE) {
+ if (value4 != BASE64_SENTINEL_VALUE) {
output->buffer[buffer_index] = (uint8_t)((value3 & 0x03) << 6 | value4);
}
}
@@ -412,7 +412,7 @@ int aws_base64_decode(const struct aws_byte_cursor *AWS_RESTRICT to_decode, stru
return AWS_OP_SUCCESS;
}
-struct aws_utf8_validator {
+struct aws_utf8_decoder {
struct aws_allocator *alloc;
/* Value of current codepoint, updated as we read each byte */
uint32_t codepoint;
@@ -421,54 +421,66 @@ struct aws_utf8_validator {
uint32_t min;
/* Number of bytes remaining in the current codepoint */
uint8_t remaining;
+ /* Custom callback */
+ int (*on_codepoint)(uint32_t codepoint, void *user_data);
+ /* user_data for on_codepoint */
+ void *user_data;
};
-struct aws_utf8_validator *aws_utf8_validator_new(struct aws_allocator *allocator) {
- struct aws_utf8_validator *validator = aws_mem_calloc(allocator, 1, sizeof(struct aws_utf8_validator));
- validator->alloc = allocator;
- return validator;
+struct aws_utf8_decoder *aws_utf8_decoder_new(
+ struct aws_allocator *allocator,
+ const struct aws_utf8_decoder_options *options) {
+
+ struct aws_utf8_decoder *decoder = aws_mem_calloc(allocator, 1, sizeof(struct aws_utf8_decoder));
+ decoder->alloc = allocator;
+ if (options) {
+ decoder->on_codepoint = options->on_codepoint;
+ decoder->user_data = options->user_data;
+ }
+ return decoder;
}
-void aws_utf8_validator_destroy(struct aws_utf8_validator *validator) {
- if (validator) {
- aws_mem_release(validator->alloc, validator);
+void aws_utf8_decoder_destroy(struct aws_utf8_decoder *decoder) {
+ if (decoder) {
+ aws_mem_release(decoder->alloc, decoder);
}
}
-void aws_utf8_validator_reset(struct aws_utf8_validator *validator) {
- validator->codepoint = 0;
- validator->min = 0;
- validator->remaining = 0;
+void aws_utf8_decoder_reset(struct aws_utf8_decoder *decoder) {
+ decoder->codepoint = 0;
+ decoder->min = 0;
+ decoder->remaining = 0;
}
/* Why yes, this could be optimized. */
-int aws_utf8_validator_update(struct aws_utf8_validator *validator, struct aws_byte_cursor bytes) {
+int aws_utf8_decoder_update(struct aws_utf8_decoder *decoder, struct aws_byte_cursor bytes) {
+
/* We're respecting RFC-3629, which uses 1 to 4 byte sequences (never 5 or 6) */
for (size_t i = 0; i < bytes.len; ++i) {
uint8_t byte = bytes.ptr[i];
- if (validator->remaining == 0) {
+ if (decoder->remaining == 0) {
/* Check first byte of the codepoint to determine how many more bytes remain */
if ((byte & 0x80) == 0x00) {
/* 1 byte codepoints start with 0xxxxxxx */
- validator->remaining = 0;
- validator->codepoint = byte;
- validator->min = 0;
+ decoder->remaining = 0;
+ decoder->codepoint = byte;
+ decoder->min = 0;
} else if ((byte & 0xE0) == 0xC0) {
/* 2 byte codepoints start with 110xxxxx */
- validator->remaining = 1;
- validator->codepoint = byte & 0x1F;
- validator->min = 0x80;
+ decoder->remaining = 1;
+ decoder->codepoint = byte & 0x1F;
+ decoder->min = 0x80;
} else if ((byte & 0xF0) == 0xE0) {
/* 3 byte codepoints start with 1110xxxx */
- validator->remaining = 2;
- validator->codepoint = byte & 0x0F;
- validator->min = 0x800;
+ decoder->remaining = 2;
+ decoder->codepoint = byte & 0x0F;
+ decoder->min = 0x800;
} else if ((byte & 0xF8) == 0xF0) {
/* 4 byte codepoints start with 11110xxx */
- validator->remaining = 3;
- validator->codepoint = byte & 0x07;
- validator->min = 0x10000;
+ decoder->remaining = 3;
+ decoder->codepoint = byte & 0x07;
+ decoder->min = 0x10000;
} else {
return aws_raise_error(AWS_ERROR_INVALID_UTF8);
}
@@ -481,45 +493,58 @@ int aws_utf8_validator_update(struct aws_utf8_validator *validator, struct aws_b
/* Insert the 6 newly decoded bits:
* shifting left anything we've already decoded, and insert the new bits to the right */
- validator->codepoint = (validator->codepoint << 6) | (byte & 0x3F);
+ decoder->codepoint = (decoder->codepoint << 6) | (byte & 0x3F);
/* If we've decoded the whole codepoint, check it for validity
* (don't need to do these particular checks on 1 byte codepoints) */
- if (--validator->remaining == 0) {
+ if (--decoder->remaining == 0) {
/* Check that it's not "overlong" (encoded using more bytes than necessary) */
- if (validator->codepoint < validator->min) {
+ if (decoder->codepoint < decoder->min) {
return aws_raise_error(AWS_ERROR_INVALID_UTF8);
}
/* UTF-8 prohibits encoding character numbers between U+D800 and U+DFFF,
* which are reserved for use with the UTF-16 encoding form (as
* surrogate pairs) and do not directly represent characters */
- if (validator->codepoint >= 0xD800 && validator->codepoint <= 0xDFFF) {
+ if (decoder->codepoint >= 0xD800 && decoder->codepoint <= 0xDFFF) {
return aws_raise_error(AWS_ERROR_INVALID_UTF8);
}
}
}
+
+ /* Invoke user's on_codepoint callback */
+ if (decoder->on_codepoint && decoder->remaining == 0) {
+ if (decoder->on_codepoint(decoder->codepoint, decoder->user_data)) {
+ return AWS_OP_ERR;
+ }
+ }
}
return AWS_OP_SUCCESS;
}
-int aws_utf8_validator_finalize(struct aws_utf8_validator *validator) {
- bool valid = validator->remaining == 0;
- aws_utf8_validator_reset(validator);
+int aws_utf8_decoder_finalize(struct aws_utf8_decoder *decoder) {
+ bool valid = decoder->remaining == 0;
+ aws_utf8_decoder_reset(decoder);
if (AWS_LIKELY(valid)) {
return AWS_OP_SUCCESS;
}
return aws_raise_error(AWS_ERROR_INVALID_UTF8);
}
-bool aws_text_is_valid_utf8(struct aws_byte_cursor bytes) {
- struct aws_utf8_validator validator = {.remaining = 0};
- if (aws_utf8_validator_update(&validator, bytes)) {
- return false;
+int aws_decode_utf8(struct aws_byte_cursor bytes, const struct aws_utf8_decoder_options *options) {
+ struct aws_utf8_decoder decoder = {
+ .on_codepoint = options ? options->on_codepoint : NULL,
+ .user_data = options ? options->user_data : NULL,
+ };
+
+ if (aws_utf8_decoder_update(&decoder, bytes)) {
+ return AWS_OP_ERR;
}
- if (validator.remaining != 0) {
- return false;
+
+ if (aws_utf8_decoder_finalize(&decoder)) {
+ return AWS_OP_ERR;
}
- return true;
+
+ return AWS_OP_SUCCESS;
}
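
The encoding.c changes above rename the UTF-8 validator to a decoder and add an on_codepoint callback. A small sketch of the new aws_decode_utf8() convenience entry point, using only the signatures visible in this hunk (demo_count_codepoints is a hypothetical helper):

#include <aws/common/byte_buf.h>
#include <aws/common/encoding.h>

static int s_count_codepoint(uint32_t codepoint, void *user_data) {
    (void)codepoint;
    *(size_t *)user_data += 1;
    return AWS_OP_SUCCESS; /* a non-zero return aborts decoding and aws_decode_utf8() returns AWS_OP_ERR */
}

int demo_count_codepoints(const char *text, size_t *out_count) {
    *out_count = 0;
    struct aws_utf8_decoder_options options = {
        .on_codepoint = s_count_codepoint,
        .user_data = out_count,
    };
    /* Fails with AWS_ERROR_INVALID_UTF8 on malformed, overlong, or surrogate sequences. */
    return aws_decode_utf8(aws_byte_cursor_from_c_str(text), &options);
}
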
diff --git a/contrib/restricted/aws/aws-c-common/source/error.c b/contrib/restricted/aws/aws-c-common/source/error.c
index bdd4dfcd67..ad3cec8693 100644
--- a/contrib/restricted/aws/aws-c-common/source/error.c
+++ b/contrib/restricted/aws/aws-c-common/source/error.c
@@ -145,8 +145,20 @@ void aws_register_error_info(const struct aws_error_info_list *error_info) {
}
#if DEBUG_BUILD
+ /* Assert that first error has the right value */
+ const int expected_first_code = slot_index << AWS_ERROR_ENUM_STRIDE_BITS;
+ if (error_info->error_list[0].error_code != expected_first_code) {
+ fprintf(
+ stderr,
+ "Missing info: First error in list should be %d, not %d (%s)\n",
+ expected_first_code,
+ error_info->error_list[0].error_code,
+ error_info->error_list[0].literal_name);
+ AWS_FATAL_ASSERT(0);
+ }
+
/* Assert that error info entries are in the right order. */
- for (int i = 1; i < error_info->count; ++i) {
+ for (int i = 0; i < error_info->count; ++i) {
const int expected_code = min_range + i;
const struct aws_error_info *info = &error_info->error_list[i];
if (info->error_code != expected_code) {
@@ -193,13 +205,17 @@ int aws_translate_and_raise_io_error(int error_no) {
case EISDIR:
case ENAMETOOLONG:
case ENOENT:
+ case ENOTDIR:
return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
+ case EMFILE:
case ENFILE:
return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
case ENOMEM:
return aws_raise_error(AWS_ERROR_OOM);
case ENOSPC:
return aws_raise_error(AWS_ERROR_NO_SPACE);
+ case ENOTEMPTY:
+ return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY);
default:
return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
}
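
The error.c changes extend aws_translate_and_raise_io_error() with ENOTDIR, EMFILE and ENOTEMPTY, and pair with the "cache errno before potential side-effect" pattern applied throughout this update. A hypothetical sketch of that convention (header location assumed):

#include <aws/common/error.h>
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int demo_remove_dir(const char *path) {
    if (rmdir(path) != 0) {
        int errno_value = errno; /* cache before logging, which may overwrite errno */
        fprintf(stderr, "rmdir(%s) failed, errno %d\n", path, errno_value);
        /* Per the hunk above: ENOTDIR -> AWS_ERROR_FILE_INVALID_PATH,
         * ENOTEMPTY -> AWS_ERROR_DIRECTORY_NOT_EMPTY, EMFILE/ENFILE -> AWS_ERROR_MAX_FDS_EXCEEDED,
         * anything unmapped -> AWS_ERROR_SYS_CALL_FAILURE. */
        return aws_translate_and_raise_io_error(errno_value);
    }
    return AWS_OP_SUCCESS;
}
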
diff --git a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
index 8dd79bf1ec..d6e49d9a89 100644
--- a/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
+++ b/contrib/restricted/aws/aws-c-common/source/external/cJSON.c
@@ -89,12 +89,6 @@ typedef struct {
const unsigned char *json;
size_t position;
} error;
-static error global_error = { NULL, 0 };
-
-CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void)
-{
- return (const char*) (global_error.json + global_error.position);
-}
CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item)
{
@@ -1094,10 +1088,6 @@ CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer
parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } };
cJSON *item = NULL;
- /* reset error position */
- global_error.json = NULL;
- global_error.position = 0;
-
if (value == NULL || 0 == buffer_length)
{
goto fail;
@@ -1162,7 +1152,6 @@ fail:
*return_parse_end = (const char*)local_error.json + local_error.position;
}
- global_error = local_error;
}
return NULL;
diff --git a/contrib/restricted/aws/aws-c-common/source/file.c b/contrib/restricted/aws/aws-c-common/source/file.c
index 95a80b2ca5..5f490003a0 100644
--- a/contrib/restricted/aws/aws-c-common/source/file.c
+++ b/contrib/restricted/aws/aws-c-common/source/file.c
@@ -13,7 +13,14 @@
FILE *aws_fopen(const char *file_path, const char *mode) {
if (!file_path || strlen(file_path) == 0) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file %s", file_path);
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. path is empty");
+ aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
+ return NULL;
+ }
+
+ if (!mode || strlen(mode) == 0) {
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file. mode is empty");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
return NULL;
}
@@ -34,9 +41,10 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat
if (fp) {
if (fseek(fp, 0L, SEEK_END)) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno_value);
fclose(fp);
- return aws_translate_and_raise_io_error(errno);
+ return aws_translate_and_raise_io_error(errno_value);
}
size_t allocation_size = (size_t)ftell(fp) + 1;
@@ -52,16 +60,18 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat
out_buf->buffer[out_buf->len] = 0;
if (fseek(fp, 0L, SEEK_SET)) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to seek file %s with errno %d", filename, errno_value);
aws_byte_buf_clean_up(out_buf);
fclose(fp);
- return aws_translate_and_raise_io_error(errno);
+ return aws_translate_and_raise_io_error(errno_value);
}
size_t read = fread(out_buf->buffer, 1, out_buf->len, fp);
+ int errno_cpy = errno; /* Always cache errno before potential side-effect */
fclose(fp);
if (read < out_buf->len) {
- AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno);
+ AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to read file %s with errno %d", filename, errno_cpy);
aws_secure_zero(out_buf->buffer, out_buf->len);
aws_byte_buf_clean_up(out_buf);
return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
@@ -70,12 +80,7 @@ int aws_byte_buf_init_from_file(struct aws_byte_buf *out_buf, struct aws_allocat
return AWS_OP_SUCCESS;
}
- AWS_LOGF_ERROR(AWS_LS_COMMON_IO, "static: Failed to open file %s with errno %d", filename, errno);
-
- if (errno == 0) {
- return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
- }
- return aws_translate_and_raise_io_error(errno);
+ return AWS_OP_ERR;
}
bool aws_is_any_directory_separator(char value) {
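
With the file.c changes, aws_byte_buf_init_from_file() validates its arguments via aws_fopen() and reports open failures through the error already raised there. A hypothetical caller sketch; the parameter order and header paths are assumed from the hunk and the matching file.h:

#include <aws/common/byte_buf.h>
#include <aws/common/error.h>
#include <aws/common/file.h>
#include <stdio.h>

int demo_read_file(struct aws_allocator *allocator, const char *path) {
    struct aws_byte_buf contents;
    if (aws_byte_buf_init_from_file(&contents, allocator, path)) {
        fprintf(stderr, "read failed: %s\n", aws_error_name(aws_last_error()));
        return AWS_OP_ERR;
    }
    /* The implementation above NUL-terminates the buffer beyond contents.len bytes. */
    printf("read %zu bytes\n", contents.len);
    aws_byte_buf_clean_up(&contents);
    return AWS_OP_SUCCESS;
}
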
diff --git a/contrib/restricted/aws/aws-c-common/source/json.c b/contrib/restricted/aws/aws-c-common/source/json.c
index 387f41f183..0131ea116b 100644
--- a/contrib/restricted/aws/aws-c-common/source/json.c
+++ b/contrib/restricted/aws/aws-c-common/source/json.c
@@ -15,7 +15,7 @@ static struct aws_allocator *s_aws_json_module_allocator = NULL;
static bool s_aws_json_module_initialized = false;
struct aws_json_value *aws_json_value_new_string(struct aws_allocator *allocator, struct aws_byte_cursor string) {
- struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string);
+ struct aws_string *tmp = aws_string_new_from_cursor(allocator, &string);
void *ret_val = cJSON_CreateString(aws_string_c_str(tmp));
aws_string_destroy_secure(tmp);
return ret_val;
@@ -47,7 +47,7 @@ struct aws_json_value *aws_json_value_new_object(struct aws_allocator *allocator
}
int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byte_cursor *output) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (!cJSON_IsString(cjson)) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
@@ -56,7 +56,7 @@ int aws_json_value_get_string(const struct aws_json_value *value, struct aws_byt
}
int aws_json_value_get_number(const struct aws_json_value *value, double *output) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (!cJSON_IsNumber(cjson)) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
@@ -65,7 +65,7 @@ int aws_json_value_get_number(const struct aws_json_value *value, double *output
}
int aws_json_value_get_boolean(const struct aws_json_value *value, bool *output) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (!cJSON_IsBool(cjson)) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
@@ -109,7 +109,7 @@ struct aws_json_value *aws_json_value_get_from_object(const struct aws_json_valu
void *return_value = NULL;
struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
- struct cJSON *cjson = (struct cJSON *)object;
+ const struct cJSON *cjson = (const struct cJSON *)object;
if (!cJSON_IsObject(cjson)) {
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
goto done;
@@ -130,7 +130,7 @@ bool aws_json_value_has_key(const struct aws_json_value *object, struct aws_byte
struct aws_string *tmp = aws_string_new_from_cursor(s_aws_json_module_allocator, &key);
bool result = false;
- struct cJSON *cjson = (struct cJSON *)object;
+ const struct cJSON *cjson = (const struct cJSON *)object;
if (!cJSON_IsObject(cjson)) {
goto done;
}
@@ -172,7 +172,7 @@ int aws_json_const_iterate_object(
void *user_data) {
int result = AWS_OP_ERR;
- struct cJSON *cjson = (struct cJSON *)object;
+ const struct cJSON *cjson = (const struct cJSON *)object;
if (!cJSON_IsObject(cjson)) {
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
goto done;
@@ -182,7 +182,7 @@ int aws_json_const_iterate_object(
cJSON_ArrayForEach(key, cjson) {
bool should_continue = true;
struct aws_byte_cursor key_cur = aws_byte_cursor_from_c_str(key->string);
- if (on_member(&key_cur, (struct aws_json_value *)key, &should_continue, user_data)) {
+ if (on_member(&key_cur, (const struct aws_json_value *)key, &should_continue, user_data)) {
goto done;
}
@@ -214,8 +214,7 @@ int aws_json_value_add_array_element(struct aws_json_value *array, const struct
}
struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *array, size_t index) {
-
- struct cJSON *cjson = (struct cJSON *)array;
+ const struct cJSON *cjson = (const struct cJSON *)array;
if (!cJSON_IsArray(cjson)) {
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
return NULL;
@@ -230,7 +229,7 @@ struct aws_json_value *aws_json_get_array_element(const struct aws_json_value *a
}
size_t aws_json_get_array_size(const struct aws_json_value *array) {
- struct cJSON *cjson = (struct cJSON *)array;
+ const struct cJSON *cjson = (const struct cJSON *)array;
if (!cJSON_IsArray(cjson)) {
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
return 0;
@@ -259,7 +258,7 @@ int aws_json_const_iterate_array(
void *user_data) {
int result = AWS_OP_ERR;
- struct cJSON *cjson = (struct cJSON *)array;
+ const struct cJSON *cjson = (const struct cJSON *)array;
if (!cJSON_IsArray(cjson)) {
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
goto done;
@@ -269,7 +268,7 @@ int aws_json_const_iterate_array(
const cJSON *value = NULL;
cJSON_ArrayForEach(value, cjson) {
bool should_continue = true;
- if (on_value(idx, (struct aws_json_value *)value, &should_continue, user_data)) {
+ if (on_value(idx, (const struct aws_json_value *)value, &should_continue, user_data)) {
goto done;
}
@@ -286,13 +285,13 @@ done:
}
bool aws_json_value_compare(const struct aws_json_value *a, const struct aws_json_value *b, bool is_case_sensitive) {
- struct cJSON *cjson_a = (struct cJSON *)a;
- struct cJSON *cjson_b = (struct cJSON *)b;
+ const struct cJSON *cjson_a = (const struct cJSON *)a;
+ const struct cJSON *cjson_b = (const struct cJSON *)b;
return cJSON_Compare(cjson_a, cjson_b, is_case_sensitive);
}
struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
return NULL;
@@ -307,7 +306,7 @@ struct aws_json_value *aws_json_value_duplicate(const struct aws_json_value *val
}
bool aws_json_value_is_string(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return false;
}
@@ -315,7 +314,7 @@ bool aws_json_value_is_string(const struct aws_json_value *value) {
}
bool aws_json_value_is_number(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return false;
}
@@ -323,7 +322,7 @@ bool aws_json_value_is_number(const struct aws_json_value *value) {
}
bool aws_json_value_is_array(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return false;
}
@@ -331,7 +330,7 @@ bool aws_json_value_is_array(const struct aws_json_value *value) {
}
bool aws_json_value_is_boolean(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return false;
}
@@ -339,7 +338,7 @@ bool aws_json_value_is_boolean(const struct aws_json_value *value) {
}
bool aws_json_value_is_null(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return false;
}
@@ -347,7 +346,7 @@ bool aws_json_value_is_null(const struct aws_json_value *value) {
}
bool aws_json_value_is_object(const struct aws_json_value *value) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return false;
}
@@ -391,7 +390,7 @@ void aws_json_value_destroy(struct aws_json_value *value) {
}
int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct aws_byte_buf *output) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
@@ -409,7 +408,7 @@ int aws_byte_buf_append_json_string(const struct aws_json_value *value, struct a
}
int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value, struct aws_byte_buf *output) {
- struct cJSON *cjson = (struct cJSON *)value;
+ const struct cJSON *cjson = (const struct cJSON *)value;
if (cJSON_IsInvalid(cjson)) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
@@ -427,7 +426,7 @@ int aws_byte_buf_append_json_string_formatted(const struct aws_json_value *value
}
struct aws_json_value *aws_json_value_new_from_string(struct aws_allocator *allocator, struct aws_byte_cursor string) {
- struct aws_string *tmp = aws_string_new_from_cursor((struct aws_allocator *)allocator, &string);
+ struct aws_string *tmp = aws_string_new_from_cursor(allocator, &string);
struct cJSON *cjson = cJSON_Parse(aws_string_c_str(tmp));
aws_string_destroy_secure(tmp);
return (void *)cjson;
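
The json.c changes are const-correctness fixes to the cJSON wrappers. A hypothetical sketch of the wrapper calls they touch, assuming the JSON module has already been initialized elsewhere:

#include <aws/common/byte_buf.h>
#include <aws/common/json.h>
#include <stdio.h>

int demo_read_region(struct aws_allocator *allocator) {
    struct aws_byte_cursor doc = aws_byte_cursor_from_c_str("{\"region\": \"eu-north-1\"}");
    struct aws_json_value *root = aws_json_value_new_from_string(allocator, doc);
    if (!root) {
        return AWS_OP_ERR; /* parse failure */
    }
    struct aws_byte_cursor region = {0};
    struct aws_json_value *node =
        aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("region"));
    if (node && aws_json_value_get_string(node, &region) == AWS_OP_SUCCESS) {
        /* The cursor points into the cJSON node, so use it before destroying root. */
        printf("region: %.*s\n", (int)region.len, (const char *)region.ptr);
    }
    aws_json_value_destroy(root);
    return AWS_OP_SUCCESS;
}
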
diff --git a/contrib/restricted/aws/aws-c-common/source/log_formatter.c b/contrib/restricted/aws/aws-c-common/source/log_formatter.c
index 513a7f87b4..d4be0c0c6d 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_formatter.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_formatter.c
@@ -16,7 +16,7 @@
* Default formatter implementation
*/
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(disable : 4204) /* non-constant aggregate initializer */
#endif
diff --git a/contrib/restricted/aws/aws-c-common/source/log_writer.c b/contrib/restricted/aws/aws-c-common/source/log_writer.c
index 6eea2fc3c5..5f5bc4f6fd 100644
--- a/contrib/restricted/aws/aws-c-common/source/log_writer.c
+++ b/contrib/restricted/aws/aws-c-common/source/log_writer.c
@@ -27,7 +27,8 @@ static int s_aws_file_writer_write(struct aws_log_writer *writer, const struct a
size_t length = output->len;
if (fwrite(output->bytes, 1, length, impl->log_file) < length) {
- return aws_translate_and_raise_io_error(errno);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
+ return aws_translate_and_raise_io_error(errno_value);
}
return AWS_OP_SUCCESS;
@@ -76,7 +77,7 @@ static int s_aws_file_writer_init_internal(
impl->log_file = aws_fopen(file_name_to_open, "a+");
if (impl->log_file == NULL) {
aws_mem_release(allocator, impl);
- return aws_translate_and_raise_io_error(errno);
+ return AWS_OP_ERR;
}
impl->close_file_on_cleanup = true;
} else {
diff --git a/contrib/restricted/aws/aws-c-common/source/logging.c b/contrib/restricted/aws/aws-c-common/source/logging.c
index f9a6c5a2e6..fdc29576d8 100644
--- a/contrib/restricted/aws/aws-c-common/source/logging.c
+++ b/contrib/restricted/aws/aws-c-common/source/logging.c
@@ -15,7 +15,7 @@
#include <errno.h>
#include <stdarg.h>
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(disable : 4204) /* non-constant aggregate initializer */
#endif
@@ -468,7 +468,7 @@ static int s_noalloc_stderr_logger_log(
va_list format_args;
va_start(format_args, format);
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4221) /* allow struct member to reference format_buffer */
#endif
@@ -484,7 +484,7 @@ static int s_noalloc_stderr_logger_log(
.amount_written = 0,
};
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(pop) /* disallow struct member to reference local value */
#endif
@@ -502,7 +502,8 @@ static int s_noalloc_stderr_logger_log(
int write_result = AWS_OP_SUCCESS;
if (fwrite(format_buffer, 1, format_data.amount_written, impl->file) < format_data.amount_written) {
- aws_translate_and_raise_io_error(errno);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
+ aws_translate_and_raise_io_error(errno_value);
write_result = AWS_OP_ERR;
}
@@ -561,6 +562,10 @@ int aws_logger_init_noalloc(
} else { /* _MSC_VER */
if (options->filename != NULL) {
impl->file = aws_fopen(options->filename, "w");
+ if (!impl->file) {
+ aws_mem_release(allocator, impl);
+ return AWS_OP_ERR;
+ }
impl->should_close = true;
} else {
impl->file = stderr;
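
The logging.c change makes aws_logger_init_noalloc() fail cleanly when the log file cannot be opened, instead of continuing with a NULL FILE pointer. A hypothetical init sketch; the option struct and field names are taken from aws/common/logging.h, not from this hunk:

#include <aws/common/logging.h>

int demo_logger(struct aws_allocator *allocator) {
    static struct aws_logger s_logger;
    struct aws_logger_standard_options options = {
        .level = AWS_LL_TRACE,
        .filename = "app.log", /* an unopenable path now causes init to fail */
    };
    if (aws_logger_init_noalloc(&s_logger, allocator, &options)) {
        return AWS_OP_ERR; /* aws_fopen() has already raised and logged the error */
    }
    aws_logger_set(&s_logger);
    return AWS_OP_SUCCESS;
}
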
diff --git a/contrib/restricted/aws/aws-c-common/source/memtrace.c b/contrib/restricted/aws/aws-c-common/source/memtrace.c
index 7362e07a30..651fd93612 100644
--- a/contrib/restricted/aws/aws-c-common/source/memtrace.c
+++ b/contrib/restricted/aws/aws-c-common/source/memtrace.c
@@ -5,19 +5,19 @@
#include <aws/common/atomics.h>
#include <aws/common/byte_buf.h>
+#include <aws/common/clock.h>
#include <aws/common/hash_table.h>
#include <aws/common/logging.h>
#include <aws/common/mutex.h>
#include <aws/common/priority_queue.h>
#include <aws/common/string.h>
#include <aws/common/system_info.h>
-#include <aws/common/time.h>
/* describes a single live allocation.
* allocated by aws_default_allocator() */
struct alloc_info {
size_t size;
- time_t time;
+ uint64_t time;
uint64_t stack; /* hash of stack frame pointers */
};
@@ -43,8 +43,8 @@ struct stack_trace {
#endif
/* Tracking structure, used as the allocator impl.
- * This structure, and all its bookkeeping datastructures, are created with the aws_default_allocator().
- * This is not customizeable because it's too expensive for every little allocation to store
+ * This structure, and all its bookkeeping data structures, are created with the aws_default_allocator().
+ * This is not customizable because it's too expensive for every little allocation to store
* a pointer back to its original allocator. */
struct alloc_tracer {
struct aws_allocator *traced_allocator; /* underlying allocator */
@@ -110,7 +110,7 @@ static void s_alloc_tracer_init(
if (frames_per_stack > 128) {
frames_per_stack = 128;
}
- tracer->frames_per_stack = (frames_per_stack) ? frames_per_stack : 8;
+ tracer->frames_per_stack = frames_per_stack ? frames_per_stack : 8;
AWS_FATAL_ASSERT(
AWS_OP_SUCCESS ==
aws_hash_table_init(
@@ -128,7 +128,7 @@ static void s_alloc_tracer_track(struct alloc_tracer *tracer, void *ptr, size_t
struct alloc_info *alloc = aws_mem_calloc(aws_default_allocator(), 1, sizeof(struct alloc_info));
AWS_FATAL_ASSERT(alloc);
alloc->size = size;
- alloc->time = time(NULL);
+ aws_high_res_clock_get_ticks(&alloc->time);
if (tracer->level == AWS_MEMTRACE_STACKS) {
/* capture stack frames, skip 2 for this function and the allocation vtable function */
@@ -300,14 +300,14 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
size_t num_allocs = aws_hash_table_get_entry_count(&tracer->allocs);
AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LS_COMMON_MEMTRACE, "################################################################################");
AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #\n");
+ AWS_LS_COMMON_MEMTRACE, "# BEGIN MEMTRACE DUMP #");
AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LS_COMMON_MEMTRACE, "################################################################################");
AWS_LOGF_TRACE(
AWS_LS_COMMON_MEMTRACE,
- "tracer: %zu bytes still allocated in %zu allocations\n",
+ "tracer: %zu bytes still allocated in %zu allocations",
aws_atomic_load_int(&tracer->allocated),
num_allocs);
@@ -333,21 +333,24 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
&allocs, aws_default_allocator(), num_allocs, sizeof(struct alloc_info *), s_alloc_compare));
aws_hash_table_foreach(&tracer->allocs, s_insert_allocs, &allocs);
/* dump allocs by time */
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Leaks in order of allocation:");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
while (aws_priority_queue_size(&allocs)) {
struct alloc_info *alloc = NULL;
aws_priority_queue_pop(&allocs, &alloc);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes\n", alloc->size);
if (alloc->stack) {
struct aws_hash_element *item = NULL;
AWS_FATAL_ASSERT(
AWS_OP_SUCCESS == aws_hash_table_find(&stack_info, (void *)(uintptr_t)alloc->stack, &item));
struct stack_metadata *stack = item->value;
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, " stacktrace:\n%s\n", (const char *)aws_string_bytes(stack->trace));
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "ALLOC %zu bytes, stacktrace:\n%s\n",
+ alloc->size,
+ aws_string_c_str(stack->trace));
+ } else {
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "ALLOC %zu bytes", alloc->size);
}
}
@@ -365,18 +368,18 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
sizeof(struct stack_metadata *),
s_stack_info_compare_size));
aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_size);
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by bytes leaked:");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
while (aws_priority_queue_size(&stacks_by_size) > 0) {
struct stack_metadata *stack = NULL;
aws_priority_queue_pop(&stacks_by_size, &stack);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu bytes in %zu allocations:\n", stack->size, stack->count);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "%zu bytes in %zu allocations:\n%s\n",
+ stack->size,
+ stack->count,
+ aws_string_c_str(stack->trace));
}
aws_priority_queue_clean_up(&stacks_by_size);
@@ -389,30 +392,30 @@ void aws_mem_tracer_dump(struct aws_allocator *trace_allocator) {
num_stacks,
sizeof(struct stack_metadata *),
s_stack_info_compare_count));
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:\n");
- AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE,
- "################################################################################\n");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "Stacks by number of leaks:");
+ AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~");
aws_hash_table_foreach(&stack_info, s_insert_stacks, &stacks_by_count);
while (aws_priority_queue_size(&stacks_by_count) > 0) {
struct stack_metadata *stack = NULL;
aws_priority_queue_pop(&stacks_by_count, &stack);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%zu allocations leaking %zu bytes:\n", stack->count, stack->size);
- AWS_LOGF_TRACE(AWS_LS_COMMON_MEMTRACE, "%s\n", (const char *)aws_string_bytes(stack->trace));
+ AWS_LOGF_TRACE(
+ AWS_LS_COMMON_MEMTRACE,
+ "%zu allocations leaking %zu bytes:\n%s\n",
+ stack->count,
+ stack->size,
+ aws_string_c_str(stack->trace));
}
aws_priority_queue_clean_up(&stacks_by_count);
aws_hash_table_clean_up(&stack_info);
}
AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LS_COMMON_MEMTRACE, "################################################################################");
AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #\n");
+ AWS_LS_COMMON_MEMTRACE, "# END MEMTRACE DUMP #");
AWS_LOGF_TRACE(
- AWS_LS_COMMON_MEMTRACE, "################################################################################\n");
+ AWS_LS_COMMON_MEMTRACE, "################################################################################");
aws_mutex_unlock(&tracer->mutex);
}
@@ -460,7 +463,7 @@ struct aws_allocator *aws_mem_tracer_new(
enum aws_mem_trace_level level,
size_t frames_per_stack) {
- /* deprecated customizeable bookkeeping allocator */
+ /* deprecated customizable bookkeeping allocator */
(void)deprecated;
struct alloc_tracer *tracer = NULL;
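
The memtrace.c changes switch allocation timestamps to the high-resolution clock and merge the dump output into single log lines. A hypothetical sketch of wrapping an allocator with the tracer; the first two parameters and the header are assumed from the matching allocator header, as only level and frames_per_stack appear in this hunk:

#include <aws/common/allocator.h>

void demo_memtrace(void) {
    struct aws_allocator *traced =
        aws_mem_tracer_new(aws_default_allocator(), NULL /* deprecated */, AWS_MEMTRACE_STACKS, 8);

    void *leak = aws_mem_acquire(traced, 64); /* intentionally not released, so it appears in the dump */
    (void)leak;

    aws_mem_tracer_dump(traced);    /* logs the per-stack leak summaries at TRACE level */
    aws_mem_tracer_destroy(traced); /* unwraps the tracer; the underlying allocator is returned */
}
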
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/file.c b/contrib/restricted/aws/aws-c-common/source/posix/file.c
index 7c26ade8c3..e8b39c509c 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/file.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/file.c
@@ -5,47 +5,39 @@
#include <aws/common/environment.h>
#include <aws/common/file.h>
+#include <aws/common/logging.h>
#include <aws/common/string.h>
#include <dirent.h>
#include <errno.h>
+#include <pwd.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>
FILE *aws_fopen_safe(const struct aws_string *file_path, const struct aws_string *mode) {
- return fopen(aws_string_c_str(file_path), aws_string_c_str(mode));
-}
-
-static int s_parse_and_raise_error(int errno_cpy) {
- if (errno_cpy == 0) {
- return AWS_OP_SUCCESS;
- }
-
- if (errno_cpy == ENOENT || errno_cpy == ENOTDIR) {
- return aws_raise_error(AWS_ERROR_FILE_INVALID_PATH);
- }
-
- if (errno_cpy == EMFILE || errno_cpy == ENFILE) {
- return aws_raise_error(AWS_ERROR_MAX_FDS_EXCEEDED);
- }
-
- if (errno_cpy == EACCES) {
- return aws_raise_error(AWS_ERROR_NO_PERMISSION);
+ FILE *f = fopen(aws_string_c_str(file_path), aws_string_c_str(mode));
+ if (!f) {
+ int errno_cpy = errno; /* Always cache errno before potential side-effect */
+ aws_translate_and_raise_io_error(errno_cpy);
+ AWS_LOGF_ERROR(
+ AWS_LS_COMMON_IO,
+ "static: Failed to open file. path:'%s' mode:'%s' errno:%d aws-error:%d(%s)",
+ aws_string_c_str(file_path),
+ aws_string_c_str(mode),
+ errno_cpy,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
}
-
- if (errno_cpy == ENOTEMPTY) {
- return aws_raise_error(AWS_ERROR_DIRECTORY_NOT_EMPTY);
- }
-
- return aws_raise_error(AWS_ERROR_UNKNOWN);
+ return f;
}
int aws_directory_create(const struct aws_string *dir_path) {
int mkdir_ret = mkdir(aws_string_c_str(dir_path), S_IRWXU | S_IRWXG | S_IRWXO);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
/** nobody cares if it already existed. */
- if (mkdir_ret != 0 && errno != EEXIST) {
- return s_parse_and_raise_error(errno);
+ if (mkdir_ret != 0 && errno_value != EEXIST) {
+ return aws_translate_and_raise_io_error(errno_value);
}
return AWS_OP_SUCCESS;
@@ -101,24 +93,27 @@ int aws_directory_delete(const struct aws_string *dir_path, bool recursive) {
}
int error_code = rmdir(aws_string_c_str(dir_path));
+ int errno_value = errno; /* Always cache errno before potential side-effect */
- return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno);
+ return error_code == 0 ? AWS_OP_SUCCESS : aws_translate_and_raise_io_error(errno_value);
}
int aws_directory_or_file_move(const struct aws_string *from, const struct aws_string *to) {
int error_code = rename(aws_string_c_str(from), aws_string_c_str(to));
+ int errno_value = errno; /* Always cache errno before potential side-effect */
- return error_code == 0 ? AWS_OP_SUCCESS : s_parse_and_raise_error(errno);
+ return error_code == 0 ? AWS_OP_SUCCESS : aws_translate_and_raise_io_error(errno_value);
}
int aws_file_delete(const struct aws_string *file_path) {
int error_code = unlink(aws_string_c_str(file_path));
+ int errno_value = errno; /* Always cache errno before potential side-effect */
- if (!error_code || errno == ENOENT) {
+ if (!error_code || errno_value == ENOENT) {
return AWS_OP_SUCCESS;
}
- return s_parse_and_raise_error(errno);
+ return aws_translate_and_raise_io_error(errno_value);
}
int aws_directory_traverse(
@@ -128,9 +123,10 @@ int aws_directory_traverse(
aws_on_directory_entry *on_entry,
void *user_data) {
DIR *dir = opendir(aws_string_c_str(path));
+ int errno_value = errno; /* Always cache errno before potential side-effect */
if (!dir) {
- return s_parse_and_raise_error(errno);
+ return aws_translate_and_raise_io_error(errno_value);
}
struct aws_byte_cursor current_path = aws_byte_cursor_from_string(path);
@@ -227,13 +223,39 @@ AWS_STATIC_STRING_FROM_LITERAL(s_home_env_var, "HOME");
struct aws_string *aws_get_home_directory(struct aws_allocator *allocator) {
- /* ToDo: check getpwuid_r if environment check fails */
- struct aws_string *home_env_var_value = NULL;
- if (aws_get_environment_value(allocator, s_home_env_var, &home_env_var_value) == 0 && home_env_var_value != NULL) {
- return home_env_var_value;
+ /* First, check "HOME" environment variable.
+ * If it's set, then return it, even if it's an empty string. */
+ struct aws_string *home_value = NULL;
+ aws_get_environment_value(allocator, s_home_env_var, &home_value);
+ if (home_value != NULL) {
+ return home_value;
+ }
+
+ /* Next, check getpwuid_r().
+ * We need to allocate a tmp buffer to store the result strings,
+ * and the max possible size for this thing can be pretty big,
+ * so start with a reasonable allocation, and if that's not enough try something bigger. */
+ uid_t uid = getuid(); /* cannot fail */
+ struct passwd pwd;
+ struct passwd *result = NULL;
+ char *buf = NULL;
+ int status = ERANGE;
+ for (size_t bufsize = 1024; bufsize <= 16384 && status == ERANGE; bufsize *= 2) {
+ if (buf) {
+ aws_mem_release(allocator, buf);
+ }
+ buf = aws_mem_acquire(allocator, bufsize);
+ status = getpwuid_r(uid, &pwd, buf, bufsize, &result);
+ }
+
+ if (status == 0 && result != NULL && result->pw_dir != NULL) {
+ home_value = aws_string_new_from_c_str(allocator, result->pw_dir);
+ } else {
+ aws_raise_error(AWS_ERROR_GET_HOME_DIRECTORY_FAILED);
}
- return NULL;
+ aws_mem_release(allocator, buf);
+ return home_value;
}
bool aws_path_exists(const struct aws_string *path) {
@@ -251,10 +273,11 @@ int aws_fseek(FILE *file, int64_t offset, int whence) {
return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
}
int result = fseek(file, offset, whence);
-#endif /* AWS_HAVE_POSIX_LFS */
+#endif /* AWS_HAVE_POSIX_LFS */
+ int errno_value = errno; /* Always cache errno before potential side-effect */
if (result != 0) {
- return aws_translate_and_raise_io_error(errno);
+ return aws_translate_and_raise_io_error(errno_value);
}
return AWS_OP_SUCCESS;
@@ -270,7 +293,8 @@ int aws_file_get_length(FILE *file, int64_t *length) {
}
if (fstat(fd, &file_stats)) {
- return aws_translate_and_raise_io_error(errno);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
+ return aws_translate_and_raise_io_error(errno_value);
}
*length = file_stats.st_size;
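
The posix/file.c changes make aws_get_home_directory() fall back to getpwuid_r() with a growing scratch buffer when HOME is unset, raising AWS_ERROR_GET_HOME_DIRECTORY_FAILED on failure. A hypothetical caller sketch:

#include <aws/common/error.h>
#include <aws/common/file.h>
#include <aws/common/string.h>
#include <stdio.h>

int demo_home_dir(struct aws_allocator *allocator) {
    struct aws_string *home = aws_get_home_directory(allocator);
    if (!home) {
        fprintf(stderr, "no home directory: %s\n", aws_error_name(aws_last_error()));
        return AWS_OP_ERR;
    }
    printf("home: %s\n", aws_string_c_str(home));
    aws_string_destroy(home);
    return AWS_OP_SUCCESS;
}
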
diff --git a/contrib/restricted/aws/aws-c-common/source/posix/thread.c b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
index d17d859c3b..57d48aa9c7 100644
--- a/contrib/restricted/aws/aws-c-common/source/posix/thread.c
+++ b/contrib/restricted/aws/aws-c-common/source/posix/thread.c
@@ -23,9 +23,11 @@
#include <time.h>
#include <unistd.h>
-#if defined(__FreeBSD__) || defined(__NETBSD__)
+#if defined(__FreeBSD__) || defined(__NetBSD__)
# include <pthread_np.h>
typedef cpuset_t cpu_set_t;
+#elif defined(__OpenBSD__)
+# include <pthread_np.h>
#endif
#if !defined(AWS_AFFINITY_METHOD)
@@ -128,6 +130,8 @@ static void s_set_thread_name(pthread_t thread_id, const char *name) {
pthread_setname_np(name);
#elif defined(AWS_PTHREAD_SETNAME_TAKES_2ARGS)
pthread_setname_np(thread_id, name);
+#elif defined(AWS_PTHREAD_SET_NAME_TAKES_2ARGS)
+ pthread_set_name_np(thread_id, name);
#elif defined(AWS_PTHREAD_SETNAME_TAKES_3ARGS)
pthread_setname_np(thread_id, name, NULL);
#else
@@ -165,8 +169,9 @@ static void *thread_fn(void *arg) {
* and makes sure the numa node of the cpu we launched this thread on is where memory gets allocated. However,
* we don't want to fail the application if this fails, so make the call, and ignore the result. */
long resp = g_set_mempolicy_ptr(AWS_MPOL_PREFERRED_ALIAS, NULL, 0);
+ int errno_value = errno; /* Always cache errno before potential side-effect */
if (resp) {
- AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno);
+ AWS_LOGF_WARN(AWS_LS_COMMON_THREAD, "call to set_mempolicy() failed with errno %d", errno_value);
}
}
wrapper.func(wrapper.arg);
@@ -274,7 +279,7 @@ int aws_thread_launch(
/* AFAIK you can't set thread affinity on apple platforms, and it doesn't really matter since all memory
* NUMA or not is setup in interleave mode.
- * Thread afinity is also not supported on Android systems, and honestly, if you're running android on a NUMA
+ * Thread affinity is also not supported on Android systems, and honestly, if you're running android on a NUMA
* configuration, you've got bigger problems. */
#if AWS_AFFINITY_METHOD == AWS_AFFINITY_METHOD_PTHREAD_ATTR
if (options->cpu_id >= 0) {
@@ -460,3 +465,32 @@ int aws_thread_current_at_exit(aws_thread_atexit_fn *callback, void *user_data)
tl_wrapper->atexit = cb;
return AWS_OP_SUCCESS;
}
+
+int aws_thread_current_name(struct aws_allocator *allocator, struct aws_string **out_name) {
+ return aws_thread_name(allocator, aws_thread_current_thread_id(), out_name);
+}
+
+#define THREAD_NAME_BUFFER_SIZE 256
+int aws_thread_name(struct aws_allocator *allocator, aws_thread_id_t thread_id, struct aws_string **out_name) {
+ *out_name = NULL;
+#if defined(AWS_PTHREAD_GETNAME_TAKES_2ARGS) || defined(AWS_PTHREAD_GETNAME_TAKES_3ARGS) || \
+ defined(AWS_PTHREAD_GET_NAME_TAKES_2_ARGS)
+ char name[THREAD_NAME_BUFFER_SIZE] = {0};
+# ifdef AWS_PTHREAD_GETNAME_TAKES_3ARGS
+ if (pthread_getname_np(thread_id, name, THREAD_NAME_BUFFER_SIZE)) {
+# elif AWS_PTHREAD_GETNAME_TAKES_2ARGS
+ if (pthread_getname_np(thread_id, name)) {
+# elif AWS_PTHREAD_GET_NAME_TAKES_2ARGS
+ if (pthread_get_name_np(thread_id, name)) {
+# endif
+
+ return aws_raise_error(AWS_ERROR_SYS_CALL_FAILURE);
+ }
+
+ *out_name = aws_string_new_from_c_str(allocator, name);
+ return AWS_OP_SUCCESS;
+#else
+
+ return aws_raise_error(AWS_ERROR_PLATFORM_NOT_SUPPORTED);
+#endif
+}
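
The posix/thread.c changes add aws_thread_name() and aws_thread_current_name() on top of whichever pthread getname variant the platform provides. A hypothetical caller sketch, using only the signatures added above:

#include <aws/common/string.h>
#include <aws/common/thread.h>
#include <stdio.h>

void demo_thread_name(struct aws_allocator *allocator) {
    struct aws_string *name = NULL;
    if (aws_thread_current_name(allocator, &name) == AWS_OP_SUCCESS) {
        /* Unsupported platforms raise AWS_ERROR_PLATFORM_NOT_SUPPORTED instead. */
        printf("running on thread '%s'\n", aws_string_c_str(name));
        aws_string_destroy(name);
    }
}
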
diff --git a/contrib/restricted/aws/aws-c-common/source/priority_queue.c b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
index f7d0f54e2d..86a91feb3a 100644
--- a/contrib/restricted/aws/aws-c-common/source/priority_queue.c
+++ b/contrib/restricted/aws/aws-c-common/source/priority_queue.c
@@ -59,7 +59,8 @@ static bool s_sift_down(struct aws_priority_queue *queue, size_t root) {
size_t left = LEFT_OF(root);
size_t right = RIGHT_OF(root);
size_t first = root;
- void *first_item = NULL, *other_item = NULL;
+ void *first_item = NULL;
+ void *other_item = NULL;
aws_array_list_get_at_ptr(&queue->container, &first_item, root);
aws_array_list_get_at_ptr(&queue->container, &other_item, left);
@@ -100,7 +101,8 @@ static bool s_sift_up(struct aws_priority_queue *queue, size_t index) {
bool did_move = false;
- void *parent_item = NULL, *child_item = NULL;
+ void *parent_item = NULL;
+ void *child_item = NULL;
size_t parent = PARENT_OF(index);
while (index) {
/*
@@ -216,8 +218,8 @@ bool aws_priority_queue_backpointers_valid(const struct aws_priority_queue *cons
/* Internal container validity */
bool backpointer_list_is_valid =
- ((aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) &&
- (queue->backpointers.data != NULL)));
+ (aws_array_list_is_valid(&queue->backpointers) && (queue->backpointers.current_size != 0) &&
+ (queue->backpointers.data != NULL));
/* Backpointer struct should either be zero or should be
* initialized to be at most as long as the container, and having
diff --git a/contrib/restricted/aws/aws-c-common/source/uri.c b/contrib/restricted/aws/aws-c-common/source/uri.c
index 0559641420..1fafc9492e 100644
--- a/contrib/restricted/aws/aws-c-common/source/uri.c
+++ b/contrib/restricted/aws/aws-c-common/source/uri.c
@@ -11,7 +11,7 @@
#include <stdio.h>
#include <string.h>
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */
# pragma warning(disable : 4204) /* non-constant aggregate initializer */
#endif
@@ -264,7 +264,7 @@ int aws_uri_query_string_params(const struct aws_uri *uri, struct aws_array_list
}
static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *str) {
- uint8_t *location_of_colon = memchr(str->ptr, ':', str->len);
+ const uint8_t *location_of_colon = memchr(str->ptr, ':', str->len);
if (!location_of_colon) {
parser->state = ON_AUTHORITY;
@@ -292,8 +292,8 @@ static void s_parse_scheme(struct uri_parser *parser, struct aws_byte_cursor *st
}
static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor *str) {
- uint8_t *location_of_slash = memchr(str->ptr, '/', str->len);
- uint8_t *location_of_qmark = memchr(str->ptr, '?', str->len);
+ const uint8_t *location_of_slash = memchr(str->ptr, '/', str->len);
+ const uint8_t *location_of_qmark = memchr(str->ptr, '?', str->len);
if (!location_of_slash && !location_of_qmark && str->len) {
parser->uri->authority.ptr = str->ptr;
@@ -309,7 +309,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor
aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
return;
} else {
- uint8_t *end = str->ptr + str->len;
+ const uint8_t *end = str->ptr + str->len;
if (location_of_slash) {
parser->state = ON_PATH;
end = location_of_slash;
@@ -325,7 +325,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor
if (authority_parse_csr.len) {
/* RFC-3986 section 3.2: authority = [ userinfo "@" ] host [ ":" port ] */
- uint8_t *userinfo_delim = memchr(authority_parse_csr.ptr, '@', authority_parse_csr.len);
+ const uint8_t *userinfo_delim = memchr(authority_parse_csr.ptr, '@', authority_parse_csr.len);
if (userinfo_delim) {
parser->uri->userinfo =
@@ -351,7 +351,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor
/* RFC-3986 section 3.2: host identified by IPv6 literal address is
* enclosed within square brackets. We must ignore any colons within
* IPv6 literals and only search for port delimiter after closing bracket.*/
- uint8_t *port_search_start = authority_parse_csr.ptr;
+ const uint8_t *port_search_start = authority_parse_csr.ptr;
size_t port_search_len = authority_parse_csr.len;
if (authority_parse_csr.len > 0 && authority_parse_csr.ptr[0] == '[') {
port_search_start = memchr(authority_parse_csr.ptr, ']', authority_parse_csr.len);
@@ -363,7 +363,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor
port_search_len = authority_parse_csr.len - (port_search_start - authority_parse_csr.ptr);
}
- uint8_t *port_delim = memchr(port_search_start, ':', port_search_len);
+ const uint8_t *port_delim = memchr(port_search_start, ':', port_search_len);
if (!port_delim) {
parser->uri->port = 0;
@@ -407,7 +407,7 @@ static void s_parse_authority(struct uri_parser *parser, struct aws_byte_cursor
static void s_parse_path(struct uri_parser *parser, struct aws_byte_cursor *str) {
parser->uri->path_and_query = *str;
- uint8_t *location_of_q_mark = memchr(str->ptr, '?', str->len);
+ const uint8_t *location_of_q_mark = memchr(str->ptr, '?', str->len);
if (!location_of_q_mark) {
parser->uri->path.ptr = str->ptr;
@@ -540,8 +540,8 @@ static int s_encode_cursor_to_buffer(
struct aws_byte_buf *buffer,
const struct aws_byte_cursor *cursor,
unchecked_append_canonicalized_character_fn *append_canonicalized_character) {
- uint8_t *current_ptr = cursor->ptr;
- uint8_t *end_ptr = cursor->ptr + cursor->len;
+ const uint8_t *current_ptr = cursor->ptr;
+ const uint8_t *end_ptr = cursor->ptr + cursor->len;
/*
* reserve room up front for the worst possible case: everything gets % encoded
diff --git a/contrib/restricted/aws/aws-c-common/source/xml_parser.c b/contrib/restricted/aws/aws-c-common/source/xml_parser.c
index 692324ac9a..ac238cdfaf 100644
--- a/contrib/restricted/aws/aws-c-common/source/xml_parser.c
+++ b/contrib/restricted/aws/aws-c-common/source/xml_parser.c
@@ -151,13 +151,13 @@ int aws_xml_parser_parse(
/* burn everything that precedes the actual xml nodes. */
while (parser->doc.len) {
- uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len);
+ const uint8_t *start = memchr(parser->doc.ptr, '<', parser->doc.len);
if (!start) {
AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
}
- uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len);
+ const uint8_t *location = memchr(parser->doc.ptr, '>', parser->doc.len);
if (!location) {
AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
@@ -312,14 +312,14 @@ int aws_xml_node_traverse(
/* look for the next node at the current level. do this until we encounter the parent node's
* closing tag. */
while (!parser->stop_parsing && !parser->error) {
- uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
+ const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
if (!next_location) {
AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
}
- uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
+ const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
if (!end_location) {
AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
@@ -409,14 +409,14 @@ int aws_xml_node_get_attribute(
int s_node_next_sibling(struct aws_xml_parser *parser) {
AWS_PRECONDITION(parser);
- uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
+ const uint8_t *next_location = memchr(parser->doc.ptr, '<', parser->doc.len);
if (!next_location) {
return parser->error;
}
aws_byte_cursor_advance(&parser->doc, next_location - parser->doc.ptr);
- uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
+ const uint8_t *end_location = memchr(parser->doc.ptr, '>', parser->doc.len);
if (!end_location) {
AWS_LOGF_ERROR(AWS_LS_COMMON_XML_PARSER, "XML document is invalid.");
diff --git a/contrib/restricted/aws/aws-c-common/ya.make b/contrib/restricted/aws/aws-c-common/ya.make
index afc4b7af5b..85a2c01545 100644
--- a/contrib/restricted/aws/aws-c-common/ya.make
+++ b/contrib/restricted/aws/aws-c-common/ya.make
@@ -1,4 +1,4 @@
-# Generated by devtools/yamaker from nixpkgs 22.11.
+# Generated by devtools/yamaker from nixpkgs 23.05.
LIBRARY()
@@ -11,9 +11,9 @@ LICENSE(
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(0.8.6)
+VERSION(0.8.15)
-ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-common/archive/v0.8.6.tar.gz)
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-common/archive/v0.8.15.tar.gz)
ADDINCL(
GLOBAL contrib/restricted/aws/aws-c-common/generated/include
@@ -26,6 +26,7 @@ NO_RUNTIME()
CFLAGS(
-DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_PTHREAD_GETNAME_TAKES_3ARGS
-DAWS_PTHREAD_SETNAME_TAKES_2ARGS
-DCJSON_HIDE_SYMBOLS
-DHAVE_SYSCONF
@@ -62,7 +63,6 @@ SRCS(
source/allocator_sba.c
source/array_list.c
source/assert.c
- source/bus.c
source/byte_buf.c
source/cache.c
source/codegen.c
diff --git a/contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..23107a5c55
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,27 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-compression)
+target_compile_options(restricted-aws-aws-c-compression PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-compression PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/include
+)
+target_link_libraries(restricted-aws-aws-c-compression PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-compression PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/compression.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
+)
diff --git a/contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..23107a5c55
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,27 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-compression)
+target_compile_options(restricted-aws-aws-c-compression PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-compression PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/include
+)
+target_link_libraries(restricted-aws-aws-c-compression PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-compression PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/compression.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
+)
diff --git a/contrib/restricted/aws/aws-c-compression/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-compression/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..045999f577
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,28 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-compression)
+target_compile_options(restricted-aws-aws-c-compression PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-compression PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/include
+)
+target_link_libraries(restricted-aws-aws-c-compression PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-compression PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/compression.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
+)
diff --git a/contrib/restricted/aws/aws-c-compression/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-compression/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..045999f577
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,28 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-compression)
+target_compile_options(restricted-aws-aws-c-compression PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-compression PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/include
+)
+target_link_libraries(restricted-aws-aws-c-compression PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-compression PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/compression.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
+)
diff --git a/contrib/restricted/aws/aws-c-compression/CMakeLists.txt b/contrib/restricted/aws/aws-c-compression/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-compression/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-compression/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..23107a5c55
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,27 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-compression)
+target_compile_options(restricted-aws-aws-c-compression PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-compression PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/include
+)
+target_link_libraries(restricted-aws-aws-c-compression PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-compression PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/compression.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
+)
diff --git a/contrib/restricted/aws/aws-c-compression/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-compression/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..3b64466870
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-compression/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-compression/CONTRIBUTING.md
new file mode 100644
index 0000000000..2b12a6082b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-compression/issues), or [recently closed](https://github.com/awslabs/aws-c-compression/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-compression/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-c-compression/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-compression/LICENSE b/contrib/restricted/aws/aws-c-compression/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/restricted/aws/aws-c-compression/NOTICE b/contrib/restricted/aws/aws-c-compression/NOTICE
new file mode 100644
index 0000000000..7c0e91fdb7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/NOTICE
@@ -0,0 +1,3 @@
+AWS C Compression
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0.
diff --git a/contrib/restricted/aws/aws-c-compression/README.md b/contrib/restricted/aws/aws-c-compression/README.md
new file mode 100644
index 0000000000..2edaedb6cc
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/README.md
@@ -0,0 +1,183 @@
+## AWS C Compression
+
+This is a cross-platform C99 implementation of compression algorithms such as
+gzip and Huffman encoding/decoding. Currently only Huffman is implemented.
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+Note that aws-c-compression has a dependency on aws-c-common:
+
+```
+git clone git@github.com:awslabs/aws-c-common.git
+cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-common -B aws-c-common/build
+cmake --build aws-c-common/build --target install
+
+git clone git@github.com:awslabs/aws-c-compression.git
+cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-compression -B aws-c-compression/build
+cmake --build aws-c-compression/build --target install
+```
+
+### Huffman
+
+The Huffman implementation in this library is designed around the concept of a
+generic "symbol coder" object, which defines how each symbol (value between 0
+and 255) is encoded and decoded. This object looks like this:
+```c
+typedef struct aws_huffman_code (*aws_huffman_symbol_encoder)(uint8_t symbol, void *userdata);
+typedef uint8_t (*aws_huffman_symbol_decoder)(uint32_t bits, uint8_t *symbol, void *userdata);
+
+struct aws_huffman_symbol_coder {
+ aws_huffman_symbol_encoder encode;
+ aws_huffman_symbol_decoder decode;
+ void *userdata;
+};
+```
+These callbacks may be implemented manually, or you may use the included
+Huffman coder generator to generate one from a table definition file. The
+generator expects to be called with the following arguments:
+```shell
+$ aws-c-compression-huffman-generator path/to/table.def path/to/generated.c coder_name
+```
+
+The table definition file should be in the following format:
+```c
+/* sym bits code len */
+HUFFMAN_CODE( 0, "1100101110", 0x32e, 10)
+HUFFMAN_CODE( 1, "1100101111", 0x32f, 10)
+/* ... */
+```
+The HUFFMAN_CODE macro expects 4 arguments:
+* sym: the symbol value [0-255]
+* bits: the bits representing the symbol in string form
+* code: the bits representing the symbol in numeric form
+* len: the number of bits used to represent the symbol
+
+> #### Note
+> This file may also be `#include`d in the following way to generate a static
+> list of codes:
+> ```c
+> /* Provides the HUFFMAN_CODE macro */
+> #include <aws/testing/compression/huffman.h>
+>
+> static struct huffman_test_code_point code_points[] = {
+> #include "test_huffman_static_table.def"
+> };
+> ```
+
+This will emit a C file which exports a function with the following signature:
+```c
+struct aws_huffman_symbol_coder *{coder_name}_get_coder();
+```
+Note that this function does not allocate, but maintains a static instance of
+the coder.
+
+
+An example implementation of this file is provided in
+`tests/test_huffman_static_table.def`.
+
+
+To use the coder, forward declare that function, and pass the result as the
+second argument to `aws_huffman_encoder_init` and `aws_huffman_decoder_init`.
+```c
+struct aws_huffman_encoder encoder;
+aws_huffman_encoder_init(&encoder, {coder_name}_get_coder());
+
+struct aws_huffman_decoder decoder;
+aws_huffman_decoder_init(&decoder, {coder_name}_get_coder());
+```
+
+#### Encoding
+```c
+/**
+ * Encode a symbol buffer into the output buffer.
+ *
+ * \param[in] encoder The encoder object to use
+ * \param[in] to_encode The symbol buffer to encode
+ * \param[in,out] length In: The length of to_encode. Out: The number of bytes read from to_encode
+ * \param[in] output The buffer to write encoded bytes to
+ * \param[in,out] output_size In: The size of output. Out: The number of bytes written to output
+ *
+ * \return AWS_OP_SUCCESS if encoding is successful, otherwise AWS_OP_ERR (the code for the error that occurred is raised)
+ */
+int aws_huffman_encode(struct aws_huffman_encoder *encoder, const char *to_encode, size_t *length, uint8_t *output, size_t *output_size);
+```
+The encoder is built to support partial encoding. This means that if there
+isn't enough space in `output`, the encoder will encode as much as possible,
+update `length` to indicate how much was consumed, `output_size` won't change,
+and `AWS_ERROR_SHORT_BUFFER` will be raised. `aws_huffman_encode` may then be
+called again like the following pseudo-code:
+```c
+void encode_and_send(const char *to_encode, size_t size) {
+ while (size > 0) {
+ uint8_t output[some_chunk_size];
+ size_t output_size = sizeof(output);
+ size_t bytes_read = size;
+ aws_huffman_encode(encoder, to_encode, &bytes_read, output, &output_size);
+ /* AWS_ERROR_SHORT_BUFFER was raised... */
+ send_output_to_someone_else(output, output_size);
+
+ to_encode += bytes_read;
+ size -= bytes_read;
+ }
+ /* Be sure to reset the encoder after use */
+ aws_huffman_encoder_reset(encoder);
+}
+```
+
+`aws_huffman_encoder` also has a `uint8_t` field called `eos_padding` that
+defines how any unwritten bits in the last byte of output are filled. The most
+significant bits will be used. For example, if the last byte contains only 3 bits
+and `eos_padding` is `0b01010101`, `01010` will be appended to the byte.
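+
+As a minimal sketch (not part of the upstream docs), assuming a generated coder
+exposed as a hypothetical `my_coder_get_coder()`, the padding can be set
+explicitly before encoding; note that `aws_huffman_encoder_init` already
+defaults `eos_padding` to `0xFF` (all 1s), which is what HPACK expects:
+```c
+struct aws_huffman_encoder encoder;
+aws_huffman_encoder_init(&encoder, my_coder_get_coder()); /* my_coder_get_coder() is hypothetical */
+
+/* The default is already 0xFF; setting it here only illustrates the field. */
+encoder.eos_padding = 0xFF;
+```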
+
+#### Decoding
+```c
+/**
+ * Decodes a byte buffer into the provided symbol array.
+ *
+ * \param[in] decoder The decoder object to use
+ * \param[in] to_decode The encoded byte buffer to read from
+ * \param[in,out] length In: The length of to_decode. Out: The number of bytes read from to_decode
+ * \param[in] output The buffer to write decoded symbols to
+ * \param[in,out] output_size In: The size of output. Out: The number of bytes written to output
+ *
+ * \return AWS_OP_SUCCESS if decoding is successful, otherwise AWS_OP_ERR (the code for the error that occurred is raised)
+ */
+int aws_huffman_decode(struct aws_huffman_decoder *decoder, const uint8_t *to_decode, size_t *length, char *output, size_t *output_size);
+```
+The decoder is built to support partial decoding. This means that if there
+isn't enough space in `output`, the decoder will decode as much as possible,
+update `length` to indicate how much was consumed, `output_size` won't change,
+and `AWS_ERROR_SHORT_BUFFER` will be raised. `aws_huffman_decode` may then be
+called again like the following pseudo-code:
+```c
+void decode_and_send(const char *to_decode, size_t size) {
+ while (size > 0) {
+ uint8_t output[some_chunk_size];
+ size_t output_size = sizeof(output);
+ size_t bytes_read = size;
+ aws_huffman_decode(decoder, to_decode, &bytes_read, output, &output_size);
+ /* AWS_ERROR_SHORT_BUFFER was raised... */
+ send_output_to_someone_else(output, output_size);
+
+ to_decode += bytes_read;
+ size -= bytes_read;
+ }
+ /* Be sure to reset the decoder after use */
+ aws_huffman_decoder_reset(decoder);
+}
+```
+
+Upon completion of a decode, the most significant bits of
+`decoder->working_bits` will contain the final bits of `to_decode` that could
+not match a symbol. This is useful for verifying the padding bits of a stream.
+For example, to validate that a stream ends in all 1's (like HPACK requires),
+you could do the following:
+```c
+AWS_ASSERT(decoder->working_bits == UINT64_MAX << (64 - decoder->num_bits));
+```
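+
+> #### Note
+> The prototypes shown above do not match the headers vendored in this change
+> (`include/aws/compression/huffman.h`), which declare cursor/buffer based
+> signatures. The following round-trip sketch targets that API and assumes a
+> generated coder exposed as a hypothetical `my_coder_get_coder()`:
+```c
+#include <aws/compression/huffman.h>
+
+/* Hypothetical coder produced by the generator (see above). */
+extern struct aws_huffman_symbol_coder *my_coder_get_coder(void);
+
+int round_trip(struct aws_allocator *alloc, struct aws_byte_cursor input) {
+    struct aws_huffman_encoder encoder;
+    aws_huffman_encoder_init(&encoder, my_coder_get_coder());
+
+    /* Size the output buffer exactly, then encode the whole input. */
+    struct aws_byte_buf encoded;
+    if (aws_byte_buf_init(&encoded, alloc, aws_huffman_get_encoded_length(&encoder, input))) {
+        return AWS_OP_ERR;
+    }
+    struct aws_byte_cursor remaining = input;
+    if (aws_huffman_encode(&encoder, &remaining, &encoded)) {
+        aws_byte_buf_clean_up(&encoded);
+        return AWS_OP_ERR;
+    }
+
+    /* Decode back into a growable buffer and compare against the input. */
+    struct aws_huffman_decoder decoder;
+    aws_huffman_decoder_init(&decoder, my_coder_get_coder());
+    aws_huffman_decoder_allow_growth(&decoder, true);
+
+    struct aws_byte_buf decoded;
+    if (aws_byte_buf_init(&decoded, alloc, input.len)) {
+        aws_byte_buf_clean_up(&encoded);
+        return AWS_OP_ERR;
+    }
+    struct aws_byte_cursor encoded_cursor = aws_byte_cursor_from_buf(&encoded);
+    int result = aws_huffman_decode(&decoder, &encoded_cursor, &decoded);
+    if (result == AWS_OP_SUCCESS && !aws_byte_cursor_eq_byte_buf(&input, &decoded)) {
+        result = AWS_OP_ERR;
+    }
+
+    aws_byte_buf_clean_up(&decoded);
+    aws_byte_buf_clean_up(&encoded);
+    return result;
+}
+```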
diff --git a/contrib/restricted/aws/aws-c-compression/include/aws/compression/compression.h b/contrib/restricted/aws/aws-c-compression/include/aws/compression/compression.h
new file mode 100644
index 0000000000..437c48961c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/include/aws/compression/compression.h
@@ -0,0 +1,35 @@
+#ifndef AWS_COMPRESSION_COMPRESSION_H
+#define AWS_COMPRESSION_COMPRESSION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/compression/exports.h>
+
+#include <aws/common/common.h>
+
+#define AWS_C_COMPRESSION_PACKAGE_ID 3
+
+enum aws_compression_error {
+ AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMPRESSION_PACKAGE_ID),
+
+ AWS_ERROR_END_COMPRESSION_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_COMPRESSION_PACKAGE_ID)
+};
+
+/**
+ * Initializes internal datastructures used by aws-c-compression.
+ * Must be called before using any functionality in aws-c-compression.
+ */
+AWS_COMPRESSION_API
+void aws_compression_library_init(struct aws_allocator *alloc);
+
+/**
+ * Clean up internal datastructures used by aws-c-compression.
+ * Must not be called until application is done using functionality in aws-c-compression.
+ */
+AWS_COMPRESSION_API
+void aws_compression_library_clean_up(void);
+
+#endif /* AWS_COMPRESSION_COMPRESSION_H */
diff --git a/contrib/restricted/aws/aws-c-compression/include/aws/compression/exports.h b/contrib/restricted/aws/aws-c-compression/include/aws/compression/exports.h
new file mode 100644
index 0000000000..8c0ac00fbb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/include/aws/compression/exports.h
@@ -0,0 +1,28 @@
+#ifndef AWS_COMPRESSION_EXPORTS_H
+#define AWS_COMPRESSION_EXPORTS_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_COMPRESSION_USE_IMPORT_EXPORT
+# ifdef AWS_COMPRESSION_EXPORTS
+# define AWS_COMPRESSION_API __declspec(dllexport)
+# else
+# define AWS_COMPRESSION_API __declspec(dllimport)
+# endif /* AWS_COMPRESSION_EXPORTS */
+# else
+# define AWS_COMPRESSION_API
+# endif /* AWS_COMPRESSION_USE_IMPORT_EXPORT */
+
+#else /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_COMPRESSION_USE_IMPORT_EXPORT) && \
+ defined(AWS_COMPRESSION_EXPORTS)
+# define AWS_COMPRESSION_API __attribute__((visibility("default")))
+# else
+# define AWS_COMPRESSION_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */
+
+#endif /* AWS_COMPRESSION_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-compression/include/aws/compression/huffman.h b/contrib/restricted/aws/aws-c-compression/include/aws/compression/huffman.h
new file mode 100644
index 0000000000..aa83417dcb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/include/aws/compression/huffman.h
@@ -0,0 +1,161 @@
+#ifndef AWS_COMPRESSION_HUFFMAN_H
+#define AWS_COMPRESSION_HUFFMAN_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/compression/compression.h>
+
+#include <aws/common/byte_buf.h>
+
+/**
+ * Represents an encoded code
+ */
+struct aws_huffman_code {
+ /**
+ * The value of the code
+ * \note The pattern is stored in the least significant bits
+ */
+ uint32_t pattern;
+ /** The number of bits in pattern to use */
+ uint8_t num_bits;
+};
+
+/**
+ * Function used to encode a single symbol to an aws_huffman_code
+ *
+ * \param[in] symbol The symbol to encode
+ * \param[in] userdata Optional userdata (aws_huffman_symbol_coder.userdata)
+ *
+ * \returns The code representing the symbol. If this symbol is not recognized,
+ * return a code with num_bits set to 0.
+ */
+typedef struct aws_huffman_code(aws_huffman_symbol_encoder_fn)(uint8_t symbol, void *userdata);
+/**
+ * Function used to decode a code into a symbol
+ *
+ * \param[in] bits The bits to attempt to decode a symbol from
+ * \param[out] symbol The symbol found. Do not write to if no valid symbol found
+ * \param[in] userdata Optional userdata (aws_huffman_symbol_coder.userdata)
+ *
+ * \returns The number of bits read from bits
+ */
+typedef uint8_t(aws_huffman_symbol_decoder_fn)(uint32_t bits, uint8_t *symbol, void *userdata);
+
+/**
+ * Structure used to define how symbols are encoded and decoded
+ */
+struct aws_huffman_symbol_coder {
+ aws_huffman_symbol_encoder_fn *encode;
+ aws_huffman_symbol_decoder_fn *decode;
+ void *userdata;
+};
+
+/**
+ * Structure used for persistent encoding.
+ * Allows for reading from or writing to incomplete buffers.
+ */
+struct aws_huffman_encoder {
+ /* Params */
+ struct aws_huffman_symbol_coder *coder;
+ uint8_t eos_padding;
+
+ /* State */
+ struct aws_huffman_code overflow_bits;
+};
+
+/**
+ * Structure used for persistent decoding.
+ * Allows for reading from or writing to incomplete buffers.
+ */
+struct aws_huffman_decoder {
+ /* Param */
+ struct aws_huffman_symbol_coder *coder;
+ bool allow_growth;
+
+ /* State */
+ uint64_t working_bits;
+ uint8_t num_bits;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initialize an encoder object with a symbol coder.
+ */
+AWS_COMPRESSION_API
+void aws_huffman_encoder_init(struct aws_huffman_encoder *encoder, struct aws_huffman_symbol_coder *coder);
+
+/**
+ * Resets an encoder for use with a new binary stream
+ */
+AWS_COMPRESSION_API
+void aws_huffman_encoder_reset(struct aws_huffman_encoder *encoder);
+
+/**
+ * Initialize a decoder object with a symbol coder.
+ */
+AWS_COMPRESSION_API
+void aws_huffman_decoder_init(struct aws_huffman_decoder *decoder, struct aws_huffman_symbol_coder *coder);
+
+/**
+ * Resets a decoder for use with a new binary stream
+ */
+AWS_COMPRESSION_API
+void aws_huffman_decoder_reset(struct aws_huffman_decoder *decoder);
+
+/**
+ * Get the byte length of to_encode post-encoding.
+ *
+ * \param[in] encoder The encoder object to use
+ * \param[in] to_encode The symbol buffer to encode
+ *
+ * \return The length of the encoded string.
+ */
+AWS_COMPRESSION_API
+size_t aws_huffman_get_encoded_length(struct aws_huffman_encoder *encoder, struct aws_byte_cursor to_encode);
+
+/**
+ * Encode a symbol buffer into the output buffer.
+ *
+ * \param[in] encoder The encoder object to use
+ * \param[in] to_encode The symbol buffer to encode
+ * \param[in] output The buffer to write encoded bytes to
+ *
+ * \return AWS_OP_SUCCESS if encoding is successful, AWS_OP_ERR otherwise
+ */
+AWS_COMPRESSION_API
+int aws_huffman_encode(
+ struct aws_huffman_encoder *encoder,
+ struct aws_byte_cursor *to_encode,
+ struct aws_byte_buf *output);
+
+/**
+ * Decodes a byte buffer into the provided symbol array.
+ *
+ * \param[in] decoder The decoder object to use
+ * \param[in] to_decode The encoded byte buffer to read from
+ * \param[in] output The buffer to write decoded symbols to.
+ * If decoder is set to allow growth, capacity will be increased when necessary.
+ *
+ * \return AWS_OP_SUCCESS if decoding is successful, AWS_OP_ERR otherwise
+ */
+AWS_COMPRESSION_API
+int aws_huffman_decode(
+ struct aws_huffman_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_byte_buf *output);
+
+/**
+ * Set whether or not to increase capacity when the output buffer fills up while decoding.
+ * This is false by default.
+ */
+AWS_COMPRESSION_API
+void aws_huffman_decoder_allow_growth(struct aws_huffman_decoder *decoder, bool allow_growth);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_COMPRESSION_HUFFMAN_H */
diff --git a/contrib/restricted/aws/aws-c-compression/include/aws/compression/private/huffman_testing.h b/contrib/restricted/aws/aws-c-compression/include/aws/compression/private/huffman_testing.h
new file mode 100644
index 0000000000..0830ce060f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/include/aws/compression/private/huffman_testing.h
@@ -0,0 +1,99 @@
+#ifndef AWS_COMPRESSION_HUFFMAN_TESTING_H
+#define AWS_COMPRESSION_HUFFMAN_TESTING_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/compression/huffman.h>
+
+/**
+ * The intended use of this file is to allow testing of huffman character coders.
+ * By doing the following, you can ensure the output of encoders and decoders is
+ * correct:
+ *
+ * \code{c}
+ * static struct huffman_test_code_point code_points[] = {
+ * #include "test_huffman_static_table.def"
+ * };
+ * \endcode
+ *
+ * You may then iterate over each code point in the array, and test the
+ * following (pseudo-code):
+ *
+ * \code{c} for (cp in code_points) {
+ * AWS_ASSERT(my_coder->encode(cp.symbol) == cp.pattern);
+ * AWS_ASSERT(my_coder->decode(cp.pattern) == cp.symbol);
+ * }
+ * \endcode
+ */
+
+/**
+ * Structure containing all relevant information about a code point
+ */
+struct huffman_test_code_point {
+ uint8_t symbol;
+ struct aws_huffman_code code;
+};
+
+/**
+ * Macro to be used when including a table def file, populates an array of
+ * huffman_test_code_points
+ */
+#define HUFFMAN_CODE(psymbol, pbit_string, pbit_pattern, pnum_bits) \
+ { \
+ .symbol = (psymbol), \
+ .code = \
+ { \
+ .pattern = (pbit_pattern), \
+ .num_bits = (pnum_bits), \
+ }, \
+ },
+
+/**
+ * Function to test a huffman coder to ensure the transitive property applies
+ * (input == decode(encode(input)))
+ *
+ * \param[in] coder The symbol coder to test
+ * \param[in] input The buffer to test
+ * \param[in] size The size of input
+ * \param[in] encoded_size The length of the encoded buffer. Pass 0 to skip check.
+ * \param[out] error_string In case of failure, the error string to report
+ *
+ * \return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure (error_string
+ * will be set)
+ */
+AWS_COMPRESSION_API
+int huffman_test_transitive(
+ struct aws_huffman_symbol_coder *coder,
+ const char *input,
+ size_t size,
+ size_t encoded_size,
+ const char **error_string);
+
+/**
+ * Function to test a huffman coder to ensure the transitive property applies
+ * when doing partial encodes/decodes (input == decode(encode(input)))
+ *
+ * \param[in] coder The symbol coder to test
+ * \param[in] input The buffer to test
+ * \param[in] size The size of input
+ * \param[in] encoded_size The length of the encoded buffer. Pass 0 to skip check.
+ * \param[in] output_chunk_size The amount of output to write at once
+ * \param[out] error_string In case of failure, the error string to
+ * report
+ *
+ * \return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure (error_string
+ * will be set)
+ */
+AWS_COMPRESSION_API
+int huffman_test_transitive_chunked(
+ struct aws_huffman_symbol_coder *coder,
+ const char *input,
+ size_t size,
+ size_t encoded_size,
+ size_t output_chunk_size,
+ const char **error_string);
+
+#endif /* AWS_COMPRESSION_HUFFMAN_TESTING_H */
diff --git a/contrib/restricted/aws/aws-c-compression/source/compression.c b/contrib/restricted/aws/aws-c-compression/source/compression.c
new file mode 100644
index 0000000000..52777c0eeb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/source/compression.c
@@ -0,0 +1,44 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/compression/compression.h>
+
+#define DEFINE_ERROR_INFO(CODE, STR) \
+ [(CODE)-AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_COMPRESSION_PACKAGE_ID)] = \
+ AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-compression")
+
+/* clang-format off */
+static struct aws_error_info s_errors[] = {
+ DEFINE_ERROR_INFO(
+ AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL,
+ "Compression encountered an unknown symbol."),
+};
+/* clang-format on */
+
+static struct aws_error_info_list s_error_list = {
+ .error_list = s_errors,
+ .count = AWS_ARRAY_SIZE(s_errors),
+};
+
+static bool s_library_initialized = false;
+void aws_compression_library_init(struct aws_allocator *alloc) {
+ if (s_library_initialized) {
+ return;
+ }
+ s_library_initialized = true;
+
+ aws_common_library_init(alloc);
+ aws_register_error_info(&s_error_list);
+}
+
+void aws_compression_library_clean_up(void) {
+ if (!s_library_initialized) {
+ return;
+ }
+ s_library_initialized = false;
+
+ aws_unregister_error_info(&s_error_list);
+ aws_common_library_clean_up();
+}
diff --git a/contrib/restricted/aws/aws-c-compression/source/huffman.c b/contrib/restricted/aws/aws-c-compression/source/huffman.c
new file mode 100644
index 0000000000..074c9f4ed5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/source/huffman.c
@@ -0,0 +1,285 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/compression/huffman.h>
+
+#define BITSIZEOF(val) (sizeof(val) * 8)
+
+static uint8_t MAX_PATTERN_BITS = BITSIZEOF(((struct aws_huffman_code *)0)->pattern);
+
+void aws_huffman_encoder_init(struct aws_huffman_encoder *encoder, struct aws_huffman_symbol_coder *coder) {
+
+ AWS_ASSERT(encoder);
+ AWS_ASSERT(coder);
+
+ AWS_ZERO_STRUCT(*encoder);
+ encoder->coder = coder;
+ encoder->eos_padding = UINT8_MAX;
+}
+
+void aws_huffman_encoder_reset(struct aws_huffman_encoder *encoder) {
+
+ AWS_ASSERT(encoder);
+
+ AWS_ZERO_STRUCT(encoder->overflow_bits);
+}
+
+void aws_huffman_decoder_init(struct aws_huffman_decoder *decoder, struct aws_huffman_symbol_coder *coder) {
+
+ AWS_ASSERT(decoder);
+ AWS_ASSERT(coder);
+
+ AWS_ZERO_STRUCT(*decoder);
+ decoder->coder = coder;
+}
+
+void aws_huffman_decoder_reset(struct aws_huffman_decoder *decoder) {
+
+ decoder->working_bits = 0;
+ decoder->num_bits = 0;
+}
+
+void aws_huffman_decoder_allow_growth(struct aws_huffman_decoder *decoder, bool allow_growth) {
+ decoder->allow_growth = allow_growth;
+}
+
+/* Much of encode is written in a helper function,
+ so this struct helps avoid passing all the parameters through by hand */
+struct encoder_state {
+ struct aws_huffman_encoder *encoder;
+ struct aws_byte_buf *output_buf;
+ uint8_t working;
+ uint8_t bit_pos;
+};
+
+/* Helper function to write a single bit_pattern to memory (or working_bits if
+ * out of buffer space) */
+static int encode_write_bit_pattern(struct encoder_state *state, struct aws_huffman_code bit_pattern) {
+ AWS_PRECONDITION(state->output_buf->len < state->output_buf->capacity);
+
+ if (bit_pattern.num_bits == 0) {
+ return aws_raise_error(AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL);
+ }
+
+ uint8_t bits_to_write = bit_pattern.num_bits;
+ while (bits_to_write > 0) {
+ uint8_t bits_for_current = bits_to_write > state->bit_pos ? state->bit_pos : bits_to_write;
+ /* Chop off the top 0s and bits that have already been read */
+ uint8_t bits_to_cut =
+ (BITSIZEOF(bit_pattern.pattern) - bit_pattern.num_bits) + (bit_pattern.num_bits - bits_to_write);
+
+ /* Write the appropriate number of bits to this byte
+ Shift to the left to cut any unneeded bits
+ Shift to the right to position the bits correctly */
+ state->working |= (bit_pattern.pattern << bits_to_cut) >> (MAX_PATTERN_BITS - state->bit_pos);
+
+ bits_to_write -= bits_for_current;
+ state->bit_pos -= bits_for_current;
+
+ if (state->bit_pos == 0) {
+ /* Save the whole byte */
+ aws_byte_buf_write_u8(state->output_buf, state->working);
+
+ state->bit_pos = 8;
+ state->working = 0;
+
+ if (state->output_buf->len == state->output_buf->capacity) {
+ state->encoder->overflow_bits.num_bits = bits_to_write;
+
+ if (bits_to_write) {
+ /* If buffer is full and there are remaining bits, save them to overflow and return */
+ bits_to_cut += bits_for_current;
+
+ state->encoder->overflow_bits.pattern =
+ (bit_pattern.pattern << bits_to_cut) >> (MAX_PATTERN_BITS - bits_to_write);
+
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+size_t aws_huffman_get_encoded_length(struct aws_huffman_encoder *encoder, struct aws_byte_cursor to_encode) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode));
+
+ size_t num_bits = 0;
+
+ while (to_encode.len) {
+ uint8_t new_byte = 0;
+ aws_byte_cursor_read_u8(&to_encode, &new_byte);
+ struct aws_huffman_code code_point = encoder->coder->encode(new_byte, encoder->coder->userdata);
+ num_bits += code_point.num_bits;
+ }
+
+ size_t length = num_bits / 8;
+
+ /* Round up */
+ if (num_bits % 8) {
+ ++length;
+ }
+
+ return length;
+}
+
+int aws_huffman_encode(
+ struct aws_huffman_encoder *encoder,
+ struct aws_byte_cursor *to_encode,
+ struct aws_byte_buf *output) {
+
+ AWS_ASSERT(encoder);
+ AWS_ASSERT(encoder->coder);
+ AWS_ASSERT(to_encode);
+ AWS_ASSERT(output);
+
+ struct encoder_state state = {
+ .working = 0,
+ .bit_pos = 8,
+ };
+ state.encoder = encoder;
+ state.output_buf = output;
+
+ /* Write any bits leftover from previous invocation */
+ if (encoder->overflow_bits.num_bits) {
+ if (output->len == output->capacity) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ if (encode_write_bit_pattern(&state, encoder->overflow_bits)) {
+ return AWS_OP_ERR;
+ }
+
+ encoder->overflow_bits.num_bits = 0;
+ }
+
+ while (to_encode->len) {
+ if (output->len == output->capacity) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ uint8_t new_byte = 0;
+ aws_byte_cursor_read_u8(to_encode, &new_byte);
+ struct aws_huffman_code code_point = encoder->coder->encode(new_byte, encoder->coder->userdata);
+
+ if (encode_write_bit_pattern(&state, code_point)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* The following code only runs when the entire input has been encoded successfully */
+
+ /* If whole buffer processed, write EOS */
+ if (state.bit_pos != 8) {
+ struct aws_huffman_code eos_cp;
+ eos_cp.pattern = encoder->eos_padding;
+ eos_cp.num_bits = state.bit_pos;
+ encode_write_bit_pattern(&state, eos_cp);
+ AWS_ASSERT(state.bit_pos == 8);
+ }
+
+ return AWS_OP_SUCCESS;
+}
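When the output has to go out in fixed-size chunks, the AWS_ERROR_SHORT_BUFFER / overflow_bits path above is driven by calling aws_huffman_encode in a loop, much like huffman_test_transitive_chunked later in this patch. A minimal sketch, assuming a caller-provided, non-empty chunk buffer; the chunk sink `s_flush_chunk` is hypothetical:

```
#include <aws/common/byte_buf.h>
#include <aws/common/error.h>
#include <aws/compression/huffman.h>

/* Hypothetical sink that consumes chunk->buffer[0..chunk->len) */
extern void s_flush_chunk(const struct aws_byte_buf *chunk, void *ctx);

static int s_encode_chunked(
    struct aws_huffman_encoder *encoder, /* already initialized with a coder */
    struct aws_byte_cursor input,
    struct aws_byte_buf *chunk, /* pre-allocated, fixed, non-zero capacity */
    void *ctx) {

    int result = AWS_OP_ERR;
    do {
        result = aws_huffman_encode(encoder, &input, chunk);
        if (result != AWS_OP_SUCCESS && aws_last_error() != AWS_ERROR_SHORT_BUFFER) {
            return AWS_OP_ERR; /* e.g. AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL */
        }
        /* Hand the filled bytes off and reuse the chunk; any partial code is
         * carried over in encoder->overflow_bits for the next call. */
        s_flush_chunk(chunk, ctx);
        chunk->len = 0;
    } while (result != AWS_OP_SUCCESS);

    return AWS_OP_SUCCESS;
}
```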
+
+/* Decode's reading is written in a helper function,
+ so this struct helps avoid passing all the parameters through by hand */
+struct huffman_decoder_state {
+ struct aws_huffman_decoder *decoder;
+ struct aws_byte_cursor *input_cursor;
+};
+
+static void decode_fill_working_bits(struct huffman_decoder_state *state) {
+
+ /* Read from bytes in the buffer until there are enough bytes to process */
+ while (state->decoder->num_bits < MAX_PATTERN_BITS && state->input_cursor->len) {
+
+ /* Read the appropriate number of bits from this byte */
+ uint8_t new_byte = 0;
+ aws_byte_cursor_read_u8(state->input_cursor, &new_byte);
+
+ uint64_t positioned = ((uint64_t)new_byte)
+ << (BITSIZEOF(state->decoder->working_bits) - 8 - state->decoder->num_bits);
+ state->decoder->working_bits |= positioned;
+
+ state->decoder->num_bits += 8;
+ }
+}
+
+int aws_huffman_decode(
+ struct aws_huffman_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_byte_buf *output) {
+
+ AWS_ASSERT(decoder);
+ AWS_ASSERT(decoder->coder);
+ AWS_ASSERT(to_decode);
+ AWS_ASSERT(output);
+
+ struct huffman_decoder_state state;
+ state.decoder = decoder;
+ state.input_cursor = to_decode;
+
+ /* Tracks how many bits are still available to decode (working bits plus unread input) */
+ size_t bits_left = decoder->num_bits + to_decode->len * 8;
+
+ while (1) {
+
+ decode_fill_working_bits(&state);
+
+ uint8_t symbol;
+ uint8_t bits_read = decoder->coder->decode(
+ (uint32_t)(decoder->working_bits >> (BITSIZEOF(decoder->working_bits) - MAX_PATTERN_BITS)),
+ &symbol,
+ decoder->coder->userdata);
+
+ if (bits_read == 0) {
+ if (bits_left < MAX_PATTERN_BITS) {
+ /* More input is needed to continue */
+ return AWS_OP_SUCCESS;
+ }
+ /* Unknown symbol found */
+ return aws_raise_error(AWS_ERROR_COMPRESSION_UNKNOWN_SYMBOL);
+ }
+ if (bits_read > bits_left) {
+ /* Check if the buffer has been overrun.
+ Note: because of the check in decode_fill_working_bits,
+ the buffer won't actually overrun, instead there will
+ be 0's in the bottom of working_bits. */
+
+ return AWS_OP_SUCCESS;
+ }
+
+ if (output->len == output->capacity) {
+ /* Check if we've hit the end of the output buffer.
+ * Grow buffer, or raise error, depending on settings */
+ if (decoder->allow_growth) {
+ /* Double the capacity */
+ if (aws_byte_buf_reserve_relative(output, output->capacity)) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ }
+
+ bits_left -= bits_read;
+ decoder->working_bits <<= bits_read;
+ decoder->num_bits -= bits_read;
+
+ /* Store the found symbol */
+ aws_byte_buf_write_u8(output, symbol);
+
+ /* Successfully decoded whole buffer */
+ if (bits_left == 0) {
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ /* This case is unreachable */
+ AWS_ASSERT(0);
+}
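To illustrate the allow_growth path above, here is a minimal decode sketch (assuming the symbol coder is supplied by the caller) that lets the decoder grow the output buffer instead of failing with AWS_ERROR_SHORT_BUFFER:

```
#include <aws/common/byte_buf.h>
#include <aws/compression/huffman.h>

static int s_decode_all(
    struct aws_huffman_symbol_coder *coder,
    struct aws_allocator *allocator,
    struct aws_byte_cursor encoded,
    struct aws_byte_buf *out) {

    struct aws_huffman_decoder decoder;
    aws_huffman_decoder_init(&decoder, coder);
    aws_huffman_decoder_allow_growth(&decoder, true);

    /* Start small; with growth allowed the decoder doubles capacity on demand. */
    if (aws_byte_buf_init(out, allocator, 64)) {
        return AWS_OP_ERR;
    }
    if (aws_huffman_decode(&decoder, &encoded, out)) {
        aws_byte_buf_clean_up(out);
        return AWS_OP_ERR;
    }
    return AWS_OP_SUCCESS;
}
```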
diff --git a/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c b/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
new file mode 100644
index 0000000000..67d17a2c2c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/source/huffman_testing.c
@@ -0,0 +1,173 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * See aws/testing/compression/huffman.h for docs.
+ */
+#define AWS_UNSTABLE_TESTING_API
+#include <aws/compression/private/huffman_testing.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/common.h>
+
+int huffman_test_transitive(
+ struct aws_huffman_symbol_coder *coder,
+ const char *input,
+ size_t size,
+ size_t encoded_size,
+ const char **error_string) {
+
+ struct aws_huffman_encoder encoder;
+ aws_huffman_encoder_init(&encoder, coder);
+ struct aws_huffman_decoder decoder;
+ aws_huffman_decoder_init(&decoder, coder);
+
+ const size_t intermediate_buffer_size = size * 2;
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, intermediate_buffer, intermediate_buffer_size);
+ memset(intermediate_buffer, 0, intermediate_buffer_size);
+ AWS_VARIABLE_LENGTH_ARRAY(char, output_buffer, size);
+ memset(output_buffer, 0, size);
+
+ struct aws_byte_cursor to_encode = aws_byte_cursor_from_array((uint8_t *)input, size);
+ struct aws_byte_buf intermediate_buf = aws_byte_buf_from_empty_array(intermediate_buffer, intermediate_buffer_size);
+ struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, size);
+
+ int result = aws_huffman_encode(&encoder, &to_encode, &intermediate_buf);
+
+ if (result != AWS_OP_SUCCESS) {
+ *error_string = "aws_huffman_encode failed";
+ return AWS_OP_ERR;
+ }
+ if (to_encode.len != 0) {
+ *error_string = "not all data encoded";
+ return AWS_OP_ERR;
+ }
+ if (encoded_size && intermediate_buf.len != encoded_size) {
+ *error_string = "encoded length is incorrect";
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor intermediate_cur = aws_byte_cursor_from_buf(&intermediate_buf);
+ result = aws_huffman_decode(&decoder, &intermediate_cur, &output_buf);
+
+ if (result != AWS_OP_SUCCESS) {
+ *error_string = "aws_huffman_decode failed";
+ return AWS_OP_ERR;
+ }
+ if (intermediate_cur.len != 0) {
+ *error_string = "not all encoded data was decoded";
+ return AWS_OP_ERR;
+ }
+ if (output_buf.len != size) {
+ *error_string = "decode output size incorrect";
+ return AWS_OP_ERR;
+ }
+ if (memcmp(input, output_buffer, size) != 0) {
+ *error_string = "decoded data does not match input data";
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int huffman_test_transitive_chunked(
+ struct aws_huffman_symbol_coder *coder,
+ const char *input,
+ size_t size,
+ size_t encoded_size,
+ size_t output_chunk_size,
+ const char **error_string) {
+
+ struct aws_huffman_encoder encoder;
+ aws_huffman_encoder_init(&encoder, coder);
+ struct aws_huffman_decoder decoder;
+ aws_huffman_decoder_init(&decoder, coder);
+
+ const size_t intermediate_buffer_size = size * 2;
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, intermediate_buffer, intermediate_buffer_size);
+ memset(intermediate_buffer, 0, intermediate_buffer_size);
+ AWS_VARIABLE_LENGTH_ARRAY(char, output_buffer, size);
+ memset(output_buffer, 0, size);
+
+ struct aws_byte_cursor to_encode = aws_byte_cursor_from_array(input, size);
+ struct aws_byte_buf intermediate_buf = aws_byte_buf_from_empty_array(intermediate_buffer, (size_t)-1);
+ intermediate_buf.capacity = 0;
+ struct aws_byte_buf output_buf = aws_byte_buf_from_empty_array(output_buffer, (size_t)-1);
+ output_buf.capacity = 0;
+
+ int result = AWS_OP_SUCCESS;
+
+ {
+ do {
+ const size_t previous_intermediate_len = intermediate_buf.len;
+
+ intermediate_buf.capacity += output_chunk_size;
+ result = aws_huffman_encode(&encoder, &to_encode, &intermediate_buf);
+
+ if (intermediate_buf.len == previous_intermediate_len) {
+ *error_string = "encode didn't write any data";
+ return AWS_OP_ERR;
+ }
+
+ if (result != AWS_OP_SUCCESS && aws_last_error() != AWS_ERROR_SHORT_BUFFER) {
+ *error_string = "encode returned wrong error code";
+ return AWS_OP_ERR;
+ }
+ } while (result != AWS_OP_SUCCESS);
+ }
+
+ if (result != AWS_OP_SUCCESS) {
+ *error_string = "aws_huffman_encode failed";
+ return AWS_OP_ERR;
+ }
+ if (intermediate_buf.len > intermediate_buffer_size) {
+ *error_string = "too much data encoded";
+ return AWS_OP_ERR;
+ }
+ if (encoded_size && intermediate_buf.len != encoded_size) {
+ *error_string = "encoded length is incorrect";
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_cursor intermediate_cur = aws_byte_cursor_from_buf(&intermediate_buf);
+
+ {
+ do {
+ const size_t previous_output_len = output_buf.len;
+
+ output_buf.capacity += output_chunk_size;
+ if (output_buf.capacity > size) {
+ output_buf.capacity = size;
+ }
+
+ result = aws_huffman_decode(&decoder, &intermediate_cur, &output_buf);
+
+ if (output_buf.len == previous_output_len) {
+ *error_string = "decode didn't write any data";
+ return AWS_OP_ERR;
+ }
+
+ if (result != AWS_OP_SUCCESS && aws_last_error() != AWS_ERROR_SHORT_BUFFER) {
+ *error_string = "decode returned wrong error code";
+ return AWS_OP_ERR;
+ }
+ } while (result != AWS_OP_SUCCESS);
+ }
+
+ if (result != AWS_OP_SUCCESS) {
+ *error_string = "aws_huffman_decode failed";
+ return AWS_OP_ERR;
+ }
+ if (output_buf.len != size) {
+ *error_string = "decode output size incorrect";
+ return AWS_OP_ERR;
+ }
+ if (memcmp(input, output_buffer, size) != 0) {
+ *error_string = "decoded data does not match input data";
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-compression/ya.make b/contrib/restricted/aws/aws-c-compression/ya.make
new file mode 100644
index 0000000000..aca95ecfb5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-compression/ya.make
@@ -0,0 +1,37 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.2.16)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-compression/archive/v0.2.16.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-common
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-compression/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+)
+
+SRCS(
+ source/compression.c
+ source/huffman.c
+ source/huffman_testing.c
+)
+
+END()
diff --git a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-arm64.txt
index d7f9cd17a6..0e233170c2 100644
--- a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-arm64.txt
+++ b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-arm64.txt
@@ -16,20 +16,17 @@ target_compile_options(restricted-aws-aws-c-event-stream PRIVATE
-DAWS_IO_USE_IMPORT_EXPORT
-DAWS_USE_EPOLL
-DHAVE_SYSCONF
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
-DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
-DS2N_FEATURES_AVAILABLE
- -DS2N_HAVE_EXECINFO
-DS2N_KYBER512R3_AVX2_BMI2
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
-DS2N_MADVISE_SUPPORTED
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
diff --git a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-x86_64.txt
index d7f9cd17a6..0e233170c2 100644
--- a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.darwin-x86_64.txt
@@ -16,20 +16,17 @@ target_compile_options(restricted-aws-aws-c-event-stream PRIVATE
-DAWS_IO_USE_IMPORT_EXPORT
-DAWS_USE_EPOLL
-DHAVE_SYSCONF
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
-DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
-DS2N_FEATURES_AVAILABLE
- -DS2N_HAVE_EXECINFO
-DS2N_KYBER512R3_AVX2_BMI2
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
-DS2N_MADVISE_SUPPORTED
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
diff --git a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-aarch64.txt
index e754eefb5a..bbca582240 100644
--- a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-aarch64.txt
+++ b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-aarch64.txt
@@ -16,20 +16,17 @@ target_compile_options(restricted-aws-aws-c-event-stream PRIVATE
-DAWS_IO_USE_IMPORT_EXPORT
-DAWS_USE_EPOLL
-DHAVE_SYSCONF
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
-DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
-DS2N_FEATURES_AVAILABLE
- -DS2N_HAVE_EXECINFO
-DS2N_KYBER512R3_AVX2_BMI2
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
-DS2N_MADVISE_SUPPORTED
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
diff --git a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-x86_64.txt
index e754eefb5a..bbca582240 100644
--- a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.linux-x86_64.txt
@@ -16,20 +16,17 @@ target_compile_options(restricted-aws-aws-c-event-stream PRIVATE
-DAWS_IO_USE_IMPORT_EXPORT
-DAWS_USE_EPOLL
-DHAVE_SYSCONF
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
-DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
-DS2N_FEATURES_AVAILABLE
- -DS2N_HAVE_EXECINFO
-DS2N_KYBER512R3_AVX2_BMI2
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
-DS2N_MADVISE_SUPPORTED
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
diff --git a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.windows-x86_64.txt
index d7f9cd17a6..0e233170c2 100644
--- a/contrib/restricted/aws/aws-c-event-stream/CMakeLists.windows-x86_64.txt
+++ b/contrib/restricted/aws/aws-c-event-stream/CMakeLists.windows-x86_64.txt
@@ -16,20 +16,17 @@ target_compile_options(restricted-aws-aws-c-event-stream PRIVATE
-DAWS_IO_USE_IMPORT_EXPORT
-DAWS_USE_EPOLL
-DHAVE_SYSCONF
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
-DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
-DS2N_FEATURES_AVAILABLE
- -DS2N_HAVE_EXECINFO
-DS2N_KYBER512R3_AVX2_BMI2
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
-DS2N_MADVISE_SUPPORTED
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
$<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
)
diff --git a/contrib/restricted/aws/aws-c-event-stream/README.md b/contrib/restricted/aws/aws-c-event-stream/README.md
index 58dfe5ec79..797ca7aa2c 100644
--- a/contrib/restricted/aws/aws-c-event-stream/README.md
+++ b/contrib/restricted/aws/aws-c-event-stream/README.md
@@ -10,61 +10,44 @@ This library is licensed under the Apache 2.0 License.
### Building
-#### Building s2n (Linux Only)
+CMake 3.1+ is required to build.
-If you are building on Linux, you will need to build s2n before being able to build aws-c-io. For our CRT's, we build s2n at a specific commit, and recommend doing the same when using it with this library. That commit hash can be found [here](https://github.com/awslabs/aws-crt-cpp/tree/main/crt). The commands below will build s2n using OpenSSL 1.1.1. For using other versions of OpenSSL, there is additional information in the [s2n Usage Guide](https://github.com/awslabs/s2n/blob/main/docs/USAGE-GUIDE.md).
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
```
-git clone git@github.com:awslabs/s2n.git
-cd s2n
-git checkout <s2n-commit-hash-used-by-aws-crt-cpp>
-
-# We keep the build artifacts in the -build directory
-cd libcrypto-build
-
-# Download the latest version of OpenSSL
-curl -LO https://www.openssl.org/source/openssl-1.1.1-latest.tar.gz
-tar -xzvf openssl-1.1.1-latest.tar.gz
-
-# Build openssl libcrypto. Note that the install path specified here must be absolute.
-cd `tar ztf openssl-1.1.1-latest.tar.gz | head -n1 | cut -f1 -d/`
-./config -fPIC no-shared \
- no-md2 no-rc5 no-rfc3779 no-sctp no-ssl-trace no-zlib \
- no-hw no-mdc2 no-seed no-idea enable-ec_nistp_64_gcc_128 no-camellia\
- no-bf no-ripemd no-dsa no-ssl2 no-ssl3 no-capieng \
- -DSSL_FORBID_ENULL -DOPENSSL_NO_DTLS1 -DOPENSSL_NO_HEARTBEATS \
- --prefix=<absolute-install-path>
-make
-make install
-
-# Build s2n
-cd ../../../
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S s2n -B s2n/build
-cmake --build s2n/build --target install
+git clone git@github.com:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone git@github.com:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
```
#### Building aws-c-event-stream and Remaining Dependencies
-Note that aws-c-event-stream has several dependencies:
-
```
git clone git@github.com:awslabs/aws-c-common.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-common -B aws-c-common/build
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
cmake --build aws-c-common/build --target install
git clone git@github.com:awslabs/aws-checksums.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-checksums -B aws-checksums/build
+cmake -S aws-checksums -B aws-checksums/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-checksums/build --target install
git clone git@github.com:awslabs/aws-c-cal.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-cal -B aws-c-cal/build
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-cal/build --target install
git clone git@github.com:awslabs/aws-c-io.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-io -B aws-c-io/build
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-io/build --target install
git clone git@github.com:awslabs/aws-c-event-stream.git
-cmake -DCMAKE_PREFIX_PATH=<install-path> -DCMAKE_INSTALL_PREFIX=<install-path> -S aws-c-event-stream -B aws-c-event-stream/build
+cmake -S aws-c-event-stream -B aws-c-event-stream/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
cmake --build aws-c-event-stream/build --target install
```
diff --git a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream.h b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream.h
index 41302db9a1..f4edb12350 100644
--- a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream.h
+++ b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream.h
@@ -20,6 +20,12 @@
/* max header size is 128kb */
#define AWS_EVENT_STREAM_MAX_HEADERS_SIZE (128 * 1024)
+/* Max header name length is 127 bytes */
+#define AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX (INT8_MAX)
+
+/* Max header static value length is 16 bytes */
+#define AWS_EVENT_STREAM_HEADER_STATIC_VALUE_LEN_MAX (16)
+
enum aws_event_stream_errors {
AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_EVENT_STREAM_PACKAGE_ID),
AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN,
@@ -54,8 +60,7 @@ struct aws_event_stream_message_prelude {
struct aws_event_stream_message {
struct aws_allocator *alloc;
- uint8_t *message_buffer;
- uint8_t owns_buffer;
+ struct aws_byte_buf message_buffer;
};
#define AWS_EVENT_STREAM_PRELUDE_LENGTH (uint32_t)(sizeof(uint32_t) + sizeof(uint32_t) + sizeof(uint32_t))
@@ -76,13 +81,14 @@ enum aws_event_stream_header_value_type {
AWS_EVENT_STREAM_HEADER_UUID
};
+static const uint16_t UUID_LEN = 16U;
struct aws_event_stream_header_value_pair {
uint8_t header_name_len;
char header_name[INT8_MAX];
enum aws_event_stream_header_value_type header_value_type;
union {
uint8_t *variable_len_val;
- uint8_t static_val[16];
+ uint8_t static_val[AWS_EVENT_STREAM_HEADER_STATIC_VALUE_LEN_MAX];
} header_value;
uint16_t header_value_len;
@@ -127,6 +133,15 @@ typedef void(aws_event_stream_header_received_fn)(
void *user_data);
/**
+ * Called by aws_event_stream_streaming_decoder when decoding of a message is complete
+ * and its CRC has been verified.
+ */
+typedef void(aws_event_stream_on_complete_fn)(
+ struct aws_event_stream_streaming_decoder *decoder,
+ uint32_t message_crc,
+ void *user_data);
+
+/**
 * Called by aws_event_stream_streaming_decoder when an error is encountered. The decoder is not in a good state for
* usage after this callback.
*/
@@ -150,10 +165,48 @@ struct aws_event_stream_streaming_decoder {
aws_event_stream_process_on_payload_segment_fn *on_payload;
aws_event_stream_prelude_received_fn *on_prelude;
aws_event_stream_header_received_fn *on_header;
+ aws_event_stream_on_complete_fn *on_complete;
aws_event_stream_on_error_fn *on_error;
void *user_context;
};
+struct aws_event_stream_streaming_decoder_options {
+ /**
+ * (Required)
+ * Invoked repeatedly as payload segments are received.
+ * See `aws_event_stream_process_on_payload_segment_fn`.
+ */
+ aws_event_stream_process_on_payload_segment_fn *on_payload_segment;
+ /**
+ * (Required)
+ * Invoked when a new message has arrived. The prelude will contain metadata about the message.
+ * See `aws_event_stream_prelude_received_fn`.
+ */
+ aws_event_stream_prelude_received_fn *on_prelude;
+ /**
+ * (Required)
+ * Invoked repeatedly as headers are received.
+ * See `aws_event_stream_header_received_fn`.
+ */
+ aws_event_stream_header_received_fn *on_header;
+ /**
+ * (Optional)
+ * Invoked if a message is decoded successfully.
+ * See `aws_event_stream_on_complete_fn`.
+ */
+ aws_event_stream_on_complete_fn *on_complete;
+ /**
+ * (Required)
+ * Invoked when an error is encountered. The decoder is not in a good state for usage after this callback.
+ * See `aws_event_stream_on_error_fn`.
+ */
+ aws_event_stream_on_error_fn *on_error;
+ /**
+ * (Optional)
+ * user_data passed to callbacks.
+ */
+ void *user_data;
+};
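A short usage sketch for the new options-based init follows. The callbacks are assumed to be defined elsewhere with the typedef'd signatures above; the `s_on_*` names and `s_init_decoder` helper are placeholders, not part of the library:

```
#include <aws/common/common.h>
#include <aws/event-stream/event_stream.h>

/* Callbacks defined elsewhere with the typedef'd signatures above. */
extern aws_event_stream_process_on_payload_segment_fn s_on_payload;
extern aws_event_stream_prelude_received_fn s_on_prelude;
extern aws_event_stream_header_received_fn s_on_header;
extern aws_event_stream_on_complete_fn s_on_complete;
extern aws_event_stream_on_error_fn s_on_error;

static void s_init_decoder(struct aws_event_stream_streaming_decoder *decoder, void *app_state) {
    struct aws_event_stream_streaming_decoder_options options = {
        .on_payload_segment = s_on_payload,
        .on_prelude = s_on_prelude,
        .on_header = s_on_header,
        .on_complete = s_on_complete, /* optional */
        .on_error = s_on_error,
        .user_data = app_state,       /* optional */
    };
    aws_event_stream_streaming_decoder_init_from_options(decoder, aws_default_allocator(), &options);
}
```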
AWS_EXTERN_C_BEGIN
/**
@@ -164,8 +217,8 @@ AWS_EXTERN_C_BEGIN
AWS_EVENT_STREAM_API int aws_event_stream_message_init(
struct aws_event_stream_message *message,
struct aws_allocator *alloc,
- struct aws_array_list *headers,
- struct aws_byte_buf *payload);
+ const struct aws_array_list *headers,
+ const struct aws_byte_buf *payload);
/**
* Zero allocation, Zero copy. The message will simply wrap the buffer. The message functions are only useful as long as
@@ -244,6 +297,22 @@ AWS_EVENT_STREAM_API const uint8_t *aws_event_stream_message_buffer(const struct
AWS_EVENT_STREAM_API uint32_t
aws_event_stream_compute_headers_required_buffer_len(const struct aws_array_list *headers);
+/**
+ * Writes headers to buf assuming buf is large enough to hold the data. Prefer this function over the unsafe variant
+ * 'aws_event_stream_write_headers_to_buffer'.
+ *
+ * Returns AWS_OP_SUCCESS if the headers were successfully and completely written and AWS_OP_ERR otherwise.
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_write_headers_to_buffer_safe(
+ const struct aws_array_list *headers,
+ struct aws_byte_buf *buf);
+
+/**
+ * Deprecated in favor of 'aws_event_stream_write_headers_to_buffer_safe' as this API is unsafe.
+ *
+ * Writes headers to buffer and returns the length of bytes written to buffer. Assumes buffer is large enough to
+ * store the headers.
+ */
AWS_EVENT_STREAM_API size_t
aws_event_stream_write_headers_to_buffer(const struct aws_array_list *headers, uint8_t *buffer);
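A sketch of the safe serialization path described above; the allocator and header list are assumed to come from the caller, and `s_serialize_headers` is a hypothetical helper:

```
#include <aws/common/byte_buf.h>
#include <aws/event-stream/event_stream.h>

static int s_serialize_headers(
    struct aws_allocator *allocator,
    const struct aws_array_list *headers,
    struct aws_byte_buf *out) {

    uint32_t needed = aws_event_stream_compute_headers_required_buffer_len(headers);
    if (aws_byte_buf_init(out, allocator, needed)) {
        return AWS_OP_ERR;
    }
    if (aws_event_stream_write_headers_to_buffer_safe(headers, out)) {
        aws_byte_buf_clean_up(out);
        return AWS_OP_ERR; /* e.g. AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN */
    }
    /* out->len bytes of serialized headers now sit in out->buffer */
    return AWS_OP_SUCCESS;
}
```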
@@ -256,7 +325,19 @@ AWS_EVENT_STREAM_API int aws_event_stream_read_headers_from_buffer(
struct aws_array_list *headers,
const uint8_t *buffer,
size_t headers_len);
+
+/**
+ * Initialize a streaming decoder for messages with callbacks for usage
+ * and an optional user context pointer.
+ */
+AWS_EVENT_STREAM_API
+void aws_event_stream_streaming_decoder_init_from_options(
+ struct aws_event_stream_streaming_decoder *decoder,
+ struct aws_allocator *allocator,
+ const struct aws_event_stream_streaming_decoder_options *options);
+
/**
+ * Deprecated. Use aws_event_stream_streaming_decoder_init_from_options instead.
* Initialize a streaming decoder for messages with callbacks for usage and an optional user context pointer.
*/
AWS_EVENT_STREAM_API void aws_event_stream_streaming_decoder_init(
@@ -383,6 +464,133 @@ AWS_EVENT_STREAM_API int aws_event_stream_add_uuid_header(
const uint8_t *value);
/**
+ * Adds a generic header to the list of headers.
+ * Makes a copy of the underlying data.
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_header(
+ struct aws_array_list *headers,
+ const struct aws_event_stream_header_value_pair *header);
+
+/* Cursor-based header APIs */
+
+/**
+ * Adds a boolean-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_bool_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ bool value);
+
+/**
+ * Adds a byte-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_byte_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int8_t value);
+
+/**
+ * Adds an int16-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_int16_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int16_t value);
+
+/**
+ * Adds an int32-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_int32_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int32_t value);
+
+/**
+ * Adds an int64-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_int64_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int64_t value);
+
+/**
+ * Adds a string-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_string_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
+/**
+ * Adds a byte_buf-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_byte_buf_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
+/**
+ * Adds a timestamp-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_timestamp_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int64_t value);
+
+/**
+ * Adds a uuid-valued header to a header list
+ *
+ * @param headers header list to add to
+ * @param name name of the header to add
+ * @param value value of the header to add
+ * @return AWS_OP_SUCCESS on success, AWS_OP_ERR on failure
+ */
+AWS_EVENT_STREAM_API int aws_event_stream_add_uuid_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
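As a sketch of the new by-cursor helpers (the header names and values here are illustrative only, and `s_build_headers` is a hypothetical helper):

```
#include <aws/common/byte_buf.h>
#include <aws/event-stream/event_stream.h>

static int s_build_headers(struct aws_allocator *allocator, struct aws_array_list *headers) {
    if (aws_event_stream_headers_list_init(headers, allocator)) {
        return AWS_OP_ERR;
    }
    if (aws_event_stream_add_string_header_by_cursor(
            headers,
            aws_byte_cursor_from_c_str(":content-type"),
            aws_byte_cursor_from_c_str("application/json")) ||
        aws_event_stream_add_int32_header_by_cursor(
            headers, aws_byte_cursor_from_c_str(":message-flags"), 0)) {
        aws_event_stream_headers_list_cleanup(headers);
        return AWS_OP_ERR;
    }
    return AWS_OP_SUCCESS;
}
```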
+/**
* Returns the header name. Note: this value is not null terminated
*/
AWS_EVENT_STREAM_API struct aws_byte_buf aws_event_stream_header_name(
diff --git a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_client.h b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_client.h
index 7962fd74cf..aeea093896 100644
--- a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_client.h
+++ b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_client.h
@@ -51,8 +51,20 @@ typedef void(aws_event_stream_rpc_client_on_connection_shutdown_fn)(
void *user_data);
/**
- * Invoked when a connection attempt completes. error_code of 0 indicates success, anything else indicates failure.
- * If the connection attempt fails, aws_event_stream_rpc_client_on_connection_shutdown_fn will not be invoked. *
+ * Invoked when a connection attempt completes.
+ *
+ * If the attempt was unsuccessful, the error_code will be non-zero and the connection pointer will be NULL,
+ * and aws_event_stream_rpc_client_on_connection_shutdown_fn will not be invoked.
+ *
+ * If the attempt was successful, error_code will be 0 and the connection pointer will be valid.
+ * You must call aws_event_stream_rpc_client_connection_acquire()
+ * to prevent the pointer's memory from being destroyed before you are ready.
+ * When you are completely done with the connection pointer you must call
+ * aws_event_stream_rpc_client_connection_release() or its memory will leak.
+ * aws_event_stream_rpc_client_on_connection_shutdown_fn will be invoked
+ * when the network connection has closed. If you are done with the connection,
+ * but it is still open, you must call aws_event_stream_rpc_client_close()
+ * or the network connection will remain open, even if you call release().
*/
typedef void(aws_event_stream_rpc_client_on_connection_setup_fn)(
struct aws_event_stream_rpc_client_connection *connection,
diff --git a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_server.h b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_server.h
index 0db8a1c29e..ae82f1b796 100644
--- a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_server.h
+++ b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/event_stream_rpc_server.h
@@ -46,6 +46,12 @@ typedef void(aws_event_stream_rpc_server_connection_protocol_message_fn)(
/**
* Invoked when a new stream has been received on the connection. If you return AWS_OP_SUCCESS (0),
 * you must fill in the fields for the continuation options or the program will assert and exit.
+ *
+ * A failure path MUST leave the ref count of the continuation alone.
+ *
+ * A success path should take a ref, which (assuming no other interference) leaves the continuation's
+ * ref count at two after creation completes: one held by the connection's continuation table, and one
+ * held by the callback recipient, which is presumably tracking it as well.
*/
typedef int(aws_event_stream_rpc_server_on_incoming_stream_fn)(
struct aws_event_stream_rpc_server_connection *connection,
@@ -129,6 +135,13 @@ AWS_EVENT_STREAM_API void aws_event_stream_rpc_server_listener_release(
struct aws_event_stream_rpc_server_listener *listener);
/**
+ * Get the local port which the listener's socket is bound to.
+ */
+AWS_EVENT_STREAM_API
+uint16_t aws_event_stream_rpc_server_listener_get_bound_port(
+ const struct aws_event_stream_rpc_server_listener *listener);
+
+/**
* Bypasses server, and creates a connection on an already existing channel. No connection lifetime callbacks will be
+ * invoked on the returned connection. Returns NULL if an error occurs. If and only if you use this API, the returned
* connection is already ref counted and you must call aws_event_stream_rpc_server_connection_release() even if you did
diff --git a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/private/event_stream_rpc_priv.h b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/private/event_stream_rpc_priv.h
index 3f555db8d2..43b10c1b34 100644
--- a/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/private/event_stream_rpc_priv.h
+++ b/contrib/restricted/aws/aws-c-event-stream/include/aws/event-stream/private/event_stream_rpc_priv.h
@@ -29,25 +29,25 @@ static const struct aws_byte_cursor s_json_content_type_value =
static const struct aws_byte_cursor s_invalid_stream_id_error =
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"non-zero stream-id field is only allowed for messages of "
- "type APPLICATION_MESSAGE. The stream id max value is INT32_MAX.\"; }");
+ "type APPLICATION_MESSAGE. The stream id max value is INT32_MAX.\" }");
static const struct aws_byte_cursor s_invalid_client_stream_id_error =
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"stream-id values must be monotonically incrementing. A "
- "stream-id arrived that was lower than the last seen stream-id.\"; }");
+ "stream-id arrived that was lower than the last seen stream-id.\" }");
static const struct aws_byte_cursor s_invalid_new_client_stream_id_error =
AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"stream-id values must be monotonically incrementing. A new "
- "stream-id arrived that was incremented by more than 1.\"; }");
+ "stream-id arrived that was incremented by more than 1.\" }");
-static const struct aws_byte_cursor s_invalid_message_type_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
- "{ \"message\": \"an invalid value for message-type field was received.\"; }");
+static const struct aws_byte_cursor s_invalid_message_type_error =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{ \"message\": \"an invalid value for message-type field was received.\" }");
static const struct aws_byte_cursor s_invalid_message_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
"{ \"message\": \"A message was received with missing required fields. Check that your client is sending at least, "
- ":message-type, :message-flags, and :stream-id\"; }");
+ ":message-type, :message-flags, and :stream-id\" }");
static const struct aws_byte_cursor s_internal_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
- "{ \"message\": \"An error occurred on the peer endpoint. This is not likely caused by your endpoint.\"; }");
+ "{ \"message\": \"An error occurred on the peer endpoint. This is not likely caused by your endpoint.\" }");
static const struct aws_byte_cursor s_connect_not_completed_error = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
"{ \"message\": \"A CONNECT message must be received, and the CONNECT_ACK must be sent in response, before any "
diff --git a/contrib/restricted/aws/aws-c-event-stream/source/event_stream.c b/contrib/restricted/aws/aws-c-event-stream/source/event_stream.c
index 224690f100..4b2c3f509f 100644
--- a/contrib/restricted/aws/aws-c-event-stream/source/event_stream.c
+++ b/contrib/restricted/aws/aws-c-event-stream/source/event_stream.c
@@ -7,6 +7,7 @@
#include <aws/checksums/crc.h>
+#include <aws/common/byte_buf.h>
#include <aws/common/encoding.h>
#include <aws/io/io.h>
@@ -14,7 +15,7 @@
#define LIB_NAME "libaws-c-event-stream"
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(push)
# pragma warning(disable : 4221) /* aggregate initializer using local variable addresses */
# pragma warning(disable : 4204) /* non-constant aggregate initializer */
@@ -128,81 +129,100 @@ uint32_t aws_event_stream_compute_headers_required_buffer_len(const struct aws_a
struct aws_event_stream_header_value_pair *header = NULL;
aws_array_list_get_at_ptr(headers, (void **)&header, i);
-
- headers_len += sizeof(header->header_name_len) + header->header_name_len + 1;
+ AWS_FATAL_ASSERT(
+ !aws_add_size_checked(headers_len, sizeof(header->header_name_len), &headers_len) &&
+ "integer overflow occurred computing total headers length.");
+ AWS_FATAL_ASSERT(
+ !aws_add_size_checked(headers_len, header->header_name_len + 1, &headers_len) &&
+ "integer overflow occurred computing total headers length.");
if (header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING ||
header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF) {
- headers_len += sizeof(header->header_value_len);
+ AWS_FATAL_ASSERT(
+ !aws_add_size_checked(headers_len, sizeof(header->header_value_len), &headers_len) &&
+ "integer overflow occurred computing total headers length.");
}
if (header->header_value_type != AWS_EVENT_STREAM_HEADER_BOOL_FALSE &&
header->header_value_type != AWS_EVENT_STREAM_HEADER_BOOL_TRUE) {
- headers_len += header->header_value_len;
+ AWS_FATAL_ASSERT(
+ !aws_add_size_checked(headers_len, header->header_value_len, &headers_len) &&
+ "integer overflow occurred computing total headers length.");
}
}
return (uint32_t)headers_len;
}
-/* adds the headers represented in the headers list to the buffer.
- returns the new buffer offset for use elsewhere. Assumes buffer length is at least the length of the return value
- from compute_headers_length() */
-size_t aws_event_stream_write_headers_to_buffer(const struct aws_array_list *headers, uint8_t *buffer) {
+int aws_event_stream_write_headers_to_buffer_safe(const struct aws_array_list *headers, struct aws_byte_buf *buf) {
+ AWS_FATAL_PRECONDITION(buf);
+
if (!headers || !aws_array_list_length(headers)) {
- return 0;
+ return AWS_OP_SUCCESS;
}
size_t headers_count = aws_array_list_length(headers);
- uint8_t *buffer_alias = buffer;
for (size_t i = 0; i < headers_count; ++i) {
struct aws_event_stream_header_value_pair *header = NULL;
aws_array_list_get_at_ptr(headers, (void **)&header, i);
- *buffer_alias = (uint8_t)header->header_name_len;
- buffer_alias++;
- memcpy(buffer_alias, header->header_name, (size_t)header->header_name_len);
- buffer_alias += header->header_name_len;
- *buffer_alias = (uint8_t)header->header_value_type;
- buffer_alias++;
+ AWS_RETURN_ERROR_IF(
+ aws_byte_buf_write_u8(buf, header->header_name_len), AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_buf_write(buf, (uint8_t *)header->header_name, (size_t)header->header_name_len),
+ AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_buf_write_u8(buf, (uint8_t)header->header_value_type),
+ AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN);
+
switch (header->header_value_type) {
case AWS_EVENT_STREAM_HEADER_BOOL_FALSE:
case AWS_EVENT_STREAM_HEADER_BOOL_TRUE:
break;
+ /* additions of integers here assume the endianness conversion has already happened */
case AWS_EVENT_STREAM_HEADER_BYTE:
- *buffer_alias = header->header_value.static_val[0];
- buffer_alias++;
- break;
- /* additions of integers here assume the endianness conversion has already happened */
case AWS_EVENT_STREAM_HEADER_INT16:
- memcpy(buffer_alias, header->header_value.static_val, sizeof(uint16_t));
- buffer_alias += sizeof(uint16_t);
- break;
case AWS_EVENT_STREAM_HEADER_INT32:
- memcpy(buffer_alias, header->header_value.static_val, sizeof(uint32_t));
- buffer_alias += sizeof(uint32_t);
- break;
case AWS_EVENT_STREAM_HEADER_INT64:
case AWS_EVENT_STREAM_HEADER_TIMESTAMP:
- memcpy(buffer_alias, header->header_value.static_val, sizeof(uint64_t));
- buffer_alias += sizeof(uint64_t);
+ case AWS_EVENT_STREAM_HEADER_UUID:
+ AWS_RETURN_ERROR_IF(
+ aws_byte_buf_write(buf, header->header_value.static_val, header->header_value_len),
+ AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN);
break;
case AWS_EVENT_STREAM_HEADER_BYTE_BUF:
case AWS_EVENT_STREAM_HEADER_STRING:
- aws_write_u16(header->header_value_len, buffer_alias);
- buffer_alias += sizeof(uint16_t);
- memcpy(buffer_alias, header->header_value.variable_len_val, header->header_value_len);
- buffer_alias += header->header_value_len;
+ AWS_RETURN_ERROR_IF(
+ aws_byte_buf_write_be16(buf, header->header_value_len),
+ AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_buf_write(buf, header->header_value.variable_len_val, header->header_value_len),
+ AWS_ERROR_EVENT_STREAM_INSUFFICIENT_BUFFER_LEN);
break;
- case AWS_EVENT_STREAM_HEADER_UUID:
- memcpy(buffer_alias, header->header_value.static_val, 16);
- buffer_alias += header->header_value_len;
+ default:
+ AWS_FATAL_ASSERT(false && !"Unknown header type!");
break;
}
}
- return buffer_alias - buffer;
+ return AWS_OP_SUCCESS;
+}
+
+/* adds the headers represented in the headers list to the buffer.
+ returns the new buffer offset for use elsewhere. Assumes buffer length is at least the length of the return value
+ from aws_event_stream_compute_headers_required_buffer_len() */
+size_t aws_event_stream_write_headers_to_buffer(const struct aws_array_list *headers, uint8_t *buffer) {
+ AWS_FATAL_PRECONDITION(buffer);
+
+ uint32_t min_buffer_len_assumption = aws_event_stream_compute_headers_required_buffer_len(headers);
+ struct aws_byte_buf safer_buf = aws_byte_buf_from_empty_array(buffer, min_buffer_len_assumption);
+
+ if (aws_event_stream_write_headers_to_buffer_safe(headers, &safer_buf)) {
+ return 0;
+ }
+
+ return safer_buf.len;
}
int aws_event_stream_read_headers_from_buffer(
@@ -210,23 +230,28 @@ int aws_event_stream_read_headers_from_buffer(
const uint8_t *buffer,
size_t headers_len) {
- if (AWS_UNLIKELY(headers_len > AWS_EVENT_STREAM_MAX_HEADERS_SIZE)) {
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_FATAL_PRECONDITION(buffer);
+
+ if (AWS_UNLIKELY(headers_len > (size_t)AWS_EVENT_STREAM_MAX_HEADERS_SIZE)) {
return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED);
}
+ struct aws_byte_cursor buffer_cur = aws_byte_cursor_from_array(buffer, headers_len);
/* iterate the buffer per header. */
- const uint8_t *buffer_start = buffer;
- while ((size_t)(buffer - buffer_start) < headers_len) {
+ while (buffer_cur.len) {
struct aws_event_stream_header_value_pair header;
AWS_ZERO_STRUCT(header);
/* get the header info from the buffer, make sure to increment buffer offset. */
- header.header_name_len = *buffer;
- buffer += sizeof(header.header_name_len);
- memcpy((void *)header.header_name, buffer, (size_t)header.header_name_len);
- buffer += header.header_name_len;
- header.header_value_type = (enum aws_event_stream_header_value_type) * buffer;
- buffer++;
+ aws_byte_cursor_read_u8(&buffer_cur, &header.header_name_len);
+ AWS_RETURN_ERROR_IF(header.header_name_len <= INT8_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read(&buffer_cur, header.header_name, (size_t)header.header_name_len),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read_u8(&buffer_cur, (uint8_t *)&header.header_value_type),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
switch (header.header_value_type) {
case AWS_EVENT_STREAM_HEADER_BOOL_FALSE:
@@ -239,36 +264,46 @@ int aws_event_stream_read_headers_from_buffer(
break;
case AWS_EVENT_STREAM_HEADER_BYTE:
header.header_value_len = sizeof(uint8_t);
- header.header_value.static_val[0] = *buffer;
- buffer++;
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
break;
case AWS_EVENT_STREAM_HEADER_INT16:
header.header_value_len = sizeof(uint16_t);
- memcpy(header.header_value.static_val, buffer, sizeof(uint16_t));
- buffer += sizeof(uint16_t);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
break;
case AWS_EVENT_STREAM_HEADER_INT32:
header.header_value_len = sizeof(uint32_t);
- memcpy(header.header_value.static_val, buffer, sizeof(uint32_t));
- buffer += sizeof(uint32_t);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
break;
case AWS_EVENT_STREAM_HEADER_INT64:
case AWS_EVENT_STREAM_HEADER_TIMESTAMP:
header.header_value_len = sizeof(uint64_t);
- memcpy(header.header_value.static_val, buffer, sizeof(uint64_t));
- buffer += sizeof(uint64_t);
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, header.header_value_len),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
break;
case AWS_EVENT_STREAM_HEADER_BYTE_BUF:
case AWS_EVENT_STREAM_HEADER_STRING:
- header.header_value_len = aws_read_u16(buffer);
- buffer += sizeof(header.header_value_len);
- header.header_value.variable_len_val = (uint8_t *)buffer;
- buffer += header.header_value_len;
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read_be16(&buffer_cur, &header.header_value_len),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
+ AWS_RETURN_ERROR_IF(
+ header.header_value_len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+ AWS_RETURN_ERROR_IF(
+ buffer_cur.len >= header.header_value_len, AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
+ header.header_value.variable_len_val = (uint8_t *)buffer_cur.ptr;
+ aws_byte_cursor_advance(&buffer_cur, header.header_value_len);
break;
case AWS_EVENT_STREAM_HEADER_UUID:
- header.header_value_len = 16;
- memcpy(header.header_value.static_val, buffer, 16);
- buffer += header.header_value_len;
+ header.header_value_len = UUID_LEN;
+ AWS_RETURN_ERROR_IF(
+ aws_byte_cursor_read(&buffer_cur, header.header_value.static_val, UUID_LEN),
+ AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
break;
}
@@ -286,8 +321,10 @@ int aws_event_stream_read_headers_from_buffer(
int aws_event_stream_message_init(
struct aws_event_stream_message *message,
struct aws_allocator *alloc,
- struct aws_array_list *headers,
- struct aws_byte_buf *payload) {
+ const struct aws_array_list *headers,
+ const struct aws_byte_buf *payload) {
+ AWS_FATAL_PRECONDITION(message);
+ AWS_FATAL_PRECONDITION(alloc);
size_t payload_len = payload ? payload->len : 0;
@@ -309,39 +346,33 @@ int aws_event_stream_message_init(
}
message->alloc = alloc;
- message->message_buffer = aws_mem_acquire(message->alloc, total_length);
-
- if (message->message_buffer) {
- message->owns_buffer = 1;
- aws_write_u32(total_length, message->message_buffer);
- uint8_t *buffer_offset = message->message_buffer + sizeof(total_length);
- aws_write_u32(headers_length, buffer_offset);
- buffer_offset += sizeof(headers_length);
+ aws_byte_buf_init(&message->message_buffer, message->alloc, total_length);
- uint32_t running_crc =
- aws_checksums_crc32(message->message_buffer, (int)(buffer_offset - message->message_buffer), 0);
+ aws_byte_buf_write_be32(&message->message_buffer, total_length);
+ aws_byte_buf_write_be32(&message->message_buffer, headers_length);
- const uint8_t *message_crc_boundary_start = buffer_offset;
- aws_write_u32(running_crc, buffer_offset);
- buffer_offset += sizeof(running_crc);
+ uint32_t running_crc = aws_checksums_crc32(message->message_buffer.buffer, (int)message->message_buffer.len, 0);
- if (headers_length) {
- buffer_offset += aws_event_stream_write_headers_to_buffer(headers, buffer_offset);
- }
+ const uint8_t *pre_prelude_marker = message->message_buffer.buffer + message->message_buffer.len;
+ size_t pre_prelude_position_marker = message->message_buffer.len;
+ aws_byte_buf_write_be32(&message->message_buffer, running_crc);
- if (payload) {
- memcpy(buffer_offset, payload->buffer, payload->len);
- buffer_offset += payload->len;
+ if (headers_length) {
+ if (aws_event_stream_write_headers_to_buffer_safe(headers, &message->message_buffer)) {
+ aws_event_stream_message_clean_up(message);
+ return AWS_OP_ERR;
}
+ }
- running_crc = aws_checksums_crc32(
- message_crc_boundary_start, (int)(buffer_offset - message_crc_boundary_start), running_crc);
- aws_write_u32(running_crc, buffer_offset);
-
- return AWS_OP_SUCCESS;
+ if (payload) {
+ aws_byte_buf_write_from_whole_buffer(&message->message_buffer, *payload);
}
- return aws_raise_error(AWS_ERROR_OOM);
+ running_crc = aws_checksums_crc32(
+ pre_prelude_marker, (int)(message->message_buffer.len - pre_prelude_position_marker), running_crc);
+ aws_byte_buf_write_be32(&message->message_buffer, running_crc);
+
+ return AWS_OP_SUCCESS;
}
/* add buffer to the message (non-owning). Verify buffer crcs and that length fields are reasonable. */
@@ -349,16 +380,20 @@ int aws_event_stream_message_from_buffer(
struct aws_event_stream_message *message,
struct aws_allocator *alloc,
struct aws_byte_buf *buffer) {
- AWS_ASSERT(buffer);
+ AWS_FATAL_PRECONDITION(message);
+ AWS_FATAL_PRECONDITION(alloc);
+ AWS_FATAL_PRECONDITION(buffer);
message->alloc = alloc;
- message->owns_buffer = 0;
if (AWS_UNLIKELY(buffer->len < AWS_EVENT_STREAM_PRELUDE_LENGTH + AWS_EVENT_STREAM_TRAILER_LENGTH)) {
return aws_raise_error(AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
}
- uint32_t message_length = aws_read_u32(buffer->buffer + TOTAL_LEN_OFFSET);
+ struct aws_byte_cursor parsing_cur = aws_byte_cursor_from_buf(buffer);
+
+ uint32_t message_length = 0;
+ aws_byte_cursor_read_be32(&parsing_cur, &message_length);
if (AWS_UNLIKELY(message_length != buffer->len)) {
return aws_raise_error(AWS_ERROR_EVENT_STREAM_BUFFER_LENGTH_MISMATCH);
@@ -367,17 +402,21 @@ int aws_event_stream_message_from_buffer(
if (AWS_UNLIKELY(message_length > AWS_EVENT_STREAM_MAX_MESSAGE_SIZE)) {
return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_FIELD_SIZE_EXCEEDED);
}
-
+ /* skip the headers for the moment, we'll handle those later. */
+ aws_byte_cursor_advance(&parsing_cur, sizeof(uint32_t));
uint32_t running_crc = aws_checksums_crc32(buffer->buffer, (int)PRELUDE_CRC_OFFSET, 0);
- uint32_t prelude_crc = aws_read_u32(buffer->buffer + PRELUDE_CRC_OFFSET);
+ uint32_t prelude_crc = 0;
+ const uint8_t *start_of_payload_checksum = parsing_cur.ptr;
+ size_t start_of_payload_checksum_pos = PRELUDE_CRC_OFFSET;
+ aws_byte_cursor_read_be32(&parsing_cur, &prelude_crc);
if (running_crc != prelude_crc) {
return aws_raise_error(AWS_ERROR_EVENT_STREAM_PRELUDE_CHECKSUM_FAILURE);
}
running_crc = aws_checksums_crc32(
- buffer->buffer + PRELUDE_CRC_OFFSET,
- (int)(message_length - PRELUDE_CRC_OFFSET - AWS_EVENT_STREAM_TRAILER_LENGTH),
+ start_of_payload_checksum,
+ (int)(message_length - start_of_payload_checksum_pos - AWS_EVENT_STREAM_TRAILER_LENGTH),
running_crc);
uint32_t message_crc = aws_read_u32(buffer->buffer + message_length - AWS_EVENT_STREAM_TRAILER_LENGTH);
@@ -385,11 +424,14 @@ int aws_event_stream_message_from_buffer(
return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_CHECKSUM_FAILURE);
}
- message->message_buffer = buffer->buffer;
+ message->message_buffer = *buffer;
+ /* we don't own this buffer, this is a zero allocation/copy path. Setting allocator to null will prevent the
+ * clean_up from attempting to free it */
+ message->message_buffer.allocator = NULL;
if (aws_event_stream_message_headers_len(message) >
message_length - AWS_EVENT_STREAM_PRELUDE_LENGTH - AWS_EVENT_STREAM_TRAILER_LENGTH) {
- message->message_buffer = 0;
+ AWS_ZERO_STRUCT(message->message_buffer);
return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
}
@@ -404,17 +446,9 @@ int aws_event_stream_message_from_buffer_copy(
int parse_value = aws_event_stream_message_from_buffer(message, alloc, (struct aws_byte_buf *)buffer);
if (!parse_value) {
- message->message_buffer = aws_mem_acquire(alloc, buffer->len);
-
- if (message->message_buffer) {
- memcpy(message->message_buffer, buffer->buffer, buffer->len);
- message->alloc = alloc;
- message->owns_buffer = 1;
-
- return AWS_OP_SUCCESS;
- }
-
- return aws_raise_error(AWS_ERROR_OOM);
+ aws_byte_buf_init_copy(&message->message_buffer, alloc, buffer);
+ message->alloc = alloc;
+ return AWS_OP_SUCCESS;
}
return parse_value;
@@ -422,47 +456,75 @@ int aws_event_stream_message_from_buffer_copy(
/* if buffer is owned, release the memory. */
void aws_event_stream_message_clean_up(struct aws_event_stream_message *message) {
- if (message->message_buffer && message->owns_buffer) {
- aws_mem_release(message->alloc, message->message_buffer);
- }
+ aws_byte_buf_clean_up(&message->message_buffer);
}
uint32_t aws_event_stream_message_total_length(const struct aws_event_stream_message *message) {
- return aws_read_u32(message->message_buffer + TOTAL_LEN_OFFSET);
+ struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer);
+ aws_byte_cursor_advance(&read_cur, TOTAL_LEN_OFFSET);
+ uint32_t total_len = 0;
+ aws_byte_cursor_read_be32(&read_cur, &total_len);
+
+ return total_len;
}
uint32_t aws_event_stream_message_headers_len(const struct aws_event_stream_message *message) {
- return aws_read_u32(message->message_buffer + HEADER_LEN_OFFSET);
+ struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer);
+ aws_byte_cursor_advance(&read_cur, HEADER_LEN_OFFSET);
+
+ uint32_t headers_len = 0;
+ aws_byte_cursor_read_be32(&read_cur, &headers_len);
+
+ return headers_len;
}
uint32_t aws_event_stream_message_prelude_crc(const struct aws_event_stream_message *message) {
- return aws_read_u32(message->message_buffer + PRELUDE_CRC_OFFSET);
+ struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer);
+ aws_byte_cursor_advance(&read_cur, PRELUDE_CRC_OFFSET);
+
+ uint32_t prelude_crc = 0;
+ aws_byte_cursor_read_be32(&read_cur, &prelude_crc);
+
+ return prelude_crc;
}
int aws_event_stream_message_headers(const struct aws_event_stream_message *message, struct aws_array_list *headers) {
+ struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer);
+ aws_byte_cursor_advance(&read_cur, AWS_EVENT_STREAM_PRELUDE_LENGTH);
+
return aws_event_stream_read_headers_from_buffer(
- headers,
- message->message_buffer + AWS_EVENT_STREAM_PRELUDE_LENGTH,
- aws_event_stream_message_headers_len(message));
+ headers, read_cur.ptr, aws_event_stream_message_headers_len(message));
}
const uint8_t *aws_event_stream_message_payload(const struct aws_event_stream_message *message) {
- return message->message_buffer + AWS_EVENT_STREAM_PRELUDE_LENGTH + aws_event_stream_message_headers_len(message);
+ AWS_FATAL_PRECONDITION(message);
+ struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer);
+ aws_byte_cursor_advance(&read_cur, AWS_EVENT_STREAM_PRELUDE_LENGTH + aws_event_stream_message_headers_len(message));
+ return read_cur.ptr;
}
uint32_t aws_event_stream_message_payload_len(const struct aws_event_stream_message *message) {
+ AWS_FATAL_PRECONDITION(message);
return aws_event_stream_message_total_length(message) -
(AWS_EVENT_STREAM_PRELUDE_LENGTH + aws_event_stream_message_headers_len(message) +
AWS_EVENT_STREAM_TRAILER_LENGTH);
}
uint32_t aws_event_stream_message_message_crc(const struct aws_event_stream_message *message) {
- return aws_read_u32(
- message->message_buffer + (aws_event_stream_message_total_length(message) - AWS_EVENT_STREAM_TRAILER_LENGTH));
+ AWS_FATAL_PRECONDITION(message);
+ struct aws_byte_cursor read_cur = aws_byte_cursor_from_buf(&message->message_buffer);
+ aws_byte_cursor_advance(
+ &read_cur, aws_event_stream_message_total_length(message) - AWS_EVENT_STREAM_TRAILER_LENGTH);
+
+ uint32_t message_crc = 0;
+ aws_byte_cursor_read_be32(&read_cur, &message_crc);
+
+ return message_crc;
}
const uint8_t *aws_event_stream_message_buffer(const struct aws_event_stream_message *message) {
- return message->message_buffer;
+ AWS_FATAL_PRECONDITION(message);
+ return message->message_buffer.buffer;
}
#define DEBUG_STR_PRELUDE_TOTAL_LEN "\"total_length\": "
@@ -474,6 +536,9 @@ const uint8_t *aws_event_stream_message_buffer(const struct aws_event_stream_mes
#define DEBUG_STR_HEADER_TYPE "\"type\": "
int aws_event_stream_message_to_debug_str(FILE *fd, const struct aws_event_stream_message *message) {
+ AWS_FATAL_PRECONDITION(fd);
+ AWS_FATAL_PRECONDITION(message);
+
struct aws_array_list headers;
aws_event_stream_headers_list_init(&headers, message->alloc);
aws_event_stream_message_headers(message, &headers);
@@ -527,9 +592,6 @@ int aws_event_stream_message_to_debug_str(FILE *fd, const struct aws_event_strea
size_t buffer_len = 0;
aws_base64_compute_encoded_len(header->header_value_len, &buffer_len);
char *encoded_buffer = (char *)aws_mem_acquire(message->alloc, buffer_len);
- if (!encoded_buffer) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
struct aws_byte_buf encode_output = aws_byte_buf_from_array((uint8_t *)encoded_buffer, buffer_len);
@@ -565,10 +627,6 @@ int aws_event_stream_message_to_debug_str(FILE *fd, const struct aws_event_strea
aws_base64_compute_encoded_len(payload_len, &encoded_len);
char *encoded_payload = (char *)aws_mem_acquire(message->alloc, encoded_len);
- if (!encoded_payload) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
-
struct aws_byte_cursor payload_buffer = aws_byte_cursor_from_array(payload, payload_len);
struct aws_byte_buf encoded_payload_buffer = aws_byte_buf_from_array((uint8_t *)encoded_payload, encoded_len);
@@ -580,13 +638,15 @@ int aws_event_stream_message_to_debug_str(FILE *fd, const struct aws_event_strea
}
int aws_event_stream_headers_list_init(struct aws_array_list *headers, struct aws_allocator *allocator) {
- AWS_ASSERT(headers);
- AWS_ASSERT(allocator);
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_FATAL_PRECONDITION(allocator);
return aws_array_list_init_dynamic(headers, allocator, 4, sizeof(struct aws_event_stream_header_value_pair));
}
void aws_event_stream_headers_list_cleanup(struct aws_array_list *headers) {
+ AWS_FATAL_PRECONDITION(headers);
+
if (AWS_UNLIKELY(!headers || !aws_array_list_is_valid(headers))) {
return;
}
@@ -616,10 +676,6 @@ static int s_add_variable_len_header(
if (copy) {
header->header_value.variable_len_val = aws_mem_acquire(headers->alloc, value_len);
- if (!header->header_value.variable_len_val) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
-
header->value_owned = 1;
memcpy((void *)header->header_value.variable_len_val, (void *)value, value_len);
} else {
@@ -644,10 +700,16 @@ int aws_event_stream_add_string_header(
const char *value,
uint16_t value_len,
int8_t copy) {
- struct aws_event_stream_header_value_pair header = {.header_name_len = name_len,
- .header_value_len = value_len,
- .value_owned = copy,
- .header_value_type = AWS_EVENT_STREAM_HEADER_STRING};
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_RETURN_ERROR_IF(
+ name_len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+ AWS_RETURN_ERROR_IF(value_len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+ struct aws_event_stream_header_value_pair header = {
+ .header_name_len = name_len,
+ .header_value_len = value_len,
+ .value_owned = copy,
+ .header_value_type = AWS_EVENT_STREAM_HEADER_STRING,
+ };
return s_add_variable_len_header(headers, &header, name, name_len, (uint8_t *)value, value_len, copy);
}
@@ -655,8 +717,8 @@ int aws_event_stream_add_string_header(
struct aws_event_stream_header_value_pair aws_event_stream_create_string_header(
struct aws_byte_cursor name,
struct aws_byte_cursor value) {
- AWS_PRECONDITION(name.len < INT8_MAX);
- AWS_PRECONDITION(value.len < INT16_MAX);
+ AWS_FATAL_PRECONDITION(name.len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX);
+ AWS_FATAL_PRECONDITION(value.len <= INT16_MAX);
struct aws_event_stream_header_value_pair header = {
.header_value_type = AWS_EVENT_STREAM_HEADER_STRING,
@@ -674,7 +736,7 @@ struct aws_event_stream_header_value_pair aws_event_stream_create_string_header(
struct aws_event_stream_header_value_pair aws_event_stream_create_int32_header(
struct aws_byte_cursor name,
int32_t value) {
- AWS_PRECONDITION(name.len < INT8_MAX);
+ AWS_FATAL_PRECONDITION(name.len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX);
struct aws_event_stream_header_value_pair header = {
.header_value_type = AWS_EVENT_STREAM_HEADER_INT32,
@@ -689,27 +751,61 @@ struct aws_event_stream_header_value_pair aws_event_stream_create_int32_header(
return header;
}
-int aws_event_stream_add_byte_header(struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value) {
- struct aws_event_stream_header_value_pair header = {.header_name_len = name_len,
- .header_value_len = 1,
- .value_owned = 0,
- .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE,
- .header_value.static_val[0] = (uint8_t)value};
-
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
+int aws_event_stream_add_bool_header(struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value) {
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
- return aws_array_list_push_back(headers, (void *)&header);
+ return aws_event_stream_add_bool_header_by_cursor(headers, name_cursor, value != 0);
}
-int aws_event_stream_add_bool_header(struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value) {
+#define AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name_cursor) \
+ AWS_FATAL_PRECONDITION(name_cursor.len > 0); \
+ AWS_FATAL_PRECONDITION(name_cursor.ptr != NULL); \
+ AWS_RETURN_ERROR_IF( \
+ name_cursor.len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+
+int aws_event_stream_add_bool_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ bool value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+
struct aws_event_stream_header_value_pair header = {
- .header_name_len = name_len,
+ .header_name_len = (uint8_t)name.len,
.header_value_len = 0,
.value_owned = 0,
.header_value_type = value ? AWS_EVENT_STREAM_HEADER_BOOL_TRUE : AWS_EVENT_STREAM_HEADER_BOOL_FALSE,
};
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
+
+ return aws_array_list_push_back(headers, (void *)&header);
+}
+
+int aws_event_stream_add_byte_header(struct aws_array_list *headers, const char *name, uint8_t name_len, int8_t value) {
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
+
+ return aws_event_stream_add_byte_header_by_cursor(headers, name_cursor, value);
+}
+
+int aws_event_stream_add_byte_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int8_t value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+
+ struct aws_event_stream_header_value_pair header = {
+ .header_name_len = (uint8_t)name.len,
+ .header_value_len = 1,
+ .value_owned = 0,
+ .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE,
+ };
+ header.header_value.static_val[0] = (uint8_t)value;
+
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
return aws_array_list_push_back(headers, (void *)&header);
}
@@ -719,16 +815,31 @@ int aws_event_stream_add_int16_header(
const char *name,
uint8_t name_len,
int16_t value) {
+
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
+
+ return aws_event_stream_add_int16_header_by_cursor(headers, name_cursor, value);
+}
+
+int aws_event_stream_add_int16_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int16_t value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+
struct aws_event_stream_header_value_pair header = {
- .header_name_len = name_len,
+ .header_name_len = (uint8_t)name.len,
.header_value_len = sizeof(value),
.value_owned = 0,
.header_value_type = AWS_EVENT_STREAM_HEADER_INT16,
};
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
aws_write_u16((uint16_t)value, header.header_value.static_val);
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
+
return aws_array_list_push_back(headers, (void *)&header);
}
@@ -737,16 +848,30 @@ int aws_event_stream_add_int32_header(
const char *name,
uint8_t name_len,
int32_t value) {
+
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
+
+ return aws_event_stream_add_int32_header_by_cursor(headers, name_cursor, value);
+}
+
+int aws_event_stream_add_int32_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int32_t value) {
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+
struct aws_event_stream_header_value_pair header = {
- .header_name_len = name_len,
+ .header_name_len = (uint8_t)name.len,
.header_value_len = sizeof(value),
.value_owned = 0,
.header_value_type = AWS_EVENT_STREAM_HEADER_INT32,
};
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
aws_write_u32((uint32_t)value, header.header_value.static_val);
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
+
return aws_array_list_push_back(headers, (void *)&header);
}
@@ -755,32 +880,72 @@ int aws_event_stream_add_int64_header(
const char *name,
uint8_t name_len,
int64_t value) {
+
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
+
+ return aws_event_stream_add_int64_header_by_cursor(headers, name_cursor, value);
+}
+
+int aws_event_stream_add_int64_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int64_t value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+
struct aws_event_stream_header_value_pair header = {
- .header_name_len = name_len,
+ .header_name_len = (uint8_t)name.len,
.header_value_len = sizeof(value),
.value_owned = 0,
.header_value_type = AWS_EVENT_STREAM_HEADER_INT64,
};
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
aws_write_u64((uint64_t)value, header.header_value.static_val);
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
+
return aws_array_list_push_back(headers, (void *)&header);
}
-int aws_event_stream_add_bytebuf_header(
+int aws_event_stream_add_string_header_by_cursor(
struct aws_array_list *headers,
- const char *name,
- uint8_t name_len,
- uint8_t *value,
- uint16_t value_len,
- int8_t copy) {
- struct aws_event_stream_header_value_pair header = {.header_name_len = name_len,
- .header_value_len = value_len,
- .value_owned = copy,
- .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE_BUF};
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
- return s_add_variable_len_header(headers, &header, name, name_len, value, value_len, copy);
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+ AWS_RETURN_ERROR_IF(value.len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+
+ struct aws_event_stream_header_value_pair header = {
+ .header_name_len = (uint8_t)name.len,
+ .header_value_len = (uint16_t)value.len,
+ .value_owned = 1,
+ .header_value_type = AWS_EVENT_STREAM_HEADER_STRING,
+ };
+
+ return s_add_variable_len_header(
+ headers, &header, (const char *)name.ptr, (uint8_t)name.len, value.ptr, (uint16_t)value.len, 1);
+}
+
+int aws_event_stream_add_byte_buf_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+ AWS_RETURN_ERROR_IF(value.len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+
+ struct aws_event_stream_header_value_pair header = {
+ .header_name_len = (uint8_t)name.len,
+ .header_value_len = (uint16_t)value.len,
+ .value_owned = 1,
+ .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE_BUF,
+ };
+
+ return s_add_variable_len_header(
+ headers, &header, (const char *)name.ptr, (uint8_t)name.len, value.ptr, (uint16_t)value.len, 1);
}
int aws_event_stream_add_timestamp_header(
@@ -788,16 +953,31 @@ int aws_event_stream_add_timestamp_header(
const char *name,
uint8_t name_len,
int64_t value) {
+
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
+
+ return aws_event_stream_add_timestamp_header_by_cursor(headers, name_cursor, value);
+}
+
+int aws_event_stream_add_timestamp_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ int64_t value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+
struct aws_event_stream_header_value_pair header = {
- .header_name_len = name_len,
- .header_value_len = sizeof(uint64_t),
+ .header_name_len = (uint8_t)name.len,
+ .header_value_len = sizeof(value),
.value_owned = 0,
.header_value_type = AWS_EVENT_STREAM_HEADER_TIMESTAMP,
};
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
aws_write_u64((uint64_t)value, header.header_value.static_val);
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
+
return aws_array_list_push_back(headers, (void *)&header);
}
@@ -806,60 +986,139 @@ int aws_event_stream_add_uuid_header(
const char *name,
uint8_t name_len,
const uint8_t *value) {
+
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_array(name, (size_t)name_len);
+ struct aws_byte_cursor value_cursor = aws_byte_cursor_from_array(value, (size_t)UUID_LEN);
+
+ return aws_event_stream_add_uuid_header_by_cursor(headers, name_cursor, value_cursor);
+}
+
+int aws_event_stream_add_uuid_header_by_cursor(
+ struct aws_array_list *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
+
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_EVENT_STREAM_VALIDATE_HEADER_NAME_CURSOR(name);
+ AWS_RETURN_ERROR_IF(value.len == UUID_LEN, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+
struct aws_event_stream_header_value_pair header = {
- .header_name_len = name_len,
- .header_value_len = 16,
+ .header_name_len = (uint8_t)name.len,
+ .header_value_len = UUID_LEN,
.value_owned = 0,
.header_value_type = AWS_EVENT_STREAM_HEADER_UUID,
};
- memcpy((void *)header.header_name, (void *)name, (size_t)name_len);
- memcpy((void *)header.header_value.static_val, value, 16);
+ memcpy((void *)header.header_name, (void *)name.ptr, (size_t)name.len);
+ memcpy((void *)header.header_value.static_val, value.ptr, UUID_LEN);
return aws_array_list_push_back(headers, (void *)&header);
}
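
The new *_by_cursor adders above validate the name length against AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX (and value lengths against INT16_MAX where applicable) before copying, so callers no longer pass raw pointer/length pairs. A hedged usage sketch; the header names and values are made up for illustration and not taken from the patch:

    #include <aws/common/byte_buf.h>
    #include <aws/event-stream/event_stream.h>

    static int build_example_headers(struct aws_allocator *alloc, struct aws_array_list *headers) {
        if (aws_event_stream_headers_list_init(headers, alloc)) {
            return AWS_OP_ERR;
        }

        /* either call fails with AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN
         * if the name or value cursor exceeds the limits checked above */
        if (aws_event_stream_add_int32_header_by_cursor(
                headers, aws_byte_cursor_from_c_str("example-flags"), 42) ||
            aws_event_stream_add_string_header_by_cursor(
                headers,
                aws_byte_cursor_from_c_str("example-content-type"),
                aws_byte_cursor_from_c_str("application/json"))) {
            aws_event_stream_headers_list_cleanup(headers);
            return AWS_OP_ERR;
        }

        return AWS_OP_SUCCESS;
    }
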
+int aws_event_stream_add_bytebuf_header(
+ struct aws_array_list *headers,
+ const char *name,
+ uint8_t name_len,
+ uint8_t *value,
+ uint16_t value_len,
+ int8_t copy) {
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_FATAL_PRECONDITION(name);
+ AWS_RETURN_ERROR_IF(
+ name_len <= AWS_EVENT_STREAM_HEADER_NAME_LEN_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+ AWS_RETURN_ERROR_IF(value_len <= INT16_MAX, AWS_ERROR_EVENT_STREAM_MESSAGE_INVALID_HEADERS_LEN);
+
+ struct aws_event_stream_header_value_pair header = {
+ .header_name_len = name_len,
+ .header_value_len = value_len,
+ .value_owned = copy,
+ .header_value_type = AWS_EVENT_STREAM_HEADER_BYTE_BUF};
+
+ return s_add_variable_len_header(headers, &header, name, name_len, value, value_len, copy);
+}
+
+int aws_event_stream_add_header(
+ struct aws_array_list *headers,
+ const struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(headers);
+ AWS_FATAL_PRECONDITION(header);
+
+ struct aws_event_stream_header_value_pair header_copy = *header;
+
+ if (header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING ||
+ header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF) {
+ return s_add_variable_len_header(
+ headers,
+ &header_copy,
+ header->header_name,
+ header->header_name_len,
+ header->header_value.variable_len_val,
+ header->header_value_len,
+ 1); /* Copy the header value */
+ }
+
+ return aws_array_list_push_back(headers, (void *)&header_copy);
+}
+
struct aws_byte_buf aws_event_stream_header_name(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return aws_byte_buf_from_array((uint8_t *)header->header_name, header->header_name_len);
}
int8_t aws_event_stream_header_value_as_byte(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return (int8_t)header->header_value.static_val[0];
}
struct aws_byte_buf aws_event_stream_header_value_as_string(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return aws_event_stream_header_value_as_bytebuf(header);
}
int8_t aws_event_stream_header_value_as_bool(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return header->header_value_type == AWS_EVENT_STREAM_HEADER_BOOL_TRUE ? (int8_t)1 : (int8_t)0;
}
int16_t aws_event_stream_header_value_as_int16(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return (int16_t)aws_read_u16(header->header_value.static_val);
}
int32_t aws_event_stream_header_value_as_int32(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return (int32_t)aws_read_u32(header->header_value.static_val);
}
int64_t aws_event_stream_header_value_as_int64(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
return (int64_t)aws_read_u64(header->header_value.static_val);
}
struct aws_byte_buf aws_event_stream_header_value_as_bytebuf(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
return aws_byte_buf_from_array(header->header_value.variable_len_val, header->header_value_len);
}
int64_t aws_event_stream_header_value_as_timestamp(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
return aws_event_stream_header_value_as_int64(header);
}
struct aws_byte_buf aws_event_stream_header_value_as_uuid(struct aws_event_stream_header_value_pair *header) {
- return aws_byte_buf_from_array(header->header_value.static_val, 16);
+ AWS_FATAL_PRECONDITION(header);
+ return aws_byte_buf_from_array(header->header_value.static_val, UUID_LEN);
}
uint16_t aws_event_stream_header_value_length(struct aws_event_stream_header_value_pair *header) {
+ AWS_FATAL_PRECONDITION(header);
+
return header->header_value_len;
}
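
Every accessor above now fatally asserts on a NULL header instead of silently dereferencing it. For context, a short sketch of the intended read side, walking a decoded header list; the printed format is arbitrary:

    #include <stdio.h>
    #include <aws/common/array_list.h>
    #include <aws/event-stream/event_stream.h>

    static void print_headers(const struct aws_array_list *headers) {
        size_t count = aws_array_list_length(headers);
        for (size_t i = 0; i < count; ++i) {
            struct aws_event_stream_header_value_pair *header = NULL;
            aws_array_list_get_at_ptr(headers, (void **)&header, i);

            struct aws_byte_buf name = aws_event_stream_header_name(header);
            printf("%.*s: %u value bytes\n",
                (int)name.len,
                (const char *)name.buffer,
                (unsigned)aws_event_stream_header_value_length(header));
        }
    }
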
@@ -890,10 +1149,12 @@ static int s_read_header_value(
size_t current_pos = decoder->message_pos;
+ /* amount that we've already read */
size_t length_read = current_pos - decoder->current_header_value_offset;
struct aws_event_stream_header_value_pair *current_header = &decoder->current_header;
- if (!length_read) {
+ if (!length_read && (current_header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF ||
+ current_header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING)) {
/* save an allocation, this can only happen if the data we were handed is larger than the length of the header
* value. we don't really need to handle offsets in this case. This expects the user is living by the contract
* that they cannot act like they own this memory beyond the lifetime of their callback, and they should not
@@ -915,17 +1176,10 @@ static int s_read_header_value(
/* a possible optimization later would be to only allocate this once, and then keep reusing the same buffer. for
* subsequent messages.*/
- if (current_header->header_value_type == AWS_EVENT_STREAM_HEADER_BYTE_BUF ||
- current_header->header_value_type == AWS_EVENT_STREAM_HEADER_STRING) {
- current_header->header_value.variable_len_val =
- aws_mem_acquire(decoder->alloc, decoder->current_header.header_value_len);
+ current_header->header_value.variable_len_val =
+ aws_mem_acquire(decoder->alloc, decoder->current_header.header_value_len);
- if (!current_header->header_value.variable_len_val) {
- return aws_raise_error(AWS_ERROR_OOM);
- }
-
- current_header->value_owned = 1;
- }
+ current_header->value_owned = 1;
}
size_t max_read =
@@ -995,55 +1249,53 @@ static int s_read_header_type(
decoder->current_header_value_offset++;
struct aws_event_stream_header_value_pair *current_header = &decoder->current_header;
- if (type >= AWS_EVENT_STREAM_HEADER_BOOL_FALSE && type <= AWS_EVENT_STREAM_HEADER_UUID) {
- current_header->header_value_type = (enum aws_event_stream_header_value_type)type;
-
- switch (type) {
- case AWS_EVENT_STREAM_HEADER_STRING:
- case AWS_EVENT_STREAM_HEADER_BYTE_BUF:
- decoder->state = s_read_header_value_len;
- break;
- case AWS_EVENT_STREAM_HEADER_BOOL_FALSE:
- current_header->header_value_len = 0;
- current_header->header_value.static_val[0] = 0;
- decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context);
- s_reset_header_state(decoder, 1);
- break;
- case AWS_EVENT_STREAM_HEADER_BOOL_TRUE:
- current_header->header_value_len = 0;
- current_header->header_value.static_val[0] = 1;
- decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context);
- s_reset_header_state(decoder, 1);
- break;
- case AWS_EVENT_STREAM_HEADER_BYTE:
- current_header->header_value_len = 1;
- decoder->state = s_read_header_value;
- break;
- case AWS_EVENT_STREAM_HEADER_INT16:
- current_header->header_value_len = sizeof(uint16_t);
- decoder->state = s_read_header_value;
- break;
- case AWS_EVENT_STREAM_HEADER_INT32:
- current_header->header_value_len = sizeof(uint32_t);
- decoder->state = s_read_header_value;
- break;
- case AWS_EVENT_STREAM_HEADER_INT64:
- case AWS_EVENT_STREAM_HEADER_TIMESTAMP:
- current_header->header_value_len = sizeof(uint64_t);
- decoder->state = s_read_header_value;
- break;
- case AWS_EVENT_STREAM_HEADER_UUID:
- current_header->header_value_len = 16;
- decoder->state = s_read_header_value;
- break;
- default:
- return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_UNKNOWN_HEADER_TYPE);
- }
-
- return AWS_OP_SUCCESS;
+ current_header->header_value_type = (enum aws_event_stream_header_value_type)type;
+
+ switch (type) {
+ case AWS_EVENT_STREAM_HEADER_STRING:
+ case AWS_EVENT_STREAM_HEADER_BYTE_BUF:
+ decoder->state = s_read_header_value_len;
+ break;
+ case AWS_EVENT_STREAM_HEADER_BOOL_FALSE:
+ current_header->header_value_len = 0;
+ current_header->header_value.static_val[0] = 0;
+ decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context);
+ s_reset_header_state(decoder, 1);
+ decoder->state = s_headers_state;
+ break;
+ case AWS_EVENT_STREAM_HEADER_BOOL_TRUE:
+ current_header->header_value_len = 0;
+ current_header->header_value.static_val[0] = 1;
+ decoder->on_header(decoder, &decoder->prelude, current_header, decoder->user_context);
+ s_reset_header_state(decoder, 1);
+ decoder->state = s_headers_state;
+ break;
+ case AWS_EVENT_STREAM_HEADER_BYTE:
+ current_header->header_value_len = 1;
+ decoder->state = s_read_header_value;
+ break;
+ case AWS_EVENT_STREAM_HEADER_INT16:
+ current_header->header_value_len = sizeof(uint16_t);
+ decoder->state = s_read_header_value;
+ break;
+ case AWS_EVENT_STREAM_HEADER_INT32:
+ current_header->header_value_len = sizeof(uint32_t);
+ decoder->state = s_read_header_value;
+ break;
+ case AWS_EVENT_STREAM_HEADER_INT64:
+ case AWS_EVENT_STREAM_HEADER_TIMESTAMP:
+ current_header->header_value_len = sizeof(uint64_t);
+ decoder->state = s_read_header_value;
+ break;
+ case AWS_EVENT_STREAM_HEADER_UUID:
+ current_header->header_value_len = 16;
+ decoder->state = s_read_header_value;
+ break;
+ default:
+ return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_UNKNOWN_HEADER_TYPE);
}
- return aws_raise_error(AWS_ERROR_EVENT_STREAM_MESSAGE_UNKNOWN_HEADER_TYPE);
+ return AWS_OP_SUCCESS;
}
static int s_read_header_name(
@@ -1157,6 +1409,9 @@ static int s_read_trailer_state(
uint32_t message_crc = aws_read_u32(decoder->working_buffer);
if (message_crc == decoder->running_crc) {
+ if (decoder->on_complete) {
+ decoder->on_complete(decoder, message_crc, decoder->user_context);
+ }
s_reset_state(decoder);
} else {
char error_message[70];
@@ -1303,13 +1558,39 @@ static int s_start_state(
}
static void s_reset_state(struct aws_event_stream_streaming_decoder *decoder) {
+ memset(decoder->working_buffer, 0, sizeof(decoder->working_buffer));
decoder->message_pos = 0;
- decoder->prelude = s_empty_prelude;
decoder->running_crc = 0;
- memset(decoder->working_buffer, 0, sizeof(decoder->working_buffer));
+ decoder->current_header_name_offset = 0;
+ decoder->current_header_value_offset = 0;
+ AWS_ZERO_STRUCT(decoder->current_header);
+ decoder->prelude = s_empty_prelude;
decoder->state = s_start_state;
}
+void aws_event_stream_streaming_decoder_init_from_options(
+ struct aws_event_stream_streaming_decoder *decoder,
+ struct aws_allocator *allocator,
+ const struct aws_event_stream_streaming_decoder_options *options) {
+ AWS_ASSERT(decoder);
+ AWS_ASSERT(allocator);
+ AWS_ASSERT(options);
+ AWS_ASSERT(options->on_error);
+ AWS_ASSERT(options->on_header);
+ AWS_ASSERT(options->on_payload_segment);
+ AWS_ASSERT(options->on_prelude);
+
+ s_reset_state(decoder);
+ decoder->alloc = allocator;
+ decoder->on_error = options->on_error;
+ decoder->on_header = options->on_header;
+ decoder->on_payload = options->on_payload_segment;
+ decoder->on_prelude = options->on_prelude;
+ decoder->on_complete = options->on_complete;
+ decoder->user_context = options->user_data;
+}
+
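
aws_event_stream_streaming_decoder_init_from_options is what exposes the new on_complete hook (fired from s_read_trailer_state once the message CRC checks out). A sketch of wiring a decoder through the options struct; the stub signatures are written to match the decoder callback typedefs in event_stream.h, which is an assumption worth checking against the header, and only on_complete is optional:

    #include <aws/event-stream/event_stream.h>

    static void s_on_payload(
        struct aws_event_stream_streaming_decoder *decoder,
        struct aws_byte_buf *payload,
        int8_t final_segment,
        void *user_data) {
        (void)decoder; (void)payload; (void)final_segment; (void)user_data;
    }

    static void s_on_prelude(
        struct aws_event_stream_streaming_decoder *decoder,
        struct aws_event_stream_message_prelude *prelude,
        void *user_data) {
        (void)decoder; (void)prelude; (void)user_data;
    }

    static void s_on_header(
        struct aws_event_stream_streaming_decoder *decoder,
        struct aws_event_stream_message_prelude *prelude,
        struct aws_event_stream_header_value_pair *header,
        void *user_data) {
        (void)decoder; (void)prelude; (void)header; (void)user_data;
    }

    static void s_on_complete(
        struct aws_event_stream_streaming_decoder *decoder,
        uint32_t message_crc,
        void *user_data) {
        (void)decoder; (void)message_crc; (void)user_data; /* full message decoded and CRC-verified */
    }

    static void s_on_error(
        struct aws_event_stream_streaming_decoder *decoder,
        struct aws_event_stream_message_prelude *prelude,
        int error_code,
        const char *message,
        void *user_data) {
        (void)decoder; (void)prelude; (void)error_code; (void)message; (void)user_data;
    }

    static void init_example_decoder(
        struct aws_event_stream_streaming_decoder *decoder, struct aws_allocator *alloc) {
        struct aws_event_stream_streaming_decoder_options options = {
            .on_payload_segment = s_on_payload,
            .on_prelude = s_on_prelude,
            .on_header = s_on_header,
            .on_complete = s_on_complete, /* optional; only invoked when non-NULL */
            .on_error = s_on_error,
            .user_data = NULL,
        };
        aws_event_stream_streaming_decoder_init_from_options(decoder, alloc, &options);
    }
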
void aws_event_stream_streaming_decoder_init(
struct aws_event_stream_streaming_decoder *decoder,
struct aws_allocator *alloc,
@@ -1319,13 +1600,13 @@ void aws_event_stream_streaming_decoder_init(
aws_event_stream_on_error_fn *on_error,
void *user_data) {
- s_reset_state(decoder);
- decoder->alloc = alloc;
- decoder->on_error = on_error;
- decoder->on_header = on_header;
- decoder->on_payload = on_payload_segment;
- decoder->on_prelude = on_prelude;
- decoder->user_context = user_data;
+ struct aws_event_stream_streaming_decoder_options decoder_options = {
+ .on_payload_segment = on_payload_segment,
+ .on_prelude = on_prelude,
+ .on_header = on_header,
+ .on_error = on_error,
+ .user_data = user_data};
+ aws_event_stream_streaming_decoder_init_from_options(decoder, alloc, &decoder_options);
}
void aws_event_stream_streaming_decoder_clean_up(struct aws_event_stream_streaming_decoder *decoder) {
@@ -1335,6 +1616,7 @@ void aws_event_stream_streaming_decoder_clean_up(struct aws_event_stream_streami
decoder->on_payload = 0;
decoder->on_prelude = 0;
decoder->user_context = 0;
+ decoder->on_complete = 0;
}
/* Simply sends the data to the state machine until all has been processed or an error is returned. */
@@ -1350,6 +1632,6 @@ int aws_event_stream_streaming_decoder_pump(
return err_val;
}
-#if _MSC_VER
+#ifdef _MSC_VER
# pragma warning(pop)
#endif
diff --git a/contrib/restricted/aws/aws-c-event-stream/source/event_stream_channel_handler.c b/contrib/restricted/aws/aws-c-event-stream/source/event_stream_channel_handler.c
index 03b3129621..0e030b4094 100644
--- a/contrib/restricted/aws/aws-c-event-stream/source/event_stream_channel_handler.c
+++ b/contrib/restricted/aws/aws-c-event-stream/source/event_stream_channel_handler.c
@@ -358,7 +358,7 @@ int aws_event_stream_channel_handler_write_message(
AWS_LS_EVENT_STREAM_CHANNEL_HANDLER, "id=%p: Scheduling message write task", (void *)channel_handler);
aws_channel_task_init(
&write_data->task, s_write_handler_message, write_data, "aws_event_stream_channel_handler_write_message");
- aws_channel_schedule_task_now(handler->handler.slot->channel, &write_data->task);
+ aws_channel_schedule_task_now_serialized(handler->handler.slot->channel, &write_data->task);
return AWS_OP_SUCCESS;
}
diff --git a/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_client.c b/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_client.c
index 5fb493f691..1a292a82ee 100644
--- a/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_client.c
+++ b/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_client.c
@@ -353,6 +353,8 @@ void aws_event_stream_rpc_client_connection_release(const struct aws_event_strea
(void *)connection,
ref_count - 1);
+ AWS_FATAL_ASSERT(ref_count != 0 && "Connection ref count has gone negative");
+
if (ref_count == 1) {
s_destroy_connection(connection_mut);
}
@@ -543,7 +545,18 @@ static int s_send_protocol_message(
args->flush_fn = flush_fn;
- size_t headers_count = operation_name ? message_args->headers_count + 4 : message_args->headers_count + 3;
+ size_t headers_count = 0;
+
+ if (operation_name) {
+ if (aws_add_size_checked(message_args->headers_count, 4, &headers_count)) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ if (aws_add_size_checked(message_args->headers_count, 3, &headers_count)) {
+ return AWS_OP_ERR;
+ }
+ }
+
struct aws_array_list headers_list;
AWS_ZERO_STRUCT(headers_list);
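
The header-count arithmetic is now routed through aws_add_size_checked rather than adding 3 or 4 directly; the server-side s_send_protocol_message further down gets the same guard. A short sketch of the aws-c-common helper with illustrative counts:

    #include <stdbool.h>
    #include <aws/common/math.h>

    /* Returns AWS_OP_SUCCESS and writes user_headers + protocol headers into *out,
     * or raises AWS_ERROR_OVERFLOW_DETECTED and returns AWS_OP_ERR on wrap-around. */
    static int total_header_count(size_t user_headers, bool has_operation_name, size_t *out) {
        size_t protocol_headers = has_operation_name ? 4 : 3;
        return aws_add_size_checked(user_headers, protocol_headers, out);
    }
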
@@ -736,21 +749,31 @@ static void s_route_message_by_type(
struct aws_hash_element *continuation_element = NULL;
if (aws_hash_table_find(&connection->continuation_table, &stream_id, &continuation_element) ||
!continuation_element) {
+ bool old_stream_id = stream_id <= connection->latest_stream_id;
aws_mutex_unlock(&connection->stream_lock);
- AWS_LOGF_ERROR(
- AWS_LS_EVENT_STREAM_RPC_CLIENT,
- "id=%p: a stream id was received that was not created by this client",
- (void *)connection);
- aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR);
- s_send_connection_level_error(
- connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_client_stream_id_error);
+ if (!old_stream_id) {
+ AWS_LOGF_ERROR(
+ AWS_LS_EVENT_STREAM_RPC_CLIENT,
+ "id=%p: a stream id was received that was not created by this client",
+ (void *)connection);
+ aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_PROTOCOL_ERROR);
+ s_send_connection_level_error(
+ connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_PROTOCOL_ERROR, 0, &s_invalid_client_stream_id_error);
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_EVENT_STREAM_RPC_CLIENT,
+ "id=%p: a stream id was received that corresponds to an already-closed stream",
+ (void *)connection);
+ }
return;
}
- aws_mutex_unlock(&connection->stream_lock);
-
continuation = continuation_element->value;
+ AWS_FATAL_ASSERT(continuation != NULL);
aws_event_stream_rpc_client_continuation_acquire(continuation);
+
+ aws_mutex_unlock(&connection->stream_lock);
+
continuation->continuation_fn(continuation, &message_args, continuation->user_data);
aws_event_stream_rpc_client_continuation_release(continuation);
@@ -935,6 +958,8 @@ void aws_event_stream_rpc_client_continuation_release(
(void *)continuation,
ref_count - 1);
+ AWS_FATAL_ASSERT(ref_count != 0 && "Continuation ref count has gone negative");
+
if (ref_count == 1) {
struct aws_allocator *allocator = continuation_mut->connection->allocator;
aws_event_stream_rpc_client_connection_release(continuation_mut->connection);
@@ -960,7 +985,8 @@ int aws_event_stream_rpc_client_continuation_activate(
aws_mutex_lock(&continuation->connection->stream_lock);
if (continuation->stream_id) {
- AWS_LOGF_ERROR(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream has already been activated", (void *)continuation)
+ AWS_LOGF_ERROR(
+ AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream has already been activated", (void *)continuation);
aws_raise_error(AWS_ERROR_INVALID_STATE);
goto clean_up;
}
@@ -968,7 +994,7 @@ int aws_event_stream_rpc_client_continuation_activate(
/* Even though is_open is atomic, we need to hold a lock while checking it.
* This lets us coordinate with code that sets is_open to false. */
if (!aws_event_stream_rpc_client_connection_is_open(continuation->connection)) {
- AWS_LOGF_ERROR(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream's connection is not open", (void *)continuation)
+ AWS_LOGF_ERROR(AWS_LS_EVENT_STREAM_RPC_CLIENT, "id=%p: stream's connection is not open", (void *)continuation);
aws_raise_error(AWS_ERROR_EVENT_STREAM_RPC_CONNECTION_CLOSED);
goto clean_up;
}
diff --git a/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_server.c b/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_server.c
index 6460383f1d..9b2d38e5d9 100644
--- a/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_server.c
+++ b/contrib/restricted/aws/aws-c-event-stream/source/event_stream_rpc_server.c
@@ -22,6 +22,7 @@
#include <aws/io/channel.h>
#include <aws/io/channel_bootstrap.h>
+#include <aws/io/socket.h>
#include <inttypes.h>
@@ -81,7 +82,17 @@ struct aws_event_stream_rpc_server_continuation_token {
void s_continuation_destroy(void *value) {
struct aws_event_stream_rpc_server_continuation_token *continuation = value;
AWS_LOGF_DEBUG(AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: destroying continuation", (void *)continuation);
- continuation->closed_fn(continuation, continuation->user_data);
+
+ /*
+ * When creating a stream, we end up putting the continuation in the table before we finish initializing it.
+ * If an error occurs in the on incoming stream callback, we end up with a continuation with no user data or
+ * callbacks. This means we have to check closed_fn for validity even though the success path does a fatal assert
+ * on validity.
+ */
+ if (continuation->closed_fn != NULL) {
+ continuation->closed_fn(continuation, continuation->user_data);
+ }
+
aws_event_stream_rpc_server_continuation_release(continuation);
}
@@ -199,7 +210,7 @@ struct aws_event_stream_rpc_server_connection *aws_event_stream_rpc_server_conne
const struct aws_event_stream_rpc_connection_options *connection_options) {
AWS_FATAL_ASSERT(
connection_options->on_connection_protocol_message && "on_connection_protocol_message must be specified!");
- AWS_FATAL_ASSERT(connection_options->on_incoming_stream && "on_connection_protocol_message must be specified");
+ AWS_FATAL_ASSERT(connection_options->on_incoming_stream && "on_incoming_stream must be specified");
struct aws_event_stream_rpc_server_connection *connection = s_create_connection_on_channel(server, channel);
@@ -289,7 +300,7 @@ static void s_on_accept_channel_setup(
AWS_FATAL_ASSERT(
connection_options.on_connection_protocol_message && "on_connection_protocol_message must be specified!");
- AWS_FATAL_ASSERT(connection_options.on_incoming_stream && "on_connection_protocol_message must be specified");
+ AWS_FATAL_ASSERT(connection_options.on_incoming_stream && "on_incoming_stream must be specified");
connection->on_incoming_stream = connection_options.on_incoming_stream;
connection->on_connection_protocol_message = connection_options.on_connection_protocol_message;
connection->user_data = connection_options.user_data;
@@ -425,6 +436,16 @@ error:
return NULL;
}
+uint16_t aws_event_stream_rpc_server_listener_get_bound_port(
+ const struct aws_event_stream_rpc_server_listener *server) {
+
+ struct aws_socket_endpoint address;
+ AWS_ZERO_STRUCT(address);
+ /* not checking error code because it can't fail when called on a listening socket */
+ aws_socket_get_bound_address(server->listener, &address);
+ return address.port;
+}
+
void aws_event_stream_rpc_server_listener_acquire(struct aws_event_stream_rpc_server_listener *server) {
size_t current_count = aws_atomic_fetch_add_explicit(&server->ref_count, 1, aws_memory_order_relaxed);
@@ -596,7 +617,17 @@ static int s_send_protocol_message(
args->flush_fn = flush_fn;
- size_t headers_count = message_args->headers_count + 3;
+ size_t headers_count = 0;
+
+ if (aws_add_size_checked(message_args->headers_count, 3, &headers_count)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_EVENT_STREAM_RPC_SERVER,
+ "id=%p: integer overflow detected when using headers_count %zu",
+ (void *)connection,
+ message_args->headers_count);
+ goto args_allocated_before_failure;
+ }
+
struct aws_array_list headers_list;
AWS_ZERO_STRUCT(headers_list);
@@ -869,7 +900,7 @@ static void s_route_message_by_type(
return;
}
- /* if the stream is is in the past, look it up from the continuation table. If it's not there, that's an error.
+ /* if the stream is in the past, look it up from the continuation table. If it's not there, that's an error.
* if it is, find it and notify the user a message arrived */
if (stream_id <= connection->latest_stream_id) {
AWS_LOGF_ERROR(
@@ -897,9 +928,15 @@ static void s_route_message_by_type(
(void *)connection,
(void *)continuation);
- aws_event_stream_rpc_server_continuation_acquire(continuation);
- continuation->continuation_fn(continuation, &message_args, continuation->user_data);
- aws_event_stream_rpc_server_continuation_release(continuation);
+ /*
+ * I don't think it's possible for the continuation_fn to be NULL at this point, but given the
+ * multiple partially-initialized object crashes we've had, let's be safe.
+ */
+ if (continuation->continuation_fn != NULL) {
+ aws_event_stream_rpc_server_continuation_acquire(continuation);
+ continuation->continuation_fn(continuation, &message_args, continuation->user_data);
+ aws_event_stream_rpc_server_continuation_release(continuation);
+ }
/* now these are potentially new streams. Make sure they're in bounds, create a new continuation
* and notify the user the stream has been created, then send them the message. */
} else {
@@ -981,9 +1018,21 @@ static void s_route_message_by_type(
aws_event_stream_rpc_server_continuation_acquire(continuation);
AWS_LOGF_TRACE(
AWS_LS_EVENT_STREAM_RPC_SERVER, "id=%p: invoking on_incoming_stream callback", (void *)connection);
- if (connection->on_incoming_stream(
+ /*
+ * This callback must only keep a ref to the continuation on a success path. On a failure, it must
+ * leave the ref count alone so that the release + removal destroys the continuation
+ */
+ if (connection->on_incoming_stream == NULL ||
+ connection->on_incoming_stream(
continuation->connection, continuation, operation_name, &options, connection->user_data)) {
+
+ AWS_FATAL_ASSERT(aws_atomic_load_int(&continuation->ref_count) == 2);
+
+ /* undo the continuation acquire that was done a few lines above */
aws_event_stream_rpc_server_continuation_release(continuation);
+
+ /* removing the continuation from the table will do the final decref on the continuation */
+ aws_hash_table_remove(&connection->continuation_table, &continuation->stream_id, NULL, NULL);
s_send_connection_level_error(
connection, AWS_EVENT_STREAM_RPC_MESSAGE_TYPE_INTERNAL_ERROR, 0, &s_internal_error);
return;
@@ -997,6 +1046,8 @@ static void s_route_message_by_type(
connection->latest_stream_id = stream_id;
continuation->continuation_fn(continuation, &message_args, continuation->user_data);
+
+ /* undo the acquire made before the on_incoming_stream callback invocation */
aws_event_stream_rpc_server_continuation_release(continuation);
}
@@ -1041,7 +1092,9 @@ static void s_route_message_by_type(
(void *)connection);
}
- connection->on_connection_protocol_message(connection, &message_args, connection->user_data);
+ if (connection->on_connection_protocol_message != NULL) {
+ connection->on_connection_protocol_message(connection, &message_args, connection->user_data);
+ }
}
}
diff --git a/contrib/restricted/aws/aws-c-event-stream/ya.make b/contrib/restricted/aws/aws-c-event-stream/ya.make
index 80896b79a3..d087791e00 100644
--- a/contrib/restricted/aws/aws-c-event-stream/ya.make
+++ b/contrib/restricted/aws/aws-c-event-stream/ya.make
@@ -1,4 +1,4 @@
-# Generated by devtools/yamaker from nixpkgs 22.05.
+# Generated by devtools/yamaker from nixpkgs 23.05.
LIBRARY()
@@ -6,9 +6,9 @@ LICENSE(Apache-2.0)
LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-VERSION(0.2.7)
+VERSION(0.2.20)
-ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-event-stream/archive/v0.2.7.tar.gz)
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-event-stream/archive/v0.2.20.tar.gz)
PEERDIR(
contrib/restricted/aws/aws-c-common
@@ -32,20 +32,17 @@ CFLAGS(
-DAWS_IO_USE_IMPORT_EXPORT
-DAWS_USE_EPOLL
-DHAVE_SYSCONF
- -DS2N_ADX
- -DS2N_BIKE_R3_AVX2
- -DS2N_BIKE_R3_AVX512
- -DS2N_BIKE_R3_PCLMUL
-DS2N_CLONE_SUPPORTED
-DS2N_CPUID_AVAILABLE
-DS2N_FALL_THROUGH_SUPPORTED
-DS2N_FEATURES_AVAILABLE
- -DS2N_HAVE_EXECINFO
-DS2N_KYBER512R3_AVX2_BMI2
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
-DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
-DS2N_MADVISE_SUPPORTED
- -DS2N_SIKE_P434_R3_ASM
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
-DS2N___RESTRICT__SUPPORTED
)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..2df1842086
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..2df1842086
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..4cef4bd81e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..4cef4bd81e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..2df1842086
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-http)
+target_compile_options(restricted-aws-aws-c-http PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-http PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/include
+)
+target_link_libraries(restricted-aws-aws-c-http PUBLIC
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-compression
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-http PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h1_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_frames.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/h2_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/random_access_set.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/request_response.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/statistics.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/strutil.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
+)
diff --git a/contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..3b64466870
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-http/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-http/CONTRIBUTING.md
new file mode 100644
index 0000000000..d79975fab0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-http/issues), or [recently closed](https://github.com/awslabs/aws-c-http/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-http/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-c-http/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-http/LICENSE b/contrib/restricted/aws/aws-c-http/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/restricted/aws/aws-c-http/NOTICE b/contrib/restricted/aws/aws-c-http/NOTICE
new file mode 100644
index 0000000000..6ac9e1e118
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/NOTICE
@@ -0,0 +1,3 @@
+AWS C Http
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0.
diff --git a/contrib/restricted/aws/aws-c-http/README.md b/contrib/restricted/aws/aws-c-http/README.md
new file mode 100644
index 0000000000..e92425af2b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/README.md
@@ -0,0 +1,61 @@
+## AWS C Http
+
+C99 implementation of the HTTP/1.1 and HTTP/2 specifications
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.1+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
+
+```
+git clone git@github.com:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone git@github.com:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
+```
+
+#### Building aws-c-http and Remaining Dependencies
+
+```
+git clone git@github.com:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone git@github.com:awslabs/aws-c-cal.git
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-cal/build --target install
+
+git clone git@github.com:awslabs/aws-c-io.git
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-io/build --target install
+
+git clone git@github.com:awslabs/aws-c-compression.git
+cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-compression/build --target install
+
+git clone git@github.com:awslabs/aws-c-http.git
+cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-http/build --target install
+```
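+
+Once everything is installed, a minimal consumer can initialize and tear down the library as sketched below. This snippet is illustrative and not part of the upstream README; it assumes the `aws_http_library_init`/`aws_http_library_clean_up` entry points declared in `aws/http/http.h` and the default allocator from aws-c-common.
+
+```
+#include <aws/common/allocator.h>
+#include <aws/http/http.h>
+
+int main(void) {
+    /* Grab the default allocator and bring up the HTTP library (error codes, logging subjects, etc.). */
+    struct aws_allocator *allocator = aws_default_allocator();
+    aws_http_library_init(allocator);
+
+    /* ... create an event loop group, client bootstrap, and connections here ... */
+
+    /* Tear the library back down before exit. */
+    aws_http_library_clean_up();
+    return 0;
+}
+```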
+
+#### Run Integration Tests with localhost
+
+To run some of the integration tests (those whose names start with localhost_integ_*), you first need to set up a local server that echoes the request headers back from `/echo`.
+
+To do that, see the [localhost](./tests/py_localhost/) script in this repository.
+
+After that, configure and build your CMake project with `-DENABLE_LOCALHOST_INTEGRATION_TESTS=true` to build the localhost tests, and run them with `ctest --output-on-failure -R localhost_integ_*`.
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h
new file mode 100644
index 0000000000..e6362c1439
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/connection.h
@@ -0,0 +1,679 @@
+#ifndef AWS_HTTP_CONNECTION_H
+#define AWS_HTTP_CONNECTION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_client_bootstrap;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+struct aws_http2_setting;
+struct proxy_env_var_settings;
+
+/**
+ * An HTTP connection.
+ * This type is used by both server-side and client-side connections.
+ * This type is also used by all supported versions of HTTP.
+ */
+struct aws_http_connection;
+
+/**
+ * Invoked when connect completes.
+ *
+ * If unsuccessful, error_code will be set, connection will be NULL,
+ * and the on_shutdown callback will never be invoked.
+ *
+ * If successful, error_code will be 0 and connection will be valid.
+ * The user is now responsible for the connection and must
+ * call aws_http_connection_release() when they are done with it.
+ *
+ * The connection uses one event-loop thread to do all its work.
+ * The thread invoking this callback will be the same thread that invokes all
+ * future callbacks for this connection and its streams.
+ */
+typedef void(
+ aws_http_on_client_connection_setup_fn)(struct aws_http_connection *connection, int error_code, void *user_data);
+
+/**
+ * Invoked when the connection has finished shutting down.
+ * Never invoked if on_setup failed.
+ * This is always invoked on connection's event-loop thread.
+ * Note that the connection is not completely done until on_shutdown has been invoked
+ * AND aws_http_connection_release() has been called.
+ */
+typedef void(
+ aws_http_on_client_connection_shutdown_fn)(struct aws_http_connection *connection, int error_code, void *user_data);
+
+/**
+ * Invoked when the HTTP/2 settings change is complete.
+ * If connection setup succeeds, this is always invoked, whether the settings change succeeds or fails.
+ * If error_code is AWS_ERROR_SUCCESS (0), then the peer has acknowledged the settings and the change has been applied.
+ * If error_code is non-zero, then a connection error occurred before the settings could be fully acknowledged and
+ * applied. This is always invoked on the connection's event-loop thread.
+ */
+typedef void(aws_http2_on_change_settings_complete_fn)(
+ struct aws_http_connection *http2_connection,
+ int error_code,
+ void *user_data);
+
+/**
+ * Invoked when the HTTP/2 PING completes, whether peer has acknowledged it or not.
+ * If error_code is AWS_ERROR_SUCCESS (0), then the peer has acknowledged the PING and round_trip_time_ns will be the
+ * round-trip time in nanoseconds for the connection.
+ * If error_code is non-zero, then a connection error occurred before the PING was acknowledged, and round_trip_time_ns
+ * is meaningless in that case.
+ */
+typedef void(aws_http2_on_ping_complete_fn)(
+ struct aws_http_connection *http2_connection,
+ uint64_t round_trip_time_ns,
+ int error_code,
+ void *user_data);
+
+/**
+ * Invoked when an HTTP/2 GOAWAY frame is received from peer.
+ * Implies that the peer has initiated shutdown, or encountered a serious error.
+ * Once a GOAWAY is received, no further streams may be created on this connection.
+ *
+ * @param http2_connection This HTTP/2 connection.
+ * @param last_stream_id ID of the last locally-initiated stream that peer will
+ * process. Any locally-initiated streams with a higher ID are ignored by
+ * peer, and are safe to retry on another connection.
+ * @param http2_error_code The HTTP/2 error code (RFC-7540 section 7) sent by peer.
+ * `enum aws_http2_error_code` lists official codes.
+ * @param debug_data The debug data sent by peer. It can be empty. (NOTE: this data is only valid for the lifetime of
+ * the callback. Make a deep copy if you wish to keep it longer.)
+ * @param user_data User-data passed to the callback.
+ */
+typedef void(aws_http2_on_goaway_received_fn)(
+ struct aws_http_connection *http2_connection,
+ uint32_t last_stream_id,
+ uint32_t http2_error_code,
+ struct aws_byte_cursor debug_data,
+ void *user_data);
+
+/**
+ * Invoked when new HTTP/2 settings from peer have been applied.
+ * settings_array contains the settings that were just applied, in the order they were applied (the order in which they
+ * arrived). num_settings is the number of elements in that array.
+ */
+typedef void(aws_http2_on_remote_settings_change_fn)(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ void *user_data);
+
+/**
+ * Callback invoked on each statistics sample.
+ *
+ * connection_nonce is unique to each connection for disambiguation of each callback per connection.
+ */
+typedef void(
+ aws_http_statistics_observer_fn)(size_t connection_nonce, const struct aws_array_list *stats_list, void *user_data);
+
+/**
+ * Configuration options for connection monitoring
+ */
+struct aws_http_connection_monitoring_options {
+
+ /**
+ * minimum required throughput of the connection. Throughput is only measured against the interval of time where
+ * there is actual io to perform. Read and write throughput are measured and checked independently of one another.
+ */
+ uint64_t minimum_throughput_bytes_per_second;
+
+ /*
+ * amount of time, in seconds, throughput is allowed to drop below the minimum before the connection is shut down
+ * as unhealthy.
+ */
+ uint32_t allowable_throughput_failure_interval_seconds;
+
+ /**
+ * invoked on each statistics publish by the underlying IO channel. Install this callback to receive the statistics
+ * for observation. This field is optional.
+ */
+ aws_http_statistics_observer_fn *statistics_observer_fn;
+
+ /**
+ * user_data to be passed to statistics_observer_fn.
+ */
+ void *statistics_observer_user_data;
+};
+
+/**
+ * Options specific to HTTP/1.x connections.
+ */
+struct aws_http1_connection_options {
+ /**
+ * Optional
+ * Capacity in bytes of the HTTP/1 connection's read buffer.
+ * The buffer grows if the flow-control window of the incoming HTTP-stream
+ * reaches zero. If the buffer reaches capacity, no further socket data is
+ * read until the HTTP-stream's window opens again, allowing data to resume flowing.
+ *
+ * Ignored if `manual_window_management` is false.
+ * If zero is specified (the default) then a default capacity is chosen.
+ * A capacity that is too small may hinder throughput.
+ * A capacity that is too big may waste memory without helping throughput.
+ */
+ size_t read_buffer_capacity;
+};
+
+/**
+ * Options specific to HTTP/2 connections.
+ */
+struct aws_http2_connection_options {
+ /**
+ * Optional
+ * The array of settings to send as the connection's initial settings.
+ * Note: each setting has its own valid range. If initial_settings_array is not set, num_initial_settings must be 0,
+ * which sends an empty SETTINGS frame.
+ */
+ struct aws_http2_setting *initial_settings_array;
+
+ /**
+ * Required
+ * The number of settings to change (the length of initial_settings_array).
+ */
+ size_t num_initial_settings;
+
+ /**
+ * Optional.
+ * Invoked when the HTTP/2 initial settings change is complete.
+ * If connection setup fails, this will not be invoked.
+ * Otherwise, this will be invoked whether the settings change succeeds or fails.
+ * See `aws_http2_on_change_settings_complete_fn`.
+ */
+ aws_http2_on_change_settings_complete_fn *on_initial_settings_completed;
+
+ /**
+ * Optional
+ * The max number of recently-closed streams to remember.
+ * Set it to zero to use the default setting, AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS
+ *
+ * If the connection receives a frame for a closed stream,
+ * the frame will be ignored or cause a connection error,
+ * depending on the frame type and how the stream was closed.
+ * Remembering more streams reduces the chances that a late frame causes
+ * a connection error, but costs some memory.
+ */
+ size_t max_closed_streams;
+
+ /**
+ * Optional.
+ * Invoked when a valid GOAWAY frame is received.
+ * See `aws_http2_on_goaway_received_fn`.
+ */
+ aws_http2_on_goaway_received_fn *on_goaway_received;
+
+ /**
+ * Optional.
+ * Invoked when new settings from peer have been applied.
+ * See `aws_http2_on_remote_settings_change_fn`.
+ */
+ aws_http2_on_remote_settings_change_fn *on_remote_settings_change;
+
+ /**
+ * Optional.
+ * Set to true to manually manage the flow-control window of whole HTTP/2 connection.
+ *
+ * If false, the connection will maintain its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ *
+ * If true, the flow-control window of the whole connection will shrink as body data
+ * is received (headers, padding, and other metadata do not affect the window) for every stream
+ * created on this connection.
+ * The initial connection flow-control window is 65,535.
+ * Once the connection's flow-control window reaches 0, all the streams on the connection stop receiving any
+ * further data.
+ * The user must call aws_http2_connection_update_window() to increment the connection's
+ * window and keep data flowing.
+ * Note: the padding of a DATA frame counts against the flow-control window,
+ * but the client always updates the window for padding automatically, even when manual window updates are enabled.
+ */
+ bool conn_manual_window_management;
+};
+
+/**
+ * Options for creating an HTTP client connection.
+ * Initialize with AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT to set default values.
+ */
+struct aws_http_client_connection_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Set by AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT.
+ */
+ size_t self_size;
+
+ /**
+ * Required.
+ * Must outlive the connection.
+ */
+ struct aws_allocator *allocator;
+
+ /**
+ * Required.
+ * The connection keeps the bootstrap alive via ref-counting.
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /**
+ * Required.
+ * aws_http_client_connect() makes a copy.
+ */
+ struct aws_byte_cursor host_name;
+
+ /**
+ * Required.
+ */
+ uint16_t port;
+
+ /**
+ * Required.
+ * aws_http_client_connect() makes a copy.
+ */
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Optional.
+ * aws_http_client_connect() deep-copies all contents,
+ * and keeps `aws_tls_ctx` alive via ref-counting.
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Optional
+ * Configuration options related to http proxy usage.
+ * Relevant fields are copied internally.
+ */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /*
+ * Optional.
+ * Configuration for using proxy from environment variable.
+ * Only works when proxy_options is not set.
+ */
+ const struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Optional
+ * Configuration options related to connection health monitoring
+ */
+ const struct aws_http_connection_monitoring_options *monitoring_options;
+
+ /**
+ * Set to true to manually manage the flow-control window of each stream.
+ *
+ * If false, the connection will maintain its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ *
+ * If true, the flow-control window of each stream will shrink as body data
+ * is received (headers, padding, and other metadata do not affect the window).
+ * `initial_window_size` determines the starting size of each HTTP/1 stream's window, while HTTP/2 streams
+ * use the AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE setting to inform the peer about read back-pressure.
+ *
+ * If a stream's flow-control window reaches 0, no further data will be received. The user must call
+ * aws_http_stream_update_window() to increment the stream's window and keep data flowing.
+ *
+ * For an HTTP/2 connection, this setting ONLY controls stream-level window
+ * management; connection-level window management is controlled by
+ * conn_manual_window_management. Note: the padding of a DATA frame counts against the flow-control window,
+ * but the client always updates the window for padding automatically, even when manual window updates are enabled.
+ */
+ bool manual_window_management;
+
+ /**
+ * The starting size of each HTTP stream's flow-control window for HTTP/1 connections.
+ * Required if `manual_window_management` is true,
+ * ignored if `manual_window_management` is false.
+ *
+ * Always ignored for HTTP/2 connections; their initial window size is controlled by the
+ * `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` setting.
+ */
+ size_t initial_window_size;
+
+ /**
+ * User data for callbacks
+ * Optional.
+ */
+ void *user_data;
+
+ /**
+ * Invoked when connect completes.
+ * Required.
+ * See `aws_http_on_client_connection_setup_fn`.
+ */
+ aws_http_on_client_connection_setup_fn *on_setup;
+
+ /**
+ * Invoked when the connection has finished shutting down.
+ * Never invoked if setup failed.
+ * Optional.
+ * See `aws_http_on_client_connection_shutdown_fn`.
+ */
+ aws_http_on_client_connection_shutdown_fn *on_shutdown;
+
+ /**
+ * Optional.
+ * When true, use prior knowledge to set up an HTTP/2 connection on a cleartext
+ * connection.
+ * When TLS is set and this is true, the connection will fail to be established,
+ * as prior knowledge only applies to cleartext connections.
+ * Refer to RFC-7540 section 3.4.
+ */
+ bool prior_knowledge_http2;
+
+ /**
+ * Optional.
+ * Pointer to a hash map from ALPN string to the protocol to use.
+ * Maps `struct aws_string *` to `enum aws_http_version`.
+ * If not set, only the predefined strings `h2` and `http/1.1` will be recognized. Any other negotiated ALPN string
+ * will result in an HTTP/1.1 connection.
+ * Note: Connection will keep a deep copy of the table and the strings.
+ */
+ struct aws_hash_table *alpn_string_map;
+
+ /**
+ * Options specific to HTTP/1.x connections.
+ * Optional.
+ * Ignored if connection is not HTTP/1.x.
+ * If connection is HTTP/1.x and options were not specified, default values are used.
+ */
+ const struct aws_http1_connection_options *http1_options;
+
+ /**
+ * Options specific to HTTP/2 connections.
+ * Optional.
+ * Ignored if connection is not HTTP/2.
+ * If connection is HTTP/2 and options were not specified, default values are used.
+ */
+ const struct aws_http2_connection_options *http2_options;
+
+ /**
+ * Optional.
+ * Requests the channel/connection be bound to a specific event loop rather than chosen sequentially from the
+ * event loop group associated with the client bootstrap.
+ */
+ struct aws_event_loop *requested_event_loop;
+};
+
+/* Predefined settings identifiers (RFC-7540 6.5.2) */
+enum aws_http2_settings_id {
+ AWS_HTTP2_SETTINGS_BEGIN_RANGE = 0x1, /* Beginning of known values */
+ AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE = 0x1,
+ AWS_HTTP2_SETTINGS_ENABLE_PUSH = 0x2,
+ AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS = 0x3,
+ AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE = 0x4,
+ AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE = 0x5,
+ AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE = 0x6,
+ AWS_HTTP2_SETTINGS_END_RANGE, /* End of known values */
+};
+
+/* A HTTP/2 setting and its value, used in SETTINGS frame */
+struct aws_http2_setting {
+ enum aws_http2_settings_id id;
+ uint32_t value;
+};
+
+/**
+ * HTTP/2: Default value for max closed streams we will keep in memory.
+ */
+#define AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS (32)
+
+/**
+ * HTTP/2: The size of payload for HTTP/2 PING frame.
+ */
+#define AWS_HTTP2_PING_DATA_SIZE (8)
+
+/**
+ * HTTP/2: The number of known settings.
+ */
+#define AWS_HTTP2_SETTINGS_COUNT (6)
+
+/**
+ * Initializes aws_http_client_connection_options with default values.
+ */
+#define AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_client_connection_options), .initial_window_size = SIZE_MAX, }
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Asynchronously establish a client connection.
+ * The on_setup callback is invoked when the operation has created a connection or failed.
+ */
+AWS_HTTP_API
+int aws_http_client_connect(const struct aws_http_client_connection_options *options);
+
+/**
+ * Users must release the connection when they are done with it.
+ * The connection's memory cannot be reclaimed until this is done.
+ * If the connection was not already shutting down, it will be shut down.
+ *
+ * Users should always wait for the on_shutdown() callback to be called before releasing any data passed to the
+ * http_connection (e.g. aws_tls_connection_options, aws_socket_options); otherwise there will be race conditions
+ * between http_connection shutdown tasks and memory-release tasks, causing segfaults.
+ */
+AWS_HTTP_API
+void aws_http_connection_release(struct aws_http_connection *connection);
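+
+/*
+ * Illustrative sketch (not part of the upstream header): one way a client could
+ * fill out the connection options, connect, and release the connection once
+ * shutdown completes. The function and callback names, host, and port are
+ * hypothetical; the allocator, bootstrap, and socket options are assumed to be
+ * created elsewhere.
+ */
+#include <aws/common/byte_buf.h> /* for aws_byte_cursor_from_c_str() */
+
+static void s_example_on_setup(struct aws_http_connection *connection, int error_code, void *user_data) {
+    (void)user_data;
+    if (error_code) {
+        /* Connection failed: connection is NULL and on_shutdown will never fire. */
+        return;
+    }
+    /* Store the connection somewhere; it must eventually be released. */
+    (void)connection;
+}
+
+static void s_example_on_shutdown(struct aws_http_connection *connection, int error_code, void *user_data) {
+    (void)error_code;
+    (void)user_data;
+    /* Shutdown has completed; give up our reference to the connection. */
+    aws_http_connection_release(connection);
+}
+
+static int s_example_connect(
+    struct aws_allocator *allocator,
+    struct aws_client_bootstrap *bootstrap,
+    const struct aws_socket_options *socket_options) {
+
+    struct aws_http_client_connection_options options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT;
+    options.allocator = allocator;
+    options.bootstrap = bootstrap;
+    options.host_name = aws_byte_cursor_from_c_str("example.com");
+    options.port = 80;
+    options.socket_options = socket_options;
+    options.on_setup = s_example_on_setup;
+    options.on_shutdown = s_example_on_shutdown;
+    return aws_http_client_connect(&options);
+}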
+
+/**
+ * Begin shutdown sequence of the connection if it hasn't already started. This will schedule shutdown tasks on the
+ * EventLoop that may send HTTP/TLS/TCP shutdown messages to peers if necessary, and will eventually cause internal
+ * connection memory to stop being accessed and on_shutdown() callback to be called.
+ *
+ * It's safe to call this function regardless of the connection state as long as you hold a reference to the connection.
+ */
+AWS_HTTP_API
+void aws_http_connection_close(struct aws_http_connection *connection);
+
+/**
+ * Stop accepting new requests for the connection. This does NOT start the shutdown process for the connection.
+ * Requests that are already open can still run to completion, but new requests will fail to be created.
+ */
+AWS_HTTP_API
+void aws_http_connection_stop_new_requests(struct aws_http_connection *connection);
+
+/**
+ * Returns true unless the connection is closed or closing.
+ */
+AWS_HTTP_API
+bool aws_http_connection_is_open(const struct aws_http_connection *connection);
+
+/**
+ * Return whether the connection can make new requests.
+ * If false, then a new connection must be established to make further requests.
+ */
+AWS_HTTP_API
+bool aws_http_connection_new_requests_allowed(const struct aws_http_connection *connection);
+
+/**
+ * Returns true if this is a client connection.
+ */
+AWS_HTTP_API
+bool aws_http_connection_is_client(const struct aws_http_connection *connection);
+
+AWS_HTTP_API
+enum aws_http_version aws_http_connection_get_version(const struct aws_http_connection *connection);
+
+/**
+ * Returns the channel hosting the HTTP connection.
+ * Do not expose this function to language bindings.
+ */
+AWS_HTTP_API
+struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection);
+
+/**
+ * Initialize a map copied from the *src map, which maps `struct aws_string *` to `enum aws_http_version`.
+ */
+AWS_HTTP_API
+int aws_http_alpn_map_init_copy(
+ struct aws_allocator *allocator,
+ struct aws_hash_table *dest,
+ struct aws_hash_table *src);
+
+/**
+ * Initialize an empty hash-table that maps `struct aws_string *` to `enum aws_http_version`.
+ * This map can be used in aws_http_client_connection_options.alpn_string_map.
+ */
+AWS_HTTP_API
+int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map);
+
+/**
+ * Checks http proxy options for correctness
+ */
+AWS_HTTP_API
+int aws_http_options_validate_proxy_configuration(const struct aws_http_client_connection_options *options);
+
+/**
+ * Send a SETTINGS frame (HTTP/2 only).
+ * SETTINGS will be applied locally when SETTINGS ACK is received from peer.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param settings_array The array of settings to change. Note: each setting has its own valid range.
+ * @param num_settings The number of settings to change in settings_array.
+ * @param on_completed Optional callback, see `aws_http2_on_change_settings_complete_fn`.
+ * @param user_data User-data passed to the on_completed callback.
+ */
+AWS_HTTP_API
+int aws_http2_connection_change_settings(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
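+
+/*
+ * Illustrative sketch (not part of the upstream header): sending a SETTINGS
+ * frame that disables server push and enlarges the initial stream window. The
+ * function and callback names are hypothetical; http2_connection is assumed to
+ * be an established HTTP/2 connection.
+ */
+static void s_example_on_settings_applied(struct aws_http_connection *http2_connection, int error_code, void *user_data) {
+    (void)http2_connection;
+    (void)user_data;
+    /* error_code of 0 means the peer acknowledged the settings and they are now in effect. */
+    (void)error_code;
+}
+
+static int s_example_change_settings(struct aws_http_connection *http2_connection) {
+    struct aws_http2_setting settings[] = {
+        {.id = AWS_HTTP2_SETTINGS_ENABLE_PUSH, .value = 0},
+        {.id = AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE, .value = 256 * 1024},
+    };
+    return aws_http2_connection_change_settings(
+        http2_connection,
+        settings,
+        sizeof(settings) / sizeof(settings[0]),
+        s_example_on_settings_applied,
+        NULL /*user_data*/);
+}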
+
+/**
+ * Send a PING frame (HTTP/2 only).
+ * Round-trip-time is calculated when PING ACK is received from peer.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param optional_opaque_data Optional payload for PING frame.
+ * Must be NULL, or exactly 8 bytes (AWS_HTTP2_PING_DATA_SIZE).
+ * If NULL, the 8 byte payload will be all zeroes.
+ * @param on_completed Optional callback, invoked when PING ACK is received from peer,
+ * or when a connection error prevents the PING ACK from being received.
+ * Callback always fires on the connection's event-loop thread.
+ * @param user_data User-data passed to the on_completed callback.
+ */
+AWS_HTTP_API
+int aws_http2_connection_ping(
+ struct aws_http_connection *http2_connection,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data);
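+
+/*
+ * Illustrative sketch (not part of the upstream header): measuring round-trip
+ * time with a PING frame. Passing NULL for the payload sends eight zero bytes.
+ * The function and callback names are hypothetical.
+ */
+static void s_example_on_ping_complete(
+    struct aws_http_connection *http2_connection,
+    uint64_t round_trip_time_ns,
+    int error_code,
+    void *user_data) {
+    (void)http2_connection;
+    (void)user_data;
+    if (error_code == 0) {
+        /* round_trip_time_ns holds the measured round trip in nanoseconds. */
+        (void)round_trip_time_ns;
+    }
+}
+
+static int s_example_ping(struct aws_http_connection *http2_connection) {
+    return aws_http2_connection_ping(http2_connection, NULL /*optional_opaque_data*/, s_example_on_ping_complete, NULL /*user_data*/);
+}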
+
+/**
+ * Get the local settings we are using to affect decoding.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_settings Fixed-size array of aws_http2_setting that gets set to the local settings.
+ */
+AWS_HTTP_API
+void aws_http2_connection_get_local_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+
+/**
+ * Get the settings received from the remote peer, which restrict the messages we may send.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_settings Fixed-size array of aws_http2_setting that gets set to the remote settings.
+ */
+AWS_HTTP_API
+void aws_http2_connection_get_remote_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+
+/**
+ * Send a custom GOAWAY frame (HTTP/2 only).
+ *
+ * Note that the connection automatically attempts to send a GOAWAY during
+ * shutdown (unless a GOAWAY with a valid Last-Stream-ID has already been sent).
+ *
+ * This call can be used to gracefully warn the peer of an impending shutdown
+ * (http2_error=0, allow_more_streams=true), or to customize the final GOAWAY
+ * frame that is sent by this connection.
+ *
+ * The other end may not receive the GOAWAY if the connection has already closed.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param http2_error The HTTP/2 error code (RFC-7540 section 7) to send.
+ * `enum aws_http2_error_code` lists official codes.
+ * @param allow_more_streams If true, new peer-initiated streams will continue
+ * to be acknowledged and the GOAWAY's Last-Stream-ID will be set to a max value.
+ * If false, new peer-initiated streams will be ignored and the GOAWAY's
+ * Last-Stream-ID will be set to the latest acknowledged stream.
+ * @param optional_debug_data Optional debug data to send. Size must not exceed 16KB.
+ */
+
+AWS_HTTP_API
+void aws_http2_connection_send_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+
+/**
+ * Get data about the latest GOAWAY frame sent to peer (HTTP/2 only).
+ * If no GOAWAY has been sent, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE will be raised.
+ * Note that GOAWAY frames are typically sent automatically by the connection
+ * during shutdown.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_http2_error Gets set to HTTP/2 error code sent in most recent GOAWAY.
+ * @param out_last_stream_id Gets set to Last-Stream-ID sent in most recent GOAWAY.
+ */
+AWS_HTTP_API
+int aws_http2_connection_get_sent_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+
+/**
+ * Get data about the latest GOAWAY frame received from peer (HTTP/2 only).
+ * If no GOAWAY has been received, or the GOAWAY payload is still being transmitted,
+ * AWS_ERROR_HTTP_DATA_NOT_AVAILABLE will be raised.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param out_http2_error Gets set to HTTP/2 error code received in most recent GOAWAY.
+ * @param out_last_stream_id Gets set to Last-Stream-ID received in most recent GOAWAY.
+ */
+AWS_HTTP_API
+int aws_http2_connection_get_received_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+
+/**
+ * Increment the connection's flow-control window to keep data flowing (HTTP/2 only).
+ *
+ * If the connection was created with `conn_manual_window_management` set true,
+ * the flow-control window of the connection will shrink as body data is received for all the streams created on it.
+ * (headers, padding, and other metadata do not affect the window).
+ * The initial connection flow-control window is 65,535.
+ * Once the connection's flow-control window reaches 0, all the streams on the connection stop receiving any further
+ * data.
+ *
+ * If `conn_manual_window_management` is false, this call will have no effect.
+ * The connection maintains its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ *
+ * If you are not connected, this call will have no effect.
+ *
+ * Crashes when the connection is not an HTTP/2 connection.
+ * The maximum window size is 2**31 - 1. If the increment would cause the connection's flow-control window to exceed
+ * that maximum, this call results in the connection being lost.
+ *
+ * @param http2_connection HTTP/2 connection.
+ * @param increment_size The amount by which to increment the connection's flow-control window.
+ */
+AWS_HTTP_API
+void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size);
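+
+/*
+ * Illustrative sketch (not part of the upstream header): with
+ * conn_manual_window_management enabled, the connection window is typically
+ * replenished once the application has consumed a chunk of body data. The
+ * function name and the "bytes consumed" bookkeeping are hypothetical
+ * application-level details.
+ */
+static void s_example_replenish_connection_window(struct aws_http_connection *http2_connection, uint32_t bytes_consumed) {
+    /* Allow the peer to send as many additional bytes as we just processed. */
+    aws_http2_connection_update_window(http2_connection, bytes_consumed);
+}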
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_CONNECTION_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h b/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h
new file mode 100644
index 0000000000..4c02df9382
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/connection_manager.h
@@ -0,0 +1,194 @@
+#ifndef AWS_HTTP_CONNECTION_MANAGER_H
+#define AWS_HTTP_CONNECTION_MANAGER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/common/byte_buf.h>
+
+struct aws_client_bootstrap;
+struct aws_http_connection;
+struct aws_http_connection_manager;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+struct proxy_env_var_settings;
+struct aws_http2_setting;
+
+typedef void(aws_http_connection_manager_on_connection_setup_fn)(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data);
+
+typedef void(aws_http_connection_manager_shutdown_complete_fn)(void *user_data);
+
+/**
+ * Metrics for logging and debugging purposes.
+ */
+struct aws_http_manager_metrics {
+ /**
+ * The number of additional concurrent requests that can be supported by the HTTP manager without needing to
+ * establish additional connections to the target server.
+ *
+ * For a connection manager, it equals the number of idle connections.
+ * For a stream manager, it equals the number of streams that could be made without creating a new
+ * connection, although the implementation may create a new connection before existing ones are fully utilized.
+ */
+ size_t available_concurrency;
+ /* The number of requests that are awaiting concurrency to be made available from the HTTP manager. */
+ size_t pending_concurrency_acquires;
+ /* The number of connections (HTTP/1.1) or streams (for HTTP/2 via the stream manager) currently vended to the user. */
+ size_t leased_concurrency;
+};
+
+/*
+ * Connection manager configuration struct.
+ *
+ * Contains all of the configuration needed to create an http connection as well as
+ * the maximum number of connections to ever have in existence.
+ */
+struct aws_http_connection_manager_options {
+ /*
+ * HTTP connection configuration; see `struct aws_http_client_connection_options` for details of each field
+ */
+ struct aws_client_bootstrap *bootstrap;
+ size_t initial_window_size;
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Options to create secure (HTTPS) connections.
+ * For secure connections, set "h2" in the ALPN string for HTTP/2, otherwise HTTP/1.1 is used.
+ *
+ * Leave NULL to create cleartext (HTTP) connections.
+ * For cleartext connections, use `http2_prior_knowledge` (RFC-7540 3.4)
+ * to control whether they are treated as HTTP/1.1 or HTTP/2.
+ */
+ const struct aws_tls_connection_options *tls_connection_options;
+
+ /**
+ * Specify whether you have prior knowledge that cleartext (HTTP) connections are HTTP/2 (RFC-7540 3.4).
+ * If false, then cleartext connections are treated as HTTP/1.1.
+ * It is illegal to set this true when secure connections are being used.
+ * Note that upgrading from HTTP/1.1 to HTTP/2 is not supported (RFC-7540 3.2).
+ */
+ bool http2_prior_knowledge;
+
+ const struct aws_http_connection_monitoring_options *monitoring_options;
+ struct aws_byte_cursor host;
+ uint16_t port;
+
+ /**
+ * Optional.
+ * HTTP/2-specific configuration. See `struct aws_http2_connection_options` for details of each field
+ */
+ const struct aws_http2_setting *initial_settings_array;
+ size_t num_initial_settings;
+ size_t max_closed_streams;
+ bool http2_conn_manual_window_management;
+
+ /* Proxy configuration for http connection */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /*
+ * Optional.
+ * Configuration for using proxy from environment variable.
+ * Only works when proxy_options is not set.
+ */
+ const struct proxy_env_var_settings *proxy_ev_settings;
+
+ /*
+ * Maximum number of connections this manager is allowed to contain
+ */
+ size_t max_connections;
+
+ /*
+ * Callback and associated user data to invoke when the connection manager has
+ * completely shutdown and has finished deleting itself.
+ * Technically optional, but correctness may be impossible without it.
+ */
+ void *shutdown_complete_user_data;
+ aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback;
+
+ /**
+ * If set to true, the read back pressure mechanism will be enabled.
+ */
+ bool enable_read_back_pressure;
+
+ /**
+ * If set to a non-zero value, then connections that stay in the pool longer than the specified
+ * timeout will be closed automatically.
+ */
+ uint64_t max_connection_idle_in_milliseconds;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Connection managers are ref counted. Adds one external ref to the manager.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_acquire(struct aws_http_connection_manager *manager);
+
+/*
+ * Connection managers are ref counted. Removes one external ref from the manager.
+ *
+ * When the ref count goes to zero, the connection manager begins its shut down
+ * process. All pending connection acquisitions are failed (with callbacks
+ * invoked) and any (erroneous) subsequent attempts to acquire a connection
+ * fail immediately. The connection manager destroys itself once all pending
+ * asynchronous activities have resolved.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_release(struct aws_http_connection_manager *manager);
+
+/*
+ * Creates a new connection manager with the supplied configuration options.
+ *
+ * The returned connection manager begins with a ref count of 1.
+ */
+AWS_HTTP_API
+struct aws_http_connection_manager *aws_http_connection_manager_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options);
+
+/*
+ * Requests a connection from the manager. The requester is notified of
+ * an acquired connection (or failure to acquire) via the supplied callback.
+ *
+ * For HTTP/2 connections, the callback will not fire until the server's settings have been received.
+ *
+ * Once a connection has been successfully acquired from the manager it
+ * must be released back (via aws_http_connection_manager_release_connection)
+ * at some point. Failure to do so will cause a resource leak.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_acquire_connection(
+ struct aws_http_connection_manager *manager,
+ aws_http_connection_manager_on_connection_setup_fn *callback,
+ void *user_data);
+
+/*
+ * Returns a connection back to the manager. All acquired connections must
+ * eventually be released back to the manager in order to avoid a resource leak.
+ *
+ * Note: this can cause another acquisition callback to be invoked on the calling thread.
+ */
+AWS_HTTP_API
+int aws_http_connection_manager_release_connection(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection *connection);
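+
+/*
+ * Illustrative sketch (not part of the upstream header): the acquire/release
+ * life cycle. The function and callback names are hypothetical; a real
+ * application would make one or more requests on the connection before
+ * handing it back to the pool.
+ */
+static void s_example_on_connection_acquired(struct aws_http_connection *connection, int error_code, void *user_data) {
+    struct aws_http_connection_manager *manager = user_data;
+    if (error_code || connection == NULL) {
+        /* Acquisition failed; nothing to release. */
+        return;
+    }
+    /* ... use the connection ... then return it so it can be reused or culled. */
+    aws_http_connection_manager_release_connection(manager, connection);
+}
+
+static void s_example_acquire(struct aws_http_connection_manager *manager) {
+    aws_http_connection_manager_acquire_connection(manager, s_example_on_connection_acquired, manager /*user_data*/);
+}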
+
+/**
+ * Fetch the current manager metrics from the connection manager.
+ */
+AWS_HTTP_API
+void aws_http_connection_manager_fetch_metrics(
+ const struct aws_http_connection_manager *manager,
+ struct aws_http_manager_metrics *out_metrics);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_CONNECTION_MANAGER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/exports.h b/contrib/restricted/aws/aws-c-http/include/aws/http/exports.h
new file mode 100644
index 0000000000..8b728c7d4b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/exports.h
@@ -0,0 +1,29 @@
+#ifndef AWS_HTTP_EXPORTS_H
+#define AWS_HTTP_EXPORTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_HTTP_USE_IMPORT_EXPORT
+# ifdef AWS_HTTP_EXPORTS
+# define AWS_HTTP_API __declspec(dllexport)
+# else
+# define AWS_HTTP_API __declspec(dllimport)
+# endif /* AWS_HTTP_EXPORTS */
+# else
+# define AWS_HTTP_API
+# endif /* USE_IMPORT_EXPORT */
+
+#else
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_HTTP_USE_IMPORT_EXPORT) && defined(AWS_HTTP_EXPORTS)
+# define AWS_HTTP_API __attribute__((visibility("default")))
+# else
+# define AWS_HTTP_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */
+
+#endif /* AWS_HTTP_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/http.h b/contrib/restricted/aws/aws-c-http/include/aws/http/http.h
new file mode 100644
index 0000000000..f02f09dc3e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/http.h
@@ -0,0 +1,158 @@
+#ifndef AWS_HTTP_H
+#define AWS_HTTP_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/logging.h>
+#include <aws/http/exports.h>
+#include <aws/io/io.h>
+
+#define AWS_C_HTTP_PACKAGE_ID 2
+
+enum aws_http_errors {
+ AWS_ERROR_HTTP_UNKNOWN = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID),
+ AWS_ERROR_HTTP_HEADER_NOT_FOUND,
+ AWS_ERROR_HTTP_INVALID_HEADER_FIELD,
+ AWS_ERROR_HTTP_INVALID_HEADER_NAME,
+ AWS_ERROR_HTTP_INVALID_HEADER_VALUE,
+ AWS_ERROR_HTTP_INVALID_METHOD,
+ AWS_ERROR_HTTP_INVALID_PATH,
+ AWS_ERROR_HTTP_INVALID_STATUS_CODE,
+ AWS_ERROR_HTTP_MISSING_BODY_STREAM,
+ AWS_ERROR_HTTP_INVALID_BODY_STREAM,
+ AWS_ERROR_HTTP_CONNECTION_CLOSED,
+ AWS_ERROR_HTTP_SWITCHED_PROTOCOLS,
+ AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL,
+ AWS_ERROR_HTTP_REACTION_REQUIRED,
+ AWS_ERROR_HTTP_DATA_NOT_AVAILABLE,
+ AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT,
+ AWS_ERROR_HTTP_CALLBACK_FAILURE,
+ AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE,
+ AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT,
+ AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER,
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_INVALID_STATE_FOR_ACQUIRE,
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW,
+ AWS_ERROR_HTTP_SERVER_CLOSED,
+ AWS_ERROR_HTTP_PROXY_CONNECT_FAILED,
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN,
+ AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE,
+ AWS_ERROR_HTTP_PROTOCOL_ERROR,
+ AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED,
+ AWS_ERROR_HTTP_GOAWAY_RECEIVED,
+ AWS_ERROR_HTTP_RST_STREAM_RECEIVED,
+ AWS_ERROR_HTTP_RST_STREAM_SENT,
+ AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED,
+ AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING,
+ AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE,
+ AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE,
+ AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE,
+ AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED,
+ AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN,
+ AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE,
+ AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION,
+ AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR,
+ AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED,
+ AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED,
+
+ AWS_ERROR_HTTP_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_HTTP_PACKAGE_ID)
+};
+
+/* Error codes that may be present in HTTP/2 RST_STREAM and GOAWAY frames (RFC-7540 7). */
+enum aws_http2_error_code {
+ AWS_HTTP2_ERR_NO_ERROR = 0x00,
+ AWS_HTTP2_ERR_PROTOCOL_ERROR = 0x01,
+ AWS_HTTP2_ERR_INTERNAL_ERROR = 0x02,
+ AWS_HTTP2_ERR_FLOW_CONTROL_ERROR = 0x03,
+ AWS_HTTP2_ERR_SETTINGS_TIMEOUT = 0x04,
+ AWS_HTTP2_ERR_STREAM_CLOSED = 0x05,
+ AWS_HTTP2_ERR_FRAME_SIZE_ERROR = 0x06,
+ AWS_HTTP2_ERR_REFUSED_STREAM = 0x07,
+ AWS_HTTP2_ERR_CANCEL = 0x08,
+ AWS_HTTP2_ERR_COMPRESSION_ERROR = 0x09,
+ AWS_HTTP2_ERR_CONNECT_ERROR = 0x0A,
+ AWS_HTTP2_ERR_ENHANCE_YOUR_CALM = 0x0B,
+ AWS_HTTP2_ERR_INADEQUATE_SECURITY = 0x0C,
+ AWS_HTTP2_ERR_HTTP_1_1_REQUIRED = 0x0D,
+ AWS_HTTP2_ERR_COUNT,
+};
+
+enum aws_http_log_subject {
+ AWS_LS_HTTP_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID),
+ AWS_LS_HTTP_CONNECTION,
+ AWS_LS_HTTP_ENCODER,
+ AWS_LS_HTTP_DECODER,
+ AWS_LS_HTTP_SERVER,
+ AWS_LS_HTTP_STREAM,
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ AWS_LS_HTTP_STREAM_MANAGER,
+ AWS_LS_HTTP_WEBSOCKET,
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ AWS_LS_HTTP_PROXY_NEGOTIATION,
+};
+
+enum aws_http_version {
+ AWS_HTTP_VERSION_UNKNOWN, /* Invalid version. */
+ AWS_HTTP_VERSION_1_0,
+ AWS_HTTP_VERSION_1_1,
+ AWS_HTTP_VERSION_2,
+ AWS_HTTP_VERSION_COUNT,
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes internal datastructures used by aws-c-http.
+ * Must be called before using any functionality in aws-c-http.
+ */
+AWS_HTTP_API
+void aws_http_library_init(struct aws_allocator *alloc);
+
+/**
+ * Clean up internal datastructures used by aws-c-http.
+ * Must not be called until the application is done using functionality in aws-c-http.
+ */
+AWS_HTTP_API
+void aws_http_library_clean_up(void);
+
+/**
+ * Returns the description of common status codes.
+ * Ex: 404 -> "Not Found"
+ * An empty string is returned if the status code is not recognized.
+ */
+AWS_HTTP_API
+const char *aws_http_status_text(int status_code);
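+
+/*
+ * A minimal lifecycle sketch (assumes a valid `struct aws_allocator *allocator`,
+ * e.g. aws_default_allocator() from aws-c-common):
+ *
+ *     aws_http_library_init(allocator);
+ *     const char *text = aws_http_status_text(404); // "Not Found"
+ *     // ... use the rest of aws-c-http ...
+ *     aws_http_library_clean_up();
+ */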
+
+/**
+ * Shortcuts for common HTTP request methods
+ */
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_get;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_head;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_post;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_put;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_delete;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_connect;
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_http_method_options;
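+
+/*
+ * These cursors can be compared against a method parsed off the wire; a small sketch
+ * (assumes aws_byte_cursor_eq_ignore_case() from aws-c-common and an `incoming_method` cursor):
+ *
+ *     bool is_get = aws_byte_cursor_eq_ignore_case(&incoming_method, &aws_http_method_get);
+ */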
+
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_method;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_scheme;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_authority;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_path;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_header_status;
+
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_http;
+AWS_HTTP_API extern const struct aws_byte_cursor aws_http_scheme_https;
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h b/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h
new file mode 100644
index 0000000000..c37da489aa
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/http2_stream_manager.h
@@ -0,0 +1,215 @@
+#ifndef AWS_HTTP2_STREAM_MANAGER_H
+#define AWS_HTTP2_STREAM_MANAGER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http2_stream_manager;
+struct aws_client_bootstrap;
+struct aws_http_connection;
+struct aws_http_connection_manager;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+struct proxy_env_var_settings;
+struct aws_http2_setting;
+struct aws_http_make_request_options;
+struct aws_http_stream;
+struct aws_http_manager_metrics;
+
+/**
+ * Always invoked asynchronously when stream creation completes, successfully or not.
+ * When stream is NULL, the error code will be set to indicate what happened.
+ * If a stream is returned, you own the stream completely.
+ * Invoked on the same thread as the stream's other callbacks, which will ideally be the connection's thread.
+ * If no connection was made, the callback will be invoked from a separate thread.
+ */
+typedef void(
+ aws_http2_stream_manager_on_stream_acquired_fn)(struct aws_http_stream *stream, int error_code, void *user_data);
+
+/**
+ * Invoked asynchronously when the stream manager has been shut down completely.
+ * Never invoked when `aws_http2_stream_manager_new` failed.
+ */
+typedef void(aws_http2_stream_manager_shutdown_complete_fn)(void *user_data);
+
+/**
+ * HTTP/2 stream manager configuration struct.
+ *
+ * Contains all of the configuration needed to create an HTTP/2 connection, as well as the
+ * underlying connection manager.
+ */
+struct aws_http2_stream_manager_options {
+ /**
+ * basic http connection configuration
+ */
+ struct aws_client_bootstrap *bootstrap;
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Options to create secure (HTTPS) connections.
+ * For secure connections, the ALPN string must be "h2".
+ *
+ * To create cleartext (HTTP) connections, leave this NULL
+ * and set `http2_prior_knowledge` (RFC-7540 3.4).
+ */
+ const struct aws_tls_connection_options *tls_connection_options;
+
+ /**
+ * Specify whether you have prior knowledge that cleartext (HTTP) connections are HTTP/2 (RFC-7540 3.4).
+ * It is illegal to set this true when secure connections are being used.
+ * Note that upgrading from HTTP/1.1 to HTTP/2 is not supported (RFC-7540 3.2).
+ */
+ bool http2_prior_knowledge;
+
+ struct aws_byte_cursor host;
+ uint16_t port;
+
+ /**
+ * Optional.
+ * HTTP/2 connection configuration. Check `struct aws_http2_connection_options` for details of each config.
+ * Notes for window control:
+ * - By default, the client will maintain its flow-control windows such that no back-pressure is applied and data
+ * arrives as fast as possible.
+ * - For connection level window control, `conn_manual_window_management` will enable manual control. The
+ * initial window size is not controllable.
+ * - For stream level window control, `enable_read_back_pressure` will enable manual control. The initial window
+ * size needs to be set through `initial_settings_array`.
+ */
+ const struct aws_http2_setting *initial_settings_array;
+ size_t num_initial_settings;
+ size_t max_closed_streams;
+ bool conn_manual_window_management;
+
+ /**
+ * HTTP/2 Stream window control.
+ * If set to true, the read back pressure mechanism will be enabled for streams created.
+ * The initial window size can be set by `AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE` via `initial_settings_array`
+ */
+ bool enable_read_back_pressure;
+
+ /* Connection monitor for the underlying connections made */
+ const struct aws_http_connection_monitoring_options *monitoring_options;
+
+ /* Optional. Proxy configuration for underlying http connection */
+ const struct aws_http_proxy_options *proxy_options;
+ const struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Required.
+ * When the stream manager finishes deleting all the resources, the callback will be invoked.
+ */
+ void *shutdown_complete_user_data;
+ aws_http2_stream_manager_shutdown_complete_fn *shutdown_complete_callback;
+
+ /**
+ * Optional.
+ * When set, the connection will be closed if a 5xx response is received from the server.
+ */
+ bool close_connection_on_server_error;
+ /**
+ * Optional.
+ * The period, in milliseconds, at which each connection held by the stream manager sends a PING.
+ * If you specify 0, the manager will NOT send any PING.
+ * Note: if set, it must be larger than the ping timeout setting.
+ */
+ size_t connection_ping_period_ms;
+ /**
+ * Optional.
+ * Network connection will be closed if a ping response is not received
+ * within this amount of time (milliseconds).
+ * If you specify 0, a default value will be used.
+ */
+ size_t connection_ping_timeout_ms;
+
+ /* TODO: More flexible policy about the connections, but it will always have these three values below. */
+ /**
+ * Optional.
+ * 0 will be considered as using the default value.
+ * The ideal number of concurrent streams for a connection. The stream manager will try to create a new connection
+ * once a connection reaches this number. But if the max number of connections has been reached, the manager will
+ * reuse connections to create the acquired streams as much as possible. */
+ size_t ideal_concurrent_streams_per_connection;
+ /**
+ * Optional.
+ * Default is no limit, which will use the limit advertised by the server. 0 will be considered as using the
+ * default value.
+ * The real number of concurrent streams per connection will be the minimum of the setting from the other end
+ * and the value here.
+ */
+ size_t max_concurrent_streams_per_connection;
+ /**
+ * Required.
+ * The max number of connections that will be open at the same time. If all the connections are full, the manager
+ * will wait until one becomes available to vend more streams. */
+ size_t max_connections;
+};
+
+struct aws_http2_stream_manager_acquire_stream_options {
+ /**
+ * Required.
+ * Invoked when the stream manager finishes acquiring the stream.
+ */
+ aws_http2_stream_manager_on_stream_acquired_fn *callback;
+ /**
+ * Optional.
+ * User data for the callback.
+ */
+ void *user_data;
+ /* Required. see `aws_http_make_request_options` */
+ const struct aws_http_make_request_options *options;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Acquire a refcount on the stream manager; the stream manager starts destroying itself once the refcount drops
+ * to zero. NULL is acceptable. Initial refcount after new is 1.
+ *
+ * @param manager
+ * @return The same pointer that was passed in.
+ */
+AWS_HTTP_API
+struct aws_http2_stream_manager *aws_http2_stream_manager_acquire(struct aws_http2_stream_manager *manager);
+
+/**
+ * Release a refcount on the stream manager; the stream manager starts destroying itself once the refcount drops
+ * to zero. NULL is acceptable. Initial refcount after new is 1.
+ *
+ * @param manager
+ * @return NULL
+ */
+AWS_HTTP_API
+struct aws_http2_stream_manager *aws_http2_stream_manager_release(struct aws_http2_stream_manager *manager);
+
+AWS_HTTP_API
+struct aws_http2_stream_manager *aws_http2_stream_manager_new(
+ struct aws_allocator *allocator,
+ const struct aws_http2_stream_manager_options *options);
+
+/**
+ * Acquire a stream from the stream manager asynchronously.
+ *
+ * @param http2_stream_manager
+ * @param acquire_stream_option see `aws_http2_stream_manager_acquire_stream_options`
+ */
+AWS_HTTP_API
+void aws_http2_stream_manager_acquire_stream(
+ struct aws_http2_stream_manager *http2_stream_manager,
+ const struct aws_http2_stream_manager_acquire_stream_options *acquire_stream_option);
+
+/**
+ * Fetch the current metrics from the stream manager.
+ *
+ * @param http2_stream_manager
+ * @param out_metrics The metrics to be fetched
+ */
+AWS_HTTP_API
+void aws_http2_stream_manager_fetch_metrics(
+ const struct aws_http2_stream_manager *http2_stream_manager,
+ struct aws_http_manager_metrics *out_metrics);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP2_STREAM_MANAGER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h
new file mode 100644
index 0000000000..a97ab0daba
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_impl.h
@@ -0,0 +1,210 @@
+#ifndef AWS_HTTP_CONNECTION_IMPL_H
+#define AWS_HTTP_CONNECTION_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection.h>
+
+#include <aws/http/private/http_impl.h>
+#include <aws/http/server.h>
+
+#include <aws/common/atomics.h>
+#include <aws/io/channel.h>
+#include <aws/io/channel_bootstrap.h>
+
+struct aws_http_message;
+struct aws_http_make_request_options;
+struct aws_http_request_handler_options;
+struct aws_http_stream;
+
+typedef int aws_client_bootstrap_new_socket_channel_fn(struct aws_socket_channel_bootstrap_options *options);
+
+struct aws_http_connection_system_vtable {
+ aws_client_bootstrap_new_socket_channel_fn *new_socket_channel;
+};
+
+struct aws_http_connection_vtable {
+ struct aws_channel_handler_vtable channel_handler_vtable;
+
+ /* This is a callback I wish was in aws_channel_handler_vtable. */
+ void (*on_channel_handler_installed)(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
+
+ struct aws_http_stream *(*make_request)(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+ struct aws_http_stream *(*new_server_request_handler_stream)(
+ const struct aws_http_request_handler_options *options);
+ int (*stream_send_response)(struct aws_http_stream *stream, struct aws_http_message *response);
+ void (*close)(struct aws_http_connection *connection);
+ void (*stop_new_requests)(struct aws_http_connection *connection);
+ bool (*is_open)(const struct aws_http_connection *connection);
+ bool (*new_requests_allowed)(const struct aws_http_connection *connection);
+
+ /* HTTP/2 specific functions */
+ void (*update_window)(struct aws_http_connection *connection, uint32_t increment_size);
+ int (*change_settings)(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+ int (*send_ping)(
+ struct aws_http_connection *http2_connection,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data);
+ void (*send_goaway)(
+ struct aws_http_connection *http2_connection,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+ int (*get_sent_goaway)(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+ int (*get_received_goaway)(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+ void (*get_local_settings)(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+ void (*get_remote_settings)(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+};
+
+typedef int(aws_http_proxy_request_transform_fn)(struct aws_http_message *request, void *user_data);
+
+/**
+ * Base class for connections.
+ * There are specific implementations for each HTTP version.
+ */
+struct aws_http_connection {
+ const struct aws_http_connection_vtable *vtable;
+ struct aws_channel_handler channel_handler;
+ struct aws_channel_slot *channel_slot;
+ struct aws_allocator *alloc;
+ enum aws_http_version http_version;
+
+ aws_http_proxy_request_transform_fn *proxy_request_transform;
+ void *user_data;
+
+ /* Connection starts with 1 hold for the user.
+ * aws_http_streams will also acquire holds on their connection for the duration of their lifetime */
+ struct aws_atomic_var refcount;
+
+ /* Starts at either 1 or 2, increments by two with each new stream */
+ uint32_t next_stream_id;
+
+ union {
+ struct aws_http_connection_client_data {
+ uint8_t delete_me; /* exists to prevent "empty struct" errors */
+ } client;
+
+ struct aws_http_connection_server_data {
+ aws_http_on_incoming_request_fn *on_incoming_request;
+ aws_http_on_server_connection_shutdown_fn *on_shutdown;
+ } server;
+ } client_or_server_data;
+
+ /* On client connections, `client_data` points to client_or_server_data.client and `server_data` is null.
+ * Opposite is true on server connections */
+ struct aws_http_connection_client_data *client_data;
+ struct aws_http_connection_server_data *server_data;
+
+ bool stream_manual_window_management;
+};
+
+/* Gets a client connection up and running.
+ * Responsible for firing on_setup and on_shutdown callbacks. */
+struct aws_http_client_bootstrap {
+ struct aws_allocator *alloc;
+ bool is_using_tls;
+ bool stream_manual_window_management;
+ bool prior_knowledge_http2;
+ size_t initial_window_size;
+ struct aws_http_connection_monitoring_options monitoring_options;
+ void *user_data;
+ aws_http_on_client_connection_setup_fn *on_setup;
+ aws_http_on_client_connection_shutdown_fn *on_shutdown;
+ aws_http_proxy_request_transform_fn *proxy_request_transform;
+
+ struct aws_http1_connection_options http1_options;
+ struct aws_http2_connection_options http2_options; /* allocated with bootstrap */
+ struct aws_hash_table *alpn_string_map; /* allocated with bootstrap */
+ struct aws_http_connection *connection;
+};
+
+AWS_EXTERN_C_BEGIN
+AWS_HTTP_API
+void aws_http_client_bootstrap_destroy(struct aws_http_client_bootstrap *bootstrap);
+
+AWS_HTTP_API
+void aws_http_connection_set_system_vtable(const struct aws_http_connection_system_vtable *system_vtable);
+
+AWS_HTTP_API
+int aws_http_client_connect_internal(
+ const struct aws_http_client_connection_options *options,
+ aws_http_proxy_request_transform_fn *proxy_request_transform);
+
+/**
+ * Internal API for adding a reference to a connection
+ */
+AWS_HTTP_API
+void aws_http_connection_acquire(struct aws_http_connection *connection);
+
+/**
+ * Allow tests to fake stats data
+ */
+AWS_HTTP_API
+struct aws_crt_statistics_http1_channel *aws_h1_connection_get_statistics(struct aws_http_connection *connection);
+
+/**
+ * Gets the next available stream id within the connection. Valid for creating both h1 and h2 streams.
+ *
+ * This function is not thread-safe.
+ *
+ * Returns 0 if there was an error.
+ */
+AWS_HTTP_API
+uint32_t aws_http_connection_get_next_stream_id(struct aws_http_connection *connection);
+
+/**
+ * Layers an http channel handler/connection onto a channel. Moved from internal to private so that the proxy
+ * logic could apply a new http connection/handler after tunneling proxy negotiation (into http) is finished.
+ * This is a synchronous operation.
+ *
+ * @param alloc memory allocator to use
+ * @param channel channel to apply the http handler/connection to
+ * @param is_server should the handler behave like an http server
+ * @param is_using_tls whether TLS is being used (do an ALPN check of the channel handler to the left)
+ * @param manual_window_management whether manual window management is enabled
+ * @param prior_knowledge_http2 whether the cleartext connection is known to be HTTP/2 (prior knowledge)
+ * @param initial_window_size the initial window size to use
+ * @param alpn_string_map the customized ALPN string map from `struct aws_string *` to `enum aws_http_version`.
+ * @param http1_options http1 options
+ * @param http2_options http2 options
+ * @return a new http connection or NULL on failure
+ */
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_channel_handler(
+ struct aws_allocator *alloc,
+ struct aws_channel *channel,
+ bool is_server,
+ bool is_using_tls,
+ bool manual_window_management,
+ bool prior_knowledge_http2,
+ size_t initial_window_size,
+ const struct aws_hash_table *alpn_string_map,
+ const struct aws_http1_connection_options *http1_options,
+ const struct aws_http2_connection_options *http2_options,
+ void *connection_user_data);
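+
+/*
+ * A minimal call sketch (assumes an existing `channel` and `alloc`; the option structs are
+ * zero-initialized here purely for illustration):
+ *
+ *     struct aws_http1_connection_options http1_options;
+ *     AWS_ZERO_STRUCT(http1_options);
+ *     struct aws_http2_connection_options http2_options;
+ *     AWS_ZERO_STRUCT(http2_options);
+ *     struct aws_http_connection *connection = aws_http_connection_new_channel_handler(
+ *         alloc,
+ *         channel,
+ *         false,    // is_server
+ *         true,     // is_using_tls
+ *         false,    // manual_window_management
+ *         false,    // prior_knowledge_http2
+ *         SIZE_MAX, // initial_window_size
+ *         NULL,     // alpn_string_map: use default ALPN handling
+ *         &http1_options,
+ *         &http2_options,
+ *         NULL);    // connection_user_data
+ */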
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_CONNECTION_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h
new file mode 100644
index 0000000000..115ba66136
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_manager_system_vtable.h
@@ -0,0 +1,50 @@
+#ifndef AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H
+#define AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/http/connection.h>
+
+struct aws_http_connection_manager;
+
+typedef int(aws_http_connection_manager_create_connection_fn)(const struct aws_http_client_connection_options *options);
+typedef void(aws_http_connection_manager_close_connection_fn)(struct aws_http_connection *connection);
+typedef void(aws_http_connection_release_connection_fn)(struct aws_http_connection *connection);
+typedef bool(aws_http_connection_is_connection_available_fn)(const struct aws_http_connection *connection);
+typedef bool(aws_http_connection_manager_is_callers_thread_fn)(struct aws_channel *channel);
+typedef struct aws_channel *(aws_http_connection_manager_connection_get_channel_fn)(
+ struct aws_http_connection *connection);
+typedef enum aws_http_version(aws_http_connection_manager_connection_get_version_fn)(
+ const struct aws_http_connection *connection);
+
+struct aws_http_connection_manager_system_vtable {
+ /*
+ * Downstream http functions
+ */
+ aws_http_connection_manager_create_connection_fn *create_connection;
+ aws_http_connection_manager_close_connection_fn *close_connection;
+ aws_http_connection_release_connection_fn *release_connection;
+ aws_http_connection_is_connection_available_fn *is_connection_available;
+ aws_io_clock_fn *get_monotonic_time;
+ aws_http_connection_manager_is_callers_thread_fn *is_callers_thread;
+ aws_http_connection_manager_connection_get_channel_fn *connection_get_channel;
+ aws_http_connection_manager_connection_get_version_fn *connection_get_version;
+};
+
+AWS_HTTP_API
+bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table);
+
+AWS_HTTP_API
+void aws_http_connection_manager_set_system_vtable(
+ struct aws_http_connection_manager *manager,
+ const struct aws_http_connection_manager_system_vtable *system_vtable);
+
+AWS_HTTP_API
+extern const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr;
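+
+/*
+ * A test-override sketch (assumes `s_mock_create_connection` is a test double matching
+ * aws_http_connection_manager_create_connection_fn, and `manager` already exists):
+ *
+ *     struct aws_http_connection_manager_system_vtable mock_vtable =
+ *         *g_aws_http_connection_manager_default_system_vtable_ptr;
+ *     mock_vtable.create_connection = s_mock_create_connection;
+ *     AWS_FATAL_ASSERT(aws_http_connection_manager_system_vtable_is_valid(&mock_vtable));
+ *     aws_http_connection_manager_set_system_vtable(manager, &mock_vtable);
+ */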
+
+#endif /* AWS_HTTP_CONNECTION_MANAGER_SYSTEM_VTABLE_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h
new file mode 100644
index 0000000000..0dee2d84db
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/connection_monitor.h
@@ -0,0 +1,46 @@
+#ifndef AWS_HTTP_HTTP_MONITOR_H
+#define AWS_HTTP_HTTP_MONITOR_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection.h>
+#include <aws/http/http.h>
+
+struct aws_allocator;
+struct aws_crt_statistics_handler;
+
+/*
+ * Needed by tests
+ */
+struct aws_statistics_handler_http_connection_monitor_impl {
+ struct aws_http_connection_monitoring_options options;
+
+ uint64_t throughput_failure_time_ms;
+ uint32_t last_incoming_stream_id;
+ uint32_t last_outgoing_stream_id;
+ uint64_t last_measured_throughput;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new http connection monitor that regularly checks the connection's throughput and shuts the connection
+ * down if a minimum threshold is not met for a configurable number of seconds.
+ */
+AWS_HTTP_API
+struct aws_crt_statistics_handler *aws_crt_statistics_handler_new_http_connection_monitor(
+ struct aws_allocator *allocator,
+ struct aws_http_connection_monitoring_options *options);
+
+/**
+ * Validates monitoring options to ensure they are sensible
+ */
+AWS_HTTP_API
+bool aws_http_connection_monitoring_options_is_valid(const struct aws_http_connection_monitoring_options *options);
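+
+/*
+ * A minimal setup sketch (the throughput/interval fields live in the public
+ * aws_http_connection_monitoring_options struct in connection.h and are elided here):
+ *
+ *     struct aws_http_connection_monitoring_options monitoring_options;
+ *     AWS_ZERO_STRUCT(monitoring_options);
+ *     // ... fill in the minimum-throughput and failure-interval fields ...
+ *     if (aws_http_connection_monitoring_options_is_valid(&monitoring_options)) {
+ *         struct aws_crt_statistics_handler *monitor =
+ *             aws_crt_statistics_handler_new_http_connection_monitor(allocator, &monitoring_options);
+ *         // attach `monitor` to the channel that owns the connection
+ *     }
+ */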
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_HTTP_MONITOR_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h
new file mode 100644
index 0000000000..86a5124eaf
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_connection.h
@@ -0,0 +1,201 @@
+#ifndef AWS_HTTP_H1_CONNECTION_H
+#define AWS_HTTP_H1_CONNECTION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/mutex.h>
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/h1_encoder.h>
+#include <aws/http/statistics.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4214) /* nonstandard extension used: bit field types other than int */
+#endif
+
+struct aws_h1_connection {
+ struct aws_http_connection base;
+
+ size_t initial_stream_window_size;
+
+ /* Task responsible for sending data.
+ * As long as there is data available to send, the task will be "active" and repeatedly:
+ * 1) Encode outgoing stream data to an aws_io_message and send it up the channel.
+ * 2) Wait until the aws_io_message's write_complete callback fires.
+ * 3) Reschedule the task to run again.
+ *
+ * `thread_data.is_outgoing_stream_task_active` tells whether the task is "active".
+ *
+ * If there is no data available to write (waiting for user to add more streams or chunks),
+ * then the task stops being active. The task is made active again when the user
+ * adds more outgoing data. */
+ struct aws_channel_task outgoing_stream_task;
+
+ /* Task that removes items from `synced_data` and does their on-thread work.
+ * Runs once and waits until it's scheduled again.
+ * Any function that wants to schedule this task MUST:
+ * - acquire the synced_data.lock
+ * - check whether `synced_data.is_cross_thread_work_task_scheduled` was true or false.
+ * - set `synced_data.is_cross_thread_work_task_scheduled = true`
+ * - release synced_data.lock
+ * - ONLY IF `synced_data.is_cross_thread_work_task_scheduled` CHANGED from false to true:
+ * - then schedule the task
+ * (A sketch of this sequence appears after this struct definition.)
+ */
+ struct aws_channel_task cross_thread_work_task;
+
+ /* Only the event-loop thread may touch this data */
+ struct {
+ /* List of streams being worked on. */
+ struct aws_linked_list stream_list;
+
+ /* Points to the stream whose data is currently being sent.
+ * This stream is ALWAYS in the `stream_list`.
+ * HTTP pipelining is supported, so once the stream is completely written
+ * we'll start working on the next stream in the list */
+ struct aws_h1_stream *outgoing_stream;
+
+ /* Points to the stream being decoded.
+ * This stream is ALWAYS in the `stream_list`. */
+ struct aws_h1_stream *incoming_stream;
+ struct aws_h1_decoder *incoming_stream_decoder;
+
+ /* Used to encode requests and responses */
+ struct aws_h1_encoder encoder;
+
+ /**
+ * All aws_io_messages arriving in the read direction are queued here before processing.
+ * This allows the connection to receive more data than the current HTTP-stream might allow,
+ * and process the data later when HTTP-stream's window opens or the next stream begins.
+ *
+ * The `aws_io_message.copy_mark` is used to track progress on partially processed messages.
+ * `pending_bytes` is the sum of all unprocessed bytes across all queued messages.
+ * `capacity` is the limit for how many unprocessed bytes we'd like in the queue.
+ */
+ struct {
+ struct aws_linked_list messages;
+ size_t pending_bytes;
+ size_t capacity;
+ } read_buffer;
+
+ /**
+ * The connection's current window size.
+ * We use this variable, instead of the existing `aws_channel_slot.window_size`,
+ * because that variable is not updated immediately, the channel uses a task to update it.
+ * Since we use the difference between current and desired window size when deciding
+ * how much to increment, we need the most up-to-date values possible.
+ */
+ size_t connection_window;
+
+ /* Only used by tests. Sum of window_increments issued by this slot. Resets each time it's queried */
+ size_t recent_window_increments;
+
+ struct aws_crt_statistics_http1_channel stats;
+
+ uint64_t outgoing_stream_timestamp_ns;
+ uint64_t incoming_stream_timestamp_ns;
+
+ /* True when reading and/or writing has stopped, whether due to errors or normal channel shutdown. */
+ bool is_reading_stopped : 1;
+ bool is_writing_stopped : 1;
+
+ /* If true, the connection has upgraded to another protocol.
+ * It will pass data to adjacent channel handlers without altering it.
+ * The connection can no longer service request/response streams. */
+ bool has_switched_protocols : 1;
+
+ /* Server-only. Request-handler streams can only be created while this is true. */
+ bool can_create_request_handler_stream : 1;
+
+ /* see `outgoing_stream_task` */
+ bool is_outgoing_stream_task_active : 1;
+
+ bool is_processing_read_messages : 1;
+ } thread_data;
+
+ /* Any thread may touch this data, but the lock must be held */
+ struct {
+ struct aws_mutex lock;
+
+ /* New client streams that have not been moved to `stream_list` yet.
+ * This list is not used on servers. */
+ struct aws_linked_list new_client_stream_list;
+
+ /* If non-zero, then window_update_task is scheduled */
+ size_t window_update_size;
+
+ /* If non-zero, reason to immediately reject new streams. (ex: closing) */
+ int new_stream_error_code;
+
+ /* See `cross_thread_work_task` */
+ bool is_cross_thread_work_task_scheduled : 1;
+
+ /* For checking status from outside the event-loop thread. */
+ bool is_open : 1;
+
+ } synced_data;
+};
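+
+/*
+ * A sketch of the cross-thread scheduling sequence described on `cross_thread_work_task` above
+ * (a hypothetical helper; aws_mutex_lock/unlock and aws_channel_schedule_task_now come from
+ * aws-c-common and aws-c-io):
+ *
+ *     bool should_schedule = false;
+ *     aws_mutex_lock(&connection->synced_data.lock);
+ *     if (!connection->synced_data.is_cross_thread_work_task_scheduled) {
+ *         connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ *         should_schedule = true;
+ *     }
+ *     aws_mutex_unlock(&connection->synced_data.lock);
+ *     if (should_schedule) {
+ *         aws_channel_schedule_task_now(
+ *             connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ *     }
+ */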
+
+/* Allow tests to check current window stats */
+struct aws_h1_window_stats {
+ size_t connection_window;
+ size_t recent_window_increments; /* Resets to 0 each time window stats are queried */
+ size_t buffer_capacity;
+ size_t buffer_pending_bytes;
+ uint64_t stream_window;
+ bool has_incoming_stream;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* The functions below are exported so they can be accessed from tests. */
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http1_1_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options);
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http1_1_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options);
+
+/* Allow tests to check current window stats */
+AWS_HTTP_API
+struct aws_h1_window_stats aws_h1_connection_window_stats(struct aws_http_connection *connection_base);
+
+AWS_EXTERN_C_END
+
+/* DO NOT export functions below. They're only used by other .c files in this library */
+
+/* TODO: introduce naming conventions for private header functions */
+
+void aws_h1_connection_lock_synced_data(struct aws_h1_connection *connection);
+void aws_h1_connection_unlock_synced_data(struct aws_h1_connection *connection);
+
+/**
+ * Try to kick off the outgoing-stream-task.
+ * If task is already active, nothing happens.
+ * If there's nothing to do, the task will immediately stop itself.
+ * Call this whenever the user provides new outgoing data (ex: new stream, new chunk).
+ * MUST be called from the connection's event-loop thread.
+ */
+void aws_h1_connection_try_write_outgoing_stream(struct aws_h1_connection *connection);
+
+/**
+ * If any read messages are queued, and the downstream window is non-zero,
+ * process data and send it downstream. Then calculate the connection's
+ * desired window size and increment it if necessary.
+ *
+ * During normal operations "downstream" means the current incoming stream.
+ * If the connection has switched protocols "downstream" means the next
+ * channel handler in the read direction.
+ */
+void aws_h1_connection_try_process_read_messages(struct aws_h1_connection *connection);
+
+#endif /* AWS_HTTP_H1_CONNECTION_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h
new file mode 100644
index 0000000000..eaf8956cdd
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_decoder.h
@@ -0,0 +1,90 @@
+#ifndef AWS_HTTP_H1_DECODER_H
+#define AWS_HTTP_H1_DECODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/request_response_impl.h>
+
+struct aws_h1_decoded_header {
+ /* Name of the header. If the type is `AWS_HTTP_HEADER_NAME_UNKNOWN` then `name_data` must be parsed manually. */
+ enum aws_http_header_name name;
+
+ /* Raw buffer storing the header's name. */
+ struct aws_byte_cursor name_data;
+
+ /* Raw buffer storing the header's value. */
+ struct aws_byte_cursor value_data;
+
+ /* Raw buffer storing the entire header. */
+ struct aws_byte_cursor data;
+};
+
+struct aws_h1_decoder_vtable {
+ /**
+ * Called from `aws_h1_decode` when an http header has been received.
+ * All pointers are strictly *read only*; any data that needs to persist must be copied out into user-owned memory.
+ */
+ int (*on_header)(const struct aws_h1_decoded_header *header, void *user_data);
+
+ /**
+ * Called from `aws_h1_decode` when a portion of the http body has been received.
+ * `finished` is true if this is the last section of the http body, and false if more body data is yet to be
+ * received. All pointers are strictly *read only*; any data that needs to persist must be copied out into
+ * user-owned memory.
+ */
+ int (*on_body)(const struct aws_byte_cursor *data, bool finished, void *user_data);
+
+ /* Only needed for requests, can be NULL for responses. */
+ int (*on_request)(
+ enum aws_http_method method_enum,
+ const struct aws_byte_cursor *method_str,
+ const struct aws_byte_cursor *uri,
+ void *user_data);
+
+ /* Only needed for responses, can be NULL for requests. */
+ int (*on_response)(int status_code, void *user_data);
+
+ int (*on_done)(void *user_data);
+};
+
+/**
+ * Structure used to initialize an `aws_h1_decoder`.
+ */
+struct aws_h1_decoder_params {
+ struct aws_allocator *alloc;
+ size_t scratch_space_initial_size;
+ /* Set false if decoding responses */
+ bool is_decoding_requests;
+ void *user_data;
+ struct aws_h1_decoder_vtable vtable;
+};
+
+struct aws_h1_decoder;
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_params *params);
+AWS_HTTP_API void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder);
+AWS_HTTP_API int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data);
+
+AWS_HTTP_API void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id);
+AWS_HTTP_API void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool body_headers_ignored);
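+
+/*
+ * A minimal response-decoding sketch (assumes hypothetical s_on_header / s_on_body / s_on_response /
+ * s_on_done callbacks matching the vtable above, plus an `input` byte cursor of raw bytes):
+ *
+ *     struct aws_h1_decoder_params params = {
+ *         .alloc = allocator,
+ *         .scratch_space_initial_size = 256,
+ *         .is_decoding_requests = false, // decoding responses
+ *         .user_data = my_context,
+ *         .vtable = {.on_header = s_on_header, .on_body = s_on_body, .on_response = s_on_response, .on_done = s_on_done},
+ *     };
+ *     struct aws_h1_decoder *decoder = aws_h1_decoder_new(&params);
+ *     if (aws_h1_decode(decoder, &input)) {
+ *         // AWS_OP_ERR: inspect aws_last_error()
+ *     }
+ *     aws_h1_decoder_destroy(decoder);
+ */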
+
+/* RFC-7230 section 4.2 Message Format */
+#define AWS_HTTP_TRANSFER_ENCODING_CHUNKED (1 << 0)
+#define AWS_HTTP_TRANSFER_ENCODING_GZIP (1 << 1)
+#define AWS_HTTP_TRANSFER_ENCODING_DEFLATE (1 << 2)
+#define AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS (1 << 3)
+AWS_HTTP_API int aws_h1_decoder_get_encoding_flags(const struct aws_h1_decoder *decoder);
+
+AWS_HTTP_API uint64_t aws_h1_decoder_get_content_length(const struct aws_h1_decoder *decoder);
+AWS_HTTP_API bool aws_h1_decoder_get_body_headers_ignored(const struct aws_h1_decoder *decoder);
+AWS_HTTP_API enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_decoder *decoder);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H1_DECODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h
new file mode 100644
index 0000000000..11b4965c0d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_encoder.h
@@ -0,0 +1,140 @@
+#ifndef AWS_HTTP_H1_ENCODER_H
+#define AWS_HTTP_H1_ENCODER_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/request_response_impl.h>
+
+struct aws_h1_chunk {
+ struct aws_allocator *allocator;
+ struct aws_input_stream *data;
+ uint64_t data_size;
+ aws_http1_stream_write_chunk_complete_fn *on_complete;
+ void *user_data;
+ struct aws_linked_list_node node;
+ /* Buffer containing pre-encoded start line: chunk-size [chunk-ext] CRLF */
+ struct aws_byte_buf chunk_line;
+};
+
+struct aws_h1_trailer {
+ struct aws_allocator *allocator;
+ struct aws_byte_buf trailer_data;
+};
+
+/**
+ * Message to be submitted to encoder.
+ * Contains data necessary for encoder to write an outgoing request or response.
+ */
+struct aws_h1_encoder_message {
+ /* Upon creation, the "head" (everything preceding body) is buffered here. */
+ struct aws_byte_buf outgoing_head_buf;
+ /* Single stream used for unchunked body */
+ struct aws_input_stream *body;
+
+ /* Pointer to list of `struct aws_h1_chunk`, used for chunked encoding.
+ * List is owned by aws_h1_stream.
+ * Encoder completes/frees/pops front chunk when it's done sending.
+ * If list goes empty, encoder waits for more chunks to arrive.
+ * A chunk with data_size=0 means "final chunk" */
+ struct aws_linked_list *pending_chunk_list;
+
+ /* Pointer to chunked_trailer, used for chunked_trailer. */
+ struct aws_h1_trailer *trailer;
+
+ /* If non-zero, length of unchunked body to send */
+ uint64_t content_length;
+ bool has_connection_close_header;
+ bool has_chunked_encoding_header;
+};
+
+enum aws_h1_encoder_state {
+ AWS_H1_ENCODER_STATE_INIT,
+ AWS_H1_ENCODER_STATE_HEAD,
+ AWS_H1_ENCODER_STATE_UNCHUNKED_BODY,
+ AWS_H1_ENCODER_STATE_CHUNK_NEXT,
+ AWS_H1_ENCODER_STATE_CHUNK_LINE,
+ AWS_H1_ENCODER_STATE_CHUNK_BODY,
+ AWS_H1_ENCODER_STATE_CHUNK_END,
+ AWS_H1_ENCODER_STATE_CHUNK_TRAILER,
+ AWS_H1_ENCODER_STATE_DONE,
+};
+
+struct aws_h1_encoder {
+ struct aws_allocator *allocator;
+
+ enum aws_h1_encoder_state state;
+ /* Current message being encoded */
+ struct aws_h1_encoder_message *message;
+ /* Used by some states to track progress. Reset to 0 whenever state changes */
+ uint64_t progress_bytes;
+ /* Current chunk */
+ struct aws_h1_chunk *current_chunk;
+ /* Number of chunks sent, just used for logging */
+ size_t chunk_count;
+ /* Encoder logs with this stream ptr as the ID, and passes this ptr to the chunk_complete callback */
+ struct aws_http_stream *current_stream;
+};
+
+struct aws_h1_chunk *aws_h1_chunk_new(struct aws_allocator *allocator, const struct aws_http1_chunk_options *options);
+struct aws_h1_trailer *aws_h1_trailer_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_headers *trailing_headers);
+
+void aws_h1_trailer_destroy(struct aws_h1_trailer *trailer);
+
+/* Just destroy the chunk (don't fire callback) */
+void aws_h1_chunk_destroy(struct aws_h1_chunk *chunk);
+
+/* Destroy chunk and fire its completion callback */
+void aws_h1_chunk_complete_and_destroy(struct aws_h1_chunk *chunk, struct aws_http_stream *http_stream, int error_code);
+
+int aws_chunk_line_from_options(struct aws_http1_chunk_options *options, struct aws_byte_buf *chunk_line);
+
+AWS_EXTERN_C_BEGIN
+
+/* Validate request and cache any info the encoder will need later in the "encoder message". */
+AWS_HTTP_API
+int aws_h1_encoder_message_init_from_request(
+ struct aws_h1_encoder_message *message,
+ struct aws_allocator *allocator,
+ const struct aws_http_message *request,
+ struct aws_linked_list *pending_chunk_list);
+
+int aws_h1_encoder_message_init_from_response(
+ struct aws_h1_encoder_message *message,
+ struct aws_allocator *allocator,
+ const struct aws_http_message *response,
+ bool body_headers_ignored,
+ struct aws_linked_list *pending_chunk_list);
+
+AWS_HTTP_API
+void aws_h1_encoder_message_clean_up(struct aws_h1_encoder_message *message);
+
+AWS_HTTP_API
+void aws_h1_encoder_init(struct aws_h1_encoder *encoder, struct aws_allocator *allocator);
+
+AWS_HTTP_API
+void aws_h1_encoder_clean_up(struct aws_h1_encoder *encoder);
+
+AWS_HTTP_API
+int aws_h1_encoder_start_message(
+ struct aws_h1_encoder *encoder,
+ struct aws_h1_encoder_message *message,
+ struct aws_http_stream *stream);
+
+AWS_HTTP_API
+int aws_h1_encoder_process(struct aws_h1_encoder *encoder, struct aws_byte_buf *out_buf);
+
+AWS_HTTP_API
+bool aws_h1_encoder_is_message_in_progress(const struct aws_h1_encoder *encoder);
+
+/* Return true if the encoder is stuck waiting for more chunks to be added to the current message */
+AWS_HTTP_API
+bool aws_h1_encoder_is_waiting_for_chunks(const struct aws_h1_encoder *encoder);
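+
+/*
+ * A minimal encoding sketch for an unchunked request (assumes an existing `request` message,
+ * a `stream`, and an `out_buf` byte buffer that is sent and reset between iterations):
+ *
+ *     struct aws_linked_list pending_chunk_list;
+ *     aws_linked_list_init(&pending_chunk_list);
+ *
+ *     struct aws_h1_encoder encoder;
+ *     aws_h1_encoder_init(&encoder, allocator);
+ *
+ *     struct aws_h1_encoder_message message;
+ *     aws_h1_encoder_message_init_from_request(&message, allocator, request, &pending_chunk_list);
+ *
+ *     aws_h1_encoder_start_message(&encoder, &message, stream);
+ *     while (aws_h1_encoder_is_message_in_progress(&encoder)) {
+ *         aws_h1_encoder_process(&encoder, &out_buf);
+ *         // ... send out_buf up the channel, then reset it ...
+ *     }
+ *
+ *     aws_h1_encoder_message_clean_up(&message);
+ *     aws_h1_encoder_clean_up(&encoder);
+ */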
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H1_ENCODER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h
new file mode 100644
index 0000000000..df1446ec9b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h1_stream.h
@@ -0,0 +1,123 @@
+#ifndef AWS_HTTP_H1_STREAM_H
+#define AWS_HTTP_H1_STREAM_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h1_encoder.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/io/channel.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4214) /* nonstandard extension used: bit field types other than int */
+#endif
+
+/* Simple view of stream's state.
+ * Used to determine whether it's safe for a user to call functions that alter state. */
+enum aws_h1_stream_api_state {
+ AWS_H1_STREAM_API_STATE_INIT,
+ AWS_H1_STREAM_API_STATE_ACTIVE,
+ AWS_H1_STREAM_API_STATE_COMPLETE,
+};
+
+struct aws_h1_stream {
+ struct aws_http_stream base;
+
+ struct aws_linked_list_node node;
+
+ /* Task that removes items from `synced_data` and does their on-thread work.
+ * Runs once and waits until it's scheduled again.
+ * Any function that wants to schedule this task MUST:
+ * - acquire the synced_data.lock
+ * - check whether `synced_data.is_cross_thread_work_task_scheduled` was true or false.
+ * - set `synced_data.is_cross_thread_work_task_scheduled = true`
+ * - release synced_data.lock
+ * - ONLY IF `synced_data.is_cross_thread_work_task_scheduled` CHANGED from false to true:
+ * - increment the stream's refcount, to keep stream alive until task runs
+ * - schedule the task
+ */
+ struct aws_channel_task cross_thread_work_task;
+
+ /* Message (derived from outgoing request or response) to be submitted to encoder */
+ struct aws_h1_encoder_message encoder_message;
+
+ bool is_outgoing_message_done;
+
+ bool is_incoming_message_done;
+ bool is_incoming_head_done;
+
+ /* If true, this is the last stream the connection should process.
+ * See RFC-7230 Section 6: Connection Management. */
+ bool is_final_stream;
+
+ /* Buffer for incoming data that needs to stick around. */
+ struct aws_byte_buf incoming_storage_buf;
+
+ struct {
+ /* TODO: move most other members in here */
+
+ /* List of `struct aws_h1_chunk`, used for chunked encoding.
+ * Encoder completes/frees/pops front chunk when it's done sending. */
+ struct aws_linked_list pending_chunk_list;
+
+ struct aws_h1_encoder_message message;
+
+ /* Size of stream's flow-control window.
+ * Only body data (not headers, etc) counts against the stream's flow-control window. */
+ uint64_t stream_window;
+
+ /* Whether a "request handler" stream has a response to send.
+ * Has mirror variable in synced_data */
+ bool has_outgoing_response : 1;
+ } thread_data;
+
+ /* Any thread may touch this data, but the connection's lock must be held.
+ * Sharing a lock is fine because it's rare for an HTTP/1 connection
+ * to have more than one stream at a time. */
+ struct {
+ /* List of `struct aws_h1_chunk` which have been submitted by user,
+ * but haven't yet moved to encoder_message.pending_chunk_list where the encoder will find them. */
+ struct aws_linked_list pending_chunk_list;
+
+ /* trailing headers which have been submitted by user,
+ * but haven't yet moved to encoder_message where the encoder will find them. */
+ struct aws_h1_trailer *pending_trailer;
+
+ enum aws_h1_stream_api_state api_state;
+
+ /* Sum of all aws_http_stream_update_window() calls that haven't yet moved to thread_data.stream_window */
+ uint64_t pending_window_update;
+
+ /* See `cross_thread_work_task` */
+ bool is_cross_thread_work_task_scheduled : 1;
+
+ /* Whether a "request handler" stream has a response to send.
+ * Has mirror variable in thread_data */
+ bool has_outgoing_response : 1;
+
+ /* Whether the outgoing message is using chunked encoding */
+ bool using_chunked_encoding : 1;
+
+ /* Whether the final 0 length chunk has already been sent */
+ bool has_final_chunk : 1;
+
+ /* Whether the chunked trailer has already been sent */
+ bool has_added_trailer : 1;
+ } synced_data;
+};
+
+/* DO NOT export functions below. They're only used by other .c files in this library */
+
+struct aws_h1_stream *aws_h1_stream_new_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options);
+
+int aws_h1_stream_activate(struct aws_http_stream *stream);
+
+int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response);
+
+#endif /* AWS_HTTP_H1_STREAM_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h
new file mode 100644
index 0000000000..6d42b83160
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_connection.h
@@ -0,0 +1,289 @@
+#ifndef AWS_HTTP_H2_CONNECTION_H
+#define AWS_HTTP_H2_CONNECTION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/atomics.h>
+#include <aws/common/fifo_cache.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/h2_frames.h>
+#include <aws/http/statistics.h>
+
+struct aws_h2_decoder;
+struct aws_h2_stream;
+
+struct aws_h2_connection {
+ struct aws_http_connection base;
+
+ aws_http2_on_goaway_received_fn *on_goaway_received;
+ aws_http2_on_remote_settings_change_fn *on_remote_settings_change;
+
+ struct aws_channel_task cross_thread_work_task;
+ struct aws_channel_task outgoing_frames_task;
+
+ bool conn_manual_window_management;
+
+ /* Only the event-loop thread may touch this data */
+ struct {
+ struct aws_h2_decoder *decoder;
+ struct aws_h2_frame_encoder encoder;
+
+ /* True when reading/writing has stopped, whether due to errors or normal channel shutdown. */
+ bool is_reading_stopped;
+ bool is_writing_stopped;
+
+ bool is_outgoing_frames_task_active;
+
+ /* Settings received from peer, which restrict the messages we may send */
+ uint32_t settings_peer[AWS_HTTP2_SETTINGS_END_RANGE];
+ /* Local settings to send/sent to peer, which affect the decoding */
+ uint32_t settings_self[AWS_HTTP2_SETTINGS_END_RANGE];
+
+ /* List using aws_h2_pending_settings.node
+ * Contains settings waiting to be ACKed by peer and applied */
+ struct aws_linked_list pending_settings_queue;
+
+ /* List using aws_h2_pending_ping.node
+ * Pings waiting to be ACKed by peer */
+ struct aws_linked_list pending_ping_queue;
+
+ /* Most recent stream-id that was initiated by peer */
+ uint32_t latest_peer_initiated_stream_id;
+
+ /* Maps stream-id to aws_h2_stream*.
+ * Contains all streams in the open, reserved, and half-closed states (terms from RFC-7540 5.1).
+ * Once a stream enters closed state, it is removed from this map. */
+ struct aws_hash_table active_streams_map;
+
+ /* List using aws_h2_stream.node.
+ * Contains all streams with DATA frames to send.
+ * Any stream in this list is also in the active_streams_map. */
+ struct aws_linked_list outgoing_streams_list;
+
+ /* List using aws_h2_stream.node.
+ * Contains all streams with DATA frames to send, but which cannot send now due to flow control.
+ * Waiting for WINDOW_UPDATE to set them free */
+ struct aws_linked_list stalled_window_streams_list;
+
+ /* List using aws_h2_stream.node.
+ * Contains all streams that are open, but are only sending data when notified, rather than polling
+ * for it (e.g. event streams)
+ * Streams are moved to the outgoing_streams_list until they send pending data, then are moved back
+ * to this list to sleep until more data comes in
+ */
+ struct aws_linked_list waiting_streams_list;
+
+ /* List using aws_h2_frame.node.
+ * Queues all frames (except DATA frames) for connection to send.
+ * When queue is empty, then we send DATA frames from the outgoing_streams_list */
+ struct aws_linked_list outgoing_frames_queue;
+
+ /* FIFO cache for closed stream, key: stream-id, value: aws_h2_stream_closed_when.
+ * Contains data about streams that were recently closed.
+ * The oldest entry will be removed if the cache is full */
+ struct aws_cache *closed_streams;
+
+ /* Flow-control window of the connection granted by the peer, indicating the buffer capacity of our peer.
+ * Reduced after sending a flow-controlled frame. Incremented after receiving WINDOW_UPDATE for the
+ * connection */
+ size_t window_size_peer;
+
+ /* Flow-control window of the connection for this side.
+ * Reduced after receiving a flow-controlled frame. Incremented after sending WINDOW_UPDATE for the
+ * connection */
+ size_t window_size_self;
+
+ /* Highest self-initiated stream-id that peer might have processed.
+ * Defaults to max stream-id, may be lowered when GOAWAY frame received. */
+ uint32_t goaway_received_last_stream_id;
+
+ /* Last-stream-id sent in most recent GOAWAY frame. Defaults to max stream-id. */
+ uint32_t goaway_sent_last_stream_id;
+
+ /* Frame we are encoding now. NULL if we are not encoding anything. */
+ struct aws_h2_frame *current_outgoing_frame;
+
+ /* Pointer to initial pending settings. If ACKed by peer, it will be NULL. */
+ struct aws_h2_pending_settings *init_pending_settings;
+
+ /* Cached channel shutdown values.
+ * If possible, we delay shutdown-in-the-write-dir until GOAWAY is written. */
+ int channel_shutdown_error_code;
+ bool channel_shutdown_immediately;
+ bool channel_shutdown_waiting_for_goaway_to_be_written;
+
+ /* TODO: Consider adding stream monitor */
+ struct aws_crt_statistics_http2_channel stats;
+
+ /* Timestamp when connection has data to send, which is when there is an active stream with body to send */
+ uint64_t outgoing_timestamp_ns;
+ /* Timestamp when connection has data to receive, which is when there is an active stream */
+ uint64_t incoming_timestamp_ns;
+ } thread_data;
+
+ /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+ struct {
+ struct aws_mutex lock;
+
+ /* New `aws_h2_stream *` that haven't moved to `thread_data` yet */
+ struct aws_linked_list pending_stream_list;
+
+ /* New `aws_h2_frames *`, connection control frames created by user that haven't moved to `thread_data` yet */
+ struct aws_linked_list pending_frame_list;
+
+ /* New `aws_h2_pending_settings *` created by user that haven't moved to `thread_data` yet */
+ struct aws_linked_list pending_settings_list;
+
+ /* New `aws_h2_pending_ping *` created by user that haven't moved to `thread_data` yet */
+ struct aws_linked_list pending_ping_list;
+
+ /* New `aws_h2_pending_goaway *` created by user that haven't sent yet */
+ struct aws_linked_list pending_goaway_list;
+
+ bool is_cross_thread_work_task_scheduled;
+
+ /* The window_update value for `thread_data.window_size_self` that hasn't been applied yet */
+ size_t window_update_size;
+
+ /* For checking status from outside the event-loop thread. */
+ bool is_open;
+
+ /* If non-zero, reason to immediately reject new streams. (ex: closing) */
+ int new_stream_error_code;
+
+ /* Last-stream-id sent in most recent GOAWAY frame. Defaults to AWS_H2_STREAM_ID_MAX + 1, which indicates that no
+ * GOAWAY has been sent so far. */
+ uint32_t goaway_sent_last_stream_id;
+ /* aws_http2_error_code sent in most recent GOAWAY frame. Defaults to 0; check goaway_sent_last_stream_id to see
+ * whether any GOAWAY has been sent */
+ uint32_t goaway_sent_http2_error_code;
+
+ /* Last-stream-id received in most recent GOAWAY frame. Defaults to AWS_H2_STREAM_ID_MAX + 1, which indicates that
+ * no GOAWAY has been received so far. */
+ uint32_t goaway_received_last_stream_id;
+ /* aws_http2_error_code received in most recent GOAWAY frame. Defaults to 0; check
+ * goaway_received_last_stream_id to see whether any GOAWAY has been received */
+ uint32_t goaway_received_http2_error_code;
+
+ /* For checking settings received from peer from outside the event-loop thread. */
+ uint32_t settings_peer[AWS_HTTP2_SETTINGS_END_RANGE];
+ /* For checking local settings to send/sent to peer from outside the event-loop thread. */
+ uint32_t settings_self[AWS_HTTP2_SETTINGS_END_RANGE];
+ } synced_data;
+};
+
+struct aws_h2_pending_settings {
+ struct aws_http2_setting *settings_array;
+ size_t num_settings;
+ struct aws_linked_list_node node;
+ /* user callback */
+ void *user_data;
+ aws_http2_on_change_settings_complete_fn *on_completed;
+};
+
+struct aws_h2_pending_ping {
+ uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE];
+ /* For calculating round-trip time */
+ uint64_t started_time;
+ struct aws_linked_list_node node;
+ /* user callback */
+ void *user_data;
+ aws_http2_on_ping_complete_fn *on_completed;
+};
+
+struct aws_h2_pending_goaway {
+ bool allow_more_streams;
+ uint32_t http2_error;
+ struct aws_byte_cursor debug_data;
+ struct aws_linked_list_node node;
+};
+
+/**
+ * The action which caused the stream to close.
+ */
+enum aws_h2_stream_closed_when {
+ AWS_H2_STREAM_CLOSED_UNKNOWN,
+ AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM,
+ AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED,
+ AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT,
+};
+
+enum aws_h2_data_encode_status {
+ AWS_H2_DATA_ENCODE_COMPLETE,
+ AWS_H2_DATA_ENCODE_ONGOING,
+ AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED, /* stalled reading from body stream */
+ AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES, /* waiting for next manual write */
+ AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED, /* stalled due to reduced window size */
+};
+
+/* When window size is too small to fit the possible padding into it, we stop sending data and wait for WINDOW_UPDATE */
+#define AWS_H2_MIN_WINDOW_SIZE (256)
+
+/* Private functions called from tests... */
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http2_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options);
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_connection_new_http2_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options);
+
+AWS_EXTERN_C_END
+
+/* Private functions called from multiple .c files... */
+
+/**
+ * Enqueue outgoing frame.
+ * Connection takes ownership of frame.
+ * Frames are sent in FIFO order.
+ * Do not enqueue DATA frames; these are sent by other means when the frame queue is empty.
+ */
+void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame);
+
+/**
+ * Invoked immediately after a stream enters the CLOSED state.
+ * The connection will remove the stream from its "active" datastructures,
+ * guaranteeing that no further decoder callbacks are invoked on the stream.
+ *
+ * This should NOT be invoked in the case of a "Connection Error",
+ * though a "Stream Error", in which a RST_STREAM is sent and the stream
+ * is closed early, would invoke this.
+ */
+int aws_h2_connection_on_stream_closed(
+ struct aws_h2_connection *connection,
+ struct aws_h2_stream *stream,
+ enum aws_h2_stream_closed_when closed_when,
+ int aws_error_code);
+
+/**
+ * Send RST_STREAM and close a stream reserved via PUSH_PROMISE.
+ */
+int aws_h2_connection_send_rst_and_close_reserved_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ uint32_t h2_error_code);
+
+/**
+ * An error happened while writing into the channel; shut down the connection. Only called within the event-loop thread
+ */
+void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *connection, int error_code);
+
+/**
+ * Try to write outgoing frames; if the outgoing-frames task isn't scheduled, run it immediately.
+ */
+void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection);
+
+#endif /* AWS_HTTP_H2_CONNECTION_H */
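For context on the frame-queue contract documented above (the connection takes ownership, frames go out in FIFO order, DATA frames are excluded), the following is a minimal sketch of enqueueing a connection-level PING frame. The helper name is hypothetical, and obtaining the `struct aws_h2_connection *` and being on its event-loop thread are assumptions; the constructor and queue functions are the ones declared in this header and in h2_frames.h.

    #include <aws/http/private/h2_connection.h>
    #include <aws/http/private/h2_frames.h>

    /* Hypothetical helper: assumes `connection` is valid and we are on its event-loop thread. */
    static void s_example_enqueue_ping(struct aws_h2_connection *connection, struct aws_allocator *alloc) {
        uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0};

        /* Build a connection-level control frame; the connection takes ownership on enqueue. */
        struct aws_h2_frame *ping = aws_h2_frame_new_ping(alloc, false /* ack */, opaque_data);
        if (ping == NULL) {
            return; /* aws_last_error() holds the reason */
        }

        /* Control frames only: DATA frames are sent by other means when this queue is empty. */
        aws_h2_connection_enqueue_outgoing_frame(connection, ping);
        aws_h2_try_write_outgoing_frames(connection);
    }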
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h
new file mode 100644
index 0000000000..bd8a7199a1
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_decoder.h
@@ -0,0 +1,121 @@
+#ifndef AWS_HTTP_H2_DECODER_H
+#define AWS_HTTP_H2_DECODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_frames.h>
+#include <aws/http/private/http_impl.h>
+
+/* Decoder design goals:
+ * - Minimize state tracking and verification required by user.
+ * For example, we have _begin()/_i()/_end() callbacks when something happens N times.
+ * The _begin() and _end() callbacks tell the user when to transition states.
+ * Without them, the user would have to notice on their own that X has ended and Y has begun,
+ * and manage that transition manually.
+
+ * - A callback should result in 1 distinct action.
+ * For example, we have distinct callbacks for `on_ping()` and `on_ping_ack()`.
+ * We COULD have had just one `on_ping(bool ack)` callback, but since the user must
+ * take two completely different actions based on the ACK, we opted for two callbacks.
+ */
+
+/* Return a failed aws_h2err from any callback to stop the decoder and cause a Connection Error */
+struct aws_h2_decoder_vtable {
+ /* For HEADERS header-block: _begin() is called, then 0+ _i() calls, then _end().
+ * No other decoder callbacks will occur in this time.
+ * If something is malformed, no further _i() calls occur, and it is reported in _end() */
+ struct aws_h2err (*on_headers_begin)(uint32_t stream_id, void *userdata);
+ struct aws_h2err (*on_headers_i)(
+ uint32_t stream_id,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type,
+ void *userdata);
+ struct aws_h2err (
+ *on_headers_end)(uint32_t stream_id, bool malformed, enum aws_http_header_block block_type, void *userdata);
+
+ /* For PUSH_PROMISE header-block: _begin() is called, then 0+ _i() calls, then _end().
+ * No other decoder callbacks will occur in this time.
+ * If something is malformed, no further _i() calls occur, and it is reported in _end() */
+ struct aws_h2err (*on_push_promise_begin)(uint32_t stream_id, uint32_t promised_stream_id, void *userdata);
+ struct aws_h2err (*on_push_promise_i)(
+ uint32_t stream_id,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ void *userdata);
+ struct aws_h2err (*on_push_promise_end)(uint32_t stream_id, bool malformed, void *userdata);
+
+ /* For DATA frame: _begin() is called, then 0+ _i() calls, then _end().
+ * No other decoder callbacks will occur in this time */
+ struct aws_h2err (*on_data_begin)(
+ uint32_t stream_id,
+ uint32_t payload_len, /* Whole payload length including padding and padding length */
+ uint32_t total_padding_bytes, /* The length of padding and the byte for padding length */
+ bool end_stream,
+ void *userdata);
+ struct aws_h2err (*on_data_i)(uint32_t stream_id, struct aws_byte_cursor data, void *userdata);
+ struct aws_h2err (*on_data_end)(uint32_t stream_id, void *userdata);
+
+ /* Called at end of DATA frame containing the END_STREAM flag.
+ * OR called at end of header-block which began with HEADERS frame containing the END_STREAM flag */
+ struct aws_h2err (*on_end_stream)(uint32_t stream_id, void *userdata);
+
+ /* Called once for RST_STREAM frame */
+ struct aws_h2err (*on_rst_stream)(uint32_t stream_id, uint32_t error_code, void *userdata);
+
+ /* Called once for PING frame with ACK flag set */
+ struct aws_h2err (*on_ping_ack)(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+
+ /* Called once for PING frame (no ACK flag set) */
+ struct aws_h2err (*on_ping)(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+
+ /* Called once for SETTINGS frame with ACK flag */
+ struct aws_h2err (*on_settings_ack)(void *userdata);
+
+ /* Called once for SETTINGS frame, without ACK flag */
+ struct aws_h2err (
+ *on_settings)(const struct aws_http2_setting *settings_array, size_t num_settings, void *userdata);
+
+ /* Called once for GOAWAY frame */
+ struct aws_h2err (
+ *on_goaway)(uint32_t last_stream, uint32_t error_code, struct aws_byte_cursor debug_data, void *userdata);
+
+ /* Called once for WINDOW_UPDATE frame */
+ struct aws_h2err (*on_window_update)(uint32_t stream_id, uint32_t window_size_increment, void *userdata);
+};
+
+/**
+ * Structure used to initialize an `aws_h2_decoder`.
+ */
+struct aws_h2_decoder_params {
+ struct aws_allocator *alloc;
+ const struct aws_h2_decoder_vtable *vtable;
+ void *userdata;
+ const void *logging_id;
+ bool is_server;
+
+ /* If true, do not expect the connection preface and immediately accept any frame type.
+ * Only set this when testing the decoder itself */
+ bool skip_connection_preface;
+};
+
+struct aws_h2_decoder;
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API struct aws_h2_decoder *aws_h2_decoder_new(struct aws_h2_decoder_params *params);
+AWS_HTTP_API void aws_h2_decoder_destroy(struct aws_h2_decoder *decoder);
+
+/* If failed aws_h2err returned, it is a Connection Error */
+AWS_HTTP_API struct aws_h2err aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data);
+
+AWS_HTTP_API void aws_h2_decoder_set_setting_header_table_size(struct aws_h2_decoder *decoder, uint32_t data);
+AWS_HTTP_API void aws_h2_decoder_set_setting_enable_push(struct aws_h2_decoder *decoder, uint32_t data);
+AWS_HTTP_API void aws_h2_decoder_set_setting_max_frame_size(struct aws_h2_decoder *decoder, uint32_t data);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H2_DECODER_H */
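A minimal sketch of wiring up the decoder declared above. It is illustrative only: it installs a single callback and leaves the rest of the vtable NULL (whether that is tolerated by the real decoder is an assumption), and it sets skip_connection_preface as the comment above suggests for decoder-only testing.

    #include <aws/http/private/h2_decoder.h>

    static struct aws_h2err s_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) {
        (void)opaque_data;
        (void)userdata;
        return AWS_H2ERR_SUCCESS; /* returning a failed aws_h2err would stop the decoder */
    }

    static int s_example_decode(struct aws_allocator *alloc, struct aws_byte_cursor frames) {
        static const struct aws_h2_decoder_vtable s_vtable = {
            .on_ping = s_on_ping,
            /* remaining callbacks left NULL for brevity (an assumption, not a documented default) */
        };

        struct aws_h2_decoder_params params = {
            .alloc = alloc,
            .vtable = &s_vtable,
            .userdata = NULL,
            .is_server = false,
            .skip_connection_preface = true, /* per the comment above, only when testing the decoder itself */
        };

        struct aws_h2_decoder *decoder = aws_h2_decoder_new(&params);
        if (decoder == NULL) {
            return AWS_OP_ERR;
        }

        struct aws_h2err err = aws_h2_decode(decoder, &frames); /* a failed aws_h2err is a Connection Error */
        aws_h2_decoder_destroy(decoder);
        return aws_h2err_failed(err) ? AWS_OP_ERR : AWS_OP_SUCCESS;
    }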
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h
new file mode 100644
index 0000000000..23c1daf1ec
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_frames.h
@@ -0,0 +1,299 @@
+#ifndef AWS_HTTP_H2_FRAMES_H
+#define AWS_HTTP_H2_FRAMES_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection.h>
+#include <aws/http/private/hpack.h>
+#include <aws/http/request_response.h>
+
+#include <aws/common/byte_buf.h>
+
+/* Ids for each frame type (RFC-7540 6) */
+enum aws_h2_frame_type {
+ AWS_H2_FRAME_T_DATA = 0x00,
+ AWS_H2_FRAME_T_HEADERS = 0x01,
+ AWS_H2_FRAME_T_PRIORITY = 0x02,
+ AWS_H2_FRAME_T_RST_STREAM = 0x03,
+ AWS_H2_FRAME_T_SETTINGS = 0x04,
+ AWS_H2_FRAME_T_PUSH_PROMISE = 0x05,
+ AWS_H2_FRAME_T_PING = 0x06,
+ AWS_H2_FRAME_T_GOAWAY = 0x07,
+ AWS_H2_FRAME_T_WINDOW_UPDATE = 0x08,
+ AWS_H2_FRAME_T_CONTINUATION = 0x09,
+ AWS_H2_FRAME_T_UNKNOWN,
+ AWS_H2_FRAME_TYPE_COUNT,
+};
+
+/* Represents flags that may be set on a frame (RFC-7540 6) */
+enum aws_h2_frame_flag {
+ AWS_H2_FRAME_F_ACK = 0x01,
+ AWS_H2_FRAME_F_END_STREAM = 0x01,
+ AWS_H2_FRAME_F_END_HEADERS = 0x04,
+ AWS_H2_FRAME_F_PADDED = 0x08,
+ AWS_H2_FRAME_F_PRIORITY = 0x20,
+};
+
+/* Pairs the AWS_ERROR_* to report to our API user
+ * with the AWS_HTTP2_ERR_* that should
+ * be sent to the peer via RST_STREAM or GOAWAY.
+ *
+ * Used in place of normal error handling in functions that may result
+ * in an HTTP/2 Connection Error or Stream Error.
+ */
+struct aws_h2err {
+ enum aws_http2_error_code h2_code;
+ int aws_code;
+};
+
+#define AWS_H2ERR_SUCCESS \
+ (struct aws_h2err) { .h2_code = 0, .aws_code = 0 }
+
+#define AWS_H2_PAYLOAD_MAX (0x00FFFFFF) /* must fit in 3 bytes */
+#define AWS_H2_WINDOW_UPDATE_MAX (0x7FFFFFFF) /* cannot use high bit */
+#define AWS_H2_STREAM_ID_MAX (0x7FFFFFFF) /* cannot use high bit */
+#define AWS_H2_FRAME_PREFIX_SIZE (9)
+#define AWS_H2_INIT_WINDOW_SIZE (65535) /* Defined initial window size */
+
+/* Legal min(inclusive) and max(inclusive) for each setting */
+extern const uint32_t aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_END_RANGE][2];
+
+/* Initial values for settings RFC-7540 6.5.2 */
+AWS_HTTP_API
+extern const uint32_t aws_h2_settings_initial[AWS_HTTP2_SETTINGS_END_RANGE];
+
+/* This magic string must be the very first thing a client sends to the server.
+ * See RFC-7540 3.5 - HTTP/2 Connection Preface.
+ * Exported for tests */
+AWS_HTTP_API
+extern const struct aws_byte_cursor aws_h2_connection_preface_client_string;
+
+/**
+ * Present in all frames that may have set AWS_H2_FRAME_F_PRIORITY
+ *
+ * Encoded as:
+ * +-+-------------------------------------------------------------+
+ * |E| Stream Dependency (31) |
+ * +-+-------------+-----------------------------------------------+
+ * | Weight (8) |
+ * +-+-------------+
+ */
+struct aws_h2_frame_priority_settings {
+ uint32_t stream_dependency;
+ bool stream_dependency_exclusive;
+ uint8_t weight;
+};
+
+/**
+ * A frame to be encoded.
+ * (in the case of HEADERS and PUSH_PROMISE, it might turn into multiple frames due to CONTINUATION)
+ */
+struct aws_h2_frame {
+ const struct aws_h2_frame_vtable *vtable;
+ struct aws_allocator *alloc;
+ struct aws_linked_list_node node;
+ enum aws_h2_frame_type type;
+ uint32_t stream_id;
+
+ /* If true, frame will be sent before those with normal priority.
+ * Useful for frames like PING ACK where low latency is important. */
+ bool high_priority;
+};
+
+/* Used to encode a frame */
+struct aws_h2_frame_encoder {
+ struct aws_allocator *allocator;
+ const void *logging_id;
+ struct aws_hpack_encoder hpack;
+ struct aws_h2_frame *current_frame;
+
+ /* Settings for the frame encoder, which are based on the settings received from the peer */
+ struct {
+ /* the size of the largest frame payload */
+ uint32_t max_frame_size;
+ } settings;
+
+ bool has_errored;
+};
+
+typedef void aws_h2_frame_destroy_fn(struct aws_h2_frame *frame_base);
+typedef int aws_h2_frame_encode_fn(
+ struct aws_h2_frame *frame_base,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ bool *complete);
+
+struct aws_h2_frame_vtable {
+ aws_h2_frame_destroy_fn *destroy;
+ aws_h2_frame_encode_fn *encode;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type);
+
+AWS_HTTP_API
+const char *aws_http2_error_code_to_str(enum aws_http2_error_code h2_error_code);
+
+/**
+ * Specify which HTTP/2 error-code will be sent to the peer in a GOAWAY or RST_STREAM frame.
+ *
+ * The AWS_ERROR reported to the API user will be AWS_ERROR_HTTP_PROTOCOL_ERROR.
+ */
+AWS_HTTP_API
+struct aws_h2err aws_h2err_from_h2_code(enum aws_http2_error_code h2_error_code);
+
+/**
+ * Specify which AWS_ERROR will be reported to the API user.
+ *
+ * The peer will be sent a GOAWAY or RST_STREAM with the INTERNAL_ERROR HTTP/2 error-code.
+ */
+AWS_HTTP_API
+struct aws_h2err aws_h2err_from_aws_code(int aws_error_code);
+
+AWS_HTTP_API
+struct aws_h2err aws_h2err_from_last_error(void);
+
+AWS_HTTP_API
+bool aws_h2err_success(struct aws_h2err err);
+
+AWS_HTTP_API
+bool aws_h2err_failed(struct aws_h2err err);
+
+/* Raises AWS_ERROR_INVALID_ARGUMENT if stream_id is 0 or exceeds AWS_H2_STREAM_ID_MAX */
+AWS_HTTP_API
+int aws_h2_validate_stream_id(uint32_t stream_id);
+
+/**
+ * The process of encoding a frame looks like:
+ * 1. Create an encoder object on the stack and initialize it with aws_h2_frame_encoder_init
+ * 2. Encode the frame using aws_h2_encode_frame()
+ */
+AWS_HTTP_API
+int aws_h2_frame_encoder_init(
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_allocator *allocator,
+ const void *logging_id);
+
+AWS_HTTP_API
+void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder);
+
+/**
+ * Attempt to encode frame into output buffer.
+ * AWS_OP_ERR is returned if encoder encounters an unrecoverable error.
+ * frame_complete will be set true if the frame finished encoding.
+ *
+ * If frame_complete is false then we MUST call aws_h2_encode_frame() again
+ * with all the same inputs, when we have a fresh buffer (it would be illegal
+ * to encode a different frame).
+ */
+AWS_HTTP_API
+int aws_h2_encode_frame(
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_h2_frame *frame,
+ struct aws_byte_buf *output,
+ bool *frame_complete);
+
+/**
+ * Attempt to encode a DATA frame into the output buffer.
+ * The body_stream will be read into the available space (up to MAX_FRAME_SIZE).
+ * AWS_OP_ERR is returned if encoder encounters an unrecoverable error.
+ * body_complete will be set true if encoder reaches the end of the body_stream.
+ * body_stalled will be true if aws_input_stream_read() stopped early (didn't
+ * complete, though more space was available).
+ *
+ * Each call to this function encodes a complete DATA frame, or nothing at all,
+ * so it's always safe to encode a different frame type or the body of a different stream
+ * after calling this.
+ */
+AWS_HTTP_API
+int aws_h2_encode_data_frame(
+ struct aws_h2_frame_encoder *encoder,
+ uint32_t stream_id,
+ struct aws_input_stream *body_stream,
+ bool body_ends_stream,
+ uint8_t pad_length,
+ int32_t *stream_window_size_peer,
+ size_t *connection_window_size_peer,
+ struct aws_byte_buf *output,
+ bool *body_complete,
+ bool *body_stalled);
+
+AWS_HTTP_API
+void aws_h2_frame_destroy(struct aws_h2_frame *frame);
+
+/**
+ * This frame type may actually end up encoding multiple frames
+ * (HEADERS followed by 0 or more CONTINUATION frames).
+ */
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_headers(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ const struct aws_http_headers *headers,
+ bool end_stream,
+ uint8_t pad_length,
+ const struct aws_h2_frame_priority_settings *optional_priority);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_priority(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ const struct aws_h2_frame_priority_settings *priority);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_rst_stream(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t error_code);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_settings(
+ struct aws_allocator *allocator,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ bool ack);
+
+/**
+ * This frame type may actually end up encoding multiple frames
+ * (PUSH_PROMISE followed by 0 or more CONTINUATION frames).
+ */
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_push_promise(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t promised_stream_id,
+ const struct aws_http_headers *headers,
+ uint8_t pad_length);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_ping(
+ struct aws_allocator *allocator,
+ bool ack,
+ const uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_goaway(
+ struct aws_allocator *allocator,
+ uint32_t last_stream_id,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data);
+
+AWS_HTTP_API
+struct aws_h2_frame *aws_h2_frame_new_window_update(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t window_size_increment);
+
+AWS_HTTP_API void aws_h2_frame_encoder_set_setting_header_table_size(
+ struct aws_h2_frame_encoder *encoder,
+ uint32_t data);
+AWS_HTTP_API void aws_h2_frame_encoder_set_setting_max_frame_size(struct aws_h2_frame_encoder *encoder, uint32_t data);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_H2_FRAMES_H */
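A sketch of the two-step encoding process described above (initialize an encoder on the stack, then encode with aws_h2_encode_frame()). It assumes, rather than documents, that a single WINDOW_UPDATE frame fits into a small buffer and that destroying the frame after frame_complete is reported is safe.

    #include <aws/http/private/h2_frames.h>

    static int s_example_encode_window_update(struct aws_allocator *alloc, uint32_t stream_id) {
        struct aws_h2_frame_encoder encoder;
        if (aws_h2_frame_encoder_init(&encoder, alloc, NULL /* logging_id */)) {
            return AWS_OP_ERR;
        }

        struct aws_h2_frame *frame = aws_h2_frame_new_window_update(alloc, stream_id, 1024 /* increment */);

        struct aws_byte_buf output;
        aws_byte_buf_init(&output, alloc, 64);

        bool frame_complete = false;
        int result = AWS_OP_ERR;
        if (frame != NULL && aws_h2_encode_frame(&encoder, frame, &output, &frame_complete) == AWS_OP_SUCCESS &&
            frame_complete) {
            /* output now holds the 9-byte frame prefix plus the 4-byte WINDOW_UPDATE payload */
            result = AWS_OP_SUCCESS;
        }

        if (frame != NULL) {
            aws_h2_frame_destroy(frame); /* assumed safe once encoding completed (or never started) */
        }
        aws_byte_buf_clean_up(&output);
        aws_h2_frame_encoder_clean_up(&encoder);
        return result;
    }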
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h
new file mode 100644
index 0000000000..62de106c3e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/h2_stream.h
@@ -0,0 +1,190 @@
+#ifndef AWS_HTTP_H2_STREAM_H
+#define AWS_HTTP_H2_STREAM_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_frames.h>
+#include <aws/http/private/request_response_impl.h>
+
+#include <aws/common/mutex.h>
+#include <aws/io/channel.h>
+
+#include <inttypes.h>
+
+#define AWS_H2_STREAM_LOGF(level, stream, text, ...) \
+ AWS_LOGF_##level( \
+ AWS_LS_HTTP_STREAM, \
+ "id=%" PRIu32 " connection=%p state=%s: " text, \
+ (stream)->base.id, \
+ (void *)(stream)->base.owning_connection, \
+ aws_h2_stream_state_to_str((stream)->thread_data.state), \
+ __VA_ARGS__)
+#define AWS_H2_STREAM_LOG(level, stream, text) AWS_H2_STREAM_LOGF(level, (stream), "%s", (text))
+
+enum aws_h2_stream_state {
+ /* Initial state, before anything sent or received. */
+ AWS_H2_STREAM_STATE_IDLE,
+ /* (server-only) stream-id was reserved via PUSH_PROMISE on another stream,
+ * but HEADERS for this stream have not been sent yet */
+ AWS_H2_STREAM_STATE_RESERVED_LOCAL,
+ /* (client-only) stream-id was reserved via PUSH_PROMISE on another stream,
+ * but HEADERS for this stream have not been received yet */
+ AWS_H2_STREAM_STATE_RESERVED_REMOTE,
+ /* Neither side is done sending their message. */
+ AWS_H2_STREAM_STATE_OPEN,
+ /* This side is done sending message (END_STREAM), but peer is not done. */
+ AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL,
+ /* Peer is done sending message (END_STREAM), but this side is not done */
+ AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE,
+ /* Both sides done sending message (END_STREAM),
+ * or either side has sent RST_STREAM */
+ AWS_H2_STREAM_STATE_CLOSED,
+
+ AWS_H2_STREAM_STATE_COUNT,
+};
+
+/* simplified stream state for API implementation */
+enum aws_h2_stream_api_state {
+ AWS_H2_STREAM_API_STATE_INIT,
+ AWS_H2_STREAM_API_STATE_ACTIVE,
+ AWS_H2_STREAM_API_STATE_COMPLETE,
+};
+
+/* Indicates the state of the body of the HTTP/2 stream */
+enum aws_h2_stream_body_state {
+ AWS_H2_STREAM_BODY_STATE_NONE, /* Has no body for the HTTP/2 stream */
+ AWS_H2_STREAM_BODY_STATE_WAITING_WRITES, /* Has no active body, but is waiting for more data to be written */
+ AWS_H2_STREAM_BODY_STATE_ONGOING, /* Has active ongoing body */
+};
+
+/* represents a write operation, which will be turned into a data frame */
+struct aws_h2_stream_data_write {
+ struct aws_linked_list_node node;
+ struct aws_input_stream *data_stream;
+ aws_http2_stream_write_data_complete_fn *on_complete;
+ void *user_data;
+ bool end_stream;
+};
+
+struct aws_h2_stream {
+ struct aws_http_stream base;
+
+ struct aws_linked_list_node node;
+ struct aws_channel_task cross_thread_work_task;
+
+ /* Only the event-loop thread may touch this data */
+ struct {
+ enum aws_h2_stream_state state;
+ int32_t window_size_peer;
+ /* The local window size.
+ * We allow this value to exceed the max window size (an int64 can hold much more than 0x7FFFFFFF);
+ * we leave it up to the remote peer to detect whether the max window size has been exceeded. */
+ int64_t window_size_self;
+ struct aws_http_message *outgoing_message;
+ /* All queued writes. If the message provides a body stream, it will be first in this list.
+ * This list can drain, which results in the stream being put to sleep (moved to waiting_streams_list in
+ * h2_connection). */
+ struct aws_linked_list outgoing_writes; /* aws_h2_stream_data_write */
+ bool received_main_headers;
+
+ bool content_length_received;
+ /* Set if incoming message has content-length header */
+ uint64_t incoming_content_length;
+ /* The total length of payload of data frame received */
+ uint64_t incoming_data_length;
+ /* Indicates that the stream is currently in the waiting_streams_list and is asleep.
+ * When the stream needs to be awakened, move it back to the outgoing_streams_list and set this bool
+ * to false */
+ bool waiting_for_writes;
+ } thread_data;
+
+ /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+ struct {
+ struct aws_mutex lock;
+
+ bool is_cross_thread_work_task_scheduled;
+
+ /* The window_update value for `thread_data.window_size_self` that hasn't been applied yet */
+ size_t window_update_size;
+
+ /* The combined aws_http2_error_code that the user wanted to send to the remote peer via RST_STREAM, and the
+ * internal aws error code we want to inform the user about. */
+ struct aws_h2err reset_error;
+ bool reset_called;
+ bool manual_write_ended;
+
+ /* Simplified stream state. */
+ enum aws_h2_stream_api_state api_state;
+
+ /* any data streams sent manually via aws_http2_stream_write_data */
+ struct aws_linked_list pending_write_list; /* aws_h2_stream_pending_data */
+ } synced_data;
+ bool manual_write;
+
+ /* Store the sent reset HTTP/2 error code; set to -1 if none has been sent so far */
+ int64_t sent_reset_error_code;
+
+ /* Store the received reset HTTP/2 error code; set to -1 if none has been received so far */
+ int64_t received_reset_error_code;
+};
+
+const char *aws_h2_stream_state_to_str(enum aws_h2_stream_state state);
+
+struct aws_h2_stream *aws_h2_stream_new_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+
+enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream);
+
+struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self);
+
+/* Connection is ready to send frames from stream now */
+int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state);
+
+/* Completes stream for one reason or another, clean up any pending writes/resources. */
+void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code);
+
+/* Connection is ready to send data from stream now.
+ * Stream may complete itself during this call.
+ * data_encode_status: see `aws_h2_data_encode_status`
+ */
+int aws_h2_stream_encode_data_frame(
+ struct aws_h2_stream *stream,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ int *data_encode_status);
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *stream);
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_i(
+ struct aws_h2_stream *stream,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type);
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_end(
+ struct aws_h2_stream *stream,
+ bool malformed,
+ enum aws_http_header_block block_type);
+
+struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *stream, uint32_t promised_stream_id);
+struct aws_h2err aws_h2_stream_on_decoder_data_begin(
+ struct aws_h2_stream *stream,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream);
+struct aws_h2err aws_h2_stream_on_decoder_data_i(struct aws_h2_stream *stream, struct aws_byte_cursor data);
+struct aws_h2err aws_h2_stream_on_decoder_window_update(
+ struct aws_h2_stream *stream,
+ uint32_t window_size_increment,
+ bool *window_resume);
+struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *stream);
+struct aws_h2err aws_h2_stream_on_decoder_rst_stream(struct aws_h2_stream *stream, uint32_t h2_error_code);
+
+int aws_h2_stream_activate(struct aws_http_stream *stream);
+
+#endif /* AWS_HTTP_H2_STREAM_H */
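The on_decoder_* callbacks above all return struct aws_h2err, the error-pairing type from h2_frames.h. The following hypothetical validation helper sketches that convention; AWS_HTTP2_ERR_PROTOCOL_ERROR is assumed to be the public enum constant for the RFC 7540 PROTOCOL_ERROR code.

    #include <aws/http/private/h2_stream.h>

    /* Hypothetical helper: a WINDOW_UPDATE increment of 0 is a protocol violation (RFC 7540 6.9). */
    static struct aws_h2err s_example_check_window_increment(uint32_t window_size_increment) {
        if (window_size_increment == 0) {
            /* Pairs the HTTP/2 code sent to the peer with AWS_ERROR_HTTP_PROTOCOL_ERROR for the API user. */
            return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
        }
        return AWS_H2ERR_SUCCESS;
    }

Callers would check the result with aws_h2err_failed() and propagate it, which ultimately results in a RST_STREAM or GOAWAY carrying the paired HTTP/2 code.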
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h
new file mode 100644
index 0000000000..d0507c2aff
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack.h
@@ -0,0 +1,297 @@
+#ifndef AWS_HTTP_HPACK_H
+#define AWS_HTTP_HPACK_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/request_response.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/compression/huffman.h>
+
+/**
+ * Result of aws_hpack_decode() call.
+ * If a complete entry has not been decoded yet, type is ONGOING.
+ * Otherwise, type informs which data to look at.
+ */
+struct aws_hpack_decode_result {
+ enum aws_hpack_decode_type {
+ AWS_HPACK_DECODE_T_ONGOING,
+ AWS_HPACK_DECODE_T_HEADER_FIELD,
+ AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE,
+ } type;
+
+ union {
+ /* If type is AWS_HPACK_DECODE_T_HEADER_FIELD */
+ struct aws_http_header header_field;
+
+ /* If type is AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE */
+ size_t dynamic_table_resize;
+ } data;
+};
+
+/**
+ * Controls whether non-indexed strings will use Huffman encoding.
+ * In SMALLEST mode, strings will only be sent with Huffman encoding if it makes them smaller.
+ *
+ * Note: This does not control compression via "indexing",
+ * for that, see `aws_http_header_compression`.
+ * This only controls how string values are encoded when they're not already in a table.
+ */
+enum aws_hpack_huffman_mode {
+ AWS_HPACK_HUFFMAN_SMALLEST,
+ AWS_HPACK_HUFFMAN_NEVER,
+ AWS_HPACK_HUFFMAN_ALWAYS,
+};
+
+/**
+ * Maintains the dynamic table.
+ * Insertion is backwards, indexing is forwards
+ */
+struct aws_hpack_context {
+ struct aws_allocator *allocator;
+
+ enum aws_http_log_subject log_subject;
+ const void *log_id;
+
+ struct {
+ /* Array of headers. These point to memory we allocated, which needs to be cleaned up whenever we move an entry
+ * out */
+ struct aws_http_header *buffer;
+ size_t buffer_capacity; /* Number of http_headers that can fit in buffer */
+
+ size_t num_elements;
+ size_t index_0;
+
+ /* Size in bytes, according to [4.1] */
+ size_t size;
+ size_t max_size;
+
+ /* aws_http_header * -> size_t */
+ struct aws_hash_table reverse_lookup;
+ /* aws_byte_cursor * -> size_t */
+ struct aws_hash_table reverse_lookup_name_only;
+ } dynamic_table;
+};
+
+/**
+ * Encodes outgoing headers.
+ */
+struct aws_hpack_encoder {
+ const void *log_id;
+
+ struct aws_huffman_encoder huffman_encoder;
+ enum aws_hpack_huffman_mode huffman_mode;
+
+ struct aws_hpack_context context;
+
+ struct {
+ size_t latest_value;
+ size_t smallest_value;
+ bool pending;
+ } dynamic_table_size_update;
+};
+
+/**
+ * Decodes incoming headers
+ */
+struct aws_hpack_decoder {
+ const void *log_id;
+
+ struct aws_huffman_decoder huffman_decoder;
+
+ struct aws_hpack_context context;
+
+ /* TODO: check the new RFC (RFC 9113 - 4.3.1) to make sure we did it right */
+ /* SETTINGS_HEADER_TABLE_SIZE from http2 */
+ size_t dynamic_table_protocol_max_size_setting;
+
+ /* PRO TIP: Don't union progress_integer and progress_string together, since string_decode calls integer_decode */
+ struct hpack_progress_integer {
+ enum {
+ HPACK_INTEGER_STATE_INIT,
+ HPACK_INTEGER_STATE_VALUE,
+ } state;
+ uint8_t bit_count;
+ } progress_integer;
+
+ struct hpack_progress_string {
+ enum {
+ HPACK_STRING_STATE_INIT,
+ HPACK_STRING_STATE_LENGTH,
+ HPACK_STRING_STATE_VALUE,
+ } state;
+ bool use_huffman;
+ uint64_t length;
+ } progress_string;
+
+ struct hpack_progress_entry {
+ enum {
+ HPACK_ENTRY_STATE_INIT,
+ /* Indexed header field: just 1 state. read index, find name and value at index */
+ HPACK_ENTRY_STATE_INDEXED,
+ /* Literal header field: name may be indexed OR literal, value is always literal */
+ HPACK_ENTRY_STATE_LITERAL_BEGIN,
+ HPACK_ENTRY_STATE_LITERAL_NAME_STRING,
+ HPACK_ENTRY_STATE_LITERAL_VALUE_STRING,
+ /* Dynamic table resize: just 1 state. read new size */
+ HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE,
+ /* Done */
+ HPACK_ENTRY_STATE_COMPLETE,
+ } state;
+
+ union {
+ struct {
+ uint64_t index;
+ } indexed;
+
+ struct hpack_progress_literal {
+ uint8_t prefix_size;
+ enum aws_http_header_compression compression;
+ uint64_t name_index;
+ size_t name_length;
+ } literal;
+
+ struct {
+ uint64_t size;
+ } dynamic_table_resize;
+ } u;
+
+ enum aws_hpack_decode_type type;
+
+ /* Scratch holds header name and value while decoding */
+ struct aws_byte_buf scratch;
+ } progress_entry;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Library-level init and shutdown */
+void aws_hpack_static_table_init(struct aws_allocator *allocator);
+void aws_hpack_static_table_clean_up(void);
+
+AWS_HTTP_API
+void aws_hpack_context_init(
+ struct aws_hpack_context *aws_hpack_context,
+ struct aws_allocator *allocator,
+ enum aws_http_log_subject log_subject,
+ const void *log_id);
+
+AWS_HTTP_API
+void aws_hpack_context_clean_up(struct aws_hpack_context *context);
+
+/* Returns the hpack size of a header (name.len + value.len + 32) [4.1] */
+AWS_HTTP_API
+size_t aws_hpack_get_header_size(const struct aws_http_header *header);
+
+/* Returns the current number of elements in the dynamic table */
+AWS_HTTP_API
+size_t aws_hpack_get_dynamic_table_num_elements(const struct aws_hpack_context *context);
+
+size_t aws_hpack_get_dynamic_table_max_size(const struct aws_hpack_context *context);
+
+AWS_HTTP_API
+const struct aws_http_header *aws_hpack_get_header(const struct aws_hpack_context *context, size_t index);
+
+/* A return value of 0 indicates that the header wasn't found */
+AWS_HTTP_API
+size_t aws_hpack_find_index(
+ const struct aws_hpack_context *context,
+ const struct aws_http_header *header,
+ bool search_value,
+ bool *found_value);
+
+AWS_HTTP_API
+int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_http_header *header);
+
+/**
+ * Set the max size of the dynamic table (in octets). The size of each header is name.len + value.len + 32 [4.1].
+ */
+AWS_HTTP_API
+int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size);
+
+AWS_HTTP_API
+void aws_hpack_encoder_init(struct aws_hpack_encoder *encoder, struct aws_allocator *allocator, const void *log_id);
+
+AWS_HTTP_API
+void aws_hpack_encoder_clean_up(struct aws_hpack_encoder *encoder);
+
+/* Call this after receiving SETTINGS_HEADER_TABLE_SIZE from peer and sending the ACK.
+ * The hpack-encoder remembers all size updates, and makes sure to encode the proper
+ * number of Dynamic Table Size Updates the next time a header block is sent. */
+AWS_HTTP_API
+void aws_hpack_encoder_update_max_table_size(struct aws_hpack_encoder *encoder, uint32_t new_max_size);
+
+AWS_HTTP_API
+void aws_hpack_encoder_set_huffman_mode(struct aws_hpack_encoder *encoder, enum aws_hpack_huffman_mode mode);
+
+/**
+ * Encode header-block into the output.
+ * This function will mutate hpack, so an error means hpack can no longer be used.
+ * Note that output will be dynamically resized if it's too short.
+ */
+AWS_HTTP_API
+int aws_hpack_encode_header_block(
+ struct aws_hpack_encoder *encoder,
+ const struct aws_http_headers *headers,
+ struct aws_byte_buf *output);
+
+AWS_HTTP_API
+void aws_hpack_decoder_init(struct aws_hpack_decoder *decoder, struct aws_allocator *allocator, const void *log_id);
+
+AWS_HTTP_API
+void aws_hpack_decoder_clean_up(struct aws_hpack_decoder *decoder);
+
+/* Call this after sending SETTINGS_HEADER_TABLE_SIZE and receiving ACK from the peer.
+ * The hpack-decoder remembers all size updates, and makes sure that the peer
+ * sends the appropriate Dynamic Table Size Updates in the next header block we receive. */
+AWS_HTTP_API
+void aws_hpack_decoder_update_max_table_size(struct aws_hpack_decoder *decoder, uint32_t new_max_size);
+
+/**
+ * Decode the next entry in the header-block-fragment.
+ * If result->type is ONGOING, then call decode() again with more data to resume decoding.
+ * Otherwise, type is either a HEADER_FIELD or a DYNAMIC_TABLE_RESIZE.
+ *
+ * If an error occurs, the decoder is broken and decode() must not be called again.
+ */
+AWS_HTTP_API
+int aws_hpack_decode(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_hpack_decode_result *result);
+
+/*******************************************************************************
+ * Private functions for encoder/decoder, but public for testing purposes
+ ******************************************************************************/
+
+/* Output will be dynamically resized if it's too short */
+AWS_HTTP_API
+int aws_hpack_encode_integer(uint64_t integer, uint8_t starting_bits, uint8_t prefix_size, struct aws_byte_buf *output);
+
+/* Output will be dynamically resized if it's too short */
+AWS_HTTP_API
+int aws_hpack_encode_string(
+ struct aws_hpack_encoder *encoder,
+ struct aws_byte_cursor to_encode,
+ struct aws_byte_buf *output);
+
+AWS_HTTP_API
+int aws_hpack_decode_integer(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ uint8_t prefix_size,
+ uint64_t *integer,
+ bool *complete);
+
+AWS_HTTP_API
+int aws_hpack_decode_string(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_byte_buf *output,
+ bool *complete);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_HPACK_H */
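A hedged sketch of the encode/decode round trip declared above. It assumes the library has already been initialized (the public aws_http_library_init() is expected to perform the static-table init mentioned above), that a NULL log_id is acceptable, and that decoded header cursors are only valid until the next decode call; the example header values are arbitrary.

    #include <aws/http/private/hpack.h>
    #include <aws/http/request_response.h>

    static int s_example_hpack_round_trip(struct aws_allocator *alloc) {
        struct aws_hpack_encoder encoder;
        struct aws_hpack_decoder decoder;
        aws_hpack_encoder_init(&encoder, alloc, NULL /* log_id */);
        aws_hpack_decoder_init(&decoder, alloc, NULL /* log_id */);

        struct aws_http_headers *headers = aws_http_headers_new(alloc);
        aws_http_headers_add(headers, aws_byte_cursor_from_c_str(":method"), aws_byte_cursor_from_c_str("GET"));
        aws_http_headers_add(headers, aws_byte_cursor_from_c_str("user-agent"), aws_byte_cursor_from_c_str("example"));

        struct aws_byte_buf block;
        aws_byte_buf_init(&block, alloc, 128); /* resized by the encoder if too short, per the comment above */

        int result = aws_hpack_encode_header_block(&encoder, headers, &block);

        /* Decode entries until the block is consumed. */
        struct aws_byte_cursor to_decode = aws_byte_cursor_from_buf(&block);
        while (result == AWS_OP_SUCCESS && to_decode.len > 0) {
            struct aws_hpack_decode_result entry;
            result = aws_hpack_decode(&decoder, &to_decode, &entry);
            if (result == AWS_OP_SUCCESS && entry.type == AWS_HPACK_DECODE_T_HEADER_FIELD) {
                /* entry.data.header_field holds the decoded name/value */
            }
        }

        aws_byte_buf_clean_up(&block);
        aws_http_headers_release(headers);
        aws_hpack_decoder_clean_up(&decoder);
        aws_hpack_encoder_clean_up(&encoder);
        return result;
    }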
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def
new file mode 100644
index 0000000000..f9abd74f3d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/hpack_header_static_table.def
@@ -0,0 +1,74 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef HEADER
+#error "Macro HEADER(index, name) must be defined before including this header file!"
+#endif
+
+#ifndef HEADER_WITH_VALUE
+#error "Macro HEADER_WITH_VALUE(index, name, value) must be defined before including this header file!"
+#endif
+
+HEADER(1, ":authority")
+HEADER_WITH_VALUE(2, ":method", "GET")
+HEADER_WITH_VALUE(3, ":method", "POST")
+HEADER_WITH_VALUE(4, ":path", "/")
+HEADER_WITH_VALUE(5, ":path", "/index.html")
+HEADER_WITH_VALUE(6, ":scheme", "http")
+HEADER_WITH_VALUE(7, ":scheme", "https")
+HEADER_WITH_VALUE(8, ":status", "200")
+HEADER_WITH_VALUE(9, ":status", "204")
+HEADER_WITH_VALUE(10, ":status", "206")
+HEADER_WITH_VALUE(11, ":status", "304")
+HEADER_WITH_VALUE(12, ":status", "400")
+HEADER_WITH_VALUE(13, ":status", "404")
+HEADER_WITH_VALUE(14, ":status", "500")
+HEADER(15, "accept-charset")
+HEADER_WITH_VALUE(16, "accept-encoding", "gzip,deflate")
+HEADER(17, "accept-language")
+HEADER(18, "accept-ranges")
+HEADER(19, "accept")
+HEADER(20, "access-control-allow-origin")
+HEADER(21, "age")
+HEADER(22, "allow")
+HEADER(23, "authorization")
+HEADER(24, "cache-control")
+HEADER(25, "content-disposition")
+HEADER(26, "content-encoding")
+HEADER(27, "content-language")
+HEADER(28, "content-length")
+HEADER(29, "content-location")
+HEADER(30, "content-range")
+HEADER(31, "content-type")
+HEADER(32, "cookie")
+HEADER(33, "date")
+HEADER(34, "etag")
+HEADER(35, "expect")
+HEADER(36, "expires")
+HEADER(37, "from")
+HEADER(38, "host")
+HEADER(39, "if-match")
+HEADER(40, "if-modified-since")
+HEADER(41, "if-none-match")
+HEADER(42, "if-range")
+HEADER(43, "if-unmodified-since")
+HEADER(44, "last-modified")
+HEADER(45, "link")
+HEADER(46, "location")
+HEADER(47, "max-forwards")
+HEADER(48, "proxy-authenticate")
+HEADER(49, "proxy-authorization")
+HEADER(50, "range")
+HEADER(51, "referer")
+HEADER(52, "refresh")
+HEADER(53, "retry-after")
+HEADER(54, "server")
+HEADER(55, "set-cookie")
+HEADER(56, "strict-transport-security")
+HEADER(57, "transfer-encoding")
+HEADER(58, "user-agent")
+HEADER(59, "vary")
+HEADER(60, "via")
+HEADER(61, "www-authenticate")
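The file above is an X-macro list: a consumer defines HEADER and HEADER_WITH_VALUE, includes the .def, and gets one expansion per static-table entry. A generic consumption sketch (not the library's actual table-building code) follows; entry 0 is intentionally unused so indices match RFC 7541.

    /* Hypothetical consumer: builds a simple lookup table of the 61 static entries. */
    struct s_static_header {
        const char *name;
        const char *value;
    };

    #define HEADER(index, name) [(index)] = {(name), ""},
    #define HEADER_WITH_VALUE(index, name, value) [(index)] = {(name), (value)},

    static const struct s_static_header s_static_table[62] = {
    #include <aws/http/private/hpack_header_static_table.def>
    };

    #undef HEADER
    #undef HEADER_WITH_VALUE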
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h
new file mode 100644
index 0000000000..d9252047e5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http2_stream_manager_impl.h
@@ -0,0 +1,199 @@
+#ifndef AWS_HTTP2_STREAM_MANAGER_IMPL_H
+#define AWS_HTTP2_STREAM_MANAGER_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/http/http2_stream_manager.h>
+#include <aws/http/private/random_access_set.h>
+
+enum aws_h2_sm_state_type {
+ AWS_H2SMST_READY,
+ AWS_H2SMST_DESTROYING, /* On zero external ref count, can destroy */
+};
+
+enum aws_h2_sm_connection_state_type {
+ AWS_H2SMCST_IDEAL,
+ AWS_H2SMCST_NEARLY_FULL,
+ AWS_H2SMCST_FULL,
+};
+
+/* Lives while streams are open on the connection; if there are no outstanding pending acquisitions and no open
+ * streams on the connection, this structure should be destroyed */
+struct aws_h2_sm_connection {
+ struct aws_allocator *allocator;
+ struct aws_http2_stream_manager *stream_manager;
+ struct aws_http_connection *connection;
+ uint32_t num_streams_assigned; /* Counted from when a stream is assigned to the connection until the stream
+ completes or fails to be created from the connection. */
+ uint32_t max_concurrent_streams; /* the lower of the user-configured value and the peer's setting */
+
+ /* task to send ping periodically from connection thread. */
+ struct aws_ref_count ref_count;
+ struct aws_channel_task ping_task;
+ struct aws_channel_task ping_timeout_task;
+ struct {
+ bool ping_received;
+ bool stopped_new_requests;
+ uint64_t next_ping_task_time;
+ } thread_data;
+
+ enum aws_h2_sm_connection_state_type state;
+};
+
+/* Lives from the user's request to acquire a stream until the stream completes. */
+struct aws_h2_sm_pending_stream_acquisition {
+ struct aws_allocator *allocator;
+ struct aws_linked_list_node node;
+ struct aws_http_make_request_options options;
+ struct aws_h2_sm_connection *sm_connection; /* The connection to make the request on. Kept
+ NULL until an available connection is found and the acquisition is moved to the pending_make_requests
+ list. */
+ struct aws_http_message *request;
+ struct aws_channel_task make_request_task;
+ aws_http2_stream_manager_on_stream_acquired_fn *callback;
+ void *user_data;
+};
+
+/* connections_acquiring_count, open_stream_count, pending_make_requests_count AND pending_stream_acquisition_count */
+enum aws_sm_count_type {
+ AWS_SMCT_CONNECTIONS_ACQUIRING,
+ AWS_SMCT_OPEN_STREAM,
+ AWS_SMCT_PENDING_MAKE_REQUESTS,
+ AWS_SMCT_PENDING_ACQUISITION,
+ AWS_SMCT_COUNT,
+};
+
+struct aws_http2_stream_manager {
+ struct aws_allocator *allocator;
+ void *shutdown_complete_user_data;
+ aws_http2_stream_manager_shutdown_complete_fn *shutdown_complete_callback;
+ /**
+ * Underlying connection manager. Always has the same lifetime as the stream manager that owns it.
+ */
+ struct aws_http_connection_manager *connection_manager;
+ /**
+ * Refcount managed by user. Once this drops to zero, the manager state transitions to shutting down
+ */
+ struct aws_ref_count external_ref_count;
+ /**
+ * Internal refcount that keeps connection manager alive.
+ *
+ * It's the sum of connections_acquiring_count, open_stream_count, pending_make_requests_count and
+ * pending_stream_acquisition_count, plus the number of `struct aws_http2_stream_management_transaction` alive,
+ * plus one for external usage.
+ *
+ * Once this refcount drops to zero, the stream manager should either clean up all the memory, or wait for
+ * the last task to clean up the memory and do nothing else.
+ */
+ struct aws_ref_count internal_ref_count;
+ struct aws_client_bootstrap *bootstrap;
+
+ /* Configurations */
+ size_t max_connections;
+ /* Connection will be closed if 5xx response received from server. */
+ bool close_connection_on_server_error;
+
+ uint64_t connection_ping_period_ns;
+ uint64_t connection_ping_timeout_ns;
+
+ /**
+ * Default is no limit. 0 will be considered as using the default value.
+ * The ideal number of concurrent streams for a connection. The stream manager will try to create a new connection if
+ * one connection reaches this number. But if the max number of connections has been reached, the manager will reuse
+ * connections to create the acquired streams as much as possible. */
+ size_t ideal_concurrent_streams_per_connection;
+ /**
+ * Default is no limit. 0 will be considered as using the default value.
+ * The real number of concurrent streams per connection will be the smaller of the setting from the
+ * other end and the value here.
+ */
+ size_t max_concurrent_streams_per_connection;
+
+ /**
+ * Task to invoke pending acquisition callbacks asynchronously if the stream manager is shutting down.
+ */
+ struct aws_event_loop *finish_pending_stream_acquisitions_task_event_loop;
+
+ /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+ struct {
+ struct aws_mutex lock;
+ /*
+ * A manager can be in one of two states, READY or DESTROYING. The state transition
+ * takes place when the external ref count drops to zero.
+ */
+ enum aws_h2_sm_state_type state;
+
+ /**
+ * A set of all connections that meet all requirements for use. Note: there will be connections not in this set,
+ * but held by the stream manager, which can be tracked by the streams created on them. Set of `struct
+ * aws_h2_sm_connection *`
+ */
+ struct aws_random_access_set ideal_available_set;
+ /**
+ * A set of all available connections that exceed the soft limits set by users. Note: there will be connections
+ * not in this set, but held by the stream manager, which can be tracked by the streams created. Set of `struct
+ * aws_h2_sm_connection *`
+ */
+ struct aws_random_access_set nonideal_available_set;
+ /* We don't maintain a set for connections that are full or "dead" (cannot make any new streams). The streams
+ * open on such a connection track it */
+
+ /**
+ * The set of all incomplete stream acquisition requests (haven't decided which connection to make the request
+ * on), list of `struct aws_h2_sm_pending_stream_acquisition*`
+ */
+ struct aws_linked_list pending_stream_acquisitions;
+
+ /**
+ * The number of connections acquired from connection manager and not released yet.
+ */
+ size_t holding_connections_count;
+
+ /**
+ * Counts that contribute to the internal refcount.
+ * When a value changes, s_sm_count_increase/decrease_synced must be called.
+ *
+ * AWS_SMCT_CONNECTIONS_ACQUIRING: The number of new connections we are acquiring from the connection manager.
+ * AWS_SMCT_OPEN_STREAM: The number of streams that are opened and not completed yet.
+ * AWS_SMCT_PENDING_MAKE_REQUESTS: The number of streams that are scheduled to be made from a connection but haven't
+ * been executed yet.
+ * AWS_SMCT_PENDING_ACQUISITION: The number of all incomplete stream acquisition requests (haven't decided which
+ * connection to make the request on), kept so that we don't have to compute the size of a linked list every time.
+ */
+ size_t internal_refcount_stats[AWS_SMCT_COUNT];
+
+ bool finish_pending_stream_acquisitions_task_scheduled;
+ } synced_data;
+};
+
+/**
+ * Encompasses all of the external operations that need to be done for various
+ * events:
+ * - User level:
+ * stream manager release
+ * stream acquire
+ * - Internal eventloop (another thread):
+ * connection_acquired
+ * stream_completed
+ * - Internal (can happen from any thread):
+ * connection acquire
+ * connection release
+ *
+ * The transaction is built under the manager's lock (and the internal state is updated optimistically),
+ * but then executed outside of it.
+ */
+struct aws_http2_stream_management_transaction {
+ struct aws_http2_stream_manager *stream_manager;
+ struct aws_allocator *allocator;
+ size_t new_connections;
+ struct aws_h2_sm_connection *sm_connection_to_release;
+ struct aws_linked_list
+ pending_make_requests; /* List of aws_h2_sm_pending_stream_acquisition with chosen connection */
+};
+
+#endif /* AWS_HTTP2_STREAM_MANAGER_IMPL_H */
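The internal_refcount_stats comment above refers to s_sm_count_increase/decrease_synced. Those helpers live in the stream manager's .c file and are not part of this header; the snippet below is only a hypothetical sketch of the bookkeeping they describe, assuming the caller already holds synced_data.lock.

    #include <aws/http/private/http2_stream_manager_impl.h>

    /* Hypothetical sketch; the real helper in the implementation file may differ.
     * Caller must hold stream_manager->synced_data.lock. */
    static void s_sm_count_increase_synced_sketch(
        struct aws_http2_stream_manager *stream_manager,
        enum aws_sm_count_type count_type,
        size_t num) {

        stream_manager->synced_data.internal_refcount_stats[count_type] += num;
        for (size_t i = 0; i < num; ++i) {
            /* Each counted item contributes one internal reference that keeps the manager alive. */
            aws_ref_count_acquire(&stream_manager->internal_ref_count);
        }
    }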
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h
new file mode 100644
index 0000000000..8940d54553
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/http_impl.h
@@ -0,0 +1,100 @@
+#ifndef AWS_HTTP_IMPL_H
+#define AWS_HTTP_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+/**
+ * Methods that affect internal processing.
+ * This is NOT a definitive list of methods.
+ */
+enum aws_http_method {
+ AWS_HTTP_METHOD_UNKNOWN, /* Unrecognized value. */
+ AWS_HTTP_METHOD_GET,
+ AWS_HTTP_METHOD_HEAD,
+ AWS_HTTP_METHOD_CONNECT,
+ AWS_HTTP_METHOD_COUNT, /* Number of enums */
+};
+
+/**
+ * Headers that affect internal processing.
+ * This is NOT a definitive list of headers.
+ */
+enum aws_http_header_name {
+ AWS_HTTP_HEADER_UNKNOWN, /* Unrecognized value */
+
+ /* Request pseudo-headers */
+ AWS_HTTP_HEADER_METHOD,
+ AWS_HTTP_HEADER_SCHEME,
+ AWS_HTTP_HEADER_AUTHORITY,
+ AWS_HTTP_HEADER_PATH,
+
+ /* Response pseudo-headers */
+ AWS_HTTP_HEADER_STATUS,
+
+ /* Regular headers */
+ AWS_HTTP_HEADER_CONNECTION,
+ AWS_HTTP_HEADER_CONTENT_LENGTH,
+ AWS_HTTP_HEADER_EXPECT,
+ AWS_HTTP_HEADER_TRANSFER_ENCODING,
+ AWS_HTTP_HEADER_COOKIE,
+ AWS_HTTP_HEADER_SET_COOKIE,
+ AWS_HTTP_HEADER_HOST,
+ AWS_HTTP_HEADER_CACHE_CONTROL,
+ AWS_HTTP_HEADER_MAX_FORWARDS,
+ AWS_HTTP_HEADER_PRAGMA,
+ AWS_HTTP_HEADER_RANGE,
+ AWS_HTTP_HEADER_TE,
+ AWS_HTTP_HEADER_CONTENT_ENCODING,
+ AWS_HTTP_HEADER_CONTENT_TYPE,
+ AWS_HTTP_HEADER_CONTENT_RANGE,
+ AWS_HTTP_HEADER_TRAILER,
+ AWS_HTTP_HEADER_WWW_AUTHENTICATE,
+ AWS_HTTP_HEADER_AUTHORIZATION,
+ AWS_HTTP_HEADER_PROXY_AUTHENTICATE,
+ AWS_HTTP_HEADER_PROXY_AUTHORIZATION,
+ AWS_HTTP_HEADER_AGE,
+ AWS_HTTP_HEADER_EXPIRES,
+ AWS_HTTP_HEADER_DATE,
+ AWS_HTTP_HEADER_LOCATION,
+ AWS_HTTP_HEADER_RETRY_AFTER,
+ AWS_HTTP_HEADER_VARY,
+ AWS_HTTP_HEADER_WARNING,
+ AWS_HTTP_HEADER_UPGRADE,
+ AWS_HTTP_HEADER_KEEP_ALIVE,
+ AWS_HTTP_HEADER_PROXY_CONNECTION,
+
+ AWS_HTTP_HEADER_COUNT, /* Number of enums */
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API void aws_http_fatal_assert_library_initialized(void);
+
+AWS_HTTP_API struct aws_byte_cursor aws_http_version_to_str(enum aws_http_version version);
+
+/**
+ * Returns appropriate enum, or AWS_HTTP_METHOD_UNKNOWN if no match found.
+ * Case-sensitive
+ */
+AWS_HTTP_API enum aws_http_method aws_http_str_to_method(struct aws_byte_cursor cursor);
+
+/**
+ * Returns appropriate enum, or AWS_HTTP_HEADER_UNKNOWN if no match found.
+ * Not case-sensitive
+ */
+AWS_HTTP_API enum aws_http_header_name aws_http_str_to_header_name(struct aws_byte_cursor cursor);
+
+/**
+ * Returns appropriate enum, or AWS_HTTP_HEADER_UNKNOWN if no match found.
+ * Case-sensitive (ex: "Connection" -> AWS_HTTP_HEADER_UNKNOWN because we looked for "connection").
+ */
+AWS_HTTP_API enum aws_http_header_name aws_http_lowercase_str_to_header_name(struct aws_byte_cursor cursor);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_IMPL_H */
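A short sketch of the string-to-enum helpers declared above, illustrating the documented case-sensitivity rules; the lookup inputs are arbitrary examples.

    #include <aws/http/private/http_impl.h>

    static void s_example_lookups(void) {
        /* Method lookup is case-sensitive: "GET" matches, "get" would not. */
        enum aws_http_method method = aws_http_str_to_method(aws_byte_cursor_from_c_str("GET"));

        /* Header-name lookup is not case-sensitive. */
        enum aws_http_header_name name = aws_http_str_to_header_name(aws_byte_cursor_from_c_str("Content-Length"));

        /* The lowercase-only variant is case-sensitive, so "Connection" maps to AWS_HTTP_HEADER_UNKNOWN. */
        enum aws_http_header_name strict =
            aws_http_lowercase_str_to_header_name(aws_byte_cursor_from_c_str("Connection"));

        (void)method;
        (void)name;
        (void)strict;
    }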
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h
new file mode 100644
index 0000000000..c47305b251
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/proxy_impl.h
@@ -0,0 +1,236 @@
+#ifndef AWS_HTTP_PROXY_IMPL_H
+#define AWS_HTTP_PROXY_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/http/connection.h>
+#include <aws/http/proxy.h>
+#include <aws/http/status_code.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/socket.h>
+
+struct aws_http_connection_manager_options;
+struct aws_http_message;
+struct aws_channel_slot;
+struct aws_string;
+struct aws_tls_connection_options;
+struct aws_http_proxy_negotiator;
+struct aws_http_proxy_strategy;
+struct aws_http_proxy_strategy_tunneling_sequence_options;
+struct aws_http_proxy_strategy_tunneling_kerberos_options;
+struct aws_http_proxy_strategy_tunneling_ntlm_options;
+
+/*
+ * (Successful) State transitions for proxy connections
+ *
+ * Http : None -> Socket Connect -> Success
+ * Https: None -> Socket Connect -> Http Connect -> Tls Negotiation -> Success
+ */
+enum aws_proxy_bootstrap_state {
+ AWS_PBS_NONE = 0,
+ AWS_PBS_SOCKET_CONNECT,
+ AWS_PBS_HTTP_CONNECT,
+ AWS_PBS_TLS_NEGOTIATION,
+ AWS_PBS_SUCCESS,
+ AWS_PBS_FAILURE,
+};
+
+/**
+ * A persistent copy of the aws_http_proxy_options struct. Clones everything appropriate.
+ */
+struct aws_http_proxy_config {
+
+ struct aws_allocator *allocator;
+
+ enum aws_http_proxy_connection_type connection_type;
+
+ struct aws_byte_buf host;
+
+ uint16_t port;
+
+ struct aws_tls_connection_options *tls_options;
+
+ struct aws_http_proxy_strategy *proxy_strategy;
+};
+
+/*
+ * When a proxy connection is made, we wrap the user-supplied user data with this
+ * proxy user data. Callbacks are passed properly to the user. By having this data
+ * available, the proxy request transform that was attached to the connection can extract
+ * the proxy settings it needs in order to properly transform the requests.
+ *
+ * Another possibility would be to fold this data into the connection itself.
+ */
+struct aws_http_proxy_user_data {
+ struct aws_allocator *allocator;
+
+ /*
+ * dynamic proxy connection resolution state
+ */
+ enum aws_proxy_bootstrap_state state;
+ int error_code;
+ enum aws_http_status_code connect_status_code;
+
+ /*
+ * The initial http connection object between the client and the proxy.
+ */
+ struct aws_http_connection *proxy_connection;
+
+ /*
+ * The http connection object that gets surfaced to callers if http is the final protocol of proxy
+ * negotiation.
+ *
+ * In the case of a forwarding proxy, proxy_connection and final_connection are the same.
+ */
+ struct aws_http_connection *final_connection;
+ struct aws_http_message *connect_request;
+ struct aws_http_stream *connect_stream;
+ struct aws_http_proxy_negotiator *proxy_negotiator;
+
+ /*
+ * Cached original connect options
+ */
+ struct aws_string *original_host;
+ uint16_t original_port;
+ void *original_user_data;
+ struct aws_tls_connection_options *original_tls_options;
+ struct aws_client_bootstrap *original_bootstrap;
+ struct aws_socket_options original_socket_options;
+ bool original_manual_window_management;
+ size_t original_initial_window_size;
+ bool prior_knowledge_http2;
+ struct aws_http1_connection_options original_http1_options;
+ struct aws_http2_connection_options
+ original_http2_options; /* the resources within these options are allocated along with the user data */
+ struct aws_hash_table alpn_string_map;
+ /*
+ * setup/shutdown callbacks. We enforce via fatal assert that either the http callbacks are supplied or
+ * the channel callbacks are supplied but never both.
+ *
+ * When using a proxy to ultimately establish an http connection, use the http callbacks.
+ * When using a proxy to establish any other protocol connection, use the raw channel callbacks.
+ *
+ * In the future, we might consider a further refactor which only uses raw channel callbacks.
+ */
+ aws_http_on_client_connection_setup_fn *original_http_on_setup;
+ aws_http_on_client_connection_shutdown_fn *original_http_on_shutdown;
+ aws_client_bootstrap_on_channel_event_fn *original_channel_on_setup;
+ aws_client_bootstrap_on_channel_event_fn *original_channel_on_shutdown;
+
+ struct aws_http_proxy_config *proxy_config;
+
+ struct aws_event_loop *requested_event_loop;
+};
+
+struct aws_http_proxy_system_vtable {
+ int (*setup_client_tls)(struct aws_channel_slot *right_of_slot, struct aws_tls_connection_options *tls_options);
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+struct aws_http_proxy_user_data *aws_http_proxy_user_data_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *options,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_setup,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown);
+
+AWS_HTTP_API
+void aws_http_proxy_user_data_destroy(struct aws_http_proxy_user_data *user_data);
+
+AWS_HTTP_API
+int aws_http_client_connect_via_proxy(const struct aws_http_client_connection_options *options);
+
+AWS_HTTP_API
+int aws_http_rewrite_uri_for_proxy_request(
+ struct aws_http_message *request,
+ struct aws_http_proxy_user_data *proxy_user_data);
+
+AWS_HTTP_API
+void aws_http_proxy_system_set_vtable(struct aws_http_proxy_system_vtable *vtable);
+
+/**
+ * Checks if tunneling proxy negotiation should continue to try and connect
+ * @param proxy_negotiator negotiator to query
+ * @return true if another connect request should be attempted, false otherwise
+ */
+AWS_HTTP_API
+enum aws_http_proxy_negotiation_retry_directive aws_http_proxy_negotiator_get_retry_directive(
+ struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Constructor for a tunnel-only proxy strategy that applies no changes to outbound CONNECT requests. Intended to be
+ * the first link in an adaptive sequence for a tunneling proxy: first try a basic CONNECT, then based on the response,
+ * later links are allowed to make attempts.
+ *
+ * @param allocator memory allocator to use
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_one_time_identity(
+ struct aws_allocator *allocator);
+
+/**
+ * Constructor for a forwarding-only proxy strategy that does nothing. Exists so that all proxy logic uses a
+ * strategy.
+ *
+ * @param allocator memory allocator to use
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_forwarding_identity(struct aws_allocator *allocator);
+
+/**
+ * Constructor for a tunneling proxy strategy that contains a set of sub-strategies which are tried
+ * sequentially in order. Each strategy has the choice to either proceed on a fresh connection or
+ * reuse the current one.
+ *
+ * @param allocator memory allocator to use
+ * @param config sequence configuration options
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_sequence(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_sequence_options *config);
+
+/**
+ * A constructor for a proxy strategy that performs kerberos authentication by adding the appropriate
+ * header and header value to CONNECT requests.
+ *
+ * Currently only supports synchronous fetch of kerberos token values.
+ *
+ * @param allocator memory allocator to use
+ * @param config kerberos authentication configuration info
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_kerberos(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_kerberos_options *config);
+
+/**
+ * Constructor for an NTLM proxy strategy. Because ntlm is a challenge-response authentication protocol, this
+ * strategy will only succeed in a chain in a non-leading position. The strategy extracts the challenge from the
+ * proxy's response to a previous CONNECT request in the chain.
+ *
+ * Currently only supports synchronous fetch of token values.
+ *
+ * @param allocator memory allocator to use
+ * @param config configuration options for the strategy
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_ntlm_options *config);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_PROXY_IMPL_H */
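A minimal sketch of chaining the tunneling strategies declared above into a sequence, starting with the one-time-identity link (illustrative only, not part of the vendored header). It assumes the sequence options struct from the public proxy.h added later in this patch, and that the declarations above are in scope.

#include <aws/http/proxy.h>

static struct aws_http_proxy_strategy *s_example_identity_sequence(struct aws_allocator *allocator) {
    /* First link: plain CONNECT with no mutation; kerberos/ntlm links could be appended after it. */
    struct aws_http_proxy_strategy *identity = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator);
    if (identity == NULL) {
        return NULL;
    }

    struct aws_http_proxy_strategy *links[] = {identity};
    struct aws_http_proxy_strategy_tunneling_sequence_options sequence_config = {
        .strategies = links,
        .strategy_count = AWS_ARRAY_SIZE(links),
    };

    /* Reference management of the sub-strategy is omitted here; see the implementation for ownership rules. */
    return aws_http_proxy_strategy_new_tunneling_sequence(allocator, &sequence_config);
}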
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h
new file mode 100644
index 0000000000..d0880a7194
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/random_access_set.h
@@ -0,0 +1,86 @@
+#ifndef AWS_HTTP_RANDOM_ACCESS_SET_H
+#define AWS_HTTP_RANDOM_ACCESS_SET_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/hash_table.h>
+#include <aws/http/http.h>
+
+/* TODO: someday, if you want to use it from other repo, move it to aws-c-common. */
+
+struct aws_random_access_set_impl;
+
+struct aws_random_access_set {
+ struct aws_random_access_set_impl *impl;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initialize the set, which supports constant-time insert, remove, and random-element
+ * retrieval.
+ *
+ * The underlying hash map uses hash_fn to compute the hash of each element and equals_fn to compute equality of two
+ * keys.
+ *
+ * @param set Pointer to the structure to initialize
+ * @param allocator Allocator
+ * @param hash_fn Computes the hash of each element
+ * @param equals_fn Computes equality of two elements
+ * @param destroy_element_fn Optional. Called when an element is removed
+ * @param initial_item_allocation The initial number of items to allocate.
+ * @return AWS_OP_ERR if initialization fails, AWS_OP_SUCCESS on success.
+ */
+AWS_HTTP_API
+int aws_random_access_set_init(
+ struct aws_random_access_set *set,
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_element_fn,
+ size_t initial_item_allocation);
+
+AWS_HTTP_API
+void aws_random_access_set_clean_up(struct aws_random_access_set *set);
+
+/**
+ * Insert the element at the end of the array list and add a mapping from the element to its index in the hash table.
+ */
+AWS_HTTP_API
+int aws_random_access_set_add(struct aws_random_access_set *set, const void *element, bool *added);
+
+/**
+ * Find and remove the element from the table. If the element does not exist, or the table is empty, nothing
+ * happens. If needed, the element is swapped with the last element of the array list; the last element is then removed.
+ */
+AWS_HTTP_API
+int aws_random_access_set_remove(struct aws_random_access_set *set, const void *element);
+
+/**
+ * Get the pointer to a random element from the data structure. Fails when the data structure is empty.
+ */
+AWS_HTTP_API
+int aws_random_access_set_random_get_ptr(const struct aws_random_access_set *set, void **out);
+
+AWS_HTTP_API
+size_t aws_random_access_set_get_size(const struct aws_random_access_set *set);
+
+/**
+ * Check whether the element exists in the data structure.
+ */
+AWS_HTTP_API
+int aws_random_access_set_exist(const struct aws_random_access_set *set, const void *element, bool *exist);
+
+/**
+ * Get the pointer to the element currently stored at the given index. The index of an element may change when
+ * elements are added or removed. Helpful for debugging and iterating through the whole set.
+ */
+AWS_HTTP_API
+int aws_random_access_set_random_get_ptr_index(const struct aws_random_access_set *set, void **out, size_t index);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP_RANDOM_ACCESS_SET_H */
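A minimal usage sketch of the set API above (illustrative, not part of the vendored header). It assumes the stock pointer hash/equality callbacks aws_hash_ptr()/aws_ptr_eq() from aws-c-common's hash_table.h and an allocator supplied by the caller.

#include <aws/http/private/random_access_set.h>

static void s_random_access_set_example(struct aws_allocator *allocator) {
    struct aws_random_access_set set;
    int element = 42;

    /* constant-time insert/remove/random-get; no destroy callback (NULL), small initial allocation */
    if (aws_random_access_set_init(&set, allocator, aws_hash_ptr, aws_ptr_eq, NULL, 4)) {
        return; /* AWS_OP_ERR: the failing call has already raised an error */
    }

    bool added = false;
    aws_random_access_set_add(&set, &element, &added);

    void *random_item = NULL;
    if (aws_random_access_set_get_size(&set) > 0) {
        aws_random_access_set_random_get_ptr(&set, &random_item);
    }

    aws_random_access_set_remove(&set, &element);
    aws_random_access_set_clean_up(&set);
}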
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h
new file mode 100644
index 0000000000..9cd06e01c2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/request_response_impl.h
@@ -0,0 +1,69 @@
+#ifndef AWS_HTTP_REQUEST_RESPONSE_IMPL_H
+#define AWS_HTTP_REQUEST_RESPONSE_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/request_response.h>
+
+#include <aws/http/private/http_impl.h>
+
+#include <aws/common/atomics.h>
+
+struct aws_http_stream_vtable {
+ void (*destroy)(struct aws_http_stream *stream);
+ void (*update_window)(struct aws_http_stream *stream, size_t increment_size);
+ int (*activate)(struct aws_http_stream *stream);
+
+ int (*http1_write_chunk)(struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options);
+ int (*http1_add_trailer)(struct aws_http_stream *http1_stream, const struct aws_http_headers *trailing_headers);
+
+ int (*http2_reset_stream)(struct aws_http_stream *http2_stream, uint32_t http2_error);
+ int (*http2_get_received_error_code)(struct aws_http_stream *http2_stream, uint32_t *http2_error);
+ int (*http2_get_sent_error_code)(struct aws_http_stream *http2_stream, uint32_t *http2_error);
+ int (*http2_write_data)(
+ struct aws_http_stream *http2_stream,
+ const struct aws_http2_stream_write_data_options *options);
+};
+
+/**
+ * Base class for streams.
+ * There are specific implementations for each HTTP version.
+ */
+struct aws_http_stream {
+ const struct aws_http_stream_vtable *vtable;
+ struct aws_allocator *alloc;
+ struct aws_http_connection *owning_connection;
+
+ uint32_t id;
+
+ void *user_data;
+ aws_http_on_incoming_headers_fn *on_incoming_headers;
+ aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done;
+ aws_http_on_incoming_body_fn *on_incoming_body;
+ aws_http_on_stream_complete_fn *on_complete;
+ aws_http_on_stream_destroy_fn *on_destroy;
+
+ struct aws_atomic_var refcount;
+ enum aws_http_method request_method;
+
+ union {
+ struct aws_http_stream_client_data {
+ int response_status;
+ } client;
+ struct aws_http_stream_server_data {
+ struct aws_byte_cursor request_method_str;
+ struct aws_byte_cursor request_path;
+ aws_http_on_incoming_request_done_fn *on_request_done;
+ } server;
+ } client_or_server_data;
+
+ /* On client connections, `client_data` points to client_or_server_data.client and `server_data` is null.
+ * Opposite is true on server connections */
+ struct aws_http_stream_client_data *client_data;
+ struct aws_http_stream_server_data *server_data;
+};
+
+#endif /* AWS_HTTP_REQUEST_RESPONSE_IMPL_H */
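An illustrative sketch of the vtable dispatch this base struct implies. The stub callbacks and the init helper below are hypothetical; only the struct layout and slot signatures come from this header.

#include <aws/http/private/request_response_impl.h>

static void s_stub_destroy(struct aws_http_stream *stream) {
    (void)stream; /* a real implementation frees its protocol-specific state here */
}

static void s_stub_update_window(struct aws_http_stream *stream, size_t increment_size) {
    (void)stream;
    (void)increment_size;
}

static int s_stub_activate(struct aws_http_stream *stream) {
    (void)stream;
    return AWS_OP_SUCCESS;
}

/* Protocol-specific slots (http1_write_chunk, http2_reset_stream, ...) may stay NULL if unsupported. */
static const struct aws_http_stream_vtable s_stub_stream_vtable = {
    .destroy = s_stub_destroy,
    .update_window = s_stub_update_window,
    .activate = s_stub_activate,
};

static void s_stub_stream_init(
    struct aws_http_stream *stream,
    struct aws_allocator *allocator,
    struct aws_http_connection *connection) {

    stream->vtable = &s_stub_stream_vtable;
    stream->alloc = allocator;
    stream->owning_connection = connection;
    aws_atomic_init_int(&stream->refcount, 1);
    /* public stream functions then dispatch through the vtable, e.g. stream->vtable->activate(stream) */
}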
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h
new file mode 100644
index 0000000000..f670599344
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/strutil.h
@@ -0,0 +1,84 @@
+#ifndef AWS_HTTP_STRUTIL_H
+#define AWS_HTTP_STRUTIL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/http.h>
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return a cursor with all leading and trailing SPACE and TAB characters removed.
+ * RFC7230 section 3.2.3 Whitespace
+ * Examples:
+ * " \t a \t " -> "a"
+ * "a \t a" -> "a \t a"
+ */
+AWS_HTTP_API
+struct aws_byte_cursor aws_strutil_trim_http_whitespace(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this is a valid token, as defined by RFC7230 section 3.2.6:
+ * token = 1*tchar
+ * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+ * / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+ * / DIGIT / ALPHA
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_token(struct aws_byte_cursor token);
+
+/**
+ * Same as aws_strutil_is_http_token(), but uppercase letters are forbidden.
+ */
+AWS_HTTP_API
+bool aws_strutil_is_lowercase_http_token(struct aws_byte_cursor token);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence is a valid HTTP header field-value.
+ *
+ * As defined in RFC7230 section 3.2 (except we are ALWAYS forbidding obs-fold):
+ *
+ * field-value = *( field-content / obs-fold )
+ * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+ * field-vchar = VCHAR / obs-text
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ *
+ * Note that we ALWAYS forbid obs-fold. Section 3.2.4 explains how
+ * obs-fold is deprecated "except within the message/http media type".
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_field_value(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence is a valid HTTP response status reason-phrase.
+ *
+ * As defined in RFC7230 section 3.1.2:
+ *
+ * reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_reason_phrase(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence is a valid HTTP request-target.
+ *
+ * TODO: Actually check the complete grammar as defined in RFC7230 5.3 and
+ * RFC3986. Currently this just checks whether the sequence is blatantly illegal
+ * (ex: contains CR or LF)
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_request_target(struct aws_byte_cursor cursor);
+
+/**
+ * Return whether this ASCII/UTF-8 sequence starts with ":", as required for pseudo-headers.
+ */
+AWS_HTTP_API
+bool aws_strutil_is_http_pseudo_header_name(struct aws_byte_cursor cursor);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP_STRUTIL_H */
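A small sketch combining the validators above, roughly the way an HTTP/1 encoder might vet a header line; the helper name and its exact semantics are hypothetical.

#include <aws/http/private/strutil.h>

static bool s_example_header_line_is_valid(struct aws_byte_cursor name, struct aws_byte_cursor value) {
    /* e.g. " \t gzip \t " trims to "gzip" per the RFC7230 whitespace rules above */
    struct aws_byte_cursor trimmed_value = aws_strutil_trim_http_whitespace(value);
    return aws_strutil_is_http_token(name) && aws_strutil_is_http_field_value(trimmed_value);
}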
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h
new file mode 100644
index 0000000000..d9e84c5997
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_decoder.h
@@ -0,0 +1,79 @@
+#ifndef AWS_HTTP_WEBSOCKET_DECODER_H
+#define AWS_HTTP_WEBSOCKET_DECODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_impl.h>
+
+/* Called when the non-payload portion of a frame has been decoded. */
+typedef int(aws_websocket_decoder_frame_fn)(const struct aws_websocket_frame *frame, void *user_data);
+
+/* Called repeatedly as the payload is decoded. If a mask was used, the data has been unmasked. */
+typedef int(aws_websocket_decoder_payload_fn)(struct aws_byte_cursor data, void *user_data);
+
+/**
+ * Each state consumes data and/or moves decoder to a subsequent state.
+ */
+enum aws_websocket_decoder_state {
+ AWS_WEBSOCKET_DECODER_STATE_INIT,
+ AWS_WEBSOCKET_DECODER_STATE_OPCODE_BYTE,
+ AWS_WEBSOCKET_DECODER_STATE_LENGTH_BYTE,
+ AWS_WEBSOCKET_DECODER_STATE_EXTENDED_LENGTH,
+ AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK,
+ AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY,
+ AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK,
+ AWS_WEBSOCKET_DECODER_STATE_PAYLOAD,
+ AWS_WEBSOCKET_DECODER_STATE_FRAME_END,
+ AWS_WEBSOCKET_DECODER_STATE_DONE,
+};
+
+struct aws_websocket_decoder {
+ enum aws_websocket_decoder_state state;
+ uint64_t state_bytes_processed; /* For multi-byte states, the number of bytes processed so far */
+ uint8_t state_cache[8]; /* For multi-byte states to cache data that might be split across packets */
+
+ struct aws_websocket_frame current_frame; /* Data about current frame being decoded */
+
+ bool expecting_continuation_data_frame; /* True when the next data frame must be CONTINUATION frame */
+
+ /* True while processing a TEXT "message" (from the start of a TEXT frame,
+ * until the end of the TEXT or CONTINUATION frame with the FIN bit set). */
+ bool processing_text_message;
+ struct aws_utf8_decoder *text_message_validator;
+
+ void *user_data;
+ aws_websocket_decoder_frame_fn *on_frame;
+ aws_websocket_decoder_payload_fn *on_payload;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+void aws_websocket_decoder_init(
+ struct aws_websocket_decoder *decoder,
+ struct aws_allocator *alloc,
+ aws_websocket_decoder_frame_fn *on_frame,
+ aws_websocket_decoder_payload_fn *on_payload,
+ void *user_data);
+
+AWS_HTTP_API
+void aws_websocket_decoder_clean_up(struct aws_websocket_decoder *decoder);
+
+/**
+ * Returns when all data is processed, or a frame and its payload have completed.
+ * `data` will be advanced to reflect the amount of data processed by this call.
+ * `frame_complete` will be set true if this call returned due to completion of a frame.
+ * The `on_frame` and `on_payload` callbacks may each be invoked once as a result of this call.
+ * If an error occurs, the decoder is invalid forevermore.
+ */
+AWS_HTTP_API int aws_websocket_decoder_process(
+ struct aws_websocket_decoder *decoder,
+ struct aws_byte_cursor *data,
+ bool *frame_complete);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_WEBSOCKET_DECODER_H */
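A sketch of the decode loop implied by aws_websocket_decoder_process(): keep feeding the cursor until it is consumed, reacting to the frame/payload callbacks. The callback bodies are placeholders.

#include <aws/http/private/websocket_decoder.h>

static int s_example_on_frame(const struct aws_websocket_frame *frame, void *user_data) {
    (void)frame;
    (void)user_data;
    return AWS_OP_SUCCESS; /* returning an error aborts decoding */
}

static int s_example_on_payload(struct aws_byte_cursor data, void *user_data) {
    (void)data; /* already unmasked if the frame was masked */
    (void)user_data;
    return AWS_OP_SUCCESS;
}

static int s_example_decode(struct aws_allocator *allocator, struct aws_byte_cursor incoming) {
    struct aws_websocket_decoder decoder;
    aws_websocket_decoder_init(&decoder, allocator, s_example_on_frame, s_example_on_payload, NULL /*user_data*/);

    int result = AWS_OP_SUCCESS;
    while (incoming.len > 0) {
        bool frame_complete = false;
        /* `incoming` is advanced past whatever this call consumed */
        if (aws_websocket_decoder_process(&decoder, &incoming, &frame_complete)) {
            result = AWS_OP_ERR; /* decoder is invalid from here on */
            break;
        }
    }

    aws_websocket_decoder_clean_up(&decoder);
    return result;
}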
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h
new file mode 100644
index 0000000000..7fe4949bea
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_encoder.h
@@ -0,0 +1,57 @@
+#ifndef AWS_HTTP_WEBSOCKET_ENCODER_H
+#define AWS_HTTP_WEBSOCKET_ENCODER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_impl.h>
+
+typedef int(aws_websocket_encoder_payload_fn)(struct aws_byte_buf *out_buf, void *user_data);
+
+enum aws_websocket_encoder_state {
+ AWS_WEBSOCKET_ENCODER_STATE_INIT,
+ AWS_WEBSOCKET_ENCODER_STATE_OPCODE_BYTE,
+ AWS_WEBSOCKET_ENCODER_STATE_LENGTH_BYTE,
+ AWS_WEBSOCKET_ENCODER_STATE_EXTENDED_LENGTH,
+ AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK,
+ AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY,
+ AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK,
+ AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD,
+ AWS_WEBSOCKET_ENCODER_STATE_DONE,
+};
+
+struct aws_websocket_encoder {
+ enum aws_websocket_encoder_state state;
+ uint64_t state_bytes_processed;
+ struct aws_websocket_frame frame;
+ bool is_frame_in_progress;
+
+ /* True when the next data frame must be a CONTINUATION frame */
+ bool expecting_continuation_data_frame;
+
+ void *user_data;
+ aws_websocket_encoder_payload_fn *stream_outgoing_payload;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_HTTP_API
+void aws_websocket_encoder_init(
+ struct aws_websocket_encoder *encoder,
+ aws_websocket_encoder_payload_fn *stream_outgoing_payload,
+ void *user_data);
+
+AWS_HTTP_API
+int aws_websocket_encoder_start_frame(struct aws_websocket_encoder *encoder, const struct aws_websocket_frame *frame);
+
+AWS_HTTP_API
+bool aws_websocket_encoder_is_frame_in_progress(const struct aws_websocket_encoder *encoder);
+
+AWS_HTTP_API
+int aws_websocket_encoder_process(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_WEBSOCKET_ENCODER_H */
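A sketch of driving the encoder: the payload callback streams bytes into the output buffer while the caller loops until the frame finishes. The 0x2 opcode (binary, per RFC 6455) and the single-buffer flush handling are assumptions of this example.

#include <aws/http/private/websocket_encoder.h>

/* Payload source: copies as much of a cursor as fits into out_buf each time it is asked. */
static int s_example_stream_payload(struct aws_byte_buf *out_buf, void *user_data) {
    struct aws_byte_cursor *remaining = user_data;
    size_t space = out_buf->capacity - out_buf->len;
    size_t to_write = remaining->len < space ? remaining->len : space;
    aws_byte_buf_write(out_buf, remaining->ptr, to_write);
    aws_byte_cursor_advance(remaining, to_write);
    return AWS_OP_SUCCESS;
}

static int s_example_encode(struct aws_byte_cursor payload, struct aws_byte_buf *wire_buf) {
    struct aws_websocket_encoder encoder;
    aws_websocket_encoder_init(&encoder, s_example_stream_payload, &payload);

    struct aws_websocket_frame frame = {
        .fin = true,
        .opcode = 0x2, /* binary data frame per RFC 6455 */
        .payload_length = payload.len,
    };

    if (aws_websocket_encoder_start_frame(&encoder, &frame)) {
        return AWS_OP_ERR;
    }

    /* assumes wire_buf can hold the whole frame; a real caller flushes and resets between calls */
    while (aws_websocket_encoder_is_frame_in_progress(&encoder)) {
        if (aws_websocket_encoder_process(&encoder, wire_buf)) {
            return AWS_OP_ERR;
        }
        if (wire_buf->len == wire_buf->capacity) {
            break; /* out of space; flush to the channel, reset the buffer, then call process again */
        }
    }
    return AWS_OP_SUCCESS;
}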
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h
new file mode 100644
index 0000000000..c807be2dac
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/private/websocket_impl.h
@@ -0,0 +1,115 @@
+#ifndef AWS_HTTP_WEBSOCKET_IMPL_H
+#define AWS_HTTP_WEBSOCKET_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/websocket.h>
+
+struct aws_http_client_connection_options;
+struct aws_http_connection;
+struct aws_http_make_request_options;
+
+/* RFC-6455 Section 5.2 Base Framing Protocol
+ * Payload length: 7 bits, 7+16 bits, or 7+64 bits
+ *
+ * The length of the "Payload data", in bytes: if 0-125, that is the
+ * payload length. If 126, the following 2 bytes interpreted as a
+ * 16-bit unsigned integer are the payload length. If 127, the
+ * following 8 bytes interpreted as a 64-bit unsigned integer (the
+ * most significant bit MUST be 0) are the payload length. Multibyte
+ * length quantities are expressed in network byte order. Note that
+ * in all cases, the minimal number of bytes MUST be used to encode
+ * the length, for example, the length of a 124-byte-long string
+ * can't be encoded as the sequence 126, 0, 124. The payload length
+ * is the length of the "Extension data" + the length of the
+ * "Application data". The length of the "Extension data" may be
+ * zero, in which case the payload length is the length of the
+ * "Application data".
+ */
+#define AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH 126
+#define AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH 127
+
+#define AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH
+#define AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE 0x000000000000FFFF
+
+#define AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE 0x0000000000010000
+#define AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE 0x7FFFFFFFFFFFFFFF
+
+/* Max bytes necessary to send non-payload parts of a frame */
+#define AWS_WEBSOCKET_MAX_FRAME_OVERHEAD (2 + 8 + 4) /* base + extended-length + masking-key */
+
+/**
+ * Full contents of a websocket frame, excluding the payload.
+ */
+struct aws_websocket_frame {
+ bool fin;
+ bool rsv[3];
+ bool masked;
+ uint8_t opcode;
+ uint64_t payload_length;
+ uint8_t masking_key[4];
+};
+
+struct aws_websocket_handler_options {
+ struct aws_allocator *allocator;
+ struct aws_channel *channel;
+ size_t initial_window_size;
+
+ void *user_data;
+ aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin;
+ aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload;
+ aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete;
+
+ bool is_server;
+ bool manual_window_update;
+};
+
+struct aws_websocket_client_bootstrap_system_vtable {
+ int (*aws_http_client_connect)(const struct aws_http_client_connection_options *options);
+ void (*aws_http_connection_release)(struct aws_http_connection *connection);
+ void (*aws_http_connection_close)(struct aws_http_connection *connection);
+ struct aws_channel *(*aws_http_connection_get_channel)(struct aws_http_connection *connection);
+ struct aws_http_stream *(*aws_http_connection_make_request)(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+ int (*aws_http_stream_activate)(struct aws_http_stream *stream);
+ void (*aws_http_stream_release)(struct aws_http_stream *stream);
+ struct aws_http_connection *(*aws_http_stream_get_connection)(const struct aws_http_stream *stream);
+ void (*aws_http_stream_update_window)(struct aws_http_stream *stream, size_t increment_size);
+ int (*aws_http_stream_get_incoming_response_status)(const struct aws_http_stream *stream, int *out_status);
+ struct aws_websocket *(*aws_websocket_handler_new)(const struct aws_websocket_handler_options *options);
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Returns printable name for opcode as c-string.
+ */
+AWS_HTTP_API
+const char *aws_websocket_opcode_str(uint8_t opcode);
+
+/**
+ * Return total number of bytes needed to encode frame and its payload
+ */
+AWS_HTTP_API
+uint64_t aws_websocket_frame_encoded_size(const struct aws_websocket_frame *frame);
+
+/**
+ * Create a websocket channel-handler and insert it into the channel.
+ */
+AWS_HTTP_API
+struct aws_websocket *aws_websocket_handler_new(const struct aws_websocket_handler_options *options);
+
+/**
+ * Override the functions that websocket bootstrap uses to interact with external systems.
+ * Used for unit testing.
+ */
+AWS_HTTP_API
+void aws_websocket_client_bootstrap_set_system_vtable(
+ const struct aws_websocket_client_bootstrap_system_vtable *system_vtable);
+
+AWS_EXTERN_C_END
+#endif /* AWS_HTTP_WEBSOCKET_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h b/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h
new file mode 100644
index 0000000000..cd4c92107d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/proxy.h
@@ -0,0 +1,570 @@
+#ifndef AWS_PROXY_H
+#define AWS_PROXY_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/ref_count.h>
+#include <aws/http/http.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+
+struct aws_http_client_connection_options;
+struct aws_http_connection_manager_options;
+
+struct aws_http_message;
+struct aws_http_header;
+
+struct aws_http_proxy_config;
+struct aws_http_proxy_negotiator;
+struct aws_http_proxy_strategy;
+
+struct aws_socket_channel_bootstrap_options;
+
+/**
+ * @Deprecated - Supported proxy authentication modes. Superseded by proxy strategy.
+ */
+enum aws_http_proxy_authentication_type {
+ AWS_HPAT_NONE = 0,
+ AWS_HPAT_BASIC,
+};
+
+enum aws_http_proxy_env_var_type {
+ /**
+ * Default.
+ * Disable reading from environment variable for proxy.
+ */
+ AWS_HPEV_DISABLE = 0,
+ /**
+ * Enable fetching the proxy URL from an environment variable when the connection manager's manual proxy options are not set.
+ * The HTTPS_PROXY/https_proxy environment variables are checked when the main connection uses TLS.
+ * The HTTP_PROXY/http_proxy environment variables are checked when the main connection does NOT use TLS.
+ * The lower-case version takes precedence.
+ */
+ AWS_HPEV_ENABLE,
+};
+
+/**
+ * Supported proxy connection types
+ */
+enum aws_http_proxy_connection_type {
+ /**
+ * Deprecated, but 0-valued for backwards compatibility
+ *
+ * If tls options are provided (for the main connection) then treat the proxy as a tunneling proxy
+ * If tls options are not provided (for the main connection), then treat the proxy as a forwarding proxy
+ */
+ AWS_HPCT_HTTP_LEGACY = 0,
+
+ /**
+ * Use the proxy to forward http requests. Attempting to use both this mode and TLS on the tunnel destination
+ * is a configuration error.
+ */
+ AWS_HPCT_HTTP_FORWARD,
+
+ /**
+ * Use the proxy to establish a connection to a remote endpoint via a CONNECT request through the proxy.
+ * Works for both plaintext and tls connections.
+ */
+ AWS_HPCT_HTTP_TUNNEL,
+};
+
+/*
+ * Configuration for using proxy from environment variable.
+ * Zero out as default settings.
+ */
+struct proxy_env_var_settings {
+ enum aws_http_proxy_env_var_type env_var_type;
+ /*
+ * Optional.
+ * If not set:
+ * If tls options are provided (for the main connection) use tunnel proxy type
+ * If tls options are not provided (for the main connection) use forward proxy type
+ */
+ enum aws_http_proxy_connection_type connection_type;
+ /*
+ * Optional.
+ * If not set, a default tls option will be created when https is used for the local-to-proxy connection.
+ * Must be distinct from the tls_connection_options in aws_http_connection_manager_options.
+ */
+ const struct aws_tls_connection_options *tls_options;
+};
+
+struct aws_http_proxy_strategy;
+
+/**
+ * Options for http proxy server usage
+ */
+struct aws_http_proxy_options {
+
+ /**
+ * Type of proxy connection to make
+ */
+ enum aws_http_proxy_connection_type connection_type;
+
+ /**
+ * Proxy host to connect to
+ */
+ struct aws_byte_cursor host;
+
+ /**
+ * Port to make the proxy connection to
+ */
+ uint16_t port;
+
+ /**
+ * Optional.
+ * TLS configuration for the Local <-> Proxy connection
+ * Must be distinct from the TLS options in the parent aws_http_connection_options struct
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Optional
+ * Advanced option that allows the user to create a custom strategy that gives low-level control of
+ * certain logical flows within the proxy logic.
+ *
+ * For tunneling proxies it allows custom retry and adaptive negotiation of CONNECT requests.
+ * For forwarding proxies it allows custom request transformations.
+ */
+ struct aws_http_proxy_strategy *proxy_strategy;
+
+ /**
+ * @Deprecated - What type of proxy authentication to use, if any.
+ * Replaced by instantiating a proxy_strategy
+ */
+ enum aws_http_proxy_authentication_type auth_type;
+
+ /**
+ * @Deprecated - Optional user name to use for basic authentication
+ * Replaced by instantiating a proxy_strategy via aws_http_proxy_strategy_new_basic_auth()
+ */
+ struct aws_byte_cursor auth_username;
+
+ /**
+ * @Deprecated - Optional password to use for basic authentication
+ * Replaced by instantiating a proxy_strategy via aws_http_proxy_strategy_new_basic_auth()
+ */
+ struct aws_byte_cursor auth_password;
+};
+
+/**
+ * Synchronous (for now) callback function to fetch a token used in modifying CONNECT requests
+ */
+typedef struct aws_string *(aws_http_proxy_negotiation_get_token_sync_fn)(void *user_data, int *out_error_code);
+
+/**
+ * Synchronous (for now) callback function to fetch a token used in modifying CONNECT request. Includes a (byte string)
+ * context intended to be used as part of a challenge-response flow.
+ */
+typedef struct aws_string *(aws_http_proxy_negotiation_get_challenge_token_sync_fn)(
+ void *user_data,
+ const struct aws_byte_cursor *challenge_context,
+ int *out_error_code);
+
+/**
+ * Proxy negotiation logic must call this function to indicate an unsuccessful outcome
+ */
+typedef void(aws_http_proxy_negotiation_terminate_fn)(
+ struct aws_http_message *message,
+ int error_code,
+ void *internal_proxy_user_data);
+
+/**
+ * Proxy negotiation logic must call this function to forward the potentially-mutated request back to the proxy
+ * connection logic.
+ */
+typedef void(aws_http_proxy_negotiation_http_request_forward_fn)(
+ struct aws_http_message *message,
+ void *internal_proxy_user_data);
+
+/**
+ * User-supplied transform callback which implements the proxy request flow and ultimately, across all execution
+ * pathways, invokes either the terminate function or the forward function appropriately.
+ *
+ * For tunneling proxy connections, this request flow transform only applies to the CONNECT stage of proxy
+ * connection establishment.
+ *
+ * For forwarding proxy connections, this request flow transform applies to every single http request that goes
+ * out on the connection.
+ *
+ * Forwarding proxy connections cannot yet support a truly async request transform without major surgery on http
+ * stream creation, so for now, we split into an async version (for tunneling proxies) and a separate
+ * synchronous version for forwarding proxies. Also forwarding proxies are a kind of legacy dead-end in some
+ * sense.
+ *
+ */
+typedef void(aws_http_proxy_negotiation_http_request_transform_async_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data);
+
+typedef int(aws_http_proxy_negotiation_http_request_transform_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message);
+
+/**
+ * Tunneling proxy connections only. A callback that lets the negotiator examine the headers in the
+ * response to the most recent CONNECT request as they arrive.
+ */
+typedef int(aws_http_proxy_negotiation_connect_on_incoming_headers_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers);
+
+/**
+ * Tunneling proxy connections only. A callback that lets the negotiator examine the status code of the
+ * response to the most recent CONNECT request.
+ */
+typedef int(aws_http_proxy_negotiator_connect_status_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code);
+
+/**
+ * Tunneling proxy connections only. A callback that lets the negotiator examine the body of the response
+ * to the most recent CONNECT request.
+ */
+typedef int(aws_http_proxy_negotiator_connect_on_incoming_body_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ const struct aws_byte_cursor *data);
+
+/*
+ * Control value that lets the http proxy implementation know if and how to retry a CONNECT request based on
+ * the proxy negotiator's state.
+ */
+enum aws_http_proxy_negotiation_retry_directive {
+ /*
+ * Stop trying to connect through the proxy and give up.
+ */
+ AWS_HPNRD_STOP,
+
+ /*
+ * Establish a new connection to the proxy before making the next CONNECT request
+ */
+ AWS_HPNRD_NEW_CONNECTION,
+
+ /*
+ * Reuse the existing connection to make the next CONNECT request
+ */
+ AWS_HPNRD_CURRENT_CONNECTION,
+};
+
+typedef enum aws_http_proxy_negotiation_retry_directive(aws_http_proxy_negotiator_get_retry_directive_fn)(
+ struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Vtable for forwarding-based proxy negotiators
+ */
+struct aws_http_proxy_negotiator_forwarding_vtable {
+ aws_http_proxy_negotiation_http_request_transform_fn *forward_request_transform;
+};
+
+/**
+ * Vtable for tunneling-based proxy negotiators
+ */
+struct aws_http_proxy_negotiator_tunnelling_vtable {
+ aws_http_proxy_negotiation_http_request_transform_async_fn *connect_request_transform;
+
+ aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers_callback;
+ aws_http_proxy_negotiator_connect_status_fn *on_status_callback;
+ aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body_callback;
+
+ aws_http_proxy_negotiator_get_retry_directive_fn *get_retry_directive;
+};
+
+/*
+ * Base definition of a proxy negotiator.
+ *
+ * A negotiator works differently based on what kind of proxy connection is being asked for:
+ *
+ * (1) Tunneling - In a tunneling proxy connection, the connect_request_transform is invoked on every CONNECT request.
+ * The connect_request_transform implementation *MUST*, in turn, eventually call one of the terminate or forward
+ * functions it gets supplied with.
+ *
+ * Every CONNECT request, if a response is obtained, will properly invoke the response handling callbacks supplied
+ * in the tunneling vtable.
+ *
+ * (2) Forwarding - In a forwarding proxy connection, the forward_request_transform is invoked on every request sent out
+ * on the connection.
+ */
+struct aws_http_proxy_negotiator {
+ struct aws_ref_count ref_count;
+
+ void *impl;
+
+ union {
+ struct aws_http_proxy_negotiator_forwarding_vtable *forwarding_vtable;
+ struct aws_http_proxy_negotiator_tunnelling_vtable *tunnelling_vtable;
+ } strategy_vtable;
+};
+
+/*********************************************************************************************/
+
+typedef struct aws_http_proxy_negotiator *(aws_http_proxy_strategy_create_negotiator_fn)(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator);
+
+struct aws_http_proxy_strategy_vtable {
+ aws_http_proxy_strategy_create_negotiator_fn *create_negotiator;
+};
+
+struct aws_http_proxy_strategy {
+ struct aws_ref_count ref_count;
+ struct aws_http_proxy_strategy_vtable *vtable;
+ void *impl;
+ enum aws_http_proxy_connection_type proxy_connection_type;
+};
+
+/*
+ * Options necessary to create a basic authentication proxy strategy
+ */
+struct aws_http_proxy_strategy_basic_auth_options {
+
+ /* type of proxy connection being established, must be forwarding or tunnel */
+ enum aws_http_proxy_connection_type proxy_connection_type;
+
+ /* user name to use in basic authentication */
+ struct aws_byte_cursor user_name;
+
+ /* password to use in basic authentication */
+ struct aws_byte_cursor password;
+};
+
+/*
+ * Options necessary to create a (synchronous) kerberos authentication proxy strategy
+ */
+struct aws_http_proxy_strategy_tunneling_kerberos_options {
+
+ aws_http_proxy_negotiation_get_token_sync_fn *get_token;
+
+ void *get_token_user_data;
+};
+
+/*
+ * Options necessary to create a (synchronous) ntlm authentication proxy strategy
+ */
+struct aws_http_proxy_strategy_tunneling_ntlm_options {
+
+ aws_http_proxy_negotiation_get_token_sync_fn *get_token;
+
+ aws_http_proxy_negotiation_get_challenge_token_sync_fn *get_challenge_token;
+
+ void *get_challenge_token_user_data;
+};
+
+/*
+ * Options necessary to create an adaptive sequential strategy that tries one or more of kerberos and ntlm (in that
+ * order, if both are active). If an options struct is NULL, then that strategy will not be used.
+ */
+struct aws_http_proxy_strategy_tunneling_adaptive_options {
+ /*
+ * If non-null, will insert a kerberos proxy strategy into the adaptive sequence
+ */
+ struct aws_http_proxy_strategy_tunneling_kerberos_options *kerberos_options;
+
+ /*
+ * If non-null will insert an ntlm proxy strategy into the adaptive sequence
+ */
+ struct aws_http_proxy_strategy_tunneling_ntlm_options *ntlm_options;
+};
+
+/*
+ * Options necessary to create a sequential proxy strategy.
+ */
+struct aws_http_proxy_strategy_tunneling_sequence_options {
+ struct aws_http_proxy_strategy **strategies;
+
+ uint32_t strategy_count;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Take a reference to an http proxy negotiator
+ * @param proxy_negotiator negotiator to take a reference to
+ * @return the negotiator
+ */
+AWS_HTTP_API
+struct aws_http_proxy_negotiator *aws_http_proxy_negotiator_acquire(struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Release a reference to an http proxy negotiator
+ * @param proxy_negotiator negotiator to release a reference to
+ */
+AWS_HTTP_API
+void aws_http_proxy_negotiator_release(struct aws_http_proxy_negotiator *proxy_negotiator);
+
+/**
+ * Creates a new proxy negotiator from a proxy strategy
+ * @param allocator memory allocator to use
+ * @param strategy strategy to create a new negotiator for
+ * @return a new proxy negotiator if successful, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_negotiator *aws_http_proxy_strategy_create_negotiator(
+ struct aws_http_proxy_strategy *strategy,
+ struct aws_allocator *allocator);
+
+/**
+ * Take a reference to an http proxy strategy
+ * @param proxy_strategy strategy to take a reference to
+ * @return the strategy
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_acquire(struct aws_http_proxy_strategy *proxy_strategy);
+
+/**
+ * Release a reference to an http proxy strategy
+ * @param proxy_strategy strategy to release a reference to
+ */
+AWS_HTTP_API
+void aws_http_proxy_strategy_release(struct aws_http_proxy_strategy *proxy_strategy);
+
+/**
+ * A constructor for a proxy strategy that performs basic authentication by adding the appropriate
+ * header and header value to requests or CONNECT requests.
+ *
+ * @param allocator memory allocator to use
+ * @param config basic authentication configuration info
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_basic_auth(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_basic_auth_options *config);
+
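A sketch of wiring the basic-auth strategy into the aws_http_proxy_options struct declared earlier, for a tunneling proxy. Host, port, and credentials are placeholders, and reference handling is abbreviated.

static struct aws_http_proxy_strategy *s_example_configure_proxy(
    struct aws_allocator *allocator,
    struct aws_http_proxy_options *proxy_options /* out */) {

    struct aws_http_proxy_strategy_basic_auth_options basic_auth_config = {
        .proxy_connection_type = AWS_HPCT_HTTP_TUNNEL,
        .user_name = aws_byte_cursor_from_c_str("proxy-user"),
        .password = aws_byte_cursor_from_c_str("proxy-password"),
    };
    struct aws_http_proxy_strategy *strategy =
        aws_http_proxy_strategy_new_basic_auth(allocator, &basic_auth_config);
    if (strategy == NULL) {
        return NULL;
    }

    struct aws_http_proxy_options options = {
        .connection_type = AWS_HPCT_HTTP_TUNNEL,
        .host = aws_byte_cursor_from_c_str("proxy.example.com"),
        .port = 8080,
        .proxy_strategy = strategy, /* preferred over the deprecated auth_type/auth_username/auth_password fields */
    };
    *proxy_options = options;

    /* Caller releases the strategy with aws_http_proxy_strategy_release() once the connection is set up. */
    return strategy;
}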
+/**
+ * Constructor for an adaptive tunneling proxy strategy. This strategy attempts a vanilla CONNECT and if that
+ * fails it may make followup CONNECT attempts using kerberos or ntlm tokens, based on configuration and proxy
+ * response properties.
+ *
+ * @param allocator memory allocator to use
+ * @param config configuration options for the strategy
+ * @return a new proxy strategy if successfully constructed, otherwise NULL
+ */
+AWS_HTTP_API
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_adaptive(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_adaptive_options *config);
+
+/*
+ * aws_http_proxy_config is the persistent, memory-managed version of aws_http_proxy_options
+ *
+ * This is a set of APIs for creating, destroying and converting between them
+ */
+
+/**
+ * Create a persistent proxy configuration from http connection options
+ * @param allocator memory allocator to use
+ * @param options http connection options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_connection_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *options);
+
+/**
+ * Create a persistent proxy configuration from http connection manager options
+ * @param allocator memory allocator to use
+ * @param options http connection manager options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_manager_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options);
+
+/**
+ * Create a persistent proxy configuration from non-persistent proxy options. The resulting
+ * proxy configuration assumes a tunneling connection type.
+ *
+ * @param allocator memory allocator to use
+ * @param options http proxy options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_tunneling_from_proxy_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *options);
+
+/**
+ * Create a persistent proxy configuration from non-persistent proxy options.
+ * Legacy connection type of proxy options will be rejected.
+ *
+ * @param allocator memory allocator to use
+ * @param options http proxy options to source proxy configuration from
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *options);
+
+/**
+ * Create a persistent proxy configuration from non-persistent proxy options.
+ *
+ * @param allocator memory allocator to use
+ * @param options http proxy options to source proxy configuration from
+ * @param is_tls_connection tls connection info of the main connection to determine connection_type
+ * when the connection_type is legacy.
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options_with_tls_info(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *proxy_options,
+ bool is_tls_connection);
+
+/**
+ * Clones an existing proxy configuration. A refactor could remove this (do a "move" between the old and new user
+ * data in the one spot it's used) but that should wait until we have better test cases for the logic where this
+ * gets invoked (ntlm/kerberos chains).
+ *
+ * @param allocator memory allocator to use
+ * @param proxy_config http proxy configuration to clone
+ * @return
+ */
+AWS_HTTP_API
+struct aws_http_proxy_config *aws_http_proxy_config_new_clone(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_config *proxy_config);
+
+/**
+ * Destroys an http proxy configuration
+ * @param config http proxy configuration to destroy
+ */
+AWS_HTTP_API
+void aws_http_proxy_config_destroy(struct aws_http_proxy_config *config);
+
+/**
+ * Initializes non-persistent http proxy options from a persistent http proxy configuration
+ * @param options http proxy options to initialize
+ * @param config the http proxy config to use as an initialization source
+ */
+AWS_HTTP_API
+void aws_http_proxy_options_init_from_config(
+ struct aws_http_proxy_options *options,
+ const struct aws_http_proxy_config *config);
+
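A short sketch of the persistent/non-persistent round trip described by the functions above; error handling is minimal and the lifetime comments are assumptions.

static int s_example_persist_proxy_options(
    struct aws_allocator *allocator,
    const struct aws_http_proxy_options *original_options) {

    /* Persistent copy, assuming a tunneling connection type. */
    struct aws_http_proxy_config *config =
        aws_http_proxy_config_new_tunneling_from_proxy_options(allocator, original_options);
    if (config == NULL) {
        return AWS_OP_ERR;
    }

    /* Later, rebuild non-persistent options from the persistent config. */
    struct aws_http_proxy_options rebuilt_options;
    aws_http_proxy_options_init_from_config(&rebuilt_options, config);

    /* ... use rebuilt_options while config stays alive ... */

    aws_http_proxy_config_destroy(config);
    return AWS_OP_SUCCESS;
}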
+/**
+ * Establish an arbitrary protocol connection through an http proxy via tunneling CONNECT. Alpn is
+ * not required for this connection process to succeed, but we encourage its use if available.
+ *
+ * @param channel_options configuration options for the socket level connection
+ * @param proxy_options configuration options for the proxy connection
+ *
+ * @return AWS_OP_SUCCESS if the asynchronous channel kickoff succeeded, AWS_OP_ERR otherwise
+ */
+AWS_HTTP_API int aws_http_proxy_new_socket_channel(
+ struct aws_socket_channel_bootstrap_options *channel_options,
+ const struct aws_http_proxy_options *proxy_options);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_PROXY_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h b/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h
new file mode 100644
index 0000000000..a4ff6da947
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/request_response.h
@@ -0,0 +1,1072 @@
+#ifndef AWS_HTTP_REQUEST_RESPONSE_H
+#define AWS_HTTP_REQUEST_RESPONSE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http_connection;
+struct aws_input_stream;
+
+/**
+ * A stream exists for the duration of a request/response exchange.
+ * A client creates a stream to send a request and receive a response.
+ * A server creates a stream to receive a request and send a response.
+ * In http/2, a push-promise stream can be sent by a server and received by a client.
+ */
+struct aws_http_stream;
+
+/**
+ * Controls whether a header's strings may be compressed by encoding the index of
+ * strings in a cache, rather than encoding the literal string.
+ *
+ * This setting has no effect on HTTP/1.x connections.
+ * On HTTP/2 connections this controls HPACK behavior.
+ * See RFC-7541 Section 7.1 for security considerations.
+ */
+enum aws_http_header_compression {
+ /**
+ * Compress header by encoding the cached index of its strings,
+ * or by updating the cache to contain these strings for future reference.
+ * Best for headers that are sent repeatedly.
+ * This is the default setting.
+ */
+ AWS_HTTP_HEADER_COMPRESSION_USE_CACHE,
+
+ /**
+ * Encode header strings literally.
+ * If an intermediary re-broadcasts the headers, it is permitted to use cache.
+ * Best for unique headers that are unlikely to repeat.
+ */
+ AWS_HTTP_HEADER_COMPRESSION_NO_CACHE,
+
+ /**
+ * Encode header strings literally and forbid all intermediaries from using
+ * cache when re-broadcasting.
+ * Best for header fields that are highly valuable or sensitive to recovery.
+ */
+ AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE,
+};
+
+/**
+ * A lightweight HTTP header struct.
+ * Note that the underlying strings are not owned by the byte cursors.
+ */
+struct aws_http_header {
+ struct aws_byte_cursor name;
+ struct aws_byte_cursor value;
+
+ /* Controls whether the header's strings may be compressed via caching. */
+ enum aws_http_header_compression compression;
+};
+
+/**
+ * A transformable block of HTTP headers.
+ * Provides a nice API for getting/setting header names and values.
+ *
+ * All strings are copied and stored within this datastructure.
+ * The index of a given header may change any time headers are modified.
+ * When iterating headers, the following ordering rules apply:
+ *
+ * - Headers with the same name will always be in the same order, relative to one another.
+ * If "A: one" is added before "A: two", then "A: one" will always precede "A: two".
+ *
+ * - Headers with different names could be in any order, relative to one another.
+ * If "A: one" is seen before "B: bee" in one iteration, you might see "B: bee" before "A: one" on the next.
+ */
+struct aws_http_headers;
+
+/**
+ * Header block type.
+ * INFORMATIONAL: Header block for 1xx informational (interim) responses.
+ * MAIN: Main header block sent with request or response.
+ * TRAILING: Headers sent after the body of a request or response.
+ */
+enum aws_http_header_block {
+ AWS_HTTP_HEADER_BLOCK_MAIN,
+ AWS_HTTP_HEADER_BLOCK_INFORMATIONAL,
+ AWS_HTTP_HEADER_BLOCK_TRAILING,
+};
+
+/**
+ * The definition for an outgoing HTTP request or response.
+ * The message may be transformed (ex: signing the request) before its data is eventually sent.
+ *
+ * The message keeps internal copies of its trivial strings (method, path, headers)
+ * but does NOT take ownership of its body stream.
+ *
+ * A language binding would likely present this as an HttpMessage base class with
+ * HttpRequest and HttpResponse subclasses.
+ */
+struct aws_http_message;
+
+/**
+ * Function to invoke when a message transformation completes.
+ * This function MUST be invoked or the application will soft-lock.
+ * `message` and `complete_ctx` must be the same pointers provided to the `aws_http_message_transform_fn`.
+ * `error_code` should be AWS_ERROR_SUCCESS if transformation was successful,
+ * otherwise pass a different AWS_ERROR_X value.
+ */
+typedef void(
+ aws_http_message_transform_complete_fn)(struct aws_http_message *message, int error_code, void *complete_ctx);
+
+/**
+ * A function that may modify a request or response before it is sent.
+ * The transformation may be asynchronous or immediate.
+ * The user MUST invoke the `complete_fn` when transformation is complete or the application will soft-lock.
+ * When invoking the `complete_fn`, pass along the `message` and `complete_ctx` provided here and an error code.
+ * The error code should be AWS_ERROR_SUCCESS if transformation was successful,
+ * otherwise pass a different AWS_ERROR_X value.
+ */
+typedef void(aws_http_message_transform_fn)(
+ struct aws_http_message *message,
+ void *user_data,
+ aws_http_message_transform_complete_fn *complete_fn,
+ void *complete_ctx);
+
+/**
+ * Invoked repeatedly as headers are received.
+ * At this point, aws_http_stream_get_incoming_response_status() can be called for the client.
+ * And aws_http_stream_get_incoming_request_method() and aws_http_stream_get_incoming_request_uri() can be called for
+ * the server.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(aws_http_on_incoming_headers_fn)(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data);
+
+/**
+ * Invoked when the incoming header block of this type (informational/main/trailing) has been completely read.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(aws_http_on_incoming_header_block_done_fn)(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data);
+
+/**
+ * Called repeatedly as body data is received.
+ * The data must be copied immediately if you wish to preserve it.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Note that, if the connection is using manual_window_management then the window
+ * size has shrunk by the amount of body data received. If the window size
+ * reaches 0 no further data will be received. Increment the window size with
+ * aws_http_stream_update_window().
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(
+ aws_http_on_incoming_body_fn)(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data);
+
+/**
+ * Invoked when request has been completely read.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the stream.
+ * Return AWS_OP_ERR to indicate failure and cancel the stream.
+ */
+typedef int(aws_http_on_incoming_request_done_fn)(struct aws_http_stream *stream, void *user_data);
+
+/**
+ * Invoked when request/response stream is completely destroyed.
+ * This may be invoked synchronously when aws_http_stream_release() is called.
+ * This is invoked even if the stream is never activated.
+ */
+typedef void(aws_http_on_stream_complete_fn)(struct aws_http_stream *stream, int error_code, void *user_data);
+
+/**
+ * Invoked when the request/response stream is completely destroyed.
+ * This can be invoked within the same thread that releases the refcount on the http stream.
+ */
+typedef void(aws_http_on_stream_destroy_fn)(void *user_data);
+
+/**
+ * Options for creating a stream which sends a request from the client and receives a response from the server.
+ */
+struct aws_http_make_request_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Required.
+ */
+ size_t self_size;
+
+ /**
+ * Definition for outgoing request.
+ * Required.
+ * The request will be kept alive via refcounting until the request completes.
+ */
+ struct aws_http_message *request;
+
+ void *user_data;
+
+ /**
+ * Invoked repeatedly as headers are received.
+ * Optional.
+ * See `aws_http_on_incoming_headers_fn`.
+ */
+ aws_http_on_incoming_headers_fn *on_response_headers;
+
+ /**
+ * Invoked when response header block has been completely read.
+ * Optional.
+ * See `aws_http_on_incoming_header_block_done_fn`.
+ */
+ aws_http_on_incoming_header_block_done_fn *on_response_header_block_done;
+
+ /**
+ * Invoked repeatedly as body data is received.
+ * Optional.
+ * See `aws_http_on_incoming_body_fn`.
+ */
+ aws_http_on_incoming_body_fn *on_response_body;
+
+ /**
+ * Invoked when request/response stream is complete, whether successful or unsuccessful
+ * Optional.
+ * See `aws_http_on_stream_complete_fn`.
+ */
+ aws_http_on_stream_complete_fn *on_complete;
+
+ /* Callback for when the request/response stream is completely destroyed. */
+ aws_http_on_stream_destroy_fn *on_destroy;
+
+ /**
+ * When using HTTP/2, request body data will be provided over time. The stream will only be polled for writing
+ * when data has been supplied via `aws_http2_stream_write_data`
+ */
+ bool http2_use_manual_data_writes;
+};
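A client-side sketch of filling aws_http_make_request_options and starting a stream. The callback bodies are stubs; aws_http_connection_make_request() and aws_http_stream_activate() are assumed to have the shapes shown in the websocket bootstrap vtable earlier in this patch.

static int s_example_on_response_headers(
    struct aws_http_stream *stream,
    enum aws_http_header_block header_block,
    const struct aws_http_header *header_array,
    size_t num_headers,
    void *user_data) {
    (void)stream; (void)header_block; (void)header_array; (void)num_headers; (void)user_data;
    return AWS_OP_SUCCESS;
}

static int s_example_on_response_body(
    struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
    (void)stream; (void)data; (void)user_data; /* copy `data` here if it must outlive the callback */
    return AWS_OP_SUCCESS;
}

static void s_example_on_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
    (void)stream; (void)error_code; (void)user_data;
    /* the stream is released elsewhere via aws_http_stream_release() once no longer needed */
}

static int s_example_send_request(struct aws_http_connection *connection, struct aws_http_message *request) {
    struct aws_http_make_request_options options = {
        .self_size = sizeof(struct aws_http_make_request_options),
        .request = request,
        .on_response_headers = s_example_on_response_headers,
        .on_response_body = s_example_on_response_body,
        .on_complete = s_example_on_complete,
    };

    struct aws_http_stream *stream = aws_http_connection_make_request(connection, &options);
    if (stream == NULL) {
        return AWS_OP_ERR;
    }
    /* the stream does not start until it is activated */
    return aws_http_stream_activate(stream);
}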
+
+struct aws_http_request_handler_options {
+ /* Set to sizeof() this struct, used for versioning. */
+ size_t self_size;
+
+ /**
+ * Required.
+ */
+ struct aws_http_connection *server_connection;
+
+ /**
+ * user_data passed to callbacks.
+ * Optional.
+ */
+ void *user_data;
+
+ /**
+ * Invoked repeatedly as headers are received.
+ * Optional.
+ * See `aws_http_on_incoming_headers_fn`.
+ */
+ aws_http_on_incoming_headers_fn *on_request_headers;
+
+ /**
+ * Invoked when the request header block has been completely read.
+ * Optional.
+ * See `aws_http_on_incoming_header_block_done_fn`.
+ */
+ aws_http_on_incoming_header_block_done_fn *on_request_header_block_done;
+
+ /**
+ * Invoked as body data is received.
+ * Optional.
+ * See `aws_http_on_incoming_body_fn`.
+ */
+ aws_http_on_incoming_body_fn *on_request_body;
+
+ /**
+ * Invoked when request has been completely read.
+ * Optional.
+ * See `aws_http_on_incoming_request_done_fn`.
+ */
+ aws_http_on_incoming_request_done_fn *on_request_done;
+
+ /**
+ * Invoked when request/response stream is complete, whether successful or unsuccessful
+ * Optional.
+ * See `aws_http_on_stream_complete_fn`.
+ */
+ aws_http_on_stream_complete_fn *on_complete;
+
+ /* Callback for when the request/response stream is completely destroyed. */
+ aws_http_on_stream_destroy_fn *on_destroy;
+};
+
+/**
+ * Invoked when the data stream of an outgoing HTTP write operation is no longer in use.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * @param stream HTTP-stream this write operation was submitted to.
+ * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent.
+ * Any other error_code indicates that the HTTP-stream is in the process of terminating.
+ * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ * the stream's termination has nothing to do with this write operation.
+ * Any other non-zero error code indicates a problem with this particular write
+ * operation's data.
+ * @param user_data User data for this write operation.
+ */
+typedef void aws_http_stream_write_complete_fn(struct aws_http_stream *stream, int error_code, void *user_data);
+
+/**
+ * Invoked when the data of an outgoing HTTP/1.1 chunk is no longer in use.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * @param stream HTTP-stream this chunk was submitted to.
+ * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent.
+ * Any other error_code indicates that the HTTP-stream is in the process of terminating.
+ * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ * the stream's termination has nothing to do with this chunk.
+ * Any other non-zero error code indicates a problem with this particular chunk's data.
+ * @param user_data User data for this chunk.
+ */
+typedef aws_http_stream_write_complete_fn aws_http1_stream_write_chunk_complete_fn;
+
+/**
+ * HTTP/1.1 chunk extension for chunked encoding.
+ * Note that the underlying strings are not owned by the byte cursors.
+ */
+struct aws_http1_chunk_extension {
+ struct aws_byte_cursor key;
+ struct aws_byte_cursor value;
+};
+
+/**
+ * Encoding options for an HTTP/1.1 chunked transfer encoding chunk.
+ */
+struct aws_http1_chunk_options {
+ /*
+ * The data stream to be sent in a single chunk.
+ * The aws_input_stream must remain valid until on_complete is invoked.
+ * May be NULL in the final chunk with size 0.
+ *
+ * Note that, for Transfer-Encodings other than "chunked", the data is
+ * expected to already have that encoding applied. For example, if
+ * "Transfer-Encoding: gzip, chunked" then the data from aws_input_stream
+ * should already be in gzip format.
+ */
+ struct aws_input_stream *chunk_data;
+
+ /*
+ * Size of the chunk_data input stream in bytes.
+ */
+ uint64_t chunk_data_size;
+
+ /**
+ * A pointer to an array of chunked extensions.
+ * The num_extensions must match the length of the array.
+ * This data is deep-copied by aws_http1_stream_write_chunk(),
+ * it does not need to remain valid until on_complete is invoked.
+ */
+ struct aws_http1_chunk_extension *extensions;
+
+ /**
+ * The number of elements defined in the extensions array.
+ */
+ size_t num_extensions;
+
+ /**
+ * Invoked when the chunk data is no longer in use, whether or not it was successfully sent.
+ * Optional.
+ * See `aws_http1_stream_write_chunk_complete_fn`.
+ */
+ aws_http1_stream_write_chunk_complete_fn *on_complete;
+
+ /**
+ * User provided data passed to the on_complete callback on its invocation.
+ */
+ void *user_data;
+};
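A sketch of submitting one chunk with these options. It assumes an aws_http1_stream_write_chunk() entry point mirroring the http1_write_chunk vtable slot in request_response_impl.h; the chunk's input stream and size come from the caller.

static void s_example_on_chunk_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
    (void)stream; (void)error_code;
    /* the chunk's aws_input_stream may be destroyed here; `user_data` could carry it */
    (void)user_data;
}

static int s_example_write_one_chunk(
    struct aws_http_stream *http1_stream,
    struct aws_input_stream *chunk_body,
    uint64_t chunk_body_size) {

    struct aws_http1_chunk_options chunk_options = {
        .chunk_data = chunk_body, /* must stay valid until on_complete fires */
        .chunk_data_size = chunk_body_size,
        .on_complete = s_example_on_chunk_complete,
        .user_data = chunk_body,
    };

    return aws_http1_stream_write_chunk(http1_stream, &chunk_options);
}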
+
+/**
+ * Invoked when the data of an outgoing HTTP2 data frame is no longer in use.
+ * This is always invoked on the HTTP connection's event-loop thread.
+ *
+ * @param stream HTTP2-stream this write was submitted to.
+ * @param error_code If error_code is AWS_ERROR_SUCCESS (0), the data was successfully sent.
+ * Any other error_code indicates that the HTTP-stream is in the process of terminating.
+ * If the error_code is AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ * the stream's termination has nothing to do with this write.
+ * Any other non-zero error code indicates a problem with this particular write's data.
+ * @param user_data User data for this write.
+ */
+typedef aws_http_stream_write_complete_fn aws_http2_stream_write_data_complete_fn;
+
+/**
+ * Encoding options for manual H2 data frame writes
+ */
+struct aws_http2_stream_write_data_options {
+ /**
+ * The data to be sent.
+ * Optional.
+ * If not set, input stream with length 0 will be used.
+ */
+ struct aws_input_stream *data;
+
+ /**
+ * Set true when it's the last chunk to be sent.
+ * After a write with end_stream, no more data write will be accepted.
+ */
+ bool end_stream;
+
+ /**
+ * Invoked when the data stream is no longer in use, whether or not it was successfully sent.
+ * Optional.
+ * See `aws_http2_stream_write_data_complete_fn`.
+ */
+ aws_http2_stream_write_data_complete_fn *on_complete;
+
+ /**
+ * User provided data passed to the on_complete callback on its invocation.
+ */
+ void *user_data;
+};
+
+#define AWS_HTTP_REQUEST_HANDLER_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_request_handler_options), }
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return whether both names are equivalent.
+ * This is a case-insensitive string comparison.
+ *
+ * Example Matches:
+ * "Content-Length" == "content-length" // upper or lower case ok
+ *
+ * Example Mismatches:
+ * "Content-Length" != " Content-Length" // leading whitespace bad
+ */
+AWS_HTTP_API
+bool aws_http_header_name_eq(struct aws_byte_cursor name_a, struct aws_byte_cursor name_b);
+
+/**
+ * Create a new headers object.
+ * The caller has a hold on the object and must call aws_http_headers_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_headers *aws_http_headers_new(struct aws_allocator *allocator);
+
+/**
+ * Acquire a hold on the object, preventing it from being deleted until
+ * aws_http_headers_release() is called by all those with a hold on it.
+ */
+AWS_HTTP_API
+void aws_http_headers_acquire(struct aws_http_headers *headers);
+
+/**
+ * Release a hold on the object.
+ * The object is deleted when all holds on it are released.
+ */
+AWS_HTTP_API
+void aws_http_headers_release(struct aws_http_headers *headers);
+
+/**
+ * Add a header.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header);
+
+/**
+ * Add a header.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value);
+
+/**
+ * Add an array of headers.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aws_http_header *array, size_t count);
+
+/**
+ * Set a header value.
+ * The header is added if necessary and any existing values for this name are removed.
+ * The underlying strings are copied.
+ */
+AWS_HTTP_API
+int aws_http_headers_set(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value);
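+
+/*
+ * Illustrative sketch of basic header manipulation with the functions in this header;
+ * `alloc` is a hypothetical allocator.
+ *
+ *     struct aws_http_headers *headers = aws_http_headers_new(alloc);
+ *     aws_http_headers_add(
+ *         headers, aws_byte_cursor_from_c_str("accept"), aws_byte_cursor_from_c_str("text/html"));
+ *     aws_http_headers_set(
+ *         headers, aws_byte_cursor_from_c_str("accept"), aws_byte_cursor_from_c_str("application/json"));
+ *
+ *     struct aws_byte_cursor value;
+ *     if (aws_http_headers_get(headers, aws_byte_cursor_from_c_str("accept"), &value) == AWS_OP_SUCCESS) {
+ *         // value now refers to "application/json"
+ *     }
+ *     aws_http_headers_release(headers);
+ */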
+
+/**
+ * Get the total number of headers.
+ */
+AWS_HTTP_API
+size_t aws_http_headers_count(const struct aws_http_headers *headers);
+
+/**
+ * Get the header at the specified index.
+ * The index of a given header may change any time headers are modified.
+ * When iterating headers, the following ordering rules apply:
+ *
+ * - Headers with the same name will always be in the same order, relative to one another.
+ * If "A: one" is added before "A: two", then "A: one" will always precede "A: two".
+ *
+ * - Headers with different names could be in any order, relative to one another.
+ * If "A: one" is seen before "B: bee" in one iteration, you might see "B: bee" before "A: one" on the next.
+ *
+ * AWS_ERROR_INVALID_INDEX is raised if the index is invalid.
+ */
+AWS_HTTP_API
+int aws_http_headers_get_index(
+ const struct aws_http_headers *headers,
+ size_t index,
+ struct aws_http_header *out_header);
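+
+/*
+ * Illustrative sketch of iterating every header with aws_http_headers_count() and
+ * aws_http_headers_get_index(), subject to the ordering rules described above.
+ *
+ *     const size_t count = aws_http_headers_count(headers);
+ *     for (size_t i = 0; i < count; ++i) {
+ *         struct aws_http_header header;
+ *         if (aws_http_headers_get_index(headers, i, &header) == AWS_OP_SUCCESS) {
+ *             // header.name and header.value are cursors into storage owned by `headers`
+ *         }
+ *     }
+ */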
+
+/**
+ *
+ * Get all values with this name, combined into one new aws_string that you are responsible for destroying.
+ * If there are multiple headers with this name, their values are appended with comma-separators.
+ * If there are no headers with this name, NULL is returned and AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised.
+ */
+AWS_HTTP_API
+struct aws_string *aws_http_headers_get_all(const struct aws_http_headers *headers, struct aws_byte_cursor name);
+
+/**
+ * Get the first value for this name, ignoring any additional values.
+ * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if the name is not found.
+ */
+AWS_HTTP_API
+int aws_http_headers_get(
+ const struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor *out_value);
+
+/**
+ * Test whether a header with this name exists in the headers object.
+ */
+AWS_HTTP_API
+bool aws_http_headers_has(const struct aws_http_headers *headers, struct aws_byte_cursor name);
+
+/**
+ * Remove all headers with this name.
+ * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if no headers with this name are found.
+ */
+AWS_HTTP_API
+int aws_http_headers_erase(struct aws_http_headers *headers, struct aws_byte_cursor name);
+
+/**
+ * Remove the first header found with this name and value.
+ * AWS_ERROR_HTTP_HEADER_NOT_FOUND is raised if no such header is found.
+ */
+AWS_HTTP_API
+int aws_http_headers_erase_value(
+ struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
+/**
+ * Remove the header at the specified index.
+ *
+ * AWS_ERROR_INVALID_INDEX is raised if the index is invalid.
+ */
+AWS_HTTP_API
+int aws_http_headers_erase_index(struct aws_http_headers *headers, size_t index);
+
+/**
+ * Clear all headers.
+ */
+AWS_HTTP_API
+void aws_http_headers_clear(struct aws_http_headers *headers);
+
+/**
+ * Get the `:method` value (HTTP/2 headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_method(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_method);
+
+/**
+ * Set `:method` (HTTP/2 headers only).
+ * The headers object makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_method(struct aws_http_headers *h2_headers, struct aws_byte_cursor method);
+
+/*
+ * Get the `:scheme` value (HTTP/2 headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_scheme(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_scheme);
+
+/**
+ * Set `:scheme` (request pseudo headers only).
+ * The headers object makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_scheme(struct aws_http_headers *h2_headers, struct aws_byte_cursor scheme);
+
+/*
+ * Get the `:authority` value (request pseudo headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_authority(
+ const struct aws_http_headers *h2_headers,
+ struct aws_byte_cursor *out_authority);
+
+/**
+ * Set `:authority` (request pseudo headers only).
+ * The headers object makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_authority(struct aws_http_headers *h2_headers, struct aws_byte_cursor authority);
+
+/*
+ * Get the `:path` value (request pseudo headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_request_path(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_path);
+
+/**
+ * Set `:path` (request pseudo headers only).
+ * The headers object makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_request_path(struct aws_http_headers *h2_headers, struct aws_byte_cursor path);
+
+/**
+ * Get `:status` (response pseudo headers only).
+ * If no status is set, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE is raised.
+ */
+AWS_HTTP_API
+int aws_http2_headers_get_response_status(const struct aws_http_headers *h2_headers, int *out_status_code);
+
+/**
+ * Set `:status` (response pseudo headers only).
+ */
+AWS_HTTP_API
+int aws_http2_headers_set_response_status(struct aws_http_headers *h2_headers, int status_code);
+
+/**
+ * Create a new HTTP/1.1 request message.
+ * The message is blank, all properties (method, path, etc) must be set individually.
+ * If an HTTP/1.1 message is used on an HTTP/2 connection, the transformation is applied automatically:
+ * an HTTP/2 message will be created and sent based on the HTTP/1.1 message.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_request(struct aws_allocator *allocator);
+
+/**
+ * Like aws_http_message_new_request(), but uses existing aws_http_headers instead of creating a new one.
+ * Acquires a hold on the headers, and releases it when the request is destroyed.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_request_with_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *existing_headers);
+
+/**
+ * Create a new HTTP/1.1 response message.
+ * The message is blank, all properties (status, headers, etc) must be set individually.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_response(struct aws_allocator *allocator);
+
+/**
+ * Create a new HTTP/2 request message.
+ * Pseudo headers must be set via aws_http2_headers_set_request_* on the headers of the aws_http_message.
+ * An error is raised if the message is used on an HTTP/1.1 connection.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http2_message_new_request(struct aws_allocator *allocator);
+
+/**
+ * Create a new HTTP/2 response message.
+ * The `:status` pseudo header must be set via aws_http2_headers_set_response_status on the headers of the message.
+ * An error is raised if the message is used on an HTTP/1.1 connection.
+ *
+ * The caller has a hold on the object and must call aws_http_message_release() when they are done with it.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http2_message_new_response(struct aws_allocator *allocator);
+
+/**
+ * Create an HTTP/2 message from an HTTP/1.1 message.
+ * Pseudo headers will be created from the context and added to the headers of the new message.
+ * Normal headers will be copied to the headers of the new message.
+ * Note:
+ * - if a `host` header exists, it is removed and `:authority` is added with its value.
+ * - `:scheme` always defaults to "https". To use a different scheme, create the HTTP/2 message directly.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http2_message_new_from_http1(
+ struct aws_allocator *alloc,
+ const struct aws_http_message *http1_msg);
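+
+/*
+ * Illustrative sketch of building an HTTP/2 request and filling its pseudo headers with the
+ * aws_http2_headers_set_request_* functions above; `alloc` is a hypothetical allocator and
+ * aws_http_message_get_headers() is declared later in this header.
+ *
+ *     struct aws_http_message *request = aws_http2_message_new_request(alloc);
+ *     struct aws_http_headers *h2_headers = aws_http_message_get_headers(request);
+ *     aws_http2_headers_set_request_method(h2_headers, aws_byte_cursor_from_c_str("GET"));
+ *     aws_http2_headers_set_request_scheme(h2_headers, aws_byte_cursor_from_c_str("https"));
+ *     aws_http2_headers_set_request_authority(h2_headers, aws_byte_cursor_from_c_str("example.com"));
+ *     aws_http2_headers_set_request_path(h2_headers, aws_byte_cursor_from_c_str("/index.html"));
+ *     // ... send the request, then release the hold:
+ *     aws_http_message_release(request);
+ */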
+
+/**
+ * Acquire a hold on the object, preventing it from being deleted until
+ * aws_http_message_release() is called by all those with a hold on it.
+ *
+ * This function returns the passed in message (possibly NULL) so that acquire-and-assign can be done with a single
+ * statement.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_acquire(struct aws_http_message *message);
+
+/**
+ * Release a hold on the object.
+ * The object is deleted when all holds on it are released.
+ *
+ * This function always returns NULL so that release-and-assign-NULL can be done with a single statement.
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_release(struct aws_http_message *message);
+
+/**
+ * Deprecated. This is equivalent to aws_http_message_release().
+ */
+AWS_HTTP_API
+void aws_http_message_destroy(struct aws_http_message *message);
+
+AWS_HTTP_API
+bool aws_http_message_is_request(const struct aws_http_message *message);
+
+AWS_HTTP_API
+bool aws_http_message_is_response(const struct aws_http_message *message);
+
+/**
+ * Get the protocol version of the http message.
+ */
+AWS_HTTP_API
+enum aws_http_version aws_http_message_get_protocol_version(const struct aws_http_message *message);
+
+/**
+ * Get the method (request messages only).
+ */
+AWS_HTTP_API
+int aws_http_message_get_request_method(
+ const struct aws_http_message *request_message,
+ struct aws_byte_cursor *out_method);
+
+/**
+ * Set the method (request messages only).
+ * The request makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http_message_set_request_method(struct aws_http_message *request_message, struct aws_byte_cursor method);
+
+/*
+ * Get the path-and-query value (request messages only).
+ */
+AWS_HTTP_API
+int aws_http_message_get_request_path(const struct aws_http_message *request_message, struct aws_byte_cursor *out_path);
+
+/**
+ * Set the path-and-query value (request messages only).
+ * The request makes its own copy of the underlying string.
+ */
+AWS_HTTP_API
+int aws_http_message_set_request_path(struct aws_http_message *request_message, struct aws_byte_cursor path);
+
+/**
+ * Get the status code (response messages only).
+ * If no status is set, AWS_ERROR_HTTP_DATA_NOT_AVAILABLE is raised.
+ */
+AWS_HTTP_API
+int aws_http_message_get_response_status(const struct aws_http_message *response_message, int *out_status_code);
+
+/**
+ * Set the status code (response messages only).
+ */
+AWS_HTTP_API
+int aws_http_message_set_response_status(struct aws_http_message *response_message, int status_code);
+
+/**
+ * Get the body stream.
+ * Returns NULL if no body stream is set.
+ */
+AWS_HTTP_API
+struct aws_input_stream *aws_http_message_get_body_stream(const struct aws_http_message *message);
+
+/**
+ * Set the body stream.
+ * NULL is an acceptable value for messages with no body.
+ * Note: The message does NOT take ownership of the body stream.
+ * The stream must not be destroyed until the message is complete.
+ */
+AWS_HTTP_API
+void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream);
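+
+/*
+ * Illustrative sketch of assembling an HTTP/1.1 request with a small in-memory body;
+ * `alloc` is a hypothetical allocator and aws_input_stream_new_from_cursor() is assumed
+ * from aws-c-io. Remember that the message does NOT take ownership of the body stream.
+ *
+ *     struct aws_http_message *request = aws_http_message_new_request(alloc);
+ *     aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("PUT"));
+ *     aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/upload"));
+ *
+ *     struct aws_http_header content_type = {
+ *         .name = aws_byte_cursor_from_c_str("content-type"),
+ *         .value = aws_byte_cursor_from_c_str("text/plain"),
+ *     };
+ *     aws_http_message_add_header(request, content_type);
+ *
+ *     struct aws_byte_cursor body = aws_byte_cursor_from_c_str("hello");
+ *     struct aws_input_stream *body_stream = aws_input_stream_new_from_cursor(alloc, &body);
+ *     aws_http_message_set_body_stream(request, body_stream);
+ *     // keep body_stream valid until the message is complete, then clean it up
+ */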
+
+/**
+ * Submit a chunk of data to be sent on an HTTP/1.1 stream.
+ * The stream must have specified "chunked" in a "transfer-encoding" header.
+ * For client streams, activate() must be called before any chunks are submitted.
+ * For server streams, the response must be submitted before any chunks.
+ * A final chunk with size 0 must be submitted to successfully complete the HTTP-stream.
+ *
+ * Returns AWS_OP_SUCCESS if the chunk has been submitted. The chunk's completion
+ * callback will be invoked when the HTTP-stream is done with the chunk data,
+ * whether or not it was successfully sent (see `aws_http1_stream_write_chunk_complete_fn`).
+ * The chunk data must remain valid until the completion callback is invoked.
+ *
+ * Returns AWS_OP_ERR and raises an error if the chunk could not be submitted.
+ * In this case, the chunk's completion callback will never be invoked.
+ * Note that it is always possible for the HTTP-stream to terminate unexpectedly
+ * prior to this call being made, in which case the error raised is
+ * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED.
+ */
+AWS_HTTP_API int aws_http1_stream_write_chunk(
+ struct aws_http_stream *http1_stream,
+ const struct aws_http1_chunk_options *options);
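+
+/*
+ * Illustrative sketch of the chunked-write flow described above: submit a data chunk, then a
+ * final zero-length chunk to complete the stream. `stream`, `chunk_options` (see the sketch
+ * after aws_http1_chunk_options), and error handling are placeholders.
+ *
+ *     aws_http_stream_activate(stream); // client streams only
+ *
+ *     if (aws_http1_stream_write_chunk(stream, &chunk_options) != AWS_OP_SUCCESS) {
+ *         // chunk not submitted; its on_complete callback will never be invoked
+ *     }
+ *
+ *     struct aws_http1_chunk_options final_chunk = {
+ *         .chunk_data = NULL,
+ *         .chunk_data_size = 0,
+ *     };
+ *     aws_http1_stream_write_chunk(stream, &final_chunk);
+ */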
+
+/**
+ * The stream must have specified `http2_use_manual_data_writes` during request creation.
+ * For client streams, activate() must be called before any frames are submitted.
+ * For server streams, the response headers must be submitted before any frames.
+ * A write with end_stream set to true will end the stream and prevent any further writes.
+ *
+ * @return AWS_OP_SUCCESS if the write was queued
+ * AWS_OP_ERR indicating the attempt raised an error code.
+ * AWS_ERROR_INVALID_STATE will be raised for invalid usage.
+ * AWS_ERROR_HTTP_STREAM_HAS_COMPLETED will be raised if the stream has already ended for unrelated reasons.
+ *
+ * Typical usage will be something like:
+ * options.http2_use_manual_data_writes = true;
+ * stream = aws_http_connection_make_request(connection, &options);
+ * aws_http_stream_activate(stream);
+ * ...
+ * struct aws_http2_stream_write_data_options write;
+ * aws_http2_stream_write_data(stream, &write);
+ * ...
+ * struct aws_http2_stream_write_data_options last_write;
+ * last_write.end_stream = true;
+ *     aws_http2_stream_write_data(stream, &last_write);
+ * ...
+ * aws_http_stream_release(stream);
+ */
+AWS_HTTP_API int aws_http2_stream_write_data(
+ struct aws_http_stream *http2_stream,
+ const struct aws_http2_stream_write_data_options *options);
+
+/**
+ * Add a list of headers to be sent as trailing headers after the last chunk is sent.
+ * The request should also include a "Trailer" header field indicating which fields will be present in the trailer.
+ *
+ * Certain headers are forbidden in the trailer (e.g., Transfer-Encoding, Content-Length, Host). See RFC-7230
+ * Section 4.1.2 for more details.
+ *
+ * For client streams, activate() must be called before any chunks are submitted.
+ *
+ * For server streams, the response must be submitted before the trailer can be added
+ *
+ * aws_http1_stream_add_chunked_trailer must be called before the final size 0 chunk, and at the moment can only
+ * be called once, though this could change if need be.
+ *
+ * Returns AWS_OP_SUCCESS if the trailer has been submitted.
+ */
+AWS_HTTP_API int aws_http1_stream_add_chunked_trailer(
+ struct aws_http_stream *http1_stream,
+ const struct aws_http_headers *trailing_headers);
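+
+/*
+ * Illustrative sketch of attaching a trailer before the final zero-length chunk; `alloc`,
+ * `stream`, and the "x-checksum" field are hypothetical placeholders.
+ *
+ *     struct aws_http_headers *trailer = aws_http_headers_new(alloc);
+ *     aws_http_headers_add(
+ *         trailer, aws_byte_cursor_from_c_str("x-checksum"), aws_byte_cursor_from_c_str("AAAAAA=="));
+ *     aws_http1_stream_add_chunked_trailer(stream, trailer);
+ *     // ...then submit the final chunk with size 0; keeping `trailer` alive until the
+ *     // stream completes, and releasing it afterwards, is the conservative choice
+ */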
+
+/**
+ * Get the message's aws_http_headers.
+ *
+ * This data structure has more functions for inspecting and modifying headers than
+ * are available on the aws_http_message data structure.
+ */
+AWS_HTTP_API
+struct aws_http_headers *aws_http_message_get_headers(const struct aws_http_message *message);
+
+/**
+ * Get the message's const aws_http_headers.
+ */
+AWS_HTTP_API
+const struct aws_http_headers *aws_http_message_get_const_headers(const struct aws_http_message *message);
+
+/**
+ * Get the number of headers.
+ */
+AWS_HTTP_API
+size_t aws_http_message_get_header_count(const struct aws_http_message *message);
+
+/**
+ * Get the header at the specified index.
+ * This function cannot fail if a valid index is provided.
+ * Otherwise, AWS_ERROR_INVALID_INDEX will be raised.
+ *
+ * The underlying strings are stored within the message.
+ */
+AWS_HTTP_API
+int aws_http_message_get_header(
+ const struct aws_http_message *message,
+ struct aws_http_header *out_header,
+ size_t index);
+
+/**
+ * Add a header to the end of the array.
+ * The message makes its own copy of the underlying strings.
+ */
+AWS_HTTP_API
+int aws_http_message_add_header(struct aws_http_message *message, struct aws_http_header header);
+
+/**
+ * Add an array of headers to the end of the header array.
+ * The message makes its own copy of the underlying strings.
+ *
+ * This is a helper function useful when it's easier to define headers as a stack array, rather than calling add_header
+ * repeatedly.
+ */
+AWS_HTTP_API
+int aws_http_message_add_header_array(
+ struct aws_http_message *message,
+ const struct aws_http_header *headers,
+ size_t num_headers);
+
+/**
+ * Remove the header at the specified index.
+ * Headers after this index are all shifted back one position.
+ *
+ * This function cannot fail if a valid index is provided.
+ * Otherwise, AWS_ERROR_INVALID_INDEX will be raised.
+ */
+AWS_HTTP_API
+int aws_http_message_erase_header(struct aws_http_message *message, size_t index);
+
+/**
+ * Create a stream, with a client connection sending a request.
+ * The request does not start sending automatically once the stream is created. You must call
+ * aws_http_stream_activate to begin execution of the request.
+ *
+ * The `options` are copied during this call.
+ *
+ * Tip for language bindings: Do not bind the `options` struct. Use something more natural for your language,
+ * such as Builder Pattern in Java, or Python's ability to take many optional arguments by name.
+ *
+ * Note: The headers of the request are sent as-is when the protocol of the message matches the protocol of the
+ * connection.
+ * - No `user-agent` header will be added.
+ * - No security checks are enforced; e.g., `referer` header privacy must be handled by the user agent that adds the
+ * header.
+ * - When an HTTP/1 message is sent on an HTTP/2 connection, `aws_http2_message_new_from_http1` is applied under the hood.
+ * - When an HTTP/2 message is sent on an HTTP/1 connection, no change is made.
+ */
+AWS_HTTP_API
+struct aws_http_stream *aws_http_connection_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
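+
+/*
+ * Illustrative sketch of the client request flow: make the stream, activate it, and release it
+ * when done. Only a few fields of aws_http_make_request_options (defined earlier in this
+ * header) are shown; s_on_stream_complete and my_context are hypothetical placeholders.
+ *
+ *     struct aws_http_make_request_options options = {
+ *         .self_size = sizeof(struct aws_http_make_request_options),
+ *         .request = request,
+ *         .on_complete = s_on_stream_complete,
+ *         .user_data = my_context,
+ *     };
+ *     struct aws_http_stream *stream = aws_http_connection_make_request(connection, &options);
+ *     if (stream) {
+ *         aws_http_stream_activate(stream);
+ *     }
+ *     // ...after on_complete has fired:
+ *     aws_http_stream_release(stream);
+ */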
+
+/**
+ * Create a stream, with a server connection receiving and responding to a request.
+ * This function can only be called from the `aws_http_on_incoming_request_fn` callback.
+ * aws_http_stream_send_response() should be used to send a response.
+ */
+AWS_HTTP_API
+struct aws_http_stream *aws_http_stream_new_server_request_handler(
+ const struct aws_http_request_handler_options *options);
+
+/**
+ * Users must release the stream when they are done with it, or its memory will never be cleaned up.
+ * This will not cancel the stream; its callbacks will still fire if the stream is still in progress.
+ *
+ * Tips for language bindings:
+ * - Invoke this from the wrapper class's finalizer/destructor.
+ * - Do not let the wrapper class be destroyed until on_complete() has fired.
+ */
+AWS_HTTP_API
+void aws_http_stream_release(struct aws_http_stream *stream);
+
+/**
+ * Only used for client initiated streams (immediately following a call to aws_http_connection_make_request).
+ *
+ * Activates the request's outgoing stream processing.
+ */
+AWS_HTTP_API int aws_http_stream_activate(struct aws_http_stream *stream);
+
+AWS_HTTP_API
+struct aws_http_connection *aws_http_stream_get_connection(const struct aws_http_stream *stream);
+
+/* Only valid in "request" streams, once response headers start arriving */
+AWS_HTTP_API
+int aws_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status);
+
+/* Only valid in "request handler" streams, once request headers start arriving */
+AWS_HTTP_API
+int aws_http_stream_get_incoming_request_method(
+ const struct aws_http_stream *stream,
+ struct aws_byte_cursor *out_method);
+
+AWS_HTTP_API
+int aws_http_stream_get_incoming_request_uri(const struct aws_http_stream *stream, struct aws_byte_cursor *out_uri);
+
+/**
+ * Send response (only callable from "request handler" streams)
+ * The response object must stay alive at least until the stream's on_complete is called.
+ */
+AWS_HTTP_API
+int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response);
+
+/**
+ * Increment the stream's flow-control window to keep data flowing.
+ *
+ * If the connection was created with `manual_window_management` set true,
+ * the flow-control window of each stream will shrink as body data is received
+ * (headers, padding, and other metadata do not affect the window).
+ * The connection's `initial_window_size` determines the starting size of each stream's window.
+ * If a stream's flow-control window reaches 0, no further data will be received.
+ *
+ * If `manual_window_management` is false, this call will have no effect.
+ * The connection maintains its flow-control windows such that
+ * no back-pressure is applied and data arrives as fast as possible.
+ */
+AWS_HTTP_API
+void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size);
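+
+/*
+ * Illustrative sketch of releasing back-pressure from an incoming-body callback when
+ * `manual_window_management` is true; the signature follows aws_http_on_incoming_body_fn,
+ * defined earlier in this header.
+ *
+ *     static int s_on_response_body(
+ *         struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+ *
+ *         // consume data->ptr / data->len, then grow the window so more data can arrive
+ *         aws_http_stream_update_window(stream, data->len);
+ *         return AWS_OP_SUCCESS;
+ *     }
+ */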
+
+/**
+ * Gets the HTTP/2 id associated with a stream. Even HTTP/1 streams have an id (allocated with the same procedure
+ * as HTTP/2) for easier tracking purposes. For client streams, this will only be non-zero after a successful call
+ * to aws_http_stream_activate().
+ */
+AWS_HTTP_API
+uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream);
+
+/**
+ * Reset the HTTP/2 stream (HTTP/2 only).
+ * Note that if the stream closes before this async call is fully processed, the RST_STREAM frame will not be sent.
+ *
+ * @param http2_stream HTTP/2 stream.
+ * @param http2_error aws_http2_error_code. Reason to reset the stream.
+ */
+AWS_HTTP_API
+int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error);
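+
+/*
+ * Illustrative sketch of cancelling an in-flight HTTP/2 stream. 0x08 is the CANCEL error code
+ * from RFC-7540 Section 7; the library's HTTP/2 error-code enum (defined elsewhere) provides
+ * named constants for these values.
+ *
+ *     aws_http2_stream_reset(http2_stream, 0x08); // CANCEL: the stream is no longer needed
+ */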
+
+/**
+ * Get the HTTP/2 error code received in an RST_STREAM frame (HTTP/2 only).
+ * Only valid if the stream has completed and an RST_STREAM frame has been received.
+ *
+ * @param http2_stream HTTP/2 stream.
+ * @param out_http2_error Gets set to the HTTP/2 error code received in the RST_STREAM frame.
+ */
+AWS_HTTP_API
+int aws_http2_stream_get_received_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error);
+
+/**
+ * Get the HTTP/2 error code sent in the RST_STREAM frame (HTTP/2 only).
+ * Only valid if the stream has completed, and has sent an RST_STREAM frame.
+ *
+ * @param http2_stream HTTP/2 stream.
+ * @param out_http2_error Gets set to the HTTP/2 error code sent in the RST_STREAM frame.
+ */
+AWS_HTTP_API
+int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_REQUEST_RESPONSE_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/server.h b/contrib/restricted/aws/aws-c-http/include/aws/http/server.h
new file mode 100644
index 0000000000..0e1be3d8c0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/server.h
@@ -0,0 +1,198 @@
+#ifndef AWS_HTTP_SERVER_H
+#define AWS_HTTP_SERVER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http_connection;
+struct aws_server_bootstrap;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+/**
+ * A listening socket which accepts incoming HTTP connections,
+ * creating a server-side aws_http_connection to handle each one.
+ */
+struct aws_http_server;
+struct aws_http_stream;
+
+typedef void(aws_http_server_on_incoming_connection_fn)(
+ struct aws_http_server *server,
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data);
+
+typedef void(aws_http_server_on_destroy_fn)(void *user_data);
+
+/**
+ * Options for creating an HTTP server.
+ * Initialize with AWS_HTTP_SERVER_OPTIONS_INIT to set default values.
+ */
+struct aws_http_server_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Set by AWS_HTTP_SERVER_OPTIONS_INIT.
+ */
+ size_t self_size;
+
+ /**
+ * Required.
+ * Must outlive server.
+ */
+ struct aws_allocator *allocator;
+
+ /**
+ * Required.
+ * Must outlive server.
+ */
+ struct aws_server_bootstrap *bootstrap;
+
+ /**
+ * Required.
+ * Server makes copy.
+ */
+ struct aws_socket_endpoint *endpoint;
+
+ /**
+ * Required.
+ * Server makes a copy.
+ */
+ struct aws_socket_options *socket_options;
+
+ /**
+ * Optional.
+ * Server copies all contents except the `aws_tls_ctx`, which must outlive the server.
+ */
+ struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Initial window size for incoming connections.
+ * Optional.
+ * A default size is set by AWS_HTTP_SERVER_OPTIONS_INIT.
+ */
+ size_t initial_window_size;
+
+ /**
+ * User data passed to callbacks.
+ * Optional.
+ */
+ void *server_user_data;
+
+ /**
+ * Invoked when an incoming connection has been set up, or when setup has failed.
+ * Required.
+ * If setup succeeds, the user must call aws_http_connection_configure_server().
+ */
+ aws_http_server_on_incoming_connection_fn *on_incoming_connection;
+
+ /**
+ * Invoked when the server finishes the destroy operation.
+ * Optional.
+ */
+ aws_http_server_on_destroy_fn *on_destroy_complete;
+
+ /**
+ * Set to true to manually manage the read window size.
+ *
+ * If this is false, the connection will maintain a constant window size.
+ *
+ * If this is true, the caller must manually increment the window size using aws_http_stream_update_window().
+ * If the window is not incremented, it will shrink by the amount of body data received. If the window size
+ * reaches 0, no further data will be received.
+ **/
+ bool manual_window_management;
+};
+
+/**
+ * Initializes aws_http_server_options with default values.
+ */
+#define AWS_HTTP_SERVER_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_server_options), .initial_window_size = SIZE_MAX, }
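+
+/*
+ * Illustrative sketch of filling aws_http_server_options; the bootstrap, endpoint, socket
+ * options, and s_on_incoming_connection callback are hypothetical placeholders the
+ * application sets up beforehand.
+ *
+ *     struct aws_http_server_options options = AWS_HTTP_SERVER_OPTIONS_INIT;
+ *     options.allocator = alloc;
+ *     options.bootstrap = server_bootstrap;
+ *     options.endpoint = &local_endpoint;
+ *     options.socket_options = &socket_options;
+ *     options.on_incoming_connection = s_on_incoming_connection;
+ *     options.server_user_data = my_app;
+ *
+ *     struct aws_http_server *server = aws_http_server_new(&options);
+ */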
+
+/**
+ * Invoked at the start of an incoming request.
+ * To process the request, the user must create a request handler stream and return it to the connection.
+ * If NULL is returned, the request will not be processed and the last error will be reported as the reason for failure.
+ */
+typedef struct aws_http_stream *(
+ aws_http_on_incoming_request_fn)(struct aws_http_connection *connection, void *user_data);
+
+typedef void(aws_http_on_server_connection_shutdown_fn)(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *connection_user_data);
+
+/**
+ * Options for configuring a server-side aws_http_connection.
+ * Initialized with AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT to set default values.
+ */
+struct aws_http_server_connection_options {
+ /**
+ * The sizeof() this struct, used for versioning.
+ * Set by AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT.
+ */
+ size_t self_size;
+
+ /**
+ * User data specific to this connection.
+ * Optional.
+ */
+ void *connection_user_data;
+
+ /**
+ * Invoked at the start of an incoming request.
+ * Required.
+ * The user must create a request handler stream and return it to the connection.
+ * See `aws_http_on_incoming_request_fn`.
+ */
+ aws_http_on_incoming_request_fn *on_incoming_request;
+
+ /**
+ * Invoked when the connection is shut down.
+ * Optional.
+ */
+ aws_http_on_server_connection_shutdown_fn *on_shutdown;
+};
+
+/**
+ * Initializes aws_http_server_connection_options with default values.
+ */
+#define AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT \
+ { .self_size = sizeof(struct aws_http_server_connection_options), }
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Create server, a listening socket that accepts incoming connections.
+ */
+AWS_HTTP_API
+struct aws_http_server *aws_http_server_new(const struct aws_http_server_options *options);
+
+/**
+ * Release the server. This closes the listening socket and all connections existing on the server.
+ * The on_destroy_complete callback is invoked when the destroy operation completes.
+ */
+AWS_HTTP_API
+void aws_http_server_release(struct aws_http_server *server);
+
+/**
+ * Configure a server connection.
+ * This must be called from the server's on_incoming_connection callback.
+ */
+AWS_HTTP_API
+int aws_http_connection_configure_server(
+ struct aws_http_connection *connection,
+ const struct aws_http_server_connection_options *options);
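+
+/*
+ * Illustrative sketch of an on_incoming_connection callback that configures each new server
+ * connection; s_on_incoming_request and s_on_connection_shutdown are hypothetical callbacks
+ * supplied by the application.
+ *
+ *     static void s_on_incoming_connection(
+ *         struct aws_http_server *server, struct aws_http_connection *connection, int error_code, void *user_data) {
+ *
+ *         (void)server;
+ *         if (error_code) {
+ *             return; // connection setup failed
+ *         }
+ *         struct aws_http_server_connection_options options = AWS_HTTP_SERVER_CONNECTION_OPTIONS_INIT;
+ *         options.connection_user_data = user_data;
+ *         options.on_incoming_request = s_on_incoming_request;
+ *         options.on_shutdown = s_on_connection_shutdown;
+ *         aws_http_connection_configure_server(connection, &options);
+ *     }
+ */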
+
+/**
+ * Returns true if this is a server connection.
+ */
+AWS_HTTP_API
+bool aws_http_connection_is_server(const struct aws_http_connection *connection);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_SERVER_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h b/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h
new file mode 100644
index 0000000000..ecc8c2700a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/statistics.h
@@ -0,0 +1,75 @@
+#ifndef AWS_HTTP_STATISTICS_H
+#define AWS_HTTP_STATISTICS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+#include <aws/common/statistics.h>
+
+enum aws_crt_http_statistics_category {
+ AWSCRT_STAT_CAT_HTTP1_CHANNEL = AWS_CRT_STATISTICS_CATEGORY_BEGIN_RANGE(AWS_C_HTTP_PACKAGE_ID),
+ AWSCRT_STAT_CAT_HTTP2_CHANNEL,
+};
+
+/**
+ * A statistics struct for http handlers. Tracks the actual amount of time that incoming and outgoing requests are
+ * waiting for their IO to complete.
+ */
+struct aws_crt_statistics_http1_channel {
+ aws_crt_statistics_category_t category;
+
+ uint64_t pending_outgoing_stream_ms;
+ uint64_t pending_incoming_stream_ms;
+
+ uint32_t current_outgoing_stream_id;
+ uint32_t current_incoming_stream_id;
+};
+
+struct aws_crt_statistics_http2_channel {
+ aws_crt_statistics_category_t category;
+
+ uint64_t pending_outgoing_stream_ms;
+ uint64_t pending_incoming_stream_ms;
+
+ /* True if, during the reporting interval, there was ever a period with no active streams on the connection */
+ bool was_inactive;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes an HTTP/1.1 channel handler statistics struct
+ */
+AWS_HTTP_API
+int aws_crt_statistics_http1_channel_init(struct aws_crt_statistics_http1_channel *stats);
+
+/**
+ * Cleans up an HTTP/1.1 channel handler statistics struct
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http1_channel_cleanup(struct aws_crt_statistics_http1_channel *stats);
+
+/**
+ * Resets an HTTP/1.1 channel handler statistics struct's statistics
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http1_channel_reset(struct aws_crt_statistics_http1_channel *stats);
+
+/**
+ * Initializes an HTTP/2 channel handler statistics struct
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http2_channel_init(struct aws_crt_statistics_http2_channel *stats);
+
+/**
+ * Resets an HTTP/2 channel handler statistics struct's statistics
+ */
+AWS_HTTP_API
+void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_STATISTICS_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h b/contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h
new file mode 100644
index 0000000000..292f866239
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/status_code.h
@@ -0,0 +1,82 @@
+#ifndef AWS_HTTP_STATUS_CODE_H
+#define AWS_HTTP_STATUS_CODE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/*
+ * Define most of the http response codes we probably will use.
+ * https://www.iana.org/assignments/http-status-codes/http-status-codes.txt
+ * This is NOT a definitive list of codes.
+ */
+enum aws_http_status_code {
+ /*
+ * This is a special response code defined for convenience in error processing,
+ * indicating that processing of the HTTP request encountered an error and never reached the server.
+ */
+ AWS_HTTP_STATUS_CODE_UNKNOWN = -1,
+ AWS_HTTP_STATUS_CODE_100_CONTINUE = 100,
+ AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS = 101,
+ AWS_HTTP_STATUS_CODE_102_PROCESSING = 102,
+ AWS_HTTP_STATUS_CODE_103_EARLY_HINTS = 103,
+ AWS_HTTP_STATUS_CODE_200_OK = 200,
+ AWS_HTTP_STATUS_CODE_201_CREATED = 201,
+ AWS_HTTP_STATUS_CODE_202_ACCEPTED = 202,
+ AWS_HTTP_STATUS_CODE_203_NON_AUTHORITATIVE_INFORMATION = 203,
+ AWS_HTTP_STATUS_CODE_204_NO_CONTENT = 204,
+ AWS_HTTP_STATUS_CODE_205_RESET_CONTENT = 205,
+ AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT = 206,
+ AWS_HTTP_STATUS_CODE_207_MULTI_STATUS = 207,
+ AWS_HTTP_STATUS_CODE_208_ALREADY_REPORTED = 208,
+ AWS_HTTP_STATUS_CODE_226_IM_USED = 226,
+ AWS_HTTP_STATUS_CODE_300_MULTIPLE_CHOICES = 300,
+ AWS_HTTP_STATUS_CODE_301_MOVED_PERMANENTLY = 301,
+ AWS_HTTP_STATUS_CODE_302_FOUND = 302,
+ AWS_HTTP_STATUS_CODE_303_SEE_OTHER = 303,
+ AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED = 304,
+ AWS_HTTP_STATUS_CODE_305_USE_PROXY = 305,
+ AWS_HTTP_STATUS_CODE_307_TEMPORARY_REDIRECT = 307,
+ AWS_HTTP_STATUS_CODE_308_PERMANENT_REDIRECT = 308,
+ AWS_HTTP_STATUS_CODE_400_BAD_REQUEST = 400,
+ AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED = 401,
+ AWS_HTTP_STATUS_CODE_402_PAYMENT_REQUIRED = 402,
+ AWS_HTTP_STATUS_CODE_403_FORBIDDEN = 403,
+ AWS_HTTP_STATUS_CODE_404_NOT_FOUND = 404,
+ AWS_HTTP_STATUS_CODE_405_METHOD_NOT_ALLOWED = 405,
+ AWS_HTTP_STATUS_CODE_406_NOT_ACCEPTABLE = 406,
+ AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED = 407,
+ AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT = 408,
+ AWS_HTTP_STATUS_CODE_409_CONFLICT = 409,
+ AWS_HTTP_STATUS_CODE_410_GONE = 410,
+ AWS_HTTP_STATUS_CODE_411_LENGTH_REQUIRED = 411,
+ AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED = 412,
+ AWS_HTTP_STATUS_CODE_413_REQUEST_ENTITY_TOO_LARGE = 413,
+ AWS_HTTP_STATUS_CODE_414_REQUEST_URI_TOO_LONG = 414,
+ AWS_HTTP_STATUS_CODE_415_UNSUPPORTED_MEDIA_TYPE = 415,
+ AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE = 416,
+ AWS_HTTP_STATUS_CODE_417_EXPECTATION_FAILED = 417,
+ AWS_HTTP_STATUS_CODE_421_MISDIRECTED_REQUEST = 421,
+ AWS_HTTP_STATUS_CODE_422_UNPROCESSABLE_ENTITY = 422,
+ AWS_HTTP_STATUS_CODE_423_LOCKED = 423,
+ AWS_HTTP_STATUS_CODE_424_FAILED_DEPENDENCY = 424,
+ AWS_HTTP_STATUS_CODE_425_TOO_EARLY = 425,
+ AWS_HTTP_STATUS_CODE_426_UPGRADE_REQUIRED = 426,
+ AWS_HTTP_STATUS_CODE_428_PRECONDITION_REQUIRED = 428,
+ AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS = 429,
+ AWS_HTTP_STATUS_CODE_431_REQUEST_HEADER_FIELDS_TOO_LARGE = 431,
+ AWS_HTTP_STATUS_CODE_451_UNAVAILABLE_FOR_LEGAL_REASON = 451,
+ AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR = 500,
+ AWS_HTTP_STATUS_CODE_501_NOT_IMPLEMENTED = 501,
+ AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY = 502,
+ AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE = 503,
+ AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT = 504,
+ AWS_HTTP_STATUS_CODE_505_HTTP_VERSION_NOT_SUPPORTED = 505,
+ AWS_HTTP_STATUS_CODE_506_VARIANT_ALSO_NEGOTIATES = 506,
+ AWS_HTTP_STATUS_CODE_507_INSUFFICIENT_STORAGE = 507,
+ AWS_HTTP_STATUS_CODE_508_LOOP_DETECTED = 508,
+ AWS_HTTP_STATUS_CODE_510_NOT_EXTENDED = 510,
+ AWS_HTTP_STATUS_CODE_511_NETWORK_AUTHENTICATION_REQUIRED = 511,
+};
+#endif /* AWS_HTTP_STATUS_CODE_H */
diff --git a/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h b/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h
new file mode 100644
index 0000000000..6f85cafa81
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/include/aws/http/websocket.h
@@ -0,0 +1,483 @@
+#ifndef AWS_HTTP_WEBSOCKET_H
+#define AWS_HTTP_WEBSOCKET_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/http.h>
+
+struct aws_http_header;
+struct aws_http_message;
+
+/* TODO: Document lifetime stuff */
+/* TODO: Document CLOSE frame behavior (when auto-sent during close, when auto-closed) */
+/* TODO: Accept payload as aws_input_stream */
+
+/**
+ * A websocket connection.
+ */
+struct aws_websocket;
+
+/**
+ * Opcode describing the type of a websocket frame.
+ * RFC-6455 Section 5.2
+ */
+enum aws_websocket_opcode {
+ AWS_WEBSOCKET_OPCODE_CONTINUATION = 0x0,
+ AWS_WEBSOCKET_OPCODE_TEXT = 0x1,
+ AWS_WEBSOCKET_OPCODE_BINARY = 0x2,
+ AWS_WEBSOCKET_OPCODE_CLOSE = 0x8,
+ AWS_WEBSOCKET_OPCODE_PING = 0x9,
+ AWS_WEBSOCKET_OPCODE_PONG = 0xA,
+};
+
+#define AWS_WEBSOCKET_MAX_PAYLOAD_LENGTH 0x7FFFFFFFFFFFFFFF
+#define AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH 25
+#define AWS_WEBSOCKET_CLOSE_TIMEOUT 1000000000 // nanos -> 1 sec
+
+/**
+ * Data passed to the websocket on_connection_setup callback.
+ *
+ * An error_code of zero indicates that setup was completely successful.
+ * You own the websocket pointer now and must call aws_websocket_release() when you are done with it.
+ * You can inspect the response headers, if you're interested.
+ *
+ * A non-zero error_code indicates that setup failed.
+ * The websocket pointer will be NULL.
+ * If the server sent a response, you can inspect its status-code, headers, and body,
+ * but this data will be NULL if setup failed before a full response could be received.
+ * If you wish to persist data from the response make a deep copy.
+ * The response data becomes invalid once the callback completes.
+ */
+struct aws_websocket_on_connection_setup_data {
+ int error_code;
+ struct aws_websocket *websocket;
+ const int *handshake_response_status;
+ const struct aws_http_header *handshake_response_header_array;
+ size_t num_handshake_response_headers;
+ const struct aws_byte_cursor *handshake_response_body;
+};
+
+/**
+ * Called when websocket setup is complete.
+ * Called exactly once on the websocket's event-loop thread.
+ * See `aws_websocket_on_connection_setup_data`.
+ */
+typedef void(
+ aws_websocket_on_connection_setup_fn)(const struct aws_websocket_on_connection_setup_data *setup, void *user_data);
+
+/**
+ * Called when the websocket has finished shutting down.
+ * Called once on the websocket's event-loop thread if setup succeeded.
+ * If setup failed, this is never called.
+ */
+typedef void(aws_websocket_on_connection_shutdown_fn)(struct aws_websocket *websocket, int error_code, void *user_data);
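+
+/*
+ * Illustrative sketch of an on_connection_setup callback; on success the callback owns the
+ * websocket and must eventually call aws_websocket_release(). `struct app_state` is a
+ * hypothetical placeholder.
+ *
+ *     static void s_on_connection_setup(const struct aws_websocket_on_connection_setup_data *setup, void *user_data) {
+ *         struct app_state *app = user_data;
+ *         if (setup->error_code) {
+ *             // optionally inspect setup->handshake_response_status / headers / body here
+ *             return;
+ *         }
+ *         app->websocket = setup->websocket; // released later with aws_websocket_release()
+ *     }
+ */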
+
+/**
+ * Data about an incoming frame.
+ * See RFC-6455 Section 5.2.
+ */
+struct aws_websocket_incoming_frame {
+ uint64_t payload_length;
+ uint8_t opcode;
+ bool fin;
+};
+
+/**
+ * Called when a new frame arrives.
+ * Invoked once per frame on the websocket's event-loop thread.
+ * Each incoming-frame-begin call will eventually be followed by an incoming-frame-complete call,
+ * before the next frame begins and before the websocket shuts down.
+ *
+ * Return true to proceed normally. If false is returned, the websocket will read no further data,
+ * the frame will complete with an error-code, and the connection will close.
+ */
+typedef bool(aws_websocket_on_incoming_frame_begin_fn)(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_incoming_frame *frame,
+ void *user_data);
+
+/**
+ * Called repeatedly as payload data arrives.
+ * Invoked 0 or more times on the websocket's event-loop thread.
+ * Payload data will not be valid after this call, so copy if necessary.
+ * The payload data is always unmasked at this point.
+ *
+ * NOTE: If you created the websocket with `manual_window_management` set true, you must maintain the read window.
+ * Whenever the read window reaches 0, you will stop receiving anything.
+ * The websocket's `initial_window_size` determines the starting size of the read window.
+ * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION).
+ * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing.
+ * Maintain a larger window to keep up high throughput.
+ * You only need to worry about the payload from "data" frames.
+ * The websocket automatically increments the window to account for any
+ * other incoming bytes, including other parts of a frame (opcode, payload-length, etc)
+ * and the payload of other frame types (PING, PONG, CLOSE).
+ *
+ * Return true to proceed normally. If false is returned, the websocket will read no further data,
+ * the frame will complete with an error-code, and the connection will close.
+ */
+typedef bool(aws_websocket_on_incoming_frame_payload_fn)(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_incoming_frame *frame,
+ struct aws_byte_cursor data,
+ void *user_data);
+
+/**
+ * Called when done processing an incoming frame.
+ * If error_code is non-zero, an error occurred and the payload may not have been completely received.
+ * Invoked once per frame on the websocket's event-loop thread.
+ *
+ * Return true to proceed normally. If false is returned, the websocket will read no further data
+ * and the connection will close.
+ */
+typedef bool(aws_websocket_on_incoming_frame_complete_fn)(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_incoming_frame *frame,
+ int error_code,
+ void *user_data);
+
+/**
+ * Options for creating a websocket client connection.
+ */
+struct aws_websocket_client_connection_options {
+ /**
+ * Required.
+ * Must outlive the connection.
+ */
+ struct aws_allocator *allocator;
+
+ /**
+ * Required.
+ * The connection keeps the bootstrap alive via ref-counting.
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /**
+ * Required.
+ * aws_websocket_client_connect() makes a copy.
+ */
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * Optional.
+ * aws_websocket_client_connect() deep-copies all contents,
+ * and keeps the `aws_tls_ctx` alive via ref-counting.
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * Optional
+ * Configuration options related to http proxy usage.
+ */
+ const struct aws_http_proxy_options *proxy_options;
+
+ /**
+ * Required.
+ * aws_websocket_client_connect() makes a copy.
+ */
+ struct aws_byte_cursor host;
+
+ /**
+ * Optional.
+ * Defaults to 443 if tls_options is present, 80 if it is not.
+ */
+ uint16_t port;
+
+ /**
+ * Required.
+ * The request will be kept alive via ref-counting until the handshake completes.
+ * Suggestion: create via aws_http_message_new_websocket_handshake_request()
+ *
+ * The method MUST be set to GET.
+ * The following headers are required (replace values in []):
+ *
+ * Host: [server.example.com]
+ * Upgrade: websocket
+ * Connection: Upgrade
+ * Sec-WebSocket-Key: [dGhlIHNhbXBsZSBub25jZQ==]
+ * Sec-WebSocket-Version: 13
+ *
+ * Sec-WebSocket-Key should be a random 16-byte value, Base64-encoded.
+ */
+ struct aws_http_message *handshake_request;
+
+ /**
+ * Initial size of the websocket's read window.
+ * Ignored unless `manual_window_management` is true.
+ * Set to 0 to prevent any incoming websocket frames until aws_websocket_increment_read_window() is called.
+ */
+ size_t initial_window_size;
+
+ /**
+ * User data for callbacks.
+ * Optional.
+ */
+ void *user_data;
+
+ /**
+ * Called when connect completes.
+ * Required.
+ * If unsuccessful, error_code will be set, connection will be NULL,
+ * and the on_connection_shutdown callback will never be called.
+ * If successful, the user is now responsible for the websocket and must
+ * call aws_websocket_release() when they are done with it.
+ */
+ aws_websocket_on_connection_setup_fn *on_connection_setup;
+
+ /**
+ * Called when connection has finished shutting down.
+ * Optional.
+ * Never called if `on_connection_setup` reported failure.
+ * Note that the connection is not completely done until `on_connection_shutdown` has been called
+ * AND aws_websocket_release() has been called.
+ */
+ aws_websocket_on_connection_shutdown_fn *on_connection_shutdown;
+
+ /**
+ * Called when each new frame arrives.
+ * Optional.
+ * See `aws_websocket_on_incoming_frame_begin_fn`.
+ */
+ aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin;
+
+ /**
+ * Called repeatedly as payload data arrives.
+ * Optional.
+ * See `aws_websocket_on_incoming_frame_payload_fn`.
+ */
+ aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload;
+
+ /**
+ * Called when done processing an incoming frame.
+ * Optional.
+ * See `aws_websocket_on_incoming_frame_complete_fn`.
+ */
+ aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete;
+
+ /**
+ * Set to true to manually manage the read window size.
+ *
+ * If this is false, no backpressure is applied and frames will arrive as fast as possible.
+ *
+ * If this is true, then whenever the read window reaches 0 you will stop receiving anything.
+ * The websocket's `initial_window_size` determines the starting size of the read window.
+ * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION).
+ * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing.
+ * Maintain a larger window to keep up high throughput.
+ * You only need to worry about the payload from "data" frames.
+ * The websocket automatically increments the window to account for any
+ * other incoming bytes, including other parts of a frame (opcode, payload-length, etc)
+ * and the payload of other frame types (PING, PONG, CLOSE).
+ */
+ bool manual_window_management;
+
+ /**
+ * Optional
+ * If set, requests that a specific event loop be used to seat the connection, rather than the next one
+ * in the event loop group. Useful for serializing all io and external events related to a client onto
+ * a single thread.
+ */
+ struct aws_event_loop *requested_event_loop;
+};
+
+/**
+ * Called repeatedly as the websocket's payload is streamed out.
+ * The user should write payload data to out_buf, up to available capacity.
+ * The websocket will mask this data for you, if necessary.
+ * Invoked repeatedly on the websocket's event-loop thread.
+ *
+ * Return true to proceed normally. If false is returned, the websocket will send no further data,
+ * the frame will complete with an error-code, and the connection will close.
+ */
+typedef bool(aws_websocket_stream_outgoing_payload_fn)(
+ struct aws_websocket *websocket,
+ struct aws_byte_buf *out_buf,
+ void *user_data);
+
+/**
+ * Called when an aws_websocket_send_frame() operation completes.
+ * error_code will be zero if the operation was successful.
+ * "Success" does not guarantee that the peer actually received or processed the frame.
+ * Invoked exactly once per sent frame on the websocket's event-loop thread.
+ */
+typedef void(
+ aws_websocket_outgoing_frame_complete_fn)(struct aws_websocket *websocket, int error_code, void *user_data);
+
+/**
+ * Options for sending a websocket frame.
+ * This structure is copied immediately by aws_websocket_send().
+ * For descriptions of opcode, fin, and payload_length, see RFC-6455 Section 5.2.
+ */
+struct aws_websocket_send_frame_options {
+ /**
+ * Size of payload to be sent via `stream_outgoing_payload` callback.
+ */
+ uint64_t payload_length;
+
+ /**
+ * User data passed to callbacks.
+ */
+ void *user_data;
+
+ /**
+ * Callback for sending payload data.
+ * See `aws_websocket_stream_outgoing_payload_fn`.
+ * Required if `payload_length` is non-zero.
+ */
+ aws_websocket_stream_outgoing_payload_fn *stream_outgoing_payload;
+
+ /**
+ * Callback for completion of send operation.
+ * See `aws_websocket_outgoing_frame_complete_fn`.
+ * Optional.
+ */
+ aws_websocket_outgoing_frame_complete_fn *on_complete;
+
+ /**
+ * Frame type.
+ * `aws_websocket_opcode` enum provides standard values.
+ */
+ uint8_t opcode;
+
+ /**
+ * Indicates that this is the final fragment in a message. The first fragment MAY also be the final fragment.
+ */
+ bool fin;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Return true if opcode is for a data frame, false if opcode is for a control frame.
+ */
+AWS_HTTP_API
+bool aws_websocket_is_data_frame(uint8_t opcode);
+
+/**
+ * Asynchronously establish a client websocket connection.
+ * The on_connection_setup callback is invoked when the operation has finished creating a connection, or failed.
+ */
+AWS_HTTP_API
+int aws_websocket_client_connect(const struct aws_websocket_client_connection_options *options);
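+
+/*
+ * Illustrative sketch of starting a client websocket connection. The bootstrap, socket options,
+ * callbacks, and path are hypothetical placeholders; the handshake request is built with
+ * aws_http_message_new_websocket_handshake_request(), declared later in this header.
+ *
+ *     struct aws_http_message *handshake = aws_http_message_new_websocket_handshake_request(
+ *         alloc, aws_byte_cursor_from_c_str("/chat"), aws_byte_cursor_from_c_str("example.com"));
+ *
+ *     struct aws_websocket_client_connection_options options = {
+ *         .allocator = alloc,
+ *         .bootstrap = client_bootstrap,
+ *         .socket_options = &socket_options,
+ *         .host = aws_byte_cursor_from_c_str("example.com"),
+ *         .handshake_request = handshake,
+ *         .on_connection_setup = s_on_connection_setup,
+ *         .on_connection_shutdown = s_on_connection_shutdown,
+ *         .user_data = my_app,
+ *     };
+ *     aws_websocket_client_connect(&options);
+ *     aws_http_message_release(handshake); // the handshake is kept alive via ref-counting until it completes
+ */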
+
+/**
+ * Increment the websocket's ref-count, preventing it from being destroyed.
+ * @return Always returns the same pointer that is passed in.
+ */
+AWS_HTTP_API
+struct aws_websocket *aws_websocket_acquire(struct aws_websocket *websocket);
+
+/**
+ * Decrement the websocket's ref-count.
+ * When the ref-count reaches zero, the connection will shut down, if it hasn't already.
+ * Users must release the websocket when they are done with it.
+ * The websocket's memory cannot be reclaimed until this is done.
+ * Callbacks may continue firing after this is called, with "shutdown" being the final callback.
+ * This function may be called from any thread.
+ *
+ * It is safe to pass NULL, nothing will happen.
+ */
+AWS_HTTP_API
+void aws_websocket_release(struct aws_websocket *websocket);
+
+/**
+ * Close the websocket connection.
+ * It is safe to call this, even if the connection is already closed or closing.
+ * The websocket will attempt to send a CLOSE frame during normal shutdown.
+ * If `free_scarce_resources_immediately` is true, the connection will be torn down as quickly as possible.
+ * This function may be called from any thread.
+ */
+AWS_HTTP_API
+void aws_websocket_close(struct aws_websocket *websocket, bool free_scarce_resources_immediately);
+
+/**
+ * Send a websocket frame.
+ * The `options` struct is copied.
+ * A callback will be invoked when the operation completes.
+ * This function may be called from any thread.
+ */
+AWS_HTTP_API
+int aws_websocket_send_frame(struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options);
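+
+/*
+ * Illustrative sketch of sending a single TEXT frame. For simplicity the whole payload is
+ * assumed to fit into out_buf's remaining capacity in one callback invocation;
+ * aws_byte_buf_write_from_whole_cursor() is assumed from aws-c-common.
+ *
+ *     static bool s_stream_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data) {
+ *         return aws_byte_buf_write_from_whole_cursor(out_buf, aws_byte_cursor_from_c_str("hello"));
+ *     }
+ *
+ *     struct aws_websocket_send_frame_options frame = {
+ *         .payload_length = 5, // strlen("hello")
+ *         .opcode = AWS_WEBSOCKET_OPCODE_TEXT,
+ *         .fin = true,
+ *         .stream_outgoing_payload = s_stream_payload,
+ *     };
+ *     aws_websocket_send_frame(websocket, &frame);
+ */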
+
+/**
+ * Manually increment the read window to keep frames flowing.
+ *
+ * If the websocket was created with `manual_window_management` set true,
+ * then whenever the read window reaches 0 you will stop receiving data.
+ * The websocket's `initial_window_size` determines the starting size of the read window.
+ * The read window shrinks as you receive the payload from "data" frames (TEXT, BINARY, and CONTINUATION).
+ * Use aws_websocket_increment_read_window() to increment the window again and keep frames flowing.
+ * Maintain a larger window to keep up high throughput.
+ * You only need to worry about the payload from "data" frames.
+ * The websocket automatically increments the window to account for any
+ * other incoming bytes, including other parts of a frame (opcode, payload-length, etc)
+ * and the payload of other frame types (PING, PONG, CLOSE).
+ *
+ * If the websocket was created with `manual_window_management` set false, this function does nothing.
+ *
+ * This function may be called from any thread.
+ */
+AWS_HTTP_API
+void aws_websocket_increment_read_window(struct aws_websocket *websocket, size_t size);
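+
+/*
+ * Illustrative sketch of keeping data flowing when `manual_window_management` is true:
+ * re-open the read window from the incoming-frame-payload callback once the bytes have been
+ * consumed. The signature follows aws_websocket_on_incoming_frame_payload_fn above.
+ *
+ *     static bool s_on_frame_payload(
+ *         struct aws_websocket *websocket,
+ *         const struct aws_websocket_incoming_frame *frame,
+ *         struct aws_byte_cursor data,
+ *         void *user_data) {
+ *
+ *         // process data.ptr / data.len, then give those bytes back to the window
+ *         aws_websocket_increment_read_window(websocket, data.len);
+ *         return true;
+ *     }
+ */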
+
+/**
+ * Convert the websocket into a mid-channel handler.
+ * The websocket will stop being usable via its public API and become just another handler in the channel.
+ * The caller will likely install a channel handler to the right.
+ * This must not be called in the middle of an incoming frame (between "frame begin" and "frame complete" callbacks).
+ * This MUST be called from the websocket's thread.
+ *
+ * If successful:
+ * - Other than aws_websocket_release(), all calls to aws_websocket_x() functions are ignored.
+ * - The websocket will no longer invoke any "incoming frame" callbacks.
+ * - aws_io_messages written by a downstream handler will be wrapped in binary data frames and sent upstream.
+ * The data may be split/combined as it is sent along.
+ * - aws_io_messages read from upstream handlers will be scanned for binary data frames.
+ * The payloads of these frames will be sent downstream.
+ * The payloads may be split/combined as they are sent along.
+ * - An incoming close frame will automatically result in channel-shutdown.
+ * - aws_websocket_release() must still be called or the websocket and its channel will never be cleaned up.
+ * - The websocket will still invoke its "on connection shutdown" callback when channel shutdown completes.
+ *
+ * If unsuccessful, AWS_OP_ERR is returned and the websocket is unchanged.
+ */
+AWS_HTTP_API
+int aws_websocket_convert_to_midchannel_handler(struct aws_websocket *websocket);
+
+/**
+ * Returns the websocket's underlying I/O channel.
+ */
+AWS_HTTP_API
+struct aws_channel *aws_websocket_get_channel(const struct aws_websocket *websocket);
+
+/**
+ * Generate value for a Sec-WebSocket-Key header and write it into `dst` buffer.
+ * The buffer should have at least AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH space available.
+ *
+ * This value is the base64 encoding of a random 16-byte value.
+ * RFC-6455 Section 4.1
+ */
+AWS_HTTP_API
+int aws_websocket_random_handshake_key(struct aws_byte_buf *dst);
+
+/**
+ * Create request with all required fields for a websocket upgrade request.
+ * The method and path are set, and the following headers are added:
+ *
+ * Host: <host>
+ * Upgrade: websocket
+ * Connection: Upgrade
+ * Sec-WebSocket-Key: <base64 encoding of 16 random bytes>
+ * Sec-WebSocket-Version: 13
+ */
+AWS_HTTP_API
+struct aws_http_message *aws_http_message_new_websocket_handshake_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_cursor host);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_HTTP_WEBSOCKET_H */
diff --git a/contrib/restricted/aws/aws-c-http/source/connection.c b/contrib/restricted/aws/aws-c-http/source/connection.c
new file mode 100644
index 0000000000..f020823dcf
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/connection.c
@@ -0,0 +1,1200 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/connection_monitor.h>
+
+#include <aws/http/private/h1_connection.h>
+#include <aws/http/private/h2_connection.h>
+
+#include <aws/http/private/proxy_impl.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+static struct aws_http_connection_system_vtable s_default_system_vtable = {
+ .new_socket_channel = aws_client_bootstrap_new_socket_channel,
+};
+
+static const struct aws_http_connection_system_vtable *s_system_vtable_ptr = &s_default_system_vtable;
+
+void aws_http_client_bootstrap_destroy(struct aws_http_client_bootstrap *bootstrap) {
+ /* During allocation, the underlying structures are allocated together with the bootstrap by aws_mem_acquire_many.
+ * Thus, we only need to clean up the first pointer, which is the bootstrap. */
+ if (bootstrap->alpn_string_map) {
+ aws_hash_table_clean_up(bootstrap->alpn_string_map);
+ }
+ aws_mem_release(bootstrap->alloc, bootstrap);
+}
+
+void aws_http_connection_set_system_vtable(const struct aws_http_connection_system_vtable *system_vtable) {
+ s_system_vtable_ptr = system_vtable;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_alpn_protocol_http_1_1, "http/1.1");
+AWS_STATIC_STRING_FROM_LITERAL(s_alpn_protocol_http_2, "h2");
+
+struct aws_http_server {
+ struct aws_allocator *alloc;
+ struct aws_server_bootstrap *bootstrap;
+ bool is_using_tls;
+ bool manual_window_management;
+ size_t initial_window_size;
+ void *user_data;
+ aws_http_server_on_incoming_connection_fn *on_incoming_connection;
+ aws_http_server_on_destroy_fn *on_destroy_complete;
+ struct aws_socket *socket;
+
+ /* Any thread may touch this data, but the lock must be held */
+ struct {
+ struct aws_mutex lock;
+ bool is_shutting_down;
+ struct aws_hash_table channel_to_connection_map;
+ } synced_data;
+};
+
+static void s_server_lock_synced_data(struct aws_http_server *server) {
+ int err = aws_mutex_lock(&server->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+static void s_server_unlock_synced_data(struct aws_http_server *server) {
+ int err = aws_mutex_unlock(&server->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+/* Determine the http-version, create appropriate type of connection, and insert it into the channel. */
+struct aws_http_connection *aws_http_connection_new_channel_handler(
+ struct aws_allocator *alloc,
+ struct aws_channel *channel,
+ bool is_server,
+ bool is_using_tls,
+ bool manual_window_management,
+ bool prior_knowledge_http2,
+ size_t initial_window_size,
+ const struct aws_hash_table *alpn_string_map,
+ const struct aws_http1_connection_options *http1_options,
+ const struct aws_http2_connection_options *http2_options,
+ void *connection_user_data) {
+
+ struct aws_channel_slot *connection_slot = NULL;
+ struct aws_http_connection *connection = NULL;
+
+ /* Create slot for connection. */
+ connection_slot = aws_channel_slot_new(channel);
+ if (!connection_slot) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create slot in channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ int err = aws_channel_slot_insert_end(channel, connection_slot);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to insert slot into channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Determine HTTP version */
+ enum aws_http_version version = AWS_HTTP_VERSION_1_1;
+
+ if (is_using_tls) {
+        /* Query TLS channel handler (immediately to the left in the channel) for negotiated ALPN protocol */
+ if (!connection_slot->adj_left || !connection_slot->adj_left->handler) {
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION, "static: Failed to find TLS handler in channel %p.", (void *)channel);
+ goto error;
+ }
+
+ struct aws_channel_slot *tls_slot = connection_slot->adj_left;
+ struct aws_channel_handler *tls_handler = tls_slot->handler;
+ struct aws_byte_buf protocol = aws_tls_handler_protocol(tls_handler);
+ if (protocol.len) {
+ bool customized = false;
+ if (alpn_string_map) {
+ customized = true;
+ struct aws_string *negotiated_result = aws_string_new_from_buf(alloc, &protocol);
+ struct aws_hash_element *found = NULL;
+ aws_hash_table_find(alpn_string_map, (void *)negotiated_result, &found);
+ if (found) {
+ version = (enum aws_http_version)(size_t)found->value;
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Customized ALPN protocol " PRInSTR " used. " PRInSTR " client connection established.",
+ AWS_BYTE_BUF_PRI(protocol),
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)));
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Customized ALPN protocol " PRInSTR
+                        " used. However, it's not found in the provided ALPN map.",
+ AWS_BYTE_BUF_PRI(protocol));
+ version = AWS_HTTP_VERSION_UNKNOWN;
+ }
+ aws_string_destroy(negotiated_result);
+ }
+ if (customized) {
+ /* Do nothing */
+ } else if (aws_string_eq_byte_buf(s_alpn_protocol_http_1_1, &protocol)) {
+ version = AWS_HTTP_VERSION_1_1;
+ } else if (aws_string_eq_byte_buf(s_alpn_protocol_http_2, &protocol)) {
+ version = AWS_HTTP_VERSION_2;
+ } else {
+ AWS_LOGF_WARN(AWS_LS_HTTP_CONNECTION, "static: Unrecognized ALPN protocol. Assuming HTTP/1.1");
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION, "static: Unrecognized ALPN protocol " PRInSTR, AWS_BYTE_BUF_PRI(protocol));
+
+ version = AWS_HTTP_VERSION_1_1;
+ }
+ }
+ } else {
+ if (prior_knowledge_http2) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "Using prior knowledge to start HTTP/2 connection");
+ version = AWS_HTTP_VERSION_2;
+ }
+ }
+
+ /* Create connection/handler */
+ switch (version) {
+ case AWS_HTTP_VERSION_1_1:
+ if (is_server) {
+ connection = aws_http_connection_new_http1_1_server(
+ alloc, manual_window_management, initial_window_size, http1_options);
+ } else {
+ connection = aws_http_connection_new_http1_1_client(
+ alloc, manual_window_management, initial_window_size, http1_options);
+ }
+ break;
+ case AWS_HTTP_VERSION_2:
+ if (is_server) {
+ connection = aws_http_connection_new_http2_server(alloc, manual_window_management, http2_options);
+ } else {
+ connection = aws_http_connection_new_http2_client(alloc, manual_window_management, http2_options);
+ }
+ break;
+ default:
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Unsupported version " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)));
+
+ aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL);
+ goto error;
+ }
+
+ if (!connection) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create " PRInSTR " %s connection object, error %d (%s).",
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(version)),
+ is_server ? "server" : "client",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+ connection->user_data = connection_user_data;
+
+ /* Connect handler and slot */
+ if (aws_channel_slot_set_handler(connection_slot, &connection->channel_handler)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to set HTTP handler into slot on channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ /* Success! Inform connection that installation is complete */
+ connection->vtable->on_channel_handler_installed(&connection->channel_handler, connection_slot);
+
+ return connection;
+
+error:
+ if (connection_slot) {
+ if (!connection_slot->handler && connection) {
+ aws_channel_handler_destroy(&connection->channel_handler);
+ }
+
+ aws_channel_slot_remove(connection_slot);
+ }
+
+ return NULL;
+}
+
+void aws_http_connection_close(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ connection->vtable->close(connection);
+}
+
+void aws_http_connection_stop_new_requests(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ connection->vtable->stop_new_requests(connection);
+}
+
+bool aws_http_connection_is_open(const struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ return connection->vtable->is_open(connection);
+}
+
+bool aws_http_connection_new_requests_allowed(const struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ return connection->vtable->new_requests_allowed(connection);
+}
+
+bool aws_http_connection_is_client(const struct aws_http_connection *connection) {
+ return connection->client_data;
+}
+
+bool aws_http_connection_is_server(const struct aws_http_connection *connection) {
+ return connection->server_data;
+}
+
+int aws_http2_connection_change_settings(
+ struct aws_http_connection *http2_connection,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->change_settings(
+ http2_connection, settings_array, num_settings, on_completed, user_data);
+}
+
+int aws_http2_connection_ping(
+ struct aws_http_connection *http2_connection,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_ack,
+ void *user_data) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->send_ping(http2_connection, optional_opaque_data, on_ack, user_data);
+}
+
+void aws_http2_connection_send_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->send_goaway(http2_connection, http2_error, allow_more_streams, optional_debug_data);
+}
+
+int aws_http2_connection_get_sent_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(out_http2_error);
+ AWS_PRECONDITION(out_last_stream_id);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->get_sent_goaway(http2_connection, out_http2_error, out_last_stream_id);
+}
+
+int aws_http2_connection_get_received_goaway(
+ struct aws_http_connection *http2_connection,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(out_http2_error);
+ AWS_PRECONDITION(out_last_stream_id);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ return http2_connection->vtable->get_received_goaway(http2_connection, out_http2_error, out_last_stream_id);
+}
+
+void aws_http2_connection_get_local_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->get_local_settings(http2_connection, out_settings);
+}
+
+void aws_http2_connection_get_remote_settings(
+ const struct aws_http_connection *http2_connection,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->get_remote_settings(http2_connection, out_settings);
+}
+
+void aws_http2_connection_update_window(struct aws_http_connection *http2_connection, uint32_t increment_size) {
+ AWS_ASSERT(http2_connection);
+ AWS_PRECONDITION(http2_connection->vtable);
+ AWS_FATAL_ASSERT(http2_connection->http_version == AWS_HTTP_VERSION_2);
+ http2_connection->vtable->update_window(http2_connection, increment_size);
+}
+
+struct aws_channel *aws_http_connection_get_channel(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ return connection->channel_slot->channel;
+}
+
+int aws_http_alpn_map_init(struct aws_allocator *allocator, struct aws_hash_table *map) {
+ AWS_ASSERT(allocator);
+ AWS_ASSERT(map);
+ int result = aws_hash_table_init(
+ map,
+ allocator,
+ 5 /* initial size */,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ NULL);
+ if (result) {
+ /* OOM will crash */
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "Failed to initialize ALPN map with error code %d (%s)",
+ error_code,
+ aws_error_name(error_code));
+ }
+ return result;
+}
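+
+/*
+ * Editorial illustration (not part of the upstream source): a minimal sketch of how a caller might
+ * populate a custom ALPN map before handing it to the connection options. The value is stored as the
+ * enum cast through size_t, matching the lookup in aws_http_connection_new_channel_handler above.
+ * The "h2" mapping is just an example; kept under #if 0 as documentation only.
+ */
+#if 0
+static int s_example_fill_alpn_map(struct aws_allocator *alloc, struct aws_hash_table *map) {
+    if (aws_http_alpn_map_init(alloc, map)) {
+        return AWS_OP_ERR;
+    }
+    /* The table owns the key string and destroys it via aws_hash_callback_string_destroy on clean up. */
+    struct aws_string *key = aws_string_new_from_c_str(alloc, "h2");
+    if (!key) {
+        aws_hash_table_clean_up(map);
+        return AWS_OP_ERR;
+    }
+    if (aws_hash_table_put(map, key, (void *)(size_t)AWS_HTTP_VERSION_2, NULL)) {
+        aws_string_destroy(key);
+        aws_hash_table_clean_up(map);
+        return AWS_OP_ERR;
+    }
+    return AWS_OP_SUCCESS;
+}
+#endif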
+
+void aws_http_connection_acquire(struct aws_http_connection *connection) {
+ AWS_ASSERT(connection);
+ aws_atomic_fetch_add(&connection->refcount, 1);
+}
+
+void aws_http_connection_release(struct aws_http_connection *connection) {
+ if (!connection) {
+ return;
+ }
+ size_t prev_refcount = aws_atomic_fetch_sub(&connection->refcount, 1);
+ if (prev_refcount == 1) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Final connection refcount released, shut down if necessary.",
+ (void *)connection);
+
+ /* Channel might already be shut down, but make sure */
+ aws_channel_shutdown(connection->channel_slot->channel, AWS_ERROR_SUCCESS);
+
+ /* When the channel's refcount reaches 0, it destroys its slots/handlers, which will destroy the connection */
+ aws_channel_release_hold(connection->channel_slot->channel);
+ } else {
+ AWS_FATAL_ASSERT(prev_refcount != 0);
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Connection refcount released, %zu remaining.",
+ (void *)connection,
+ prev_refcount - 1);
+ }
+}
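+
+/*
+ * Editorial illustration (not part of the upstream source): the intended acquire/release pairing.
+ * Any component that stores the connection takes its own reference and releases it when finished;
+ * the final release shuts the channel down as described in the log message above. Kept under #if 0.
+ */
+#if 0
+static void s_example_hold_connection(struct aws_http_connection *connection) {
+    aws_http_connection_acquire(connection); /* keep the connection alive while this component uses it */
+    /* ... create and activate streams on the connection ... */
+    aws_http_connection_release(connection); /* the final release triggers channel shutdown */
+}
+#endif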
+
+/* At this point, the server bootstrapper has accepted an incoming connection from a client and set up a channel.
+ * Now we need to create an aws_http_connection and insert it into the channel as a channel-handler.
+ * Note: Be careful not to access server->socket until lock is acquired to avoid race conditions */
+static void s_server_bootstrap_on_accept_channel_setup(
+ struct aws_server_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+ AWS_ASSERT(user_data);
+ struct aws_http_server *server = user_data;
+ bool user_cb_invoked = false;
+ struct aws_http_connection *connection = NULL;
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_SERVER,
+ "%p: Incoming connection failed with error code %d (%s)",
+ (void *)server,
+ error_code,
+ aws_error_name(error_code));
+
+ goto error;
+ }
+ /* Create connection */
+ /* TODO: expose http1/2 options to server API */
+ struct aws_http1_connection_options http1_options;
+ AWS_ZERO_STRUCT(http1_options);
+ struct aws_http2_connection_options http2_options;
+ AWS_ZERO_STRUCT(http2_options);
+ connection = aws_http_connection_new_channel_handler(
+ server->alloc,
+ channel,
+ true,
+ server->is_using_tls,
+ server->manual_window_management,
+ false, /* prior_knowledge_http2 */
+ server->initial_window_size,
+ NULL, /* alpn_string_map */
+ &http1_options,
+ &http2_options,
+ NULL /* connection_user_data */);
+ if (!connection) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_SERVER,
+ "%p: Failed to create connection object, error %d (%s).",
+ (void *)server,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ int put_err = 0;
+ /* BEGIN CRITICAL SECTION */
+ s_server_lock_synced_data(server);
+ if (server->synced_data.is_shutting_down) {
+ error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ }
+ if (!error_code) {
+ put_err = aws_hash_table_put(&server->synced_data.channel_to_connection_map, channel, connection, NULL);
+ }
+ s_server_unlock_synced_data(server);
+ /* END CRITICAL SECTION */
+    if (error_code) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "id=%p: Incoming connection failed. The server is shutting down.",
+            (void *)server);
+        goto error;
+    }
+
+ if (put_err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_SERVER,
+ "%p: %s:%d: Failed to store connection object, error %d (%s).",
+ (void *)server,
+ server->socket->local_endpoint.address,
+ server->socket->local_endpoint.port,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ /* Tell user of successful connection. */
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: " PRInSTR " server connection established at %p %s:%d.",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version)),
+ (void *)server,
+ server->socket->local_endpoint.address,
+ server->socket->local_endpoint.port);
+
+ server->on_incoming_connection(server, connection, AWS_ERROR_SUCCESS, server->user_data);
+ user_cb_invoked = true;
+
+ /* If user failed to configure the server during callback, shut down the channel. */
+ if (!connection->server_data->on_incoming_request) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Caller failed to invoke aws_http_connection_configure_server() during on_incoming_connection "
+ "callback, closing connection.",
+ (void *)connection);
+
+ aws_raise_error(AWS_ERROR_HTTP_REACTION_REQUIRED);
+ goto error;
+ }
+ return;
+
+error:
+
+ if (!error_code) {
+ error_code = aws_last_error();
+ }
+
+ if (!user_cb_invoked) {
+ server->on_incoming_connection(server, NULL, error_code, server->user_data);
+ }
+
+ if (channel) {
+ aws_channel_shutdown(channel, error_code);
+ }
+
+ if (connection) {
+ /* release the ref count for the user side */
+ aws_http_connection_release(connection);
+ }
+}
+
+/* clean the server memory up */
+static void s_http_server_clean_up(struct aws_http_server *server) {
+ if (!server) {
+ return;
+ }
+
+ aws_server_bootstrap_release(server->bootstrap);
+
+ /* invoke the user callback */
+ if (server->on_destroy_complete) {
+ server->on_destroy_complete(server->user_data);
+ }
+ aws_hash_table_clean_up(&server->synced_data.channel_to_connection_map);
+ aws_mutex_clean_up(&server->synced_data.lock);
+ aws_mem_release(server->alloc, server);
+}
+
+/* At this point, the channel for a server connection has completed shutdown, but hasn't been destroyed yet. */
+static void s_server_bootstrap_on_accept_channel_shutdown(
+ struct aws_server_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+ AWS_ASSERT(user_data);
+ struct aws_http_server *server = user_data;
+
+ /* Figure out which connection this was, and remove that entry from the map.
+ * It won't be in the map if something went wrong while setting up the connection. */
+ struct aws_hash_element map_elem;
+ int was_present;
+
+ /* BEGIN CRITICAL SECTION */
+ s_server_lock_synced_data(server);
+ int remove_err =
+ aws_hash_table_remove(&server->synced_data.channel_to_connection_map, channel, &map_elem, &was_present);
+ s_server_unlock_synced_data(server);
+ /* END CRITICAL SECTION */
+
+ if (!remove_err && was_present) {
+ struct aws_http_connection *connection = map_elem.value;
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "id=%p: Server connection shut down.", (void *)connection);
+ /* Tell user about shutdown */
+ if (connection->server_data->on_shutdown) {
+ connection->server_data->on_shutdown(connection, error_code, connection->user_data);
+ }
+ }
+}
+
+/* the server listener has finished the destroy process, no existing connections
+ * finally safe to clean the server up */
+static void s_server_bootstrap_on_server_listener_destroy(struct aws_server_bootstrap *bootstrap, void *user_data) {
+ (void)bootstrap;
+ AWS_ASSERT(user_data);
+ struct aws_http_server *server = user_data;
+ s_http_server_clean_up(server);
+}
+
+struct aws_http_server *aws_http_server_new(const struct aws_http_server_options *options) {
+ aws_http_fatal_assert_library_initialized();
+
+ struct aws_http_server *server = NULL;
+
+ if (!options || options->self_size == 0 || !options->allocator || !options->bootstrap || !options->socket_options ||
+ !options->on_incoming_connection || !options->endpoint) {
+
+ AWS_LOGF_ERROR(AWS_LS_HTTP_SERVER, "static: Invalid options, cannot create server.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ /* nothing to clean up */
+ return NULL;
+ }
+
+ server = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_http_server));
+ if (!server) {
+ /* nothing to clean up */
+ return NULL;
+ }
+
+ server->alloc = options->allocator;
+ server->bootstrap = aws_server_bootstrap_acquire(options->bootstrap);
+ server->is_using_tls = options->tls_options != NULL;
+ server->initial_window_size = options->initial_window_size;
+ server->user_data = options->server_user_data;
+ server->on_incoming_connection = options->on_incoming_connection;
+ server->on_destroy_complete = options->on_destroy_complete;
+ server->manual_window_management = options->manual_window_management;
+
+ int err = aws_mutex_init(&server->synced_data.lock);
+    if (err) {
+        AWS_LOGF_ERROR(
+            AWS_LS_HTTP_SERVER,
+            "static: Failed to initialize mutex, error %d (%s).",
+            aws_last_error(),
+            aws_error_name(aws_last_error()));
+ goto mutex_error;
+ }
+ err = aws_hash_table_init(
+ &server->synced_data.channel_to_connection_map, server->alloc, 16, aws_hash_ptr, aws_ptr_eq, NULL, NULL);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_SERVER,
+ "static: Cannot create server, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto hash_table_error;
+ }
+ /* Protect against callbacks firing before server->socket is set */
+ s_server_lock_synced_data(server);
+ if (options->tls_options) {
+ server->is_using_tls = true;
+ }
+
+ struct aws_server_socket_channel_bootstrap_options bootstrap_options = {
+ .enable_read_back_pressure = options->manual_window_management,
+ .tls_options = options->tls_options,
+ .bootstrap = options->bootstrap,
+ .socket_options = options->socket_options,
+ .incoming_callback = s_server_bootstrap_on_accept_channel_setup,
+ .shutdown_callback = s_server_bootstrap_on_accept_channel_shutdown,
+ .destroy_callback = s_server_bootstrap_on_server_listener_destroy,
+ .host_name = options->endpoint->address,
+ .port = options->endpoint->port,
+ .user_data = server,
+ };
+
+ server->socket = aws_server_bootstrap_new_socket_listener(&bootstrap_options);
+
+ s_server_unlock_synced_data(server);
+
+ if (!server->socket) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_SERVER,
+ "static: Failed creating new socket listener, error %d (%s). Cannot create server.",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto socket_error;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_SERVER,
+ "%p %s:%d: Server setup complete, listening for incoming connections.",
+ (void *)server,
+ server->socket->local_endpoint.address,
+ server->socket->local_endpoint.port);
+
+ return server;
+
+socket_error:
+ aws_hash_table_clean_up(&server->synced_data.channel_to_connection_map);
+hash_table_error:
+ aws_mutex_clean_up(&server->synced_data.lock);
+mutex_error:
+ aws_mem_release(server->alloc, server);
+ return NULL;
+}
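+
+/*
+ * Editorial illustration (not part of the upstream source): a minimal sketch of the options that the
+ * validation above requires (self size, allocator, bootstrap, socket options, endpoint and the
+ * incoming-connection callback). The bootstrap, socket options and endpoint are assumed to be set up
+ * elsewhere by the application; kept under #if 0 as documentation only.
+ */
+#if 0
+static void s_example_on_incoming_connection(
+    struct aws_http_server *server,
+    struct aws_http_connection *connection,
+    int error_code,
+    void *user_data) {
+    (void)server;
+    (void)error_code;
+    (void)user_data;
+    /* A real handler must call aws_http_connection_configure_server() on the new connection. */
+    (void)connection;
+}
+
+static struct aws_http_server *s_example_server_new(
+    struct aws_allocator *alloc,
+    struct aws_server_bootstrap *bootstrap,
+    struct aws_socket_options *socket_options,
+    struct aws_socket_endpoint *endpoint) {
+
+    struct aws_http_server_options options = {
+        .self_size = sizeof(struct aws_http_server_options),
+        .allocator = alloc,
+        .bootstrap = bootstrap,
+        .socket_options = socket_options,
+        .endpoint = endpoint,
+        .on_incoming_connection = s_example_on_incoming_connection,
+    };
+    return aws_http_server_new(&options);
+}
+#endif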
+
+void aws_http_server_release(struct aws_http_server *server) {
+ if (!server) {
+ return;
+ }
+ bool already_shutting_down = false;
+ /* BEGIN CRITICAL SECTION */
+ s_server_lock_synced_data(server);
+ if (server->synced_data.is_shutting_down) {
+ already_shutting_down = true;
+ } else {
+ server->synced_data.is_shutting_down = true;
+ }
+ if (!already_shutting_down) {
+ /* shutdown all existing channels */
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(&server->synced_data.channel_to_connection_map);
+ !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter)) {
+ struct aws_channel *channel = (struct aws_channel *)iter.element.key;
+ aws_channel_shutdown(channel, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ }
+ }
+ s_server_unlock_synced_data(server);
+ /* END CRITICAL SECTION */
+
+ if (already_shutting_down) {
+        /* The server is already shutting down; don't shut it down again. */
+ AWS_LOGF_TRACE(AWS_LS_HTTP_SERVER, "id=%p: The server is already shutting down", (void *)server);
+ return;
+ }
+
+    /* Stop listening and clean up the socket. After all existing connections finish shutting down,
+     * s_server_bootstrap_on_server_listener_destroy will be invoked and the server will be cleaned up there. */
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_SERVER,
+ "%p %s:%d: Shutting down the server.",
+ (void *)server,
+ server->socket->local_endpoint.address,
+ server->socket->local_endpoint.port);
+
+ aws_server_bootstrap_destroy_socket_listener(server->bootstrap, server->socket);
+
+    /* Wait for connections to finish shutting down;
+     * clean up will be called from the event loop. */
+}
+
+/* At this point, the channel bootstrapper has established a connection to the server and set up a channel.
+ * Now we need to create the aws_http_connection and insert it into the channel as a channel-handler. */
+static void s_client_bootstrap_on_channel_setup(
+ struct aws_client_bootstrap *channel_bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)channel_bootstrap;
+ AWS_ASSERT(user_data);
+ struct aws_http_client_bootstrap *http_bootstrap = user_data;
+
+ /* Contract for setup callbacks is: channel is NULL if error_code is non-zero. */
+ AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL));
+
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Client connection failed with error %d (%s).",
+ error_code,
+ aws_error_name(error_code));
+
+ /* Immediately tell user of failed connection.
+ * No channel exists, so there will be no channel_shutdown callback. */
+ http_bootstrap->on_setup(NULL, error_code, http_bootstrap->user_data);
+
+ /* Clean up the http_bootstrap, it has no more work to do. */
+ aws_http_client_bootstrap_destroy(http_bootstrap);
+ return;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "static: Socket connected, creating client connection object.");
+
+ http_bootstrap->connection = aws_http_connection_new_channel_handler(
+ http_bootstrap->alloc,
+ channel,
+ false,
+ http_bootstrap->is_using_tls,
+ http_bootstrap->stream_manual_window_management,
+ http_bootstrap->prior_knowledge_http2,
+ http_bootstrap->initial_window_size,
+ http_bootstrap->alpn_string_map,
+ &http_bootstrap->http1_options,
+ &http_bootstrap->http2_options,
+ http_bootstrap->user_data);
+ if (!http_bootstrap->connection) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create the client connection object, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ if (aws_http_connection_monitoring_options_is_valid(&http_bootstrap->monitoring_options)) {
+ /*
+ * On creation we validate monitoring options, if they exist, and fail if they're not
+ * valid. So at this point, is_valid() functions as an is-monitoring-on? check. A false
+ * value here is not an error, it's just not enabled.
+ */
+ struct aws_crt_statistics_handler *http_connection_monitor =
+ aws_crt_statistics_handler_new_http_connection_monitor(
+ http_bootstrap->alloc, &http_bootstrap->monitoring_options);
+ if (http_connection_monitor == NULL) {
+ goto error;
+ }
+
+ aws_channel_set_statistics_handler(channel, http_connection_monitor);
+ }
+
+ http_bootstrap->connection->proxy_request_transform = http_bootstrap->proxy_request_transform;
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: " PRInSTR " client connection established.",
+ (void *)http_bootstrap->connection,
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(http_bootstrap->connection->http_version)));
+
+ /* Tell user of successful connection.
+ * Then clear the on_setup callback so that we know it's been called */
+ http_bootstrap->on_setup(http_bootstrap->connection, AWS_ERROR_SUCCESS, http_bootstrap->user_data);
+ http_bootstrap->on_setup = NULL;
+
+ return;
+
+error:
+ /* Something went wrong. Invoke channel shutdown. Then wait for channel shutdown to complete
+ * before informing the user that setup failed and cleaning up the http_bootstrap.*/
+ aws_channel_shutdown(channel, aws_last_error());
+}
+
+/* At this point, the channel for a client connection has completed its shutdown */
+static void s_client_bootstrap_on_channel_shutdown(
+ struct aws_client_bootstrap *channel_bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)channel_bootstrap;
+ (void)channel;
+
+ AWS_ASSERT(user_data);
+ struct aws_http_client_bootstrap *http_bootstrap = user_data;
+
+ /* If on_setup hasn't been called yet, inform user of failed setup.
+ * If on_setup was already called, inform user that it's shut down now. */
+ if (http_bootstrap->on_setup) {
+ /* make super duper sure that failed setup receives a non-zero error_code */
+ if (error_code == 0) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Client setup failed with error %d (%s).",
+ error_code,
+ aws_error_name(error_code));
+
+ http_bootstrap->on_setup(NULL, error_code, http_bootstrap->user_data);
+
+ } else if (http_bootstrap->on_shutdown) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "%p: Client shutdown completed with error %d (%s).",
+ (void *)http_bootstrap->connection,
+ error_code,
+ aws_error_name(error_code));
+
+ http_bootstrap->on_shutdown(http_bootstrap->connection, error_code, http_bootstrap->user_data);
+ }
+
+ /* Clean up bootstrapper */
+ aws_http_client_bootstrap_destroy(http_bootstrap);
+}
+
+static int s_validate_http_client_connection_options(const struct aws_http_client_connection_options *options) {
+ if (options->self_size == 0) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, self size not initialized");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!options->allocator) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, no allocator supplied");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (options->host_name.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, empty host name.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!options->socket_options) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, socket options are null.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!options->on_setup) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, setup callback is null");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+    /* http2_options cannot be NULL here; the calling function adds defaults if they were missing */
+    if (options->http2_options->num_initial_settings > 0 && !options->http2_options->initial_settings_array) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Invalid connection options, h2 settings count is non-zero but settings array is null");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (options->monitoring_options && !aws_http_connection_monitoring_options_is_valid(options->monitoring_options)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: Invalid connection options, invalid monitoring options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (options->prior_knowledge_http2 && options->tls_options) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: HTTP/2 prior knowledge only works with cleartext TCP.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
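+
+/*
+ * Editorial illustration (not part of the upstream source): a minimal sketch of client options that
+ * satisfy the validation above (self size, allocator, non-empty host name, socket options and a setup
+ * callback). The bootstrap, socket options, host name and port are placeholders assumed to come from
+ * the surrounding application; kept under #if 0 as documentation only.
+ */
+#if 0
+static void s_example_on_setup(struct aws_http_connection *connection, int error_code, void *user_data) {
+    (void)connection;
+    (void)error_code;
+    (void)user_data;
+}
+
+static int s_example_connect(
+    struct aws_allocator *alloc,
+    struct aws_client_bootstrap *bootstrap,
+    struct aws_socket_options *socket_options) {
+
+    struct aws_http_client_connection_options options = {
+        .self_size = sizeof(struct aws_http_client_connection_options),
+        .allocator = alloc,
+        .bootstrap = bootstrap,
+        .host_name = aws_byte_cursor_from_c_str("example.com"),
+        .port = 80,
+        .socket_options = socket_options,
+        .on_setup = s_example_on_setup,
+    };
+    return aws_http_client_connect(&options);
+}
+#endif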
+
+struct s_copy_alpn_string_map_context {
+ struct aws_hash_table *map;
+ struct aws_allocator *allocator;
+};
+
+/* copy every item from the source into the destination to make a deep copy of the map */
+static int s_copy_alpn_string_map(void *context, struct aws_hash_element *item) {
+ struct s_copy_alpn_string_map_context *func_context = context;
+ struct aws_hash_table *dest = func_context->map;
+ /* make a deep copy of the string and hash map will own the copy */
+ struct aws_string *key_copy = aws_string_new_from_string(func_context->allocator, item->key);
+ int was_created;
+ if (aws_hash_table_put(dest, key_copy, item->value, &was_created)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "Failed to copy ALPN map with error code %d (%s)",
+ error_code,
+ aws_error_name(error_code));
+ /* failed to put into the table, we need to clean up the copy ourselves */
+ aws_string_destroy(key_copy);
+ /* return error to stop iteration */
+ return AWS_COMMON_HASH_TABLE_ITER_ERROR;
+ }
+ if (!was_created) {
+ /* no new entry created, clean up the copy ourselves */
+ aws_string_destroy(key_copy);
+ }
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+}
+
+int aws_http_alpn_map_init_copy(
+ struct aws_allocator *allocator,
+ struct aws_hash_table *dest,
+ struct aws_hash_table *src) {
+ if (!src) {
+ AWS_ZERO_STRUCT(*dest);
+ return AWS_OP_SUCCESS;
+ }
+ if (!src->p_impl) {
+ AWS_ZERO_STRUCT(*dest);
+ return AWS_OP_SUCCESS;
+ }
+
+ if (aws_http_alpn_map_init(allocator, dest)) {
+ return AWS_OP_ERR;
+ }
+ struct s_copy_alpn_string_map_context context;
+ context.allocator = allocator;
+ context.map = dest;
+ /* make a deep copy of the map */
+ if (aws_hash_table_foreach(src, s_copy_alpn_string_map, &context)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "Failed to copy ALPN map with error code %d (%s)",
+ error_code,
+ aws_error_name(error_code));
+ aws_hash_table_clean_up(dest);
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+int aws_http_client_connect_internal(
+ const struct aws_http_client_connection_options *orig_options,
+ aws_http_proxy_request_transform_fn *proxy_request_transform) {
+
+ if (!orig_options) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: http connection options are null.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ struct aws_http_client_bootstrap *http_bootstrap = NULL;
+ struct aws_string *host_name = NULL;
+ int err = 0;
+
+ /* make copy of options, and add defaults for missing optional structs */
+ struct aws_http_client_connection_options options = *orig_options;
+ struct aws_http1_connection_options default_http1_options;
+ AWS_ZERO_STRUCT(default_http1_options);
+ if (options.http1_options == NULL) {
+ options.http1_options = &default_http1_options;
+ }
+
+ struct aws_http2_connection_options default_http2_options;
+ AWS_ZERO_STRUCT(default_http2_options);
+ if (options.http2_options == NULL) {
+ options.http2_options = &default_http2_options;
+ }
+
+ /* validate options */
+ if (s_validate_http_client_connection_options(&options)) {
+ goto error;
+ }
+
+ AWS_FATAL_ASSERT(options.proxy_options == NULL);
+
+    /* The bootstrap_new() functions require a null-terminated c-str */
+ host_name = aws_string_new_from_cursor(options.allocator, &options.host_name);
+ if (!host_name) {
+ goto error;
+ }
+
+ struct aws_http2_setting *setting_array = NULL;
+ struct aws_hash_table *alpn_string_map = NULL;
+ aws_mem_acquire_many(
+ options.allocator,
+ 3,
+ &http_bootstrap,
+ sizeof(struct aws_http_client_bootstrap),
+ &setting_array,
+ options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting),
+ &alpn_string_map,
+ sizeof(struct aws_hash_table));
+
+ AWS_ZERO_STRUCT(*http_bootstrap);
+
+ http_bootstrap->alloc = options.allocator;
+ http_bootstrap->is_using_tls = options.tls_options != NULL;
+ http_bootstrap->stream_manual_window_management = options.manual_window_management;
+ http_bootstrap->prior_knowledge_http2 = options.prior_knowledge_http2;
+ http_bootstrap->initial_window_size = options.initial_window_size;
+ http_bootstrap->user_data = options.user_data;
+ http_bootstrap->on_setup = options.on_setup;
+ http_bootstrap->on_shutdown = options.on_shutdown;
+ http_bootstrap->proxy_request_transform = proxy_request_transform;
+ http_bootstrap->http1_options = *options.http1_options;
+ http_bootstrap->http2_options = *options.http2_options;
+
+ /* keep a copy of the settings array if it's not NULL */
+ if (options.http2_options->num_initial_settings > 0) {
+ memcpy(
+ setting_array,
+ options.http2_options->initial_settings_array,
+ options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting));
+ http_bootstrap->http2_options.initial_settings_array = setting_array;
+ }
+
+ if (options.alpn_string_map) {
+ if (aws_http_alpn_map_init_copy(options.allocator, alpn_string_map, options.alpn_string_map)) {
+ goto error;
+ }
+ http_bootstrap->alpn_string_map = alpn_string_map;
+ }
+
+ if (options.monitoring_options) {
+ http_bootstrap->monitoring_options = *options.monitoring_options;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "static: attempting to initialize a new client channel to %s:%d",
+ aws_string_c_str(host_name),
+ (int)options.port);
+
+ struct aws_socket_channel_bootstrap_options channel_options = {
+ .bootstrap = options.bootstrap,
+ .host_name = aws_string_c_str(host_name),
+ .port = options.port,
+ .socket_options = options.socket_options,
+ .tls_options = options.tls_options,
+ .setup_callback = s_client_bootstrap_on_channel_setup,
+ .shutdown_callback = s_client_bootstrap_on_channel_shutdown,
+ .enable_read_back_pressure = options.manual_window_management,
+ .user_data = http_bootstrap,
+ .requested_event_loop = options.requested_event_loop,
+ };
+
+ err = s_system_vtable_ptr->new_socket_channel(&channel_options);
+
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to initiate socket channel for new client connection, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ aws_string_destroy(host_name);
+ return AWS_OP_SUCCESS;
+
+error:
+ if (http_bootstrap) {
+ aws_http_client_bootstrap_destroy(http_bootstrap);
+ }
+
+ if (host_name) {
+ aws_string_destroy(host_name);
+ }
+
+ return AWS_OP_ERR;
+}
+
+int aws_http_client_connect(const struct aws_http_client_connection_options *options) {
+ aws_http_fatal_assert_library_initialized();
+ if (options->prior_knowledge_http2 && options->tls_options) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "static: HTTP/2 prior knowledge only works with cleartext TCP.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (options->proxy_options != NULL) {
+ return aws_http_client_connect_via_proxy(options);
+ } else {
+ if (!options->proxy_ev_settings || options->proxy_ev_settings->env_var_type != AWS_HPEV_ENABLE) {
+ return aws_http_client_connect_internal(options, NULL);
+ } else {
+            /* Proxying through environment variables is enabled */
+ return aws_http_client_connect_via_proxy(options);
+ }
+ }
+}
+
+enum aws_http_version aws_http_connection_get_version(const struct aws_http_connection *connection) {
+ return connection->http_version;
+}
+
+int aws_http_connection_configure_server(
+ struct aws_http_connection *connection,
+ const struct aws_http_server_connection_options *options) {
+
+ if (!connection || !options || !options->on_incoming_request) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "id=%p: Invalid server configuration options.", (void *)connection);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!connection->server_data) {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Server-only function invoked on client, ignoring call.",
+ (void *)connection);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ if (connection->server_data->on_incoming_request) {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION, "id=%p: Connection is already configured, ignoring call.", (void *)connection);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ connection->user_data = options->connection_user_data;
+ connection->server_data->on_incoming_request = options->on_incoming_request;
+ connection->server_data->on_shutdown = options->on_shutdown;
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Stream IDs are only 31 bits [5.1.1] */
+static const uint32_t MAX_STREAM_ID = UINT32_MAX >> 1;
+
+uint32_t aws_http_connection_get_next_stream_id(struct aws_http_connection *connection) {
+
+ uint32_t next_id = connection->next_stream_id;
+
+ if (AWS_UNLIKELY(next_id > MAX_STREAM_ID)) {
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "id=%p: All available stream ids are gone", (void *)connection);
+
+ next_id = 0;
+ aws_raise_error(AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED);
+ } else {
+ connection->next_stream_id += 2;
+ }
+ return next_id;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/connection_manager.c b/contrib/restricted/aws/aws-c-http/source/connection_manager.c
new file mode 100644
index 0000000000..30eda61778
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/connection_manager.c
@@ -0,0 +1,1560 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/connection_manager.h>
+
+#include <aws/http/connection.h>
+#include <aws/http/private/connection_manager_system_vtable.h>
+#include <aws/http/private/connection_monitor.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/proxy_impl.h>
+
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/logging.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+/*
+ * Established connections not currently in use are tracked via this structure.
+ */
+struct aws_idle_connection {
+ struct aws_allocator *allocator;
+ struct aws_linked_list_node node;
+ uint64_t cull_timestamp;
+ struct aws_http_connection *connection;
+};
+
+/*
+ * System vtable to use under normal circumstances
+ */
+static struct aws_http_connection_manager_system_vtable s_default_system_vtable = {
+ .create_connection = aws_http_client_connect,
+ .release_connection = aws_http_connection_release,
+ .close_connection = aws_http_connection_close,
+ .is_connection_available = aws_http_connection_new_requests_allowed,
+ .get_monotonic_time = aws_high_res_clock_get_ticks,
+ .is_callers_thread = aws_channel_thread_is_callers_thread,
+ .connection_get_channel = aws_http_connection_get_channel,
+ .connection_get_version = aws_http_connection_get_version,
+};
+
+const struct aws_http_connection_manager_system_vtable *g_aws_http_connection_manager_default_system_vtable_ptr =
+ &s_default_system_vtable;
+
+bool aws_http_connection_manager_system_vtable_is_valid(const struct aws_http_connection_manager_system_vtable *table) {
+ return table->create_connection && table->close_connection && table->release_connection &&
+ table->is_connection_available;
+}
+
+enum aws_http_connection_manager_state_type { AWS_HCMST_UNINITIALIZED, AWS_HCMST_READY, AWS_HCMST_SHUTTING_DOWN };
+
+/*
+ * AWS_HCMCT_VENDED_CONNECTION: The number of connections currently being used by external users.
+ * AWS_HCMCT_PENDING_CONNECTIONS: The number of pending new connection requests we have outstanding to the http
+ * layer.
+ * AWS_HCMCT_OPEN_CONNECTION: Always equal to # of connection shutdown callbacks not yet invoked
+ * or equivalently:
+ *
+ * # of connections ever created by the manager - # shutdown callbacks received
+ */
+enum aws_http_connection_manager_count_type {
+ AWS_HCMCT_VENDED_CONNECTION,
+ AWS_HCMCT_PENDING_CONNECTIONS,
+ AWS_HCMCT_OPEN_CONNECTION,
+ AWS_HCMCT_COUNT,
+};
+
+/**
+ * Vocabulary
+ * Acquisition - a request by a user for a connection
+ * Pending Acquisition - a request by a user for a new connection that has not been completed. It may be
+ * waiting on http, a release by another user, or the manager itself.
+ * Pending Connect - a request to the http layer for a new connection that has not been resolved yet
+ * Vended Connection - a successfully established connection that is currently in use by something; must
+ * be released (through the connection manager) by the user before anyone else can use it. The connection
+ * manager does not explicitly track vended connections.
+ * Task Set - A set of operations that should be attempted once the lock is released. A task set includes
+ * completion callbacks (which can't fail) and connection attempts (which can fail either immediately or
+ * asynchronously).
+ *
+ * Requirements/Assumptions
+ * (1) Don't invoke user callbacks while holding the internal state lock
+ * (2) Don't invoke downstream http calls while holding the internal state lock
+ * (3) Only log unusual or rare events while the lock is held. Common-path logging should be while it is
+ * not held.
+ * (4) Don't crash or do awful things (leaking resources is ok though) if the interface contract
+ * (ref counting + balanced acquire/release of connections) is violated by the user
+ *
+ * In order to fulfill (1) and (2), all side-effecting operations within the connection manager follow a pattern:
+ *
+ * (1) Lock
+ * (2) Make state changes based on the operation
+ * (3) Build a set of work (completions, connect calls, releases, self-destruction) as appropriate to the operation
+ * (4) Unlock
+ * (5) Execute the task set
+ *
+ * Asynchronous work order failures are handled in the async callback, but immediate failures require
+ * us to relock and update the internal state. When there's an immediate connect failure, we use a
+ * conservative policy to fail all excess (beyond the # of pending connects) acquisitions; this allows us
+ * to avoid a possible recursive invocation (and potential failures) to connect again.
+ *
+ * Lifecycle
+ * Our connection manager implementation has a reasonably complex lifecycle.
+ *
+ * All state around the life cycle is protected by a lock. It seemed too risky and error-prone
+ * to try and mix an atomic ref count with the internal tracking counters we need.
+ *
+ * Over the course of its lifetime, a connection manager moves through two states:
+ *
+ * READY - connections may be acquired and released. When the external ref count for the manager
+ * drops to zero, the manager moves to:
+ *
+ *   TODO: Seems like connections can still be released while shutting down.
+ * SHUTTING_DOWN - connections may no longer be acquired and released (how could they if the external
+ * ref count was accurate?) but in case of user ref errors, we simply fail attempts to do so rather
+ * than crash or underflow. While in this state, we wait for a set of tracking counters to all fall to zero:
+ *
+ * pending_connect_count - the # of unresolved calls to the http layer's connect logic
+ * open_connection_count - the # of connections for whom the shutdown callback (from http) has not been invoked
+ * vended_connection_count - the # of connections held by external users that haven't been released. Under correct
+ * usage this should be zero before SHUTTING_DOWN is entered, but we attempt to handle incorrect usage gracefully.
+ *
+ * Once all the counters fall to zero and there are no outstanding lifecycle transitions, the connection
+ * manager will destroy itself.
+ *
+ * While shutting down, as pending connects resolve, we immediately release new incoming (from http) connections
+ *
+ * During the transition from READY to SHUTTING_DOWN, we flush the pending acquisition queue (with failure callbacks)
+ * and since we disallow new acquires, pending_acquisition_count should always be zero after the transition.
+ *
+ */
+struct aws_http_connection_manager {
+ struct aws_allocator *allocator;
+
+ /*
+ * A union of external downstream dependencies (primarily global http API functions) and
+ * internal implementation references. Selectively overridden by tests in order to
+ * enable strong coverage of internal implementation details.
+ */
+ const struct aws_http_connection_manager_system_vtable *system_vtable;
+
+ /*
+ * Callback to invoke when shutdown has completed and all resources have been cleaned up.
+ */
+ aws_http_connection_manager_shutdown_complete_fn *shutdown_complete_callback;
+
+ /*
+ * User data to pass to the shutdown completion callback.
+ */
+ void *shutdown_complete_user_data;
+
+ /*
+ * Controls access to all mutable state on the connection manager
+ */
+ struct aws_mutex lock;
+
+ /*
+ * A manager can be in one of two states, READY or SHUTTING_DOWN. The state transition
+ * takes place when ref_count drops to zero.
+ */
+ enum aws_http_connection_manager_state_type state;
+
+ /*
+     * The number of all established, idle connections, kept so that we don't have to compute the
+     * size of a linked list every time.
+     * It doesn't contribute to the internal refcount, as AWS_HCMCT_OPEN_CONNECTION includes all idle
+     * connections as well.
+ */
+ size_t idle_connection_count;
+
+ /*
+ * The set of all available, ready-to-be-used connections, as aws_idle_connection structs.
+ *
+ * This must be a LIFO stack. When connections are released by the user, they must be added on to the back.
+ * When we vend connections to the user, they must be removed from the back first.
+ * In this way, the list will always be sorted from oldest (in terms of time spent idle) to newest. This means
+ * we can always use the cull timestamp of the front connection as the next scheduled time for culling.
+ * It also means that when we cull connections, we can quit the loop as soon as we find a connection
+ * whose timestamp is greater than the current timestamp.
+ */
+ struct aws_linked_list idle_connections;
+
+ /*
+ * The set of all incomplete connection acquisition requests
+ */
+ struct aws_linked_list pending_acquisitions;
+
+ /*
+     * The number of all incomplete connection acquisition requests, kept so that we don't have to
+     * compute the size of a linked list every time.
+ */
+ size_t pending_acquisition_count;
+
+ /*
+     * Counts that contribute to the internal refcount.
+     * Whenever one of these values changes, s_connection_manager_internal_ref_increase/decrease must be called.
+ *
+ * AWS_HCMCT_VENDED_CONNECTION: The number of connections currently being used by external users.
+ * AWS_HCMCT_PENDING_CONNECTIONS: The number of pending new connection requests we have outstanding to the http
+ * layer.
+ * AWS_HCMCT_OPEN_CONNECTION: Always equal to # of connection shutdown callbacks not yet invoked
+ * or equivalently:
+ *
+ * # of connections ever created by the manager - # shutdown callbacks received
+ */
+ size_t internal_ref[AWS_HCMCT_COUNT];
+
+ /*
+ * The number of established new HTTP/2 connections we have waiting for SETTINGS from the http layer
+     * It doesn't contribute to the internal refcount, as AWS_HCMCT_OPEN_CONNECTION includes all connections
+     * waiting for settings as well.
+ */
+ size_t pending_settings_count;
+
+ /*
+ * All the options needed to create an http connection
+ */
+ struct aws_client_bootstrap *bootstrap;
+ size_t initial_window_size;
+ struct aws_socket_options socket_options;
+ struct aws_tls_connection_options *tls_connection_options;
+ struct aws_http_proxy_config *proxy_config;
+ struct aws_http_connection_monitoring_options monitoring_options;
+ struct aws_string *host;
+ struct proxy_env_var_settings proxy_ev_settings;
+ struct aws_tls_connection_options *proxy_ev_tls_options;
+ uint16_t port;
+ /*
+ * HTTP/2 specific.
+ */
+ bool http2_prior_knowledge;
+ struct aws_array_list *initial_settings;
+ size_t max_closed_streams;
+ bool http2_conn_manual_window_management;
+
+ /*
+ * The maximum number of connections this manager should ever have at once.
+ */
+ size_t max_connections;
+
+ /*
+ * Lifecycle tracking for the connection manager. Starts at 1.
+ *
+ * Once this drops to zero, the manager state transitions to shutting down
+ *
+ * The manager is deleted when all other tracking counters have returned to zero.
+ *
+ * We don't use an atomic here because the shutdown phase wants to check many different
+ * values. You could argue that we could use a sum of everything, but we still need the
+ * individual values for proper behavior and error checking during the ready state. Also,
+ * a hybrid atomic/lock solution felt excessively complicated and delicate.
+ */
+ size_t external_ref_count;
+
+ /*
+ * Internal refcount that keeps connection manager alive.
+ *
+ * It's a sum of all internal_ref, the `struct aws_connection_management_transaction` alive and one for any external
+ * usage.
+ *
+ * Once this refcount drops to zero, connection manager should either be cleaned up all the memory all waiting for
+ * the last task to clean un the memory and do nothing else.
+ */
+ struct aws_ref_count internal_ref_count;
+
+ /*
+ * if set to true, read back pressure mechanism will be enabled.
+ */
+ bool enable_read_back_pressure;
+
+ /**
+ * If set to a non-zero value, then connections that stay in the pool longer than the specified
+ * timeout will be closed automatically.
+ */
+ uint64_t max_connection_idle_in_milliseconds;
+
+ /*
+ * Task to cull idle connections. This task is run periodically on the cull_event_loop if a non-zero
+ * culling time interval is specified.
+ */
+ struct aws_task *cull_task;
+ struct aws_event_loop *cull_event_loop;
+};
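+
+/*
+ * Editorial illustration (not part of the upstream source): a schematic of the
+ * lock / mutate-state / build-work / unlock / execute pattern described in the comment above.
+ * The body here is a placeholder; s_aws_http_connection_manager_complete_acquisitions is defined
+ * further below in this file. Kept under #if 0 as documentation only.
+ */
+#if 0
+static void s_example_side_effecting_operation(struct aws_http_connection_manager *manager) {
+    struct aws_linked_list completions;
+    aws_linked_list_init(&completions);
+
+    aws_mutex_lock(&manager->lock);   /* (1) lock */
+    /* (2) update internal counters/state for the operation */
+    /* (3) move any acquisitions that can now complete onto the local `completions` list */
+    aws_mutex_unlock(&manager->lock); /* (4) unlock */
+
+    /* (5) execute the task set outside the lock: user callbacks, connect calls, releases */
+    s_aws_http_connection_manager_complete_acquisitions(&completions, manager->allocator);
+}
+#endif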
+
+struct aws_http_connection_manager_snapshot {
+ enum aws_http_connection_manager_state_type state;
+
+ size_t idle_connection_count;
+ size_t pending_acquisition_count;
+ size_t pending_settings_count;
+
+ /* From internal_ref */
+ size_t pending_connects_count;
+ size_t vended_connection_count;
+ size_t open_connection_count;
+
+ size_t external_ref_count;
+};
+
+/*
+ * Correct usage requires AWS_ZERO_STRUCT to have been called beforehand.
+ */
+static void s_aws_http_connection_manager_get_snapshot(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection_manager_snapshot *snapshot) {
+
+ snapshot->state = manager->state;
+ snapshot->idle_connection_count = manager->idle_connection_count;
+ snapshot->pending_acquisition_count = manager->pending_acquisition_count;
+ snapshot->pending_settings_count = manager->pending_settings_count;
+
+ snapshot->pending_connects_count = manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS];
+ snapshot->vended_connection_count = manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION];
+ snapshot->open_connection_count = manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION];
+
+ snapshot->external_ref_count = manager->external_ref_count;
+}
+
+static void s_aws_http_connection_manager_log_snapshot(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection_manager_snapshot *snapshot) {
+ if (snapshot->state != AWS_HCMST_UNINITIALIZED) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: snapshot - state=%d, idle_connection_count=%zu, pending_acquire_count=%zu, "
+ "pending_settings_count=%zu, pending_connect_count=%zu, vended_connection_count=%zu, "
+ "open_connection_count=%zu, ref_count=%zu",
+ (void *)manager,
+ (int)snapshot->state,
+ snapshot->idle_connection_count,
+ snapshot->pending_acquisition_count,
+ snapshot->pending_settings_count,
+ snapshot->pending_connects_count,
+ snapshot->vended_connection_count,
+ snapshot->open_connection_count,
+ snapshot->external_ref_count);
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: snapshot not initialized by control flow", (void *)manager);
+ }
+}
+
+void aws_http_connection_manager_set_system_vtable(
+ struct aws_http_connection_manager *manager,
+ const struct aws_http_connection_manager_system_vtable *system_vtable) {
+ AWS_FATAL_ASSERT(aws_http_connection_manager_system_vtable_is_valid(system_vtable));
+
+ manager->system_vtable = system_vtable;
+}
+
+/*
+ * A struct that functions as both the pending acquisition tracker and the about-to-complete data.
+ *
+ * The list in the connection manager (pending_acquisitions) is the set of all acquisition requests that we
+ * haven't yet resolved.
+ *
+ * In order to make sure we never invoke callbacks while holding the manager's lock, in a number of places
+ * we build a list of one or more acquisitions to complete. Once the lock is released
+ * we complete all the acquisitions in the list using the data within the struct (hence why we have
+ * "result-oriented" members like connection and error_code). This means we can fail an acquisition
+ * simply by setting the error_code and moving it to the current transaction's completion list.
+ */
+struct aws_http_connection_acquisition {
+ struct aws_allocator *allocator;
+ struct aws_linked_list_node node;
+ struct aws_http_connection_manager *manager; /* Only used by logging */
+ aws_http_connection_manager_on_connection_setup_fn *callback;
+ void *user_data;
+ struct aws_http_connection *connection;
+ int error_code;
+ struct aws_channel_task acquisition_task;
+};
+
+static void s_connection_acquisition_task(
+ struct aws_channel_task *channel_task,
+ void *arg,
+ enum aws_task_status status) {
+ (void)channel_task;
+
+ struct aws_http_connection_acquisition *pending_acquisition = arg;
+
+    /* This is a channel task. If it is canceled, that means the channel shut down, which is equivalent
+     * to a closed connection. */
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Failed to complete connection acquisition because the connection was closed",
+ (void *)pending_acquisition->manager);
+ pending_acquisition->callback(NULL, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_acquisition->user_data);
+ /* release it back to prevent a leak of the connection count. */
+ aws_http_connection_manager_release_connection(pending_acquisition->manager, pending_acquisition->connection);
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Successfully completed connection acquisition with connection id=%p",
+ (void *)pending_acquisition->manager,
+ (void *)pending_acquisition->connection);
+ pending_acquisition->callback(
+ pending_acquisition->connection, pending_acquisition->error_code, pending_acquisition->user_data);
+ }
+
+ aws_mem_release(pending_acquisition->allocator, pending_acquisition);
+}
+
+/*
+ * Invokes a set of connection acquisition completion callbacks.
+ *
+ * Soft Requirement: The manager's lock must not be held in the call stack.
+ *
+ * Assumes that internal state (like pending_acquisition_count, vended_connection_count, etc.) has already been
+ * updated according to the list's contents.
+ */
+static void s_aws_http_connection_manager_complete_acquisitions(
+ struct aws_linked_list *acquisitions,
+ struct aws_allocator *allocator) {
+
+ while (!aws_linked_list_empty(acquisitions)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(acquisitions);
+ struct aws_http_connection_acquisition *pending_acquisition =
+ AWS_CONTAINER_OF(node, struct aws_http_connection_acquisition, node);
+
+ if (pending_acquisition->error_code == AWS_OP_SUCCESS) {
+
+ struct aws_channel *channel =
+ pending_acquisition->manager->system_vtable->connection_get_channel(pending_acquisition->connection);
+ AWS_PRECONDITION(channel);
+
+ /* For some workloads, going ahead and moving the connection callback to the connection's thread is a
+ * substantial performance improvement so let's do that */
+ if (!pending_acquisition->manager->system_vtable->is_callers_thread(channel)) {
+ aws_channel_task_init(
+ &pending_acquisition->acquisition_task,
+ s_connection_acquisition_task,
+ pending_acquisition,
+ "s_connection_acquisition_task");
+ aws_channel_schedule_task_now(channel, &pending_acquisition->acquisition_task);
+ return;
+ }
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Successfully completed connection acquisition with connection id=%p",
+ (void *)pending_acquisition->manager,
+ (void *)pending_acquisition->connection);
+
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Failed to complete connection acquisition with error_code %d(%s)",
+ (void *)pending_acquisition->manager,
+ pending_acquisition->error_code,
+ aws_error_str(pending_acquisition->error_code));
+ }
+
+ pending_acquisition->callback(
+ pending_acquisition->connection, pending_acquisition->error_code, pending_acquisition->user_data);
+ aws_mem_release(allocator, pending_acquisition);
+ }
+}
+
+/*
+ * Moves the first pending connection acquisition into a (task set) list. Call this while holding the lock to
+ * build the set of callbacks to be completed once the lock is released.
+ *
+ * Hard Requirement: The manager's lock must be held somewhere in the call stack.
+ *
+ * If this was a successful acquisition then connection is non-null.
+ * If this was a failed acquisition then connection is null and error_code is hopefully a useful diagnostic
+ * (extreme edge cases exist where it may not be, though).
+ */
+static void s_aws_http_connection_manager_move_front_acquisition(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection *connection,
+ int error_code,
+ struct aws_linked_list *output_list) {
+
+ AWS_FATAL_ASSERT(!aws_linked_list_empty(&manager->pending_acquisitions));
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&manager->pending_acquisitions);
+
+ AWS_FATAL_ASSERT(manager->pending_acquisition_count > 0);
+ --manager->pending_acquisition_count;
+
+ if (error_code == AWS_ERROR_SUCCESS && connection == NULL) {
+ AWS_LOGF_FATAL(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Connection acquisition completed with NULL connection and no error code. Investigate.",
+ (void *)manager);
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ struct aws_http_connection_acquisition *pending_acquisition =
+ AWS_CONTAINER_OF(node, struct aws_http_connection_acquisition, node);
+ pending_acquisition->connection = connection;
+ pending_acquisition->error_code = error_code;
+
+ aws_linked_list_push_back(output_list, node);
+}
+
+/*
+ * Encompasses all of the external operations that need to be done for various
+ * events:
+ * manager release
+ * connection release
+ * connection acquire
+ * connection_setup
+ * connection_shutdown
+ *
+ * The transaction is built under the manager's lock (and the internal state is updated optimistically),
+ * but then executed outside of it.
+ */
+struct aws_connection_management_transaction {
+ struct aws_http_connection_manager *manager;
+ struct aws_allocator *allocator;
+ struct aws_linked_list completions;
+ struct aws_http_connection *connection_to_release;
+ struct aws_linked_list connections_to_release; /* <struct aws_idle_connection> */
+ struct aws_http_connection_manager_snapshot snapshot;
+ size_t new_connections;
+};
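
Every externally triggered event handled below (manager release, connection acquire/release, connection setup and shutdown, GOAWAY) follows the same skeleton around this struct. A condensed sketch, using a hypothetical s_example_entry_point name and the transaction helpers defined later in this file:

/* Sketch of the entry-point skeleton used throughout this file (not a real function in the library). */
static void s_example_entry_point(struct aws_http_connection_manager *manager) {
    struct aws_connection_management_transaction work;
    s_aws_connection_management_transaction_init(&work, manager); /* takes an internal ref on the manager */

    aws_mutex_lock(&manager->lock);
    /* ... mutate manager state optimistically for this event ... */
    s_aws_http_connection_manager_build_transaction(&work); /* decide completions, releases, new connections */
    aws_mutex_unlock(&manager->lock);

    /* All side effects (user callbacks, connection releases, new connection requests) happen outside the lock. */
    s_aws_http_connection_manager_execute_transaction(&work); /* also cleans up 'work' and drops the internal ref */
}
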
+
+static void s_aws_connection_management_transaction_init(
+ struct aws_connection_management_transaction *work,
+ struct aws_http_connection_manager *manager) {
+ AWS_ZERO_STRUCT(*work);
+
+ aws_linked_list_init(&work->connections_to_release);
+ aws_linked_list_init(&work->completions);
+ work->manager = manager;
+ work->allocator = manager->allocator;
+ aws_ref_count_acquire(&manager->internal_ref_count);
+}
+
+static void s_aws_connection_management_transaction_clean_up(struct aws_connection_management_transaction *work) {
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&work->connections_to_release));
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&work->completions));
+ AWS_ASSERT(work->manager);
+ aws_ref_count_release(&work->manager->internal_ref_count);
+}
+
+/* Count acquires and releases must all be invoked while holding the lock */
+static void s_connection_manager_internal_ref_increase(
+ struct aws_http_connection_manager *manager,
+ enum aws_http_connection_manager_count_type count_type,
+ size_t num) {
+
+ manager->internal_ref[count_type] += num;
+ for (size_t i = 0; i < num; i++) {
+ aws_ref_count_acquire(&manager->internal_ref_count);
+ }
+}
+
+static void s_connection_manager_internal_ref_decrease(
+ struct aws_http_connection_manager *manager,
+ enum aws_http_connection_manager_count_type count_type,
+ size_t num) {
+
+ manager->internal_ref[count_type] -= num;
+ for (size_t i = 0; i < num; i++) {
+ /* This only happens between transaction init and transaction clean up. As a transaction always holds an internal
+ * refcount, this will never bring the refcount to zero */
+ aws_ref_count_release(&manager->internal_ref_count);
+ }
+}
+
+/* Only invoked with the lock held */
+static void s_aws_http_connection_manager_build_transaction(struct aws_connection_management_transaction *work) {
+ struct aws_http_connection_manager *manager = work->manager;
+
+ if (manager->state == AWS_HCMST_READY) {
+ /*
+ * Step 1 - If there are free connections, complete acquisition requests
+ */
+ while (!aws_linked_list_empty(&manager->idle_connections) && manager->pending_acquisition_count > 0) {
+ AWS_FATAL_ASSERT(manager->idle_connection_count >= 1);
+ /*
+ * It is absolutely critical that this is pop_back and not front. By making the idle connections
+ * a LIFO stack, the list will always be sorted from oldest (in terms of idle time) to newest. This means
+ * we can always use the cull timestamp of the first connection as the next scheduled time for culling.
+ * It also means that when we cull connections, we can quit the loop as soon as we find a connection
+ * whose timestamp is greater than the current timestamp.
+ */
+ struct aws_linked_list_node *node = aws_linked_list_pop_back(&manager->idle_connections);
+ struct aws_idle_connection *idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+ struct aws_http_connection *connection = idle_connection->connection;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Grabbing pooled connection (%p)",
+ (void *)manager,
+ (void *)connection);
+ s_aws_http_connection_manager_move_front_acquisition(
+ manager, connection, AWS_ERROR_SUCCESS, &work->completions);
+ s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_VENDED_CONNECTION, 1);
+ --manager->idle_connection_count;
+ aws_mem_release(idle_connection->allocator, idle_connection);
+ }
+
+ /*
+ * Step 2 - If there are excess pending acquisitions and we have room to make more connections, make more
+ */
+ if (manager->pending_acquisition_count >
+ manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count) {
+ AWS_FATAL_ASSERT(
+ manager->max_connections >= manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] +
+ manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] +
+ manager->pending_settings_count);
+
+ work->new_connections = manager->pending_acquisition_count -
+ manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] -
+ manager->pending_settings_count;
+ size_t max_new_connections =
+ manager->max_connections -
+ (manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] +
+ manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count);
+
+ if (work->new_connections > max_new_connections) {
+ work->new_connections = max_new_connections;
+ }
+ s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_PENDING_CONNECTIONS, work->new_connections);
+ }
+ } else {
+ /*
+ * swap our internal connection set with the empty work set
+ */
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&work->connections_to_release));
+ aws_linked_list_swap_contents(&manager->idle_connections, &work->connections_to_release);
+ manager->idle_connection_count = 0;
+
+ /*
+ * Move all manager pending acquisitions to the work completion list
+ */
+ while (!aws_linked_list_empty(&manager->pending_acquisitions)) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Failing pending connection acquisition due to manager shut down",
+ (void *)manager);
+ s_aws_http_connection_manager_move_front_acquisition(
+ manager, NULL, AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN, &work->completions);
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: manager release, failing %zu pending acquisitions",
+ (void *)manager,
+ manager->pending_acquisition_count);
+ manager->pending_acquisition_count = 0;
+ }
+
+ s_aws_http_connection_manager_get_snapshot(manager, &work->snapshot);
+}
+
+static void s_aws_http_connection_manager_execute_transaction(struct aws_connection_management_transaction *work);
+
+/*
+ * The last gasp of a connection manager, where memory is cleaned up. Destruction is split into two parts,
+ * a begin and a finish. Idle connection culling requires a scheduled task on an arbitrary event loop. If idle
+ * connection culling is on then this task must be cancelled before destruction can finish, but you can only cancel
+ * a task from the same event loop that it is scheduled on. To resolve this, when using idle connection culling,
+ * we schedule a finish destruction task on the event loop that the culling task is on. This finish task
+ * cancels the culling task and then calls this function. If we are not using idle connection culling, we can
+ * call this function immediately from the start of destruction.
+ */
+static void s_aws_http_connection_manager_finish_destroy(struct aws_http_connection_manager *manager) {
+ if (manager == NULL) {
+ return;
+ }
+
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Destroying self", (void *)manager);
+
+ AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] == 0);
+ AWS_FATAL_ASSERT(manager->pending_settings_count == 0);
+ AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] == 0);
+ AWS_FATAL_ASSERT(manager->pending_acquisition_count == 0);
+ AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION] == 0);
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&manager->pending_acquisitions));
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&manager->idle_connections));
+
+ aws_string_destroy(manager->host);
+ if (manager->initial_settings) {
+ aws_array_list_clean_up(manager->initial_settings);
+ aws_mem_release(manager->allocator, manager->initial_settings);
+ }
+ if (manager->tls_connection_options) {
+ aws_tls_connection_options_clean_up(manager->tls_connection_options);
+ aws_mem_release(manager->allocator, manager->tls_connection_options);
+ }
+ if (manager->proxy_ev_tls_options) {
+ aws_tls_connection_options_clean_up(manager->proxy_ev_tls_options);
+ aws_mem_release(manager->allocator, manager->proxy_ev_tls_options);
+ }
+ if (manager->proxy_config) {
+ aws_http_proxy_config_destroy(manager->proxy_config);
+ }
+
+ /*
+ * If this task exists then we are actually in the corresponding event loop running the final destruction task.
+ * In that case, we've already cancelled the cull task, and cancellation runs it synchronously. So the task has
+ * already run as cancelled, it was not rescheduled, and we can safely release its memory.
+ */
+ if (manager->cull_task) {
+ aws_mem_release(manager->allocator, manager->cull_task);
+ }
+
+ aws_mutex_clean_up(&manager->lock);
+
+ aws_client_bootstrap_release(manager->bootstrap);
+
+ if (manager->shutdown_complete_callback) {
+ manager->shutdown_complete_callback(manager->shutdown_complete_user_data);
+ }
+
+ aws_mem_release(manager->allocator, manager);
+}
+
+/* This is scheduled to run on the cull task's event loop. It should only be scheduled if we have a cull task */
+static void s_final_destruction_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)status;
+ struct aws_http_connection_manager *manager = arg;
+ struct aws_allocator *allocator = manager->allocator;
+
+ AWS_FATAL_ASSERT(manager->cull_task != NULL);
+ AWS_FATAL_ASSERT(manager->cull_event_loop != NULL);
+
+ aws_event_loop_cancel_task(manager->cull_event_loop, manager->cull_task);
+ aws_mem_release(allocator, task);
+
+ /* release the refcount on manager as the culling task will not run again */
+ aws_ref_count_release(&manager->internal_ref_count);
+}
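
The comment before s_aws_http_connection_manager_finish_destroy describes the constraint this task exists to satisfy: a scheduled task may only be cancelled from the event loop that owns it, so teardown first hops onto that loop. A stripped-down sketch of the same shape, with hypothetical owner/s_cancel_on_owning_loop/s_begin_teardown names:

#include <aws/common/task_scheduler.h>
#include <aws/io/event_loop.h>

struct owner {
    struct aws_allocator *allocator;
    struct aws_event_loop *loop;     /* loop that owns recurring_task */
    struct aws_task *recurring_task; /* task that must be cancelled on 'loop' */
};

/* Runs on 'loop', so it is allowed to cancel recurring_task there. */
static void s_cancel_on_owning_loop(struct aws_task *task, void *arg, enum aws_task_status status) {
    (void)status;
    struct owner *owner = arg;
    aws_event_loop_cancel_task(owner->loop, owner->recurring_task); /* cancellation runs the task synchronously */
    aws_mem_release(owner->allocator, task);
}

/* May be called from any thread: instead of cancelling directly, hop onto the owning loop first. */
static void s_begin_teardown(struct owner *owner) {
    struct aws_task *hop = aws_mem_calloc(owner->allocator, 1, sizeof(struct aws_task));
    aws_task_init(hop, s_cancel_on_owning_loop, owner, "cancel_on_owning_loop");
    aws_event_loop_schedule_task_now(owner->loop, hop);
}
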
+
+static void s_cull_task(struct aws_task *task, void *arg, enum aws_task_status status);
+static void s_schedule_connection_culling(struct aws_http_connection_manager *manager) {
+ if (manager->max_connection_idle_in_milliseconds == 0) {
+ return;
+ }
+
+ if (manager->cull_task == NULL) {
+ manager->cull_task = aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_task));
+ aws_task_init(manager->cull_task, s_cull_task, manager, "cull_idle_connections");
+ /* For the task to properly run and cancel, we need to keep manager alive */
+ aws_ref_count_acquire(&manager->internal_ref_count);
+ }
+
+ if (manager->cull_event_loop == NULL) {
+ manager->cull_event_loop = aws_event_loop_group_get_next_loop(manager->bootstrap->event_loop_group);
+ }
+ AWS_FATAL_ASSERT(manager->cull_event_loop != NULL);
+
+ uint64_t cull_task_time = 0;
+
+ aws_mutex_lock(&manager->lock);
+ const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+ struct aws_linked_list_node *oldest_node = aws_linked_list_begin(&manager->idle_connections);
+ if (oldest_node != end) {
+ /*
+ * Since the connections are in LIFO order in the list, the front of the list has the closest
+ * cull time.
+ */
+ struct aws_idle_connection *oldest_idle_connection =
+ AWS_CONTAINER_OF(oldest_node, struct aws_idle_connection, node);
+ cull_task_time = oldest_idle_connection->cull_timestamp;
+ } else {
+ /*
+ * There are no connections in the list, so the earliest anything could possibly be culled is a full
+ * culling interval from now.
+ */
+ uint64_t now = 0;
+ manager->system_vtable->get_monotonic_time(&now);
+ cull_task_time =
+ now + aws_timestamp_convert(
+ manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+ }
+ aws_mutex_unlock(&manager->lock);
+
+ aws_event_loop_schedule_task_future(manager->cull_event_loop, manager->cull_task, cull_task_time);
+
+ return;
+}
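
The deadline arithmetic above reappears in s_idle_connection further down: convert the idle limit from milliseconds to the nanosecond resolution of the monotonic clock and add it to "now". A small sketch of just that calculation, assuming aws-c-common's aws_high_res_clock_get_ticks stands in for the manager's get_monotonic_time vtable entry:

#include <aws/common/clock.h>

/* Hypothetical helper: monotonic deadline at which an idle connection should be culled.
 * E.g. with a 30,000 ms idle limit, the deadline is now + 30,000 * 1,000,000 ns. */
static uint64_t s_compute_cull_deadline_ns(uint64_t max_idle_ms) {
    uint64_t now_ns = 0;
    aws_high_res_clock_get_ticks(&now_ns); /* return value ignored for brevity */
    return now_ns + aws_timestamp_convert(max_idle_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
}
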
+
+struct aws_http_connection_manager *aws_http_connection_manager_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options) {
+
+ aws_http_fatal_assert_library_initialized();
+
+ if (!options) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - options is null");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (!options->socket_options) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - socket_options is null");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (options->max_connections == 0) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - max_connections cannot be 0");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (options->tls_connection_options && options->http2_prior_knowledge) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION_MANAGER, "Invalid options - HTTP/2 prior knowledge cannot be set when TLS is used");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_connection_manager *manager =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_connection_manager));
+ if (manager == NULL) {
+ return NULL;
+ }
+
+ manager->allocator = allocator;
+
+ if (aws_mutex_init(&manager->lock)) {
+ goto on_error;
+ }
+
+ aws_ref_count_init(
+ &manager->internal_ref_count,
+ manager,
+ (aws_simple_completion_callback *)s_aws_http_connection_manager_finish_destroy);
+
+ aws_linked_list_init(&manager->idle_connections);
+ aws_linked_list_init(&manager->pending_acquisitions);
+
+ manager->host = aws_string_new_from_cursor(allocator, &options->host);
+ if (manager->host == NULL) {
+ goto on_error;
+ }
+
+ if (options->tls_connection_options) {
+ manager->tls_connection_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (aws_tls_connection_options_copy(manager->tls_connection_options, options->tls_connection_options)) {
+ goto on_error;
+ }
+ }
+ if (options->proxy_options) {
+ manager->proxy_config = aws_http_proxy_config_new_from_manager_options(allocator, options);
+ if (manager->proxy_config == NULL) {
+ goto on_error;
+ }
+ }
+
+ if (options->monitoring_options) {
+ manager->monitoring_options = *options->monitoring_options;
+ }
+
+ manager->state = AWS_HCMST_READY;
+ manager->initial_window_size = options->initial_window_size;
+ manager->port = options->port;
+ manager->max_connections = options->max_connections;
+ manager->socket_options = *options->socket_options;
+ manager->bootstrap = aws_client_bootstrap_acquire(options->bootstrap);
+ manager->system_vtable = g_aws_http_connection_manager_default_system_vtable_ptr;
+ manager->external_ref_count = 1;
+ manager->shutdown_complete_callback = options->shutdown_complete_callback;
+ manager->shutdown_complete_user_data = options->shutdown_complete_user_data;
+ manager->enable_read_back_pressure = options->enable_read_back_pressure;
+ manager->max_connection_idle_in_milliseconds = options->max_connection_idle_in_milliseconds;
+ if (options->proxy_ev_settings) {
+ manager->proxy_ev_settings = *options->proxy_ev_settings;
+ }
+ if (manager->proxy_ev_settings.tls_options) {
+ manager->proxy_ev_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (aws_tls_connection_options_copy(manager->proxy_ev_tls_options, manager->proxy_ev_settings.tls_options)) {
+ goto on_error;
+ }
+ manager->proxy_ev_settings.tls_options = manager->proxy_ev_tls_options;
+ }
+ manager->http2_prior_knowledge = options->http2_prior_knowledge;
+ if (options->num_initial_settings > 0) {
+ manager->initial_settings = aws_mem_calloc(allocator, 1, sizeof(struct aws_array_list));
+ aws_array_list_init_dynamic(
+ manager->initial_settings, allocator, options->num_initial_settings, sizeof(struct aws_http2_setting));
+ memcpy(
+ manager->initial_settings->data,
+ options->initial_settings_array,
+ options->num_initial_settings * sizeof(struct aws_http2_setting));
+ }
+ manager->max_closed_streams = options->max_closed_streams;
+ manager->http2_conn_manual_window_management = options->http2_conn_manual_window_management;
+
+ /* NOTHING can fail after here */
+ s_schedule_connection_culling(manager);
+
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Successfully created", (void *)manager);
+
+ return manager;
+
+on_error:
+
+ s_aws_http_connection_manager_finish_destroy(manager);
+
+ return NULL;
+}
+
+void aws_http_connection_manager_acquire(struct aws_http_connection_manager *manager) {
+ aws_mutex_lock(&manager->lock);
+ AWS_FATAL_ASSERT(manager->external_ref_count > 0);
+ manager->external_ref_count += 1;
+ aws_mutex_unlock(&manager->lock);
+}
+
+void aws_http_connection_manager_release(struct aws_http_connection_manager *manager) {
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: release", (void *)manager);
+
+ aws_mutex_lock(&manager->lock);
+
+ if (manager->external_ref_count > 0) {
+ manager->external_ref_count -= 1;
+
+ if (manager->external_ref_count == 0) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: ref count now zero, starting shut down process",
+ (void *)manager);
+ manager->state = AWS_HCMST_SHUTTING_DOWN;
+ s_aws_http_connection_manager_build_transaction(&work);
+ if (manager->cull_task != NULL) {
+ /* When the manager is shutting down, schedule a task to cancel the cull task, if one exists. */
+ AWS_FATAL_ASSERT(manager->cull_event_loop);
+ struct aws_task *final_destruction_task =
+ aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_task));
+ aws_task_init(final_destruction_task, s_final_destruction_task, manager, "final_scheduled_destruction");
+ aws_event_loop_schedule_task_now(manager->cull_event_loop, final_destruction_task);
+ }
+ aws_ref_count_release(&manager->internal_ref_count);
+ }
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Connection manager release called with a zero reference count",
+ (void *)manager);
+ }
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+static void s_aws_http_connection_manager_on_connection_setup(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data);
+
+static void s_aws_http_connection_manager_on_connection_shutdown(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data);
+
+static void s_aws_http_connection_manager_h2_on_goaway_received(
+ struct aws_http_connection *http2_connection,
+ uint32_t last_stream_id,
+ uint32_t http2_error_code,
+ struct aws_byte_cursor debug_data,
+ void *user_data);
+
+static void s_aws_http_connection_manager_h2_on_initial_settings_completed(
+ struct aws_http_connection *http2_connection,
+ int error_code,
+ void *user_data);
+
+static int s_aws_http_connection_manager_new_connection(struct aws_http_connection_manager *manager) {
+ struct aws_http_client_connection_options options;
+ AWS_ZERO_STRUCT(options);
+ options.self_size = sizeof(struct aws_http_client_connection_options);
+ options.bootstrap = manager->bootstrap;
+ options.tls_options = manager->tls_connection_options;
+ options.allocator = manager->allocator;
+ options.user_data = manager;
+ options.host_name = aws_byte_cursor_from_string(manager->host);
+ options.port = manager->port;
+ options.initial_window_size = manager->initial_window_size;
+ options.socket_options = &manager->socket_options;
+ options.on_setup = s_aws_http_connection_manager_on_connection_setup;
+ options.on_shutdown = s_aws_http_connection_manager_on_connection_shutdown;
+ options.manual_window_management = manager->enable_read_back_pressure;
+ options.proxy_ev_settings = &manager->proxy_ev_settings;
+ options.prior_knowledge_http2 = manager->http2_prior_knowledge;
+
+ struct aws_http2_connection_options h2_options;
+ AWS_ZERO_STRUCT(h2_options);
+ if (manager->initial_settings) {
+ h2_options.initial_settings_array = manager->initial_settings->data;
+ h2_options.num_initial_settings = aws_array_list_length(manager->initial_settings);
+ }
+ h2_options.max_closed_streams = manager->max_closed_streams;
+ h2_options.conn_manual_window_management = manager->http2_conn_manual_window_management;
+ /* on_initial_settings_completed is invoked after the other side acknowledges our settings, and will always be
+ * invoked if the connection was set up */
+ h2_options.on_initial_settings_completed = s_aws_http_connection_manager_h2_on_initial_settings_completed;
+ h2_options.on_goaway_received = s_aws_http_connection_manager_h2_on_goaway_received;
+
+ options.http2_options = &h2_options;
+
+ if (aws_http_connection_monitoring_options_is_valid(&manager->monitoring_options)) {
+ options.monitoring_options = &manager->monitoring_options;
+ }
+
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+
+ if (manager->proxy_config) {
+ aws_http_proxy_options_init_from_config(&proxy_options, manager->proxy_config);
+ options.proxy_options = &proxy_options;
+ }
+
+ if (manager->system_vtable->create_connection(&options)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: http connection creation failed with error code %d(%s)",
+ (void *)manager,
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_http_connection_manager_execute_transaction(struct aws_connection_management_transaction *work) {
+
+ struct aws_http_connection_manager *manager = work->manager;
+
+ int representative_error = 0;
+ size_t new_connection_failures = 0;
+
+ /*
+ * Step 1 - Logging
+ */
+ s_aws_http_connection_manager_log_snapshot(manager, &work->snapshot);
+
+ /*
+ * Step 2 - Perform any requested connection releases
+ */
+ while (!aws_linked_list_empty(&work->connections_to_release)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_back(&work->connections_to_release);
+ struct aws_idle_connection *idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Releasing connection (id=%p)",
+ (void *)manager,
+ (void *)idle_connection->connection);
+ manager->system_vtable->release_connection(idle_connection->connection);
+ aws_mem_release(idle_connection->allocator, idle_connection);
+ }
+
+ if (work->connection_to_release) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Releasing connection (id=%p)",
+ (void *)manager,
+ (void *)work->connection_to_release);
+ manager->system_vtable->release_connection(work->connection_to_release);
+ }
+
+ /*
+ * Step 3 - Make new connections
+ */
+ struct aws_array_list errors;
+ AWS_ZERO_STRUCT(errors);
+ /* Even if we can't init this array, we still need to invoke error callbacks properly */
+ bool push_errors = false;
+
+ if (work->new_connections > 0) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Requesting %zu new connections from http",
+ (void *)manager,
+ work->new_connections);
+ push_errors = aws_array_list_init_dynamic(&errors, work->allocator, work->new_connections, sizeof(int)) ==
+ AWS_ERROR_SUCCESS;
+ }
+
+ for (size_t i = 0; i < work->new_connections; ++i) {
+ if (s_aws_http_connection_manager_new_connection(manager)) {
+ ++new_connection_failures;
+ representative_error = aws_last_error();
+ if (push_errors) {
+ AWS_FATAL_ASSERT(aws_array_list_push_back(&errors, &representative_error) == AWS_OP_SUCCESS);
+ }
+ }
+ }
+
+ if (new_connection_failures > 0) {
+ /*
+ * We failed and aren't going to receive a callback, but the current state assumes we will receive
+ * a callback. So we need to re-lock and update the state ourselves.
+ */
+ aws_mutex_lock(&manager->lock);
+
+ AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] >= new_connection_failures);
+ s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_PENDING_CONNECTIONS, new_connection_failures);
+
+ /*
+ * Rather than failing one acquisition for each connection failure, if there's at least one
+ * connection failure, we instead fail all excess acquisitions, since there's no pending
+ * connect that will necessarily resolve them.
+ *
+ * Try to pair each failed acquisition with its own error, but as a fallback just use the
+ * representative error.
+ */
+ size_t i = 0;
+ while (manager->pending_acquisition_count > manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS]) {
+ int error = representative_error;
+ if (i < aws_array_list_length(&errors)) {
+ aws_array_list_get_at(&errors, &error, i);
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Failing excess connection acquisition with error code %d",
+ (void *)manager,
+ (int)error);
+ s_aws_http_connection_manager_move_front_acquisition(manager, NULL, error, &work->completions);
+ ++i;
+ }
+
+ aws_mutex_unlock(&manager->lock);
+ }
+
+ /*
+ * Step 4 - Perform acquisition callbacks
+ */
+ s_aws_http_connection_manager_complete_acquisitions(&work->completions, work->allocator);
+
+ aws_array_list_clean_up(&errors);
+
+ /*
+ * Step 5 - Clean up work. Do this here rather than at the end of every caller. Destroy the manager if necessary
+ */
+ s_aws_connection_management_transaction_clean_up(work);
+}
+
+void aws_http_connection_manager_acquire_connection(
+ struct aws_http_connection_manager *manager,
+ aws_http_connection_manager_on_connection_setup_fn *callback,
+ void *user_data) {
+
+ AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: Acquire connection", (void *)manager);
+
+ struct aws_http_connection_acquisition *request =
+ aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_http_connection_acquisition));
+
+ request->allocator = manager->allocator;
+ request->callback = callback;
+ request->user_data = user_data;
+ request->manager = manager;
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ aws_mutex_lock(&manager->lock);
+
+ /* Acquiring from a manager that is already shutting down is a use-after-free we don't want to handle */
+ AWS_FATAL_ASSERT(manager->state == AWS_HCMST_READY);
+
+ aws_linked_list_push_back(&manager->pending_acquisitions, &request->node);
+ ++manager->pending_acquisition_count;
+
+ s_aws_http_connection_manager_build_transaction(&work);
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Only invoke with lock held */
+static int s_idle_connection(struct aws_http_connection_manager *manager, struct aws_http_connection *connection) {
+ struct aws_idle_connection *idle_connection =
+ aws_mem_calloc(manager->allocator, 1, sizeof(struct aws_idle_connection));
+
+ idle_connection->allocator = manager->allocator;
+ idle_connection->connection = connection;
+
+ uint64_t idle_start_timestamp = 0;
+ if (manager->system_vtable->get_monotonic_time(&idle_start_timestamp)) {
+ goto on_error;
+ }
+
+ idle_connection->cull_timestamp =
+ idle_start_timestamp +
+ aws_timestamp_convert(
+ manager->max_connection_idle_in_milliseconds, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+
+ aws_linked_list_push_back(&manager->idle_connections, &idle_connection->node);
+ ++manager->idle_connection_count;
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_mem_release(idle_connection->allocator, idle_connection);
+
+ return AWS_OP_ERR;
+}
+
+int aws_http_connection_manager_release_connection(
+ struct aws_http_connection_manager *manager,
+ struct aws_http_connection *connection) {
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ int result = AWS_OP_ERR;
+ bool should_release_connection = !manager->system_vtable->is_connection_available(connection);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: User releasing connection (id=%p)",
+ (void *)manager,
+ (void *)connection);
+
+ aws_mutex_lock(&manager->lock);
+
+ /* We're probably hosed in this case, but let's not underflow */
+ if (manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION] == 0) {
+ AWS_LOGF_FATAL(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Connection released when vended connection count is zero",
+ (void *)manager);
+ aws_raise_error(AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW);
+ goto release;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+ s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_VENDED_CONNECTION, 1);
+
+ if (!should_release_connection) {
+ if (s_idle_connection(manager, connection)) {
+ should_release_connection = true;
+ }
+ }
+
+ s_aws_http_connection_manager_build_transaction(&work);
+ if (should_release_connection) {
+ work.connection_to_release = connection;
+ }
+
+release:
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+
+ return result;
+}
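
aws_http_connection_manager_new, _acquire_connection, _release_connection and _release, all defined above, are the main external surface of this file. A rough usage sketch, assuming the public aws/http/connection_manager.h header and plain TCP on port 80; the allocator and client bootstrap are things the caller must already have, and s_on_connection_acquired/s_example are hypothetical names:

#include <aws/common/byte_buf.h>
#include <aws/http/connection_manager.h>
#include <aws/io/socket.h>

/* Hypothetical acquisition callback: use the connection, then hand it back to the pool. */
static void s_on_connection_acquired(struct aws_http_connection *connection, int error_code, void *user_data) {
    struct aws_http_connection_manager *manager = user_data;
    if (error_code) {
        return; /* acquisition failed; there is nothing to release */
    }
    /* ... make requests on the connection ... */
    aws_http_connection_manager_release_connection(manager, connection);
}

static void s_example(struct aws_allocator *allocator, struct aws_client_bootstrap *bootstrap) {
    struct aws_socket_options socket_options = {
        .type = AWS_SOCKET_STREAM,
        .connect_timeout_ms = 3000,
    };

    struct aws_http_connection_manager_options options = {
        .bootstrap = bootstrap,
        .socket_options = &socket_options,
        .host = aws_byte_cursor_from_c_str("example.com"), /* placeholder host */
        .port = 80,
        .max_connections = 8,
        .max_connection_idle_in_milliseconds = 30000, /* enables the idle-culling task above */
    };

    struct aws_http_connection_manager *manager = aws_http_connection_manager_new(allocator, &options);
    if (!manager) {
        return;
    }

    aws_http_connection_manager_acquire_connection(manager, s_on_connection_acquired, manager);

    /* ... later, during application shutdown, drop the external reference; cleanup completes asynchronously
     * via options.shutdown_complete_callback if one was provided. */
    aws_http_connection_manager_release(manager);
}
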
+
+static void s_aws_http_connection_manager_h2_on_goaway_received(
+ struct aws_http_connection *http2_connection,
+ uint32_t last_stream_id,
+ uint32_t http2_error_code,
+ struct aws_byte_cursor debug_data,
+ void *user_data) {
+ struct aws_http_connection_manager *manager = user_data;
+ /* We don't offer user the details, but we can still log it out for debugging */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: HTTP/2 connection (id=%p) received GOAWAY with: last stream id - %u, error code - %u, debug data - "
+ "\"%.*s\"",
+ (void *)manager,
+ (void *)http2_connection,
+ last_stream_id,
+ http2_error_code,
+ (int)debug_data.len,
+ debug_data.ptr);
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ aws_mutex_lock(&manager->lock);
+ /* GOAWAY received: if the connection is in the idle list, remove it and release it. But do not decrease the
+ * open_connection_count, as the shutdown callback will still be invoked and we need the manager to stay alive */
+ const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+ for (struct aws_linked_list_node *node = aws_linked_list_begin(&manager->idle_connections); node != end;
+ node = aws_linked_list_next(node)) {
+ struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+ if (current_idle_connection->connection == http2_connection) {
+ aws_linked_list_remove(node);
+ work.connection_to_release = http2_connection;
+ aws_mem_release(current_idle_connection->allocator, current_idle_connection);
+ --manager->idle_connection_count;
+ break;
+ }
+ }
+ s_aws_http_connection_manager_build_transaction(&work);
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+/* Only invoke with lock held */
+static void s_cm_on_connection_ready_or_failed(
+ struct aws_http_connection_manager *manager,
+ int error_code,
+ struct aws_http_connection *connection,
+ struct aws_connection_management_transaction *work) {
+
+ bool is_shutting_down = manager->state == AWS_HCMST_SHUTTING_DOWN;
+
+ if (!error_code) {
+ if (is_shutting_down || s_idle_connection(manager, connection)) {
+ /*
+ * release it immediately
+ */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: New connection (id=%p) releasing immediately",
+ (void *)manager,
+ (void *)connection);
+ work->connection_to_release = connection;
+ }
+ } else {
+ /* fail excess pending acquisitions, as this connection can no longer be used to satisfy them */
+ while (manager->pending_acquisition_count >
+ manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] + manager->pending_settings_count) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Failing excess connection acquisition with error code %d",
+ (void *)manager,
+ (int)error_code);
+ s_aws_http_connection_manager_move_front_acquisition(manager, NULL, error_code, &work->completions);
+ }
+ /* Since the connection was never made idle, we need to release it here. */
+ if (connection) {
+ work->connection_to_release = connection;
+ }
+ }
+}
+
+static void s_aws_http_connection_manager_h2_on_initial_settings_completed(
+ struct aws_http_connection *http2_connection,
+ int error_code,
+ void *user_data) {
+ struct aws_http_connection_manager *manager = user_data;
+ /* The other side has acknowledged our settings, which also means we have received the other side's settings at
+ * this point, because SETTINGS must be the first frame sent */
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: HTTP/2 connection (id=%p) completed initial settings",
+ (void *)manager,
+ (void *)http2_connection);
+
+ aws_mutex_lock(&manager->lock);
+
+ AWS_FATAL_ASSERT(manager->pending_settings_count > 0);
+ --manager->pending_settings_count;
+ s_cm_on_connection_ready_or_failed(manager, error_code, http2_connection, &work);
+
+ s_aws_http_connection_manager_build_transaction(&work);
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+static void s_aws_http_connection_manager_on_connection_setup(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) {
+ struct aws_http_connection_manager *manager = user_data;
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ if (connection != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Received new connection (id=%p) from http layer",
+ (void *)manager,
+ (void *)connection);
+ } else {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: Failed to obtain new connection from http layer, error %d(%s)",
+ (void *)manager,
+ error_code,
+ aws_error_str(error_code));
+ }
+
+ aws_mutex_lock(&manager->lock);
+
+ AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_PENDING_CONNECTIONS] > 0);
+ s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_PENDING_CONNECTIONS, 1);
+ if (!error_code) {
+ /* Shutdown will not be invoked if setup completed with error */
+ s_connection_manager_internal_ref_increase(manager, AWS_HCMCT_OPEN_CONNECTION, 1);
+ }
+
+ if (connection != NULL && manager->system_vtable->connection_get_version(connection) == AWS_HTTP_VERSION_2) {
+ /* If the manager is shutting down, we still wait for the settings, since we don't keep a map of connections
+ */
+ ++manager->pending_settings_count;
+ /* For an HTTP/2 connection, we only vend the connection to the user after the initial settings have completed,
+ * to make sure the connection is really ready to use. So we can revert the counting, act as if nothing happened
+ * here, and wait for on_initial_settings_completed, which will ALWAYS be invoked before shutdown. BUT, we
+ * increase the open_connection_count, as shutdown will be invoked no matter what happens. */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: New HTTP/2 connection (id=%p) set up, waiting for initial settings to complete",
+ (void *)manager,
+ (void *)connection);
+ } else {
+ /* If there is no connection, error code cannot be zero */
+ AWS_ASSERT(connection || error_code);
+ s_cm_on_connection_ready_or_failed(manager, error_code, connection, &work);
+ }
+
+ s_aws_http_connection_manager_build_transaction(&work);
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+static void s_aws_http_connection_manager_on_connection_shutdown(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) {
+ (void)error_code;
+
+ struct aws_http_connection_manager *manager = user_data;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: shutdown received for connection (id=%p)",
+ (void *)manager,
+ (void *)connection);
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ aws_mutex_lock(&manager->lock);
+
+ AWS_FATAL_ASSERT(manager->internal_ref[AWS_HCMCT_OPEN_CONNECTION] > 0);
+ s_connection_manager_internal_ref_decrease(manager, AWS_HCMCT_OPEN_CONNECTION, 1);
+
+ /*
+ * Find the connection in the idle list and, if found, remove it
+ */
+ const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+ for (struct aws_linked_list_node *node = aws_linked_list_begin(&manager->idle_connections); node != end;
+ node = aws_linked_list_next(node)) {
+ struct aws_idle_connection *current_idle_connection = AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+ if (current_idle_connection->connection == connection) {
+ aws_linked_list_remove(node);
+ work.connection_to_release = connection;
+ aws_mem_release(current_idle_connection->allocator, current_idle_connection);
+ --manager->idle_connection_count;
+ break;
+ }
+ }
+
+ s_aws_http_connection_manager_build_transaction(&work);
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+static void s_cull_idle_connections(struct aws_http_connection_manager *manager) {
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "id=%p: culling idle connections", (void *)manager);
+
+ if (manager == NULL || manager->max_connection_idle_in_milliseconds == 0) {
+ return;
+ }
+
+ uint64_t now = 0;
+ if (manager->system_vtable->get_monotonic_time(&now)) {
+ return;
+ }
+
+ struct aws_connection_management_transaction work;
+ s_aws_connection_management_transaction_init(&work, manager);
+
+ aws_mutex_lock(&manager->lock);
+
+ /* Only if we're not shutting down */
+ if (manager->state == AWS_HCMST_READY) {
+ const struct aws_linked_list_node *end = aws_linked_list_end(&manager->idle_connections);
+ struct aws_linked_list_node *current_node = aws_linked_list_begin(&manager->idle_connections);
+ while (current_node != end) {
+ struct aws_linked_list_node *node = current_node;
+ struct aws_idle_connection *current_idle_connection =
+ AWS_CONTAINER_OF(node, struct aws_idle_connection, node);
+ if (current_idle_connection->cull_timestamp > now) {
+ break;
+ }
+
+ current_node = aws_linked_list_next(current_node);
+ aws_linked_list_remove(node);
+ aws_linked_list_push_back(&work.connections_to_release, node);
+ --manager->idle_connection_count;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "id=%p: culling idle connection (%p)",
+ (void *)manager,
+ (void *)current_idle_connection->connection);
+ }
+ }
+
+ s_aws_http_connection_manager_get_snapshot(manager, &work.snapshot);
+
+ aws_mutex_unlock(&manager->lock);
+
+ s_aws_http_connection_manager_execute_transaction(&work);
+}
+
+static void s_cull_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_http_connection_manager *manager = arg;
+
+ s_cull_idle_connections(manager);
+
+ s_schedule_connection_culling(manager);
+}
+
+void aws_http_connection_manager_fetch_metrics(
+ const struct aws_http_connection_manager *manager,
+ struct aws_http_manager_metrics *out_metrics) {
+ AWS_PRECONDITION(manager);
+ AWS_PRECONDITION(out_metrics);
+
+ AWS_FATAL_ASSERT(aws_mutex_lock((struct aws_mutex *)(void *)&manager->lock) == AWS_OP_SUCCESS);
+ out_metrics->available_concurrency = manager->idle_connection_count;
+ out_metrics->pending_concurrency_acquires = manager->pending_acquisition_count;
+ out_metrics->leased_concurrency = manager->internal_ref[AWS_HCMCT_VENDED_CONNECTION];
+ AWS_FATAL_ASSERT(aws_mutex_unlock((struct aws_mutex *)(void *)&manager->lock) == AWS_OP_SUCCESS);
+}
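
A short polling helper built on the call above; the three metrics mirror the idle, vended, and pending-acquisition counters maintained under the manager's lock. The s_log_pool_metrics name is hypothetical:

#include <aws/http/connection_manager.h>

/* Hypothetical helper: log the manager's current pool utilization. */
static void s_log_pool_metrics(const struct aws_http_connection_manager *manager) {
    struct aws_http_manager_metrics metrics;
    aws_http_connection_manager_fetch_metrics(manager, &metrics);

    AWS_LOGF_INFO(
        AWS_LS_HTTP_CONNECTION_MANAGER,
        "pool: %zu idle, %zu leased, %zu pending acquisitions",
        metrics.available_concurrency,
        metrics.leased_concurrency,
        metrics.pending_concurrency_acquires);
}
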
diff --git a/contrib/restricted/aws/aws-c-http/source/connection_monitor.c b/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
new file mode 100644
index 0000000000..2732325512
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/connection_monitor.c
@@ -0,0 +1,235 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/connection_monitor.h>
+
+#include <aws/http/connection.h>
+#include <aws/http/statistics.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+#include <aws/io/statistics.h>
+
+#include <aws/common/clock.h>
+
+#include <inttypes.h>
+
+static void s_process_statistics(
+ struct aws_crt_statistics_handler *handler,
+ struct aws_crt_statistics_sample_interval *interval,
+ struct aws_array_list *stats_list,
+ void *context) {
+
+ (void)interval;
+
+ struct aws_statistics_handler_http_connection_monitor_impl *impl = handler->impl;
+ if (!aws_http_connection_monitoring_options_is_valid(&impl->options)) {
+ return;
+ }
+
+ uint64_t pending_read_interval_ms = 0;
+ uint64_t pending_write_interval_ms = 0;
+ uint64_t bytes_read = 0;
+ uint64_t bytes_written = 0;
+ uint32_t h1_current_outgoing_stream_id = 0;
+ uint32_t h1_current_incoming_stream_id = 0;
+
+ /*
+ * Pull out the data needed to perform the throughput calculation
+ */
+ size_t stats_count = aws_array_list_length(stats_list);
+ bool h2 = false;
+ bool h2_was_inactive = false;
+
+ for (size_t i = 0; i < stats_count; ++i) {
+ struct aws_crt_statistics_base *stats_base = NULL;
+ if (aws_array_list_get_at(stats_list, &stats_base, i)) {
+ continue;
+ }
+
+ switch (stats_base->category) {
+ case AWSCRT_STAT_CAT_SOCKET: {
+ struct aws_crt_statistics_socket *socket_stats = (struct aws_crt_statistics_socket *)stats_base;
+ bytes_read = socket_stats->bytes_read;
+ bytes_written = socket_stats->bytes_written;
+ break;
+ }
+
+ case AWSCRT_STAT_CAT_HTTP1_CHANNEL: {
+ AWS_ASSERT(!h2);
+ struct aws_crt_statistics_http1_channel *http1_stats =
+ (struct aws_crt_statistics_http1_channel *)stats_base;
+ pending_read_interval_ms = http1_stats->pending_incoming_stream_ms;
+ pending_write_interval_ms = http1_stats->pending_outgoing_stream_ms;
+ h1_current_outgoing_stream_id = http1_stats->current_outgoing_stream_id;
+ h1_current_incoming_stream_id = http1_stats->current_incoming_stream_id;
+
+ break;
+ }
+
+ case AWSCRT_STAT_CAT_HTTP2_CHANNEL: {
+ struct aws_crt_statistics_http2_channel *h2_stats =
+ (struct aws_crt_statistics_http2_channel *)stats_base;
+ pending_read_interval_ms = h2_stats->pending_incoming_stream_ms;
+ pending_write_interval_ms = h2_stats->pending_outgoing_stream_ms;
+ h2_was_inactive |= h2_stats->was_inactive;
+ h2 = true;
+ break;
+ }
+
+ default:
+ break;
+ }
+ }
+
+ if (impl->options.statistics_observer_fn) {
+ impl->options.statistics_observer_fn(
+ (size_t)(uintptr_t)(context), stats_list, impl->options.statistics_observer_user_data);
+ }
+
+ struct aws_channel *channel = context;
+
+ uint64_t bytes_per_second = 0;
+ uint64_t max_pending_io_interval_ms = 0;
+
+ if (pending_write_interval_ms > 0) {
+ double fractional_bytes_written_per_second =
+ (double)bytes_written * (double)AWS_TIMESTAMP_MILLIS / (double)pending_write_interval_ms;
+ if (fractional_bytes_written_per_second >= (double)UINT64_MAX) {
+ bytes_per_second = UINT64_MAX;
+ } else {
+ bytes_per_second = (uint64_t)fractional_bytes_written_per_second;
+ }
+ max_pending_io_interval_ms = pending_write_interval_ms;
+ }
+
+ if (pending_read_interval_ms > 0) {
+ double fractional_bytes_read_per_second =
+ (double)bytes_read * (double)AWS_TIMESTAMP_MILLIS / (double)pending_read_interval_ms;
+ if (fractional_bytes_read_per_second >= (double)UINT64_MAX) {
+ bytes_per_second = UINT64_MAX;
+ } else {
+ bytes_per_second = aws_add_u64_saturating(bytes_per_second, (uint64_t)fractional_bytes_read_per_second);
+ }
+ if (pending_read_interval_ms > max_pending_io_interval_ms) {
+ max_pending_io_interval_ms = pending_read_interval_ms;
+ }
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_IO_CHANNEL,
+ "id=%p: channel throughput - %" PRIu64 " bytes per second",
+ (void *)channel,
+ bytes_per_second);
+
+ /*
+ * Check throughput only if the connection had an active stream for the entire interval, with no gaps.
+ */
+ bool check_throughput = false;
+ if (h2) {
+ /* For HTTP/2, check throughput only if the connection had at least one active stream for the whole interval */
+ check_throughput = !h2_was_inactive;
+ } else {
+ /* For HTTP/1, check throughput only if a stream exists now and the same stream was also current in the
+ * previous interval */
+ check_throughput =
+ (h1_current_incoming_stream_id != 0 && h1_current_incoming_stream_id == impl->last_incoming_stream_id) ||
+ (h1_current_outgoing_stream_id != 0 && h1_current_outgoing_stream_id == impl->last_outgoing_stream_id);
+
+ impl->last_outgoing_stream_id = h1_current_outgoing_stream_id;
+ impl->last_incoming_stream_id = h1_current_incoming_stream_id;
+ }
+ impl->last_measured_throughput = bytes_per_second;
+
+ if (!check_throughput) {
+ AWS_LOGF_TRACE(AWS_LS_IO_CHANNEL, "id=%p: channel throughput does not need to be checked", (void *)channel);
+ impl->throughput_failure_time_ms = 0;
+ return;
+ }
+
+ if (bytes_per_second >= impl->options.minimum_throughput_bytes_per_second) {
+ impl->throughput_failure_time_ms = 0;
+ return;
+ }
+
+ impl->throughput_failure_time_ms =
+ aws_add_u64_saturating(impl->throughput_failure_time_ms, max_pending_io_interval_ms);
+
+ AWS_LOGF_INFO(
+ AWS_LS_IO_CHANNEL,
+ "id=%p: Channel low throughput warning. Currently %" PRIu64 " milliseconds of consecutive failure time",
+ (void *)channel,
+ impl->throughput_failure_time_ms);
+
+ uint64_t maximum_failure_time_ms = aws_timestamp_convert(
+ impl->options.allowable_throughput_failure_interval_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+ if (impl->throughput_failure_time_ms <= maximum_failure_time_ms) {
+ return;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_IO_CHANNEL,
+ "id=%p: Channel low throughput threshold exceeded (< %" PRIu64
+ " bytes per second for more than %u seconds). Shutting down.",
+ (void *)channel,
+ impl->options.minimum_throughput_bytes_per_second,
+ impl->options.allowable_throughput_failure_interval_seconds);
+
+ aws_channel_shutdown(channel, AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE);
+}
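
The throughput estimate above scales bytes moved by the fraction of a second that I/O was actually pending: for example, 500,000 bytes written while writes were pending for 250 ms gives 500,000 * 1000 / 250 = 2,000,000 bytes/s (AWS_TIMESTAMP_MILLIS is 1000). A standalone sketch of the same saturating conversion, with a hypothetical helper name:

#include <stdint.h>

/* Hypothetical helper mirroring the math in s_process_statistics: estimate bytes/second from bytes moved and the
 * time (in ms) that I/O was pending, saturating at UINT64_MAX instead of overflowing. */
static uint64_t s_estimate_bytes_per_second(uint64_t bytes, uint64_t pending_interval_ms) {
    if (pending_interval_ms == 0) {
        return 0;
    }
    double fractional = (double)bytes * 1000.0 / (double)pending_interval_ms;
    if (fractional >= (double)UINT64_MAX) {
        return UINT64_MAX;
    }
    return (uint64_t)fractional;
}

/* e.g. s_estimate_bytes_per_second(500000, 250) == 2000000 */
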
+
+static void s_destroy(struct aws_crt_statistics_handler *handler) {
+ if (handler == NULL) {
+ return;
+ }
+
+ aws_mem_release(handler->allocator, handler);
+}
+
+static uint64_t s_get_report_interval_ms(struct aws_crt_statistics_handler *handler) {
+ (void)handler;
+
+ return 1000;
+}
+
+static struct aws_crt_statistics_handler_vtable s_http_connection_monitor_vtable = {
+ .process_statistics = s_process_statistics,
+ .destroy = s_destroy,
+ .get_report_interval_ms = s_get_report_interval_ms,
+};
+
+struct aws_crt_statistics_handler *aws_crt_statistics_handler_new_http_connection_monitor(
+ struct aws_allocator *allocator,
+ struct aws_http_connection_monitoring_options *options) {
+ struct aws_crt_statistics_handler *handler = NULL;
+ struct aws_statistics_handler_http_connection_monitor_impl *impl = NULL;
+
+ if (!aws_mem_acquire_many(
+ allocator,
+ 2,
+ &handler,
+ sizeof(struct aws_crt_statistics_handler),
+ &impl,
+ sizeof(struct aws_statistics_handler_http_connection_monitor_impl))) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*handler);
+ AWS_ZERO_STRUCT(*impl);
+ impl->options = *options;
+
+ handler->vtable = &s_http_connection_monitor_vtable;
+ handler->allocator = allocator;
+ handler->impl = impl;
+
+ return handler;
+}
+
+bool aws_http_connection_monitoring_options_is_valid(const struct aws_http_connection_monitoring_options *options) {
+ if (options == NULL) {
+ return false;
+ }
+
+ return options->allowable_throughput_failure_interval_seconds > 0 &&
+ options->minimum_throughput_bytes_per_second > 0;
+}
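
The validator above only treats monitoring as enabled when both thresholds are non-zero. A hypothetical sketch of wiring monitoring options into the connection manager options (which, as shown earlier, copies monitoring_options by value when present); the header choices and s_configure_monitoring name are assumptions:

#include <aws/http/connection.h>
#include <aws/http/connection_manager.h>
#include <aws/http/private/connection_monitor.h>

/* Hypothetical configuration: shut the connection down if throughput stays below 1 KiB/s for 5 consecutive seconds. */
static void s_configure_monitoring(struct aws_http_connection_manager_options *manager_options) {
    /* static so the pointer stays valid until aws_http_connection_manager_new() copies the options */
    static struct aws_http_connection_monitoring_options s_monitoring = {
        .minimum_throughput_bytes_per_second = 1024,
        .allowable_throughput_failure_interval_seconds = 5,
    };

    if (aws_http_connection_monitoring_options_is_valid(&s_monitoring)) {
        manager_options->monitoring_options = &s_monitoring;
    }
}
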
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_connection.c b/contrib/restricted/aws/aws-c-http/source/h1_connection.c
new file mode 100644
index 0000000000..3532bb80d9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_connection.c
@@ -0,0 +1,2064 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/clock.h>
+#include <aws/common/math.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/private/h1_connection.h>
+#include <aws/http/private/h1_decoder.h>
+#include <aws/http/private/h1_stream.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+enum {
+ DECODER_INITIAL_SCRATCH_SIZE = 256,
+};
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size);
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately);
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler);
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler);
+static void s_handler_destroy(struct aws_channel_handler *handler);
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
+static struct aws_http_stream *s_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+static struct aws_http_stream *s_new_server_request_handler_stream(
+ const struct aws_http_request_handler_options *options);
+static int s_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response);
+static void s_connection_close(struct aws_http_connection *connection_base);
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base);
+static bool s_connection_is_open(const struct aws_http_connection *connection_base);
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base);
+static int s_decoder_on_request(
+ enum aws_http_method method_enum,
+ const struct aws_byte_cursor *method_str,
+ const struct aws_byte_cursor *uri,
+ void *user_data);
+static int s_decoder_on_response(int status_code, void *user_data);
+static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void *user_data);
+static int s_decoder_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data);
+static int s_decoder_on_done(void *user_data);
+static void s_reset_statistics(struct aws_channel_handler *handler);
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats);
+static void s_write_outgoing_stream(struct aws_h1_connection *connection, bool first_try);
+static int s_try_process_next_stream_read_message(struct aws_h1_connection *connection, bool *out_stop_processing);
+
+static struct aws_http_connection_vtable s_h1_connection_vtable = {
+ .channel_handler_vtable =
+ {
+ .process_read_message = s_handler_process_read_message,
+ .process_write_message = s_handler_process_write_message,
+ .increment_read_window = s_handler_increment_read_window,
+ .shutdown = s_handler_shutdown,
+ .initial_window_size = s_handler_initial_window_size,
+ .message_overhead = s_handler_message_overhead,
+ .destroy = s_handler_destroy,
+ .reset_statistics = s_reset_statistics,
+ .gather_statistics = s_gather_statistics,
+ },
+ .on_channel_handler_installed = s_handler_installed,
+ .make_request = s_make_request,
+ .new_server_request_handler_stream = s_new_server_request_handler_stream,
+ .stream_send_response = s_stream_send_response,
+ .close = s_connection_close,
+ .stop_new_requests = s_connection_stop_new_request,
+ .is_open = s_connection_is_open,
+ .new_requests_allowed = s_connection_new_requests_allowed,
+ .change_settings = NULL,
+ .send_ping = NULL,
+ .send_goaway = NULL,
+ .get_sent_goaway = NULL,
+ .get_received_goaway = NULL,
+ .get_local_settings = NULL,
+ .get_remote_settings = NULL,
+};
+
+static const struct aws_h1_decoder_vtable s_h1_decoder_vtable = {
+ .on_request = s_decoder_on_request,
+ .on_response = s_decoder_on_response,
+ .on_header = s_decoder_on_header,
+ .on_body = s_decoder_on_body,
+ .on_done = s_decoder_on_done,
+};
+
+void aws_h1_connection_lock_synced_data(struct aws_h1_connection *connection) {
+ int err = aws_mutex_lock(&connection->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+void aws_h1_connection_unlock_synced_data(struct aws_h1_connection *connection) {
+ int err = aws_mutex_unlock(&connection->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+/**
+ * Internal function for bringing connection to a stop.
+ * Invoked multiple times, including when:
+ * - Channel is shutting down in the read direction.
+ * - Channel is shutting down in the write direction.
+ * - An error occurs.
+ * - User wishes to close the connection (this is the only case where the function may run off-thread).
+ */
+static void s_stop(
+ struct aws_h1_connection *connection,
+ bool stop_reading,
+ bool stop_writing,
+ bool schedule_shutdown,
+ int error_code) {
+
+ AWS_ASSERT(stop_reading || stop_writing || schedule_shutdown); /* You are required to stop at least 1 thing */
+
+ if (stop_reading) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ connection->thread_data.is_reading_stopped = true;
+ }
+
+ if (stop_writing) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ connection->thread_data.is_writing_stopped = true;
+ }
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+
+ /* Even if we're not scheduling shutdown just yet (ex: sent final request but waiting to read final response)
+ * we don't consider the connection "open" anymore, so the user can't create more streams */
+ connection->synced_data.is_open = false;
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ if (schedule_shutdown) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Shutting down connection with error code %d (%s).",
+ (void *)&connection->base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_channel_shutdown(connection->base.channel_slot->channel, error_code);
+ }
+}
+
+static void s_shutdown_due_to_error(struct aws_h1_connection *connection, int error_code) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (!error_code) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ /* Stop reading AND writing if an error occurs.
+ *
+ * It doesn't currently seem worth the complexity to distinguish between read errors and write errors.
+ * The only scenarios that would benefit from this are pipelining scenarios (ex: A server
+ * could continue sending a response to request A if there was an error reading request B).
+     * But pipelining in HTTP/1.1 is known to be fragile with regard to errors, so let's just keep it simple.
+ */
+ s_stop(connection, true /*stop_reading*/, true /*stop_writing*/, true /*schedule_shutdown*/, error_code);
+}
+
+/**
+ * Public function for closing connection.
+ */
+static void s_connection_close(struct aws_http_connection *connection_base) {
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+
+ /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */
+ s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
+}
+
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base) {
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ if (!connection->synced_data.new_stream_error_code) {
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ }
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+}
+
+static bool s_connection_is_open(const struct aws_http_connection *connection_base) {
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+ bool is_open;
+
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ is_open = connection->synced_data.is_open;
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ return is_open;
+}
+
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base) {
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ return new_stream_error_code == 0;
+}
+
+static int s_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response) {
+ AWS_PRECONDITION(stream);
+ AWS_PRECONDITION(response);
+ struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base);
+ return aws_h1_stream_send_response(h1_stream, response);
+}
+
+/* Calculate the desired window size for a connection that has switched protocols and become a midchannel handler. */
+static size_t s_calculate_midchannel_desired_connection_window(struct aws_h1_connection *connection) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_ASSERT(connection->thread_data.has_switched_protocols);
+
+ if (!connection->base.channel_slot->adj_right) {
+ /* No downstream handler installed. */
+ return 0;
+ }
+
+ /* Connection is just dumbly forwarding aws_io_messages, so try to match downstream handler. */
+ return aws_channel_slot_downstream_read_window(connection->base.channel_slot);
+}
+
+/* Calculate the desired window size for a connection that is processing data for aws_http_streams. */
+static size_t s_calculate_stream_mode_desired_connection_window(struct aws_h1_connection *connection) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_ASSERT(!connection->thread_data.has_switched_protocols);
+
+ if (!connection->base.stream_manual_window_management) {
+ return SIZE_MAX;
+ }
+
+ /* Connection window should match the available space in the read-buffer */
+ AWS_ASSERT(
+ connection->thread_data.read_buffer.pending_bytes <= connection->thread_data.read_buffer.capacity &&
+ "This isn't fatal, but our math is off");
+ const size_t desired_connection_window = aws_sub_size_saturating(
+ connection->thread_data.read_buffer.capacity, connection->thread_data.read_buffer.pending_bytes);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Window stats: connection=%zu+%zu stream=%" PRIu64 " buffer=%zu/%zu",
+ (void *)&connection->base,
+ connection->thread_data.connection_window,
+ desired_connection_window - connection->thread_data.connection_window /*increment_size*/,
+ connection->thread_data.incoming_stream ? connection->thread_data.incoming_stream->thread_data.stream_window
+ : 0,
+ connection->thread_data.read_buffer.pending_bytes,
+ connection->thread_data.read_buffer.capacity);
+
+ return desired_connection_window;
+}
+
+/* Increment connection window, if necessary */
+static int s_update_connection_window(struct aws_h1_connection *connection) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (connection->thread_data.is_reading_stopped) {
+ return AWS_OP_SUCCESS;
+ }
+
+ const size_t desired_size = connection->thread_data.has_switched_protocols
+ ? s_calculate_midchannel_desired_connection_window(connection)
+ : s_calculate_stream_mode_desired_connection_window(connection);
+
+ const size_t increment_size = aws_sub_size_saturating(desired_size, connection->thread_data.connection_window);
+ if (increment_size > 0) {
+ /* Update local `connection_window`. See comments at variable's declaration site
+ * on why we use this instead of the official `aws_channel_slot.window_size` */
+ connection->thread_data.connection_window += increment_size;
+ connection->thread_data.recent_window_increments =
+ aws_add_size_saturating(connection->thread_data.recent_window_increments, increment_size);
+ if (aws_channel_slot_increment_read_window(connection->base.channel_slot, increment_size)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to increment read window, error %d (%s). Closing connection.",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
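+
+/* Worked example of the window arithmetic above (illustrative numbers): with a 256 KiB
+ * read buffer holding 64 KiB of pending bytes, the desired window is 192 KiB; if the
+ * current connection_window is 128 KiB, the slot's read window is incremented by 64 KiB.
+ * When desired <= current, the saturating subtraction yields 0 and no update is issued. */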
+
+int aws_h1_stream_activate(struct aws_http_stream *stream) {
+ struct aws_h1_stream *h1_stream = AWS_CONTAINER_OF(stream, struct aws_h1_stream, base);
+
+ struct aws_http_connection *base_connection = stream->owning_connection;
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h1_connection, base);
+
+ bool should_schedule_task = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ /* Note: We're touching both the connection's and stream's synced_data in this section,
+ * which is OK because an h1_connection and all its h1_streams share a single lock. */
+ aws_h1_connection_lock_synced_data(connection);
+
+ if (stream->id) {
+ /* stream has already been activated. */
+ aws_h1_connection_unlock_synced_data(connection);
+ return AWS_OP_SUCCESS;
+ }
+
+ if (connection->synced_data.new_stream_error_code) {
+ aws_h1_connection_unlock_synced_data(connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to activate the stream id=%p, new streams are not allowed now. error %d (%s)",
+ (void *)&connection->base,
+ (void *)stream,
+ connection->synced_data.new_stream_error_code,
+ aws_error_name(connection->synced_data.new_stream_error_code));
+ return aws_raise_error(connection->synced_data.new_stream_error_code);
+ }
+
+ stream->id = aws_http_connection_get_next_stream_id(base_connection);
+ if (!stream->id) {
+ aws_h1_connection_unlock_synced_data(connection);
+ /* aws_http_connection_get_next_stream_id() raises its own error. */
+ return AWS_OP_ERR;
+ }
+
+ /* ID successfully assigned */
+ h1_stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_ACTIVE;
+
+ aws_linked_list_push_back(&connection->synced_data.new_client_stream_list, &h1_stream->node);
+ if (!connection->synced_data.is_cross_thread_work_task_scheduled) {
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ should_schedule_task = true;
+ }
+
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ /* connection keeps activated stream alive until stream completes */
+ aws_atomic_fetch_add(&stream->refcount, 1);
+
+ if (should_schedule_task) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION, "id=%p: Scheduling connection cross-thread work task.", (void *)base_connection);
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Connection cross-thread work task was already scheduled",
+ (void *)base_connection);
+ }
+
+ return AWS_OP_SUCCESS;
+}
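+
+/* From the user's perspective, activation is normally reached through the public client API,
+ * roughly like the sketch below (hedged: assumes the usual aws-c-http wrappers, which are
+ * expected to route to aws_h1_stream_activate() for HTTP/1.1 streams):
+ *
+ *     struct aws_http_stream *stream = aws_http_connection_make_request(connection, &options);
+ *     if (stream) {
+ *         aws_http_stream_activate(stream);
+ *     }
+ */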
+
+struct aws_http_stream *s_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options) {
+ struct aws_h1_stream *stream = aws_h1_stream_new_request(client_connection, options);
+ if (!stream) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot create request stream, error %d (%s)",
+ (void *)client_connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return NULL;
+ }
+
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(client_connection, struct aws_h1_connection, base);
+
+ /* Insert new stream into pending list, and schedule outgoing_stream_task if it's not already running. */
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ if (new_stream_error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot create request stream, error %d (%s)",
+ (void *)client_connection,
+ new_stream_error_code,
+ aws_error_name(new_stream_error_code));
+
+ aws_raise_error(new_stream_error_code);
+ goto error;
+ }
+
+ /* Success! */
+ struct aws_byte_cursor method;
+ aws_http_message_get_request_method(options->request, &method);
+ stream->base.request_method = aws_http_str_to_method(method);
+ struct aws_byte_cursor path;
+ aws_http_message_get_request_path(options->request, &path);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Created client request on connection=%p: " PRInSTR " " PRInSTR " " PRInSTR,
+ (void *)&stream->base,
+ (void *)client_connection,
+ AWS_BYTE_CURSOR_PRI(method),
+ AWS_BYTE_CURSOR_PRI(path),
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->base.http_version)));
+
+ return &stream->base;
+
+error:
+ /* Force destruction of the stream, avoiding ref counting */
+ stream->base.vtable->destroy(&stream->base);
+ return NULL;
+}
+
+/* Extract work items from synced_data, and perform the work on-thread. */
+static void s_cross_thread_work_task(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+ (void)channel_task;
+ struct aws_h1_connection *connection = arg;
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION, "id=%p: Running connection cross-thread work task.", (void *)&connection->base);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+
+ connection->synced_data.is_cross_thread_work_task_scheduled = false;
+
+ bool has_new_client_streams = !aws_linked_list_empty(&connection->synced_data.new_client_stream_list);
+ aws_linked_list_move_all_back(
+ &connection->thread_data.stream_list, &connection->synced_data.new_client_stream_list);
+
+ aws_h1_connection_unlock_synced_data(connection);
+ /* END CRITICAL SECTION */
+
+ /* Kick off outgoing-stream task if necessary */
+ if (has_new_client_streams) {
+ aws_h1_connection_try_write_outgoing_stream(connection);
+ }
+}
+
+static bool s_aws_http_stream_was_successful_connect(struct aws_h1_stream *stream) {
+ struct aws_http_stream *base = &stream->base;
+ if (base->request_method != AWS_HTTP_METHOD_CONNECT) {
+ return false;
+ }
+
+ if (base->client_data == NULL) {
+ return false;
+ }
+
+ if (base->client_data->response_status != AWS_HTTP_STATUS_CODE_200_OK) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * Validate and perform a protocol switch on a connection. Protocol switching essentially turns the connection's
+ * handler into a dummy pass-through. It is valid to switch protocols to the same protocol resulting in a channel
+ * that has a "dead" http handler in the middle of the channel (which negotiated the CONNECT through the proxy) and
+ * a "live" handler on the end which takes the actual http requests. By doing this, we get the exact same
+ * behavior whether we're transitioning to http or any other protocol: once the CONNECT succeeds
+ * the first http handler is put in pass-through mode and a new protocol (which could be http) is tacked onto the end.
+ */
+static int s_aws_http1_switch_protocols(struct aws_h1_connection *connection) {
+ AWS_FATAL_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ /* Switching protocols while there are multiple streams is too complex to deal with.
+ * Ensure stream_list has exactly this 1 stream in it. */
+ if (aws_linked_list_begin(&connection->thread_data.stream_list) !=
+ aws_linked_list_rbegin(&connection->thread_data.stream_list)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot switch protocols while further streams are pending, closing connection.",
+ (void *)&connection->base);
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Connection has switched protocols, another channel handler must be installed to"
+ " deal with further data.",
+ (void *)&connection->base);
+
+ connection->thread_data.has_switched_protocols = true;
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_SWITCHED_PROTOCOLS;
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ return AWS_OP_SUCCESS;
+}
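+
+/* After a successful CONNECT the channel typically ends up looking something like:
+ *
+ *     [socket] -> [this http handler, now pass-through] -> [optional tls] -> [new protocol handler]
+ *
+ * as described above; the pass-through handler simply forwards aws_io_messages. */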
+
+static void s_stream_complete(struct aws_h1_stream *stream, int error_code) {
+ struct aws_h1_connection *connection =
+ AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base);
+
+ /*
+ * If this is the end of a successful CONNECT request, mark ourselves as pass-through since the proxy layer
+ * will be tacking on a new http handler (and possibly a tls handler in-between).
+ */
+ if (error_code == AWS_ERROR_SUCCESS && s_aws_http_stream_was_successful_connect(stream)) {
+ if (s_aws_http1_switch_protocols(connection)) {
+ error_code = AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE;
+ s_shutdown_due_to_error(connection, error_code);
+ }
+ }
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ if (stream->base.client_data && stream->is_incoming_message_done) {
+            /* The request has already finished receiving its response, so ignore the error
+             * and consider the stream to have completed successfully */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Ignoring error code %d (%s). The response has been fully received,"
+ "so the stream will complete successfully.",
+ (void *)&stream->base,
+ error_code,
+ aws_error_name(error_code));
+ error_code = AWS_ERROR_SUCCESS;
+ }
+ if (stream->base.server_data && stream->is_outgoing_message_done) {
+            /* The server has finished sending the response, even though the error occurred before the
+             * request was fully received. Ignore the error and consider the stream to have completed successfully */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Ignoring error code %d (%s). The response has been fully sent,"
+ " so the stream will complete successfully",
+ (void *)&stream->base,
+ error_code,
+ aws_error_name(error_code));
+ error_code = AWS_ERROR_SUCCESS;
+ }
+ }
+
+ /* Remove stream from list. */
+ aws_linked_list_remove(&stream->node);
+
+ /* Nice logging */
+ if (error_code) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Stream completed with error code %d (%s).",
+ (void *)&stream->base,
+ error_code,
+ aws_error_name(error_code));
+
+ } else if (stream->base.client_data) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Client request complete, response status: %d (%s).",
+ (void *)&stream->base,
+ stream->base.client_data->response_status,
+ aws_http_status_text(stream->base.client_data->response_status));
+ } else {
+ AWS_ASSERT(stream->base.server_data);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Server response to " PRInSTR " request complete.",
+ (void *)&stream->base,
+ AWS_BYTE_CURSOR_PRI(stream->base.server_data->request_method_str));
+ }
+
+ /* If connection must shut down, do it BEFORE invoking stream-complete callback.
+ * That way, if aws_http_connection_is_open() is called from stream-complete callback, it returns false. */
+ if (stream->is_final_stream) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Closing connection due to completion of final stream.",
+ (void *)&connection->base);
+
+ s_connection_close(&connection->base);
+ }
+
+ { /* BEGIN CRITICAL SECTION */
+ /* Note: We're touching the stream's synced_data here, which is OK
+ * because an h1_connection and all its h1_streams share a single lock. */
+ aws_h1_connection_lock_synced_data(connection);
+
+ /* Mark stream complete */
+ stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_COMPLETE;
+
+ /* Move chunks out of synced data */
+ aws_linked_list_move_all_back(&stream->thread_data.pending_chunk_list, &stream->synced_data.pending_chunk_list);
+
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ /* Complete any leftover chunks */
+ while (!aws_linked_list_empty(&stream->thread_data.pending_chunk_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.pending_chunk_list);
+ struct aws_h1_chunk *chunk = AWS_CONTAINER_OF(node, struct aws_h1_chunk, node);
+ aws_h1_chunk_complete_and_destroy(chunk, &stream->base, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED);
+ }
+
+ /* Invoke callback and clean up stream. */
+ if (stream->base.on_complete) {
+ stream->base.on_complete(&stream->base, error_code, stream->base.user_data);
+ }
+
+ aws_http_stream_release(&stream->base);
+}
+
+static void s_add_time_measurement_to_stats(uint64_t start_ns, uint64_t end_ns, uint64_t *output_ms) {
+ if (end_ns > start_ns) {
+ *output_ms += aws_timestamp_convert(end_ns - start_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL);
+ }
+}
+
+static void s_set_outgoing_stream_ptr(
+ struct aws_h1_connection *connection,
+ struct aws_h1_stream *next_outgoing_stream) {
+ struct aws_h1_stream *prev = connection->thread_data.outgoing_stream;
+
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ if (prev == NULL && next_outgoing_stream != NULL) {
+ /* transition from nothing to write -> something to write */
+ connection->thread_data.outgoing_stream_timestamp_ns = now_ns;
+ } else if (prev != NULL && next_outgoing_stream == NULL) {
+ /* transition from something to write -> nothing to write */
+ s_add_time_measurement_to_stats(
+ connection->thread_data.outgoing_stream_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_outgoing_stream_ms);
+ }
+
+ connection->thread_data.outgoing_stream = next_outgoing_stream;
+}
+
+static void s_set_incoming_stream_ptr(
+ struct aws_h1_connection *connection,
+ struct aws_h1_stream *next_incoming_stream) {
+ struct aws_h1_stream *prev = connection->thread_data.incoming_stream;
+
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ if (prev == NULL && next_incoming_stream != NULL) {
+ /* transition from nothing to read -> something to read */
+ connection->thread_data.incoming_stream_timestamp_ns = now_ns;
+ } else if (prev != NULL && next_incoming_stream == NULL) {
+ /* transition from something to read -> nothing to read */
+ s_add_time_measurement_to_stats(
+ connection->thread_data.incoming_stream_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_incoming_stream_ms);
+ }
+
+ connection->thread_data.incoming_stream = next_incoming_stream;
+}
+
+/**
+ * Ensure `incoming_stream` is pointing at the correct stream, and update state if it changes.
+ */
+static void s_client_update_incoming_stream_ptr(struct aws_h1_connection *connection) {
+ struct aws_linked_list *list = &connection->thread_data.stream_list;
+ struct aws_h1_stream *desired;
+ if (connection->thread_data.is_reading_stopped) {
+ desired = NULL;
+ } else if (aws_linked_list_empty(list)) {
+ desired = NULL;
+ } else {
+ desired = AWS_CONTAINER_OF(aws_linked_list_begin(list), struct aws_h1_stream, node);
+ }
+
+ if (connection->thread_data.incoming_stream == desired) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Current incoming stream is now %p.",
+ (void *)&connection->base,
+ desired ? (void *)&desired->base : NULL);
+
+ s_set_incoming_stream_ptr(connection, desired);
+}
+
+/**
+ * If necessary, update `outgoing_stream` so it is pointing at a stream
+ * with data to send, or NULL if all streams are done sending data.
+ *
+ * Called from event-loop thread.
+ * This function has lots of side effects.
+ */
+static struct aws_h1_stream *s_update_outgoing_stream_ptr(struct aws_h1_connection *connection) {
+ struct aws_h1_stream *current = connection->thread_data.outgoing_stream;
+ bool current_changed = false;
+ int err;
+
+ /* If current stream is done sending data... */
+ if (current && !aws_h1_encoder_is_message_in_progress(&connection->thread_data.encoder)) {
+ current->is_outgoing_message_done = true;
+
+ /* RFC-7230 section 6.6: Tear-down.
+         * If this was the final stream, don't allow any further streams to be sent */
+ if (current->is_final_stream) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Done sending final stream, no further streams will be sent.",
+ (void *)&connection->base);
+
+ s_stop(
+ connection,
+ false /*stop_reading*/,
+ true /*stop_writing*/,
+ false /*schedule_shutdown*/,
+ AWS_ERROR_SUCCESS);
+ }
+
+ /* If it's also done receiving data, then it's complete! */
+ if (current->is_incoming_message_done) {
+ /* Only 1st stream in list could finish receiving before it finished sending */
+ AWS_ASSERT(&current->node == aws_linked_list_begin(&connection->thread_data.stream_list));
+
+ /* This removes stream from list */
+ s_stream_complete(current, AWS_ERROR_SUCCESS);
+ }
+
+ current = NULL;
+ current_changed = true;
+ }
+
+ /* If current stream is NULL, look for more work. */
+ if (!current && !connection->thread_data.is_writing_stopped) {
+
+ /* Look for next stream we can work on. */
+ for (struct aws_linked_list_node *node = aws_linked_list_begin(&connection->thread_data.stream_list);
+ node != aws_linked_list_end(&connection->thread_data.stream_list);
+ node = aws_linked_list_next(node)) {
+
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(node, struct aws_h1_stream, node);
+
+ /* If we already sent this stream's data, keep looking... */
+ if (stream->is_outgoing_message_done) {
+ continue;
+ }
+
+ /* STOP if we're a server, and this stream's response isn't ready to send.
+ * It's not like we can skip this and start on the next stream because responses must be sent in order.
+             * We don't need a check like this for clients because their streams always start with data to send. */
+ if (connection->base.server_data && !stream->thread_data.has_outgoing_response) {
+ break;
+ }
+
+ /* We found a stream to work on! */
+ current = stream;
+ current_changed = true;
+ break;
+ }
+ }
+
+ /* Update current incoming and outgoing streams. */
+ if (current_changed) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Current outgoing stream is now %p.",
+ (void *)&connection->base,
+ current ? (void *)&current->base : NULL);
+
+ s_set_outgoing_stream_ptr(connection, current);
+
+ if (current) {
+ err = aws_h1_encoder_start_message(
+ &connection->thread_data.encoder, &current->encoder_message, &current->base);
+ (void)err;
+ AWS_ASSERT(!err);
+ }
+
+ /* incoming_stream update is only for client */
+ if (connection->base.client_data) {
+ s_client_update_incoming_stream_ptr(connection);
+ }
+ }
+
+ return current;
+}
+
+/* Runs after an aws_io_message containing HTTP has completed (written to the network, or failed).
+ * This does NOT run after switching protocols, when we're dumbly forwarding aws_io_messages
+ * as a midchannel handler. */
+static void s_on_channel_write_complete(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data) {
+
+ (void)message;
+ struct aws_h1_connection *connection = user_data;
+ AWS_ASSERT(connection->thread_data.is_outgoing_stream_task_active);
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (err_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Message did not write to network, error %d (%s)",
+ (void *)&connection->base,
+ err_code,
+ aws_error_name(err_code));
+
+ s_shutdown_due_to_error(connection, err_code);
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Message finished writing to network. Rescheduling outgoing stream task.",
+ (void *)&connection->base);
+
+ /* To avoid wasting memory, we only want ONE of our written aws_io_messages in the channel at a time.
+ * Therefore, we wait until it's written to the network before trying to send another
+ * by running the outgoing-stream-task again.
+ *
+ * We also want to share the network with other channels.
+ * Therefore, when the write completes, we SCHEDULE the outgoing-stream-task
+ * to run again instead of calling the function directly.
+ * This way, if the message completes synchronously,
+ * we're not hogging the network by writing message after message in a tight loop */
+ aws_channel_schedule_task_now(channel, &connection->outgoing_stream_task);
+}
+
+static void s_outgoing_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_h1_connection *connection = arg;
+ AWS_ASSERT(connection->thread_data.is_outgoing_stream_task_active);
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ s_write_outgoing_stream(connection, false /*first_try*/);
+}
+
+void aws_h1_connection_try_write_outgoing_stream(struct aws_h1_connection *connection) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (connection->thread_data.is_outgoing_stream_task_active) {
+ /* Task is already active */
+ return;
+ }
+
+ connection->thread_data.is_outgoing_stream_task_active = true;
+ s_write_outgoing_stream(connection, true /*first_try*/);
+}
+
+/* Do the actual work of the outgoing-stream-task */
+static void s_write_outgoing_stream(struct aws_h1_connection *connection, bool first_try) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(connection->thread_data.is_outgoing_stream_task_active);
+
+ /* Just stop if we're no longer writing stream data */
+ if (connection->thread_data.is_writing_stopped || connection->thread_data.has_switched_protocols) {
+ return;
+ }
+
+    /* Determine whether we have data available to send, and end the task immediately if there isn't.
+     * The outgoing-stream task will be kicked off again when the user adds more data (new stream, new chunk, etc.) */
+ struct aws_h1_stream *outgoing_stream = s_update_outgoing_stream_ptr(connection);
+ bool waiting_for_chunks = aws_h1_encoder_is_waiting_for_chunks(&connection->thread_data.encoder);
+ if (!outgoing_stream || waiting_for_chunks) {
+ if (!first_try) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Outgoing stream task stopped. outgoing_stream=%p waiting_for_chunks:%d",
+ (void *)&connection->base,
+ outgoing_stream ? (void *)&outgoing_stream->base : NULL,
+ waiting_for_chunks);
+ }
+ connection->thread_data.is_outgoing_stream_task_active = false;
+ return;
+ }
+
+ if (first_try) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "id=%p: Outgoing stream task has begun.", (void *)&connection->base);
+ }
+
+ struct aws_io_message *msg = aws_channel_slot_acquire_max_message_for_write(connection->base.channel_slot);
+ if (!msg) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to acquire message from pool, error %d (%s). Closing connection.",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Set up callback so we can send another message when this one completes */
+ msg->on_completion = s_on_channel_write_complete;
+ msg->user_data = connection;
+
+ /*
+ * Fill message data from the outgoing stream.
+ * Note that we might be resuming work on a stream from a previous run of this task.
+ */
+ if (AWS_OP_SUCCESS != aws_h1_encoder_process(&connection->thread_data.encoder, &msg->message_data)) {
+ /* Error sending data, abandon ship */
+ goto error;
+ }
+
+ if (msg->message_data.len > 0) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Outgoing stream task is sending message of size %zu.",
+ (void *)&connection->base,
+ msg->message_data.len);
+
+ if (aws_channel_slot_send_message(connection->base.channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to send message in write direction, error %d (%s). Closing connection.",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ } else {
+        /* If the message is empty, warn that no work is being done
+         * and reschedule the task to try again next tick.
+         * Most likely the body isn't ready yet, so the body-streaming function has no data to write.
+         * If this scenario turns out to be common we should implement a "pause" feature. */
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Current outgoing stream %p sent no data, will try again next tick.",
+ (void *)&connection->base,
+ outgoing_stream ? (void *)&outgoing_stream->base : NULL);
+
+ aws_mem_release(msg->allocator, msg);
+
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->outgoing_stream_task);
+ }
+
+ return;
+error:
+ if (msg) {
+ aws_mem_release(msg->allocator, msg);
+ }
+ s_shutdown_due_to_error(connection, aws_last_error());
+}
+
+static int s_decoder_on_request(
+ enum aws_http_method method_enum,
+ const struct aws_byte_cursor *method_str,
+ const struct aws_byte_cursor *uri,
+ void *user_data) {
+
+ struct aws_h1_connection *connection = user_data;
+ struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
+
+ AWS_FATAL_ASSERT(connection->thread_data.incoming_stream->base.server_data); /* Request but I'm a client?!?!? */
+
+ AWS_ASSERT(incoming_stream->base.server_data->request_method_str.len == 0);
+ AWS_ASSERT(incoming_stream->base.server_data->request_path.len == 0);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming request: method=" PRInSTR " uri=" PRInSTR,
+ (void *)&incoming_stream->base,
+ AWS_BYTE_CURSOR_PRI(*method_str),
+ AWS_BYTE_CURSOR_PRI(*uri));
+
+ /* Copy strings to internal buffer */
+ struct aws_byte_buf *storage_buf = &incoming_stream->incoming_storage_buf;
+ AWS_ASSERT(storage_buf->capacity == 0);
+
+ size_t storage_size = 0;
+ int err = aws_add_size_checked(uri->len, method_str->len, &storage_size);
+ if (err) {
+ goto error;
+ }
+
+ err = aws_byte_buf_init(storage_buf, incoming_stream->base.alloc, storage_size);
+ if (err) {
+ goto error;
+ }
+
+ aws_byte_buf_write_from_whole_cursor(storage_buf, *method_str);
+ incoming_stream->base.server_data->request_method_str = aws_byte_cursor_from_buf(storage_buf);
+
+ aws_byte_buf_write_from_whole_cursor(storage_buf, *uri);
+ incoming_stream->base.server_data->request_path = aws_byte_cursor_from_buf(storage_buf);
+ aws_byte_cursor_advance(&incoming_stream->base.server_data->request_path, storage_buf->len - uri->len);
+ incoming_stream->base.request_method = method_enum;
+
+ /* No user callbacks, so we're not checking for shutdown */
+ return AWS_OP_SUCCESS;
+
+error:
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to process new incoming request, error %d (%s).",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+}
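+
+/* After the copies above, incoming_storage_buf holds [method bytes][uri bytes]:
+ * request_method_str was captured before the uri was written, so it covers only the method
+ * bytes; request_path starts as a cursor over the whole buffer and is then advanced by
+ * (storage_buf->len - uri->len), i.e. the method's length, leaving it covering only the uri. */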
+
+static int s_decoder_on_response(int status_code, void *user_data) {
+ struct aws_h1_connection *connection = user_data;
+
+ AWS_FATAL_ASSERT(connection->thread_data.incoming_stream->base.client_data); /* Response but I'm a server?!?!? */
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming response status: %d (%s).",
+ (void *)&connection->thread_data.incoming_stream->base,
+ status_code,
+ aws_http_status_text(status_code));
+
+ connection->thread_data.incoming_stream->base.client_data->response_status = status_code;
+
+ /* No user callbacks, so we're not checking for shutdown */
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decoder_on_header(const struct aws_h1_decoded_header *header, void *user_data) {
+ struct aws_h1_connection *connection = user_data;
+ struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming header: " PRInSTR ": " PRInSTR,
+ (void *)&incoming_stream->base,
+ AWS_BYTE_CURSOR_PRI(header->name_data),
+ AWS_BYTE_CURSOR_PRI(header->value_data));
+
+ enum aws_http_header_block header_block =
+ aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);
+
+ /* RFC-7230 section 6.1.
+ * "Connection: close" header signals that a connection will not persist after the current request/response */
+ if (header->name == AWS_HTTP_HEADER_CONNECTION) {
+ /* Certain L7 proxies send a connection close header on a 200/OK response to a CONNECT request. This is nutty
+ * behavior, but the obviously desired behavior on a 200 CONNECT response is to leave the connection open
+ * for the tunneling. */
+ bool ignore_connection_close =
+ incoming_stream->base.request_method == AWS_HTTP_METHOD_CONNECT && incoming_stream->base.client_data &&
+ incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_CODE_200_OK;
+
+ if (!ignore_connection_close && aws_byte_cursor_eq_c_str_ignore_case(&header->value_data, "close")) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Received 'Connection: close' header. This will be the final stream on this connection.",
+ (void *)&incoming_stream->base);
+
+ incoming_stream->is_final_stream = true;
+ { /* BEGIN CRITICAL SECTION */
+ aws_h1_connection_lock_synced_data(connection);
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ aws_h1_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (connection->base.client_data) {
+ /**
+ * RFC-9112 section 9.6.
+ * A client that receives a "close" connection option MUST cease sending
+ * requests on that connection and close the connection after reading the
+ * response message containing the "close" connection option.
+ *
+ * Mark the stream's outgoing message as complete,
+ * so that we stop sending, and stop waiting for it to finish sending.
+ **/
+ if (!incoming_stream->is_outgoing_message_done) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Received 'Connection: close' header, no more request data will be sent.",
+ (void *)&incoming_stream->base);
+ incoming_stream->is_outgoing_message_done = true;
+ }
+ /* Stop writing right now.
+                 * Shutdown will be scheduled after we finish parsing the response */
+ s_stop(
+ connection,
+ false /*stop_reading*/,
+ true /*stop_writing*/,
+ false /*schedule_shutdown*/,
+ AWS_ERROR_SUCCESS);
+ }
+ }
+ }
+
+ if (incoming_stream->base.on_incoming_headers) {
+ struct aws_http_header deliver = {
+ .name = header->name_data,
+ .value = header->value_data,
+ };
+
+ int err = incoming_stream->base.on_incoming_headers(
+ &incoming_stream->base, header_block, &deliver, 1, incoming_stream->base.user_data);
+
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming header callback raised error %d (%s).",
+ (void *)&incoming_stream->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_mark_head_done(struct aws_h1_stream *incoming_stream) {
+ /* Bail out if we've already done this */
+ if (incoming_stream->is_incoming_head_done) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_h1_connection *connection =
+ AWS_CONTAINER_OF(incoming_stream->base.owning_connection, struct aws_h1_connection, base);
+
+ enum aws_http_header_block header_block =
+ aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);
+
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Main header block done.", (void *)&incoming_stream->base);
+ incoming_stream->is_incoming_head_done = true;
+
+ } else if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Informational header block done.", (void *)&incoming_stream->base);
+
+ /* Only clients can receive informational headers.
+ * Check whether we're switching protocols */
+ if (incoming_stream->base.client_data->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS) {
+ if (s_aws_http1_switch_protocols(connection)) {
+ return AWS_OP_ERR;
+ }
+ }
+ }
+
+ /* Invoke user cb */
+ if (incoming_stream->base.on_incoming_header_block_done) {
+ int err = incoming_stream->base.on_incoming_header_block_done(
+ &incoming_stream->base, header_block, incoming_stream->base.user_data);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming-header-block-done callback raised error %d (%s).",
+ (void *)&incoming_stream->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decoder_on_body(const struct aws_byte_cursor *data, bool finished, void *user_data) {
+ (void)finished;
+
+ struct aws_h1_connection *connection = user_data;
+ struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
+ AWS_ASSERT(incoming_stream);
+
+ int err = s_mark_head_done(incoming_stream);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ /* No need to invoke callback for 0-length data */
+ if (data->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Incoming body: %zu bytes received.", (void *)&incoming_stream->base, data->len);
+
+ if (connection->base.stream_manual_window_management) {
+ /* Let stream window shrink by amount of body data received */
+ if (data->len > incoming_stream->thread_data.stream_window) {
+ /* This error shouldn't be possible, but it's all complicated, so do runtime check to be safe. */
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Internal error. Data exceeds HTTP-stream's window.",
+ (void *)&incoming_stream->base);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ incoming_stream->thread_data.stream_window -= data->len;
+
+ if (incoming_stream->thread_data.stream_window == 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Flow-control window has reached 0. No more data can be received until window is updated.",
+ (void *)&incoming_stream->base);
+ }
+ }
+
+ if (incoming_stream->base.on_incoming_body) {
+ err = incoming_stream->base.on_incoming_body(&incoming_stream->base, data, incoming_stream->base.user_data);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming body callback raised error %d (%s).",
+ (void *)&incoming_stream->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
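+
+/* When stream_manual_window_management is enabled, the stream window shrinks above as body
+ * bytes are delivered, and reading stalls once it reaches 0. A user of the public API is then
+ * expected to re-open the window after consuming the data, roughly as in this hedged sketch
+ * (assumes the public aws_http_stream_update_window() helper):
+ *
+ *     static int s_on_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+ *         // ... consume data ...
+ *         aws_http_stream_update_window(stream, data->len);  // grow the window back
+ *         return AWS_OP_SUCCESS;
+ *     }
+ */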
+
+static int s_decoder_on_done(void *user_data) {
+ struct aws_h1_connection *connection = user_data;
+ struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
+ AWS_ASSERT(incoming_stream);
+
+ /* Ensure head was marked done */
+ int err = s_mark_head_done(incoming_stream);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+    /* If it is an informational response, stop here and keep waiting for the real response */
+ enum aws_http_header_block header_block =
+ aws_h1_decoder_get_header_block(connection->thread_data.incoming_stream_decoder);
+ if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
+ return AWS_OP_SUCCESS;
+ }
+
+    /* Otherwise, the incoming stream has finished decoding; update it as needed */
+ incoming_stream->is_incoming_message_done = true;
+
+ /* RFC-7230 section 6.6
+ * After reading the final message, the connection must not read any more */
+ if (incoming_stream->is_final_stream) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Done reading final stream, no further streams will be read.",
+ (void *)&connection->base);
+
+ s_stop(
+ connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
+ }
+
+ if (connection->base.server_data) {
+ /* Server side */
+ aws_http_on_incoming_request_done_fn *on_request_done = incoming_stream->base.server_data->on_request_done;
+ if (on_request_done) {
+ err = on_request_done(&incoming_stream->base, incoming_stream->base.user_data);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming request done callback raised error %d (%s).",
+ (void *)&incoming_stream->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ }
+ if (incoming_stream->is_outgoing_message_done) {
+ AWS_ASSERT(&incoming_stream->node == aws_linked_list_begin(&connection->thread_data.stream_list));
+ s_stream_complete(incoming_stream, AWS_ERROR_SUCCESS);
+ }
+ s_set_incoming_stream_ptr(connection, NULL);
+
+ } else if (incoming_stream->is_outgoing_message_done) {
+ /* Client side */
+ AWS_ASSERT(&incoming_stream->node == aws_linked_list_begin(&connection->thread_data.stream_list));
+
+ s_stream_complete(incoming_stream, AWS_ERROR_SUCCESS);
+
+ s_client_update_incoming_stream_ptr(connection);
+ }
+
+ /* Report success even if user's on_complete() callback shuts down on the connection.
+ * We don't want it to look like something went wrong while decoding.
+ * The decode() function returns after each message completes,
+ * and we won't call decode() again if the connection has been shut down */
+ return AWS_OP_SUCCESS;
+}
+
+/* Common new() logic for server & client */
+static struct aws_h1_connection *s_connection_new(
+ struct aws_allocator *alloc,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options,
+ bool server) {
+
+ struct aws_h1_connection *connection = aws_mem_calloc(alloc, 1, sizeof(struct aws_h1_connection));
+ if (!connection) {
+ goto error_connection_alloc;
+ }
+
+ connection->base.vtable = &s_h1_connection_vtable;
+ connection->base.alloc = alloc;
+ connection->base.channel_handler.vtable = &s_h1_connection_vtable.channel_handler_vtable;
+ connection->base.channel_handler.alloc = alloc;
+ connection->base.channel_handler.impl = connection;
+ connection->base.http_version = AWS_HTTP_VERSION_1_1;
+ connection->base.stream_manual_window_management = manual_window_management;
+
+ /* Init the next stream id (server must use even ids, client odd [RFC 7540 5.1.1])*/
+ connection->base.next_stream_id = server ? 2 : 1;
+
+ /* 1 refcount for user */
+ aws_atomic_init_int(&connection->base.refcount, 1);
+
+ if (manual_window_management) {
+ connection->initial_stream_window_size = initial_window_size;
+
+ if (http1_options->read_buffer_capacity > 0) {
+ connection->thread_data.read_buffer.capacity = http1_options->read_buffer_capacity;
+ } else {
+ /* User did not set capacity, choose something reasonable based on initial_window_size */
+ /* NOTE: These values are currently guesses, we should test to find good values */
+ const size_t clamp_min = aws_min_size(g_aws_channel_max_fragment_size * 4, /*256KB*/ 256 * 1024);
+ const size_t clamp_max = /*1MB*/ 1 * 1024 * 1024;
+ connection->thread_data.read_buffer.capacity =
+ aws_max_size(clamp_min, aws_min_size(clamp_max, initial_window_size));
+ }
+
+ connection->thread_data.connection_window = connection->thread_data.read_buffer.capacity;
+ } else {
+ /* No backpressure, keep connection window at SIZE_MAX */
+ connection->initial_stream_window_size = SIZE_MAX;
+ connection->thread_data.read_buffer.capacity = SIZE_MAX;
+ connection->thread_data.connection_window = SIZE_MAX;
+ }
+
+ aws_h1_encoder_init(&connection->thread_data.encoder, alloc);
+
+ aws_channel_task_init(
+ &connection->outgoing_stream_task, s_outgoing_stream_task, connection, "http1_connection_outgoing_stream");
+ aws_channel_task_init(
+ &connection->cross_thread_work_task,
+ s_cross_thread_work_task,
+ connection,
+ "http1_connection_cross_thread_work");
+ aws_linked_list_init(&connection->thread_data.stream_list);
+ aws_linked_list_init(&connection->thread_data.read_buffer.messages);
+ aws_crt_statistics_http1_channel_init(&connection->thread_data.stats);
+
+ int err = aws_mutex_init(&connection->synced_data.lock);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to initialize mutex, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error_mutex;
+ }
+
+ aws_linked_list_init(&connection->synced_data.new_client_stream_list);
+ connection->synced_data.is_open = true;
+
+ struct aws_h1_decoder_params options = {
+ .alloc = alloc,
+ .is_decoding_requests = server,
+ .user_data = connection,
+ .vtable = s_h1_decoder_vtable,
+ .scratch_space_initial_size = DECODER_INITIAL_SCRATCH_SIZE,
+ };
+ connection->thread_data.incoming_stream_decoder = aws_h1_decoder_new(&options);
+ if (!connection->thread_data.incoming_stream_decoder) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create decoder, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error_decoder;
+ }
+
+ return connection;
+
+error_decoder:
+ aws_mutex_clean_up(&connection->synced_data.lock);
+error_mutex:
+ aws_mem_release(alloc, connection);
+error_connection_alloc:
+ return NULL;
+}
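+
+/* Worked example of the read-buffer sizing above (illustrative; assumes the common 16 KiB
+ * channel max fragment size, so clamp_min = min(64 KiB, 256 KiB) = 64 KiB): with manual window
+ * management, initial_window_size = 32 KiB clamps the capacity up to 64 KiB, while
+ * initial_window_size = 4 MiB clamps it down to the 1 MiB maximum. */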
+
+struct aws_http_connection *aws_http_connection_new_http1_1_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options) {
+
+ struct aws_h1_connection *connection =
+ s_connection_new(allocator, manual_window_management, initial_window_size, http1_options, true /*is_server*/);
+ if (!connection) {
+ return NULL;
+ }
+
+ connection->base.server_data = &connection->base.client_or_server_data.server;
+
+ return &connection->base;
+}
+
+struct aws_http_connection *aws_http_connection_new_http1_1_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ size_t initial_window_size,
+ const struct aws_http1_connection_options *http1_options) {
+
+ struct aws_h1_connection *connection =
+ s_connection_new(allocator, manual_window_management, initial_window_size, http1_options, false /*is_server*/);
+ if (!connection) {
+ return NULL;
+ }
+
+ connection->base.client_data = &connection->base.client_or_server_data.client;
+
+ return &connection->base;
+}
+
+static void s_handler_destroy(struct aws_channel_handler *handler) {
+ struct aws_h1_connection *connection = handler->impl;
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_CONNECTION, "id=%p: Destroying connection.", (void *)&connection->base);
+
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.stream_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.new_client_stream_list));
+
+ /* Clean up any buffered read messages. */
+ while (!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.read_buffer.messages);
+ struct aws_io_message *msg = AWS_CONTAINER_OF(node, struct aws_io_message, queueing_handle);
+ aws_mem_release(msg->allocator, msg);
+ }
+
+ aws_h1_decoder_destroy(connection->thread_data.incoming_stream_decoder);
+ aws_h1_encoder_clean_up(&connection->thread_data.encoder);
+ aws_mutex_clean_up(&connection->synced_data.lock);
+ aws_mem_release(connection->base.alloc, connection);
+}
+
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) {
+ struct aws_h1_connection *connection = handler->impl;
+ connection->base.channel_slot = slot;
+
+ /* Acquire a hold on the channel to prevent its destruction until the user has
+ * given the go-ahead via aws_http_connection_release() */
+ aws_channel_acquire_hold(slot->channel);
+}
+
+/* Try to send the next queued aws_io_message to the downstream handler.
+ * This can only be called after the connection has switched protocols and become a midchannel handler. */
+static int s_try_process_next_midchannel_read_message(struct aws_h1_connection *connection, bool *out_stop_processing) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_ASSERT(connection->thread_data.has_switched_protocols);
+ AWS_ASSERT(!connection->thread_data.is_reading_stopped);
+ AWS_ASSERT(!aws_linked_list_empty(&connection->thread_data.read_buffer.messages));
+
+ *out_stop_processing = false;
+ struct aws_io_message *sending_msg = NULL;
+
+ if (!connection->base.channel_slot->adj_right) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Connection has switched protocols, but no handler is installed to deal with this data.",
+ (void *)connection);
+
+ return aws_raise_error(AWS_ERROR_HTTP_SWITCHED_PROTOCOLS);
+ }
+
+ size_t downstream_window = aws_channel_slot_downstream_read_window(connection->base.channel_slot);
+ if (downstream_window == 0) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Downstream window is 0, cannot send switched-protocol message now.",
+ (void *)&connection->base);
+
+ *out_stop_processing = true;
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_linked_list_node *queued_msg_node = aws_linked_list_front(&connection->thread_data.read_buffer.messages);
+ struct aws_io_message *queued_msg = AWS_CONTAINER_OF(queued_msg_node, struct aws_io_message, queueing_handle);
+
+ /* Note that copy_mark is used to mark the progress of partially sent messages. */
+ AWS_ASSERT(queued_msg->message_data.len > queued_msg->copy_mark);
+ size_t sending_bytes = aws_min_size(queued_msg->message_data.len - queued_msg->copy_mark, downstream_window);
+
+ AWS_ASSERT(connection->thread_data.read_buffer.pending_bytes >= sending_bytes);
+ connection->thread_data.read_buffer.pending_bytes -= sending_bytes;
+
+    /* If we can't send the entire queued_msg, copy its data into a new aws_io_message and send that. */
+ if (sending_bytes != queued_msg->message_data.len) {
+ sending_msg = aws_channel_acquire_message_from_pool(
+ connection->base.channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, sending_bytes);
+ if (!sending_msg) {
+ goto error;
+ }
+
+ aws_byte_buf_write(
+ &sending_msg->message_data, queued_msg->message_data.buffer + queued_msg->copy_mark, sending_bytes);
+
+ queued_msg->copy_mark += sending_bytes;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Sending %zu bytes switched-protocol message to downstream handler, %zu bytes remain.",
+ (void *)&connection->base,
+ sending_bytes,
+ queued_msg->message_data.len - queued_msg->copy_mark);
+
+ /* If the last of queued_msg has been copied, it can be deleted now. */
+ if (queued_msg->copy_mark == queued_msg->message_data.len) {
+ aws_linked_list_remove(queued_msg_node);
+ aws_mem_release(queued_msg->allocator, queued_msg);
+ }
+ } else {
+ /* Sending all of queued_msg along. */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Sending full switched-protocol message of size %zu to downstream handler.",
+ (void *)&connection->base,
+ queued_msg->message_data.len);
+
+ aws_linked_list_remove(queued_msg_node);
+ sending_msg = queued_msg;
+ }
+
+ int err = aws_channel_slot_send_message(connection->base.channel_slot, sending_msg, AWS_CHANNEL_DIR_READ);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to send message in read direction, error %d (%s).",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ if (sending_msg) {
+ aws_mem_release(sending_msg->allocator, sending_msg);
+ }
+ return AWS_OP_ERR;
+}
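+
+/* Worked example of the partial-forwarding logic above (illustrative numbers): a 16 KiB queued
+ * message facing an 8 KiB downstream window sends an 8 KiB copy and sets copy_mark = 8 KiB;
+ * the next pass copies and sends the remaining 8 KiB (it still cannot be forwarded as-is, since
+ * part of it was already delivered) and then releases the original message. Only when the whole
+ * message fits in the downstream window on the first pass is the queued aws_io_message forwarded untouched. */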
+
+static struct aws_http_stream *s_new_server_request_handler_stream(
+ const struct aws_http_request_handler_options *options) {
+
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(options->server_connection, struct aws_h1_connection, base);
+
+ if (!aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel) ||
+ !connection->thread_data.can_create_request_handler_stream) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: aws_http_stream_new_server_request_handler() can only be called during incoming request callback.",
+ (void *)&connection->base);
+
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ return NULL;
+ }
+
+ struct aws_h1_stream *stream = aws_h1_stream_new_request_handler(options);
+ if (!stream) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Failed to create request handler stream, error %d (%s).",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return NULL;
+ }
+
+ /*
+ * Success!
+ * Everything beyond this point cannot fail
+ */
+
+ /* Prevent further streams from being created until it's ok to do so. */
+ connection->thread_data.can_create_request_handler_stream = false;
+
+ /* Stream is waiting for response. */
+ aws_linked_list_push_back(&connection->thread_data.stream_list, &stream->node);
+
+ /* Connection owns stream, and must outlive stream */
+ aws_http_connection_acquire(&connection->base);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Created request handler stream on server connection=%p",
+ (void *)&stream->base,
+ (void *)&connection->base);
+
+ return &stream->base;
+}
+
+/* Invokes the on_incoming_request callback and returns new stream. */
+static struct aws_h1_stream *s_server_invoke_on_incoming_request(struct aws_h1_connection *connection) {
+ AWS_PRECONDITION(connection->base.server_data);
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(!connection->thread_data.can_create_request_handler_stream);
+ AWS_PRECONDITION(!connection->thread_data.incoming_stream);
+
+ /**
+ * The user MUST create the new request-handler stream during the on-incoming-request callback.
+ */
+ connection->thread_data.can_create_request_handler_stream = true;
+
+ struct aws_http_stream *new_stream =
+ connection->base.server_data->on_incoming_request(&connection->base, connection->base.user_data);
+
+ connection->thread_data.can_create_request_handler_stream = false;
+
+ return new_stream ? AWS_CONTAINER_OF(new_stream, struct aws_h1_stream, base) : NULL;
+}
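+
+/* The on_incoming_request callback is expected to create the handler stream while
+ * can_create_request_handler_stream is true, roughly as in this hedged sketch (the
+ * aws_http_stream_new_server_request_handler() call matches the public API named in the
+ * log message above; the self_size field is an assumption):
+ *
+ *     static struct aws_http_stream *s_on_incoming_request(struct aws_http_connection *conn, void *user_data) {
+ *         struct aws_http_request_handler_options opts;
+ *         AWS_ZERO_STRUCT(opts);
+ *         opts.self_size = sizeof(opts);
+ *         opts.server_connection = conn;
+ *         // ... set request/header/body callbacks ...
+ *         return aws_http_stream_new_server_request_handler(&opts);
+ *     }
+ */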
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ (void)slot;
+ struct aws_h1_connection *connection = handler->impl;
+ const size_t message_size = message->message_data.len;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION, "id=%p: Incoming message of size %zu.", (void *)&connection->base, message_size);
+
+ /* Shrink connection window by amount of data received. See comments at variable's
+ * declaration site on why we use this instead of the official `aws_channel_slot.window_size`. */
+ if (message_size > connection->thread_data.connection_window) {
+ /* This error shouldn't be possible, but this is all complicated so check at runtime to be safe. */
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Internal error. Message exceeds connection's window.",
+ (void *)&connection->base);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ connection->thread_data.connection_window -= message_size;
+
+ /* Push message into queue of buffered messages */
+ aws_linked_list_push_back(&connection->thread_data.read_buffer.messages, &message->queueing_handle);
+ connection->thread_data.read_buffer.pending_bytes += message_size;
+
+ /* Try to process messages in queue */
+ aws_h1_connection_try_process_read_messages(connection);
+ return AWS_OP_SUCCESS;
+}
+
+void aws_h1_connection_try_process_read_messages(struct aws_h1_connection *connection) {
+
+ /* Protect against this function being called recursively. */
+ if (connection->thread_data.is_processing_read_messages) {
+ return;
+ }
+ connection->thread_data.is_processing_read_messages = true;
+
+ /* Process queued messages */
+ while (!aws_linked_list_empty(&connection->thread_data.read_buffer.messages)) {
+ if (connection->thread_data.is_reading_stopped) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot process message because connection is shutting down.",
+ (void *)&connection->base);
+
+ aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ goto shutdown;
+ }
+
+ bool stop_processing = false;
+
+ /* When connection has switched protocols, messages are processed very differently.
+ * We need to do this check in the middle of the normal processing loop,
+ * in case the switch happens in the middle of processing a message. */
+ if (connection->thread_data.has_switched_protocols) {
+ if (s_try_process_next_midchannel_read_message(connection, &stop_processing)) {
+ goto shutdown;
+ }
+ } else {
+ if (s_try_process_next_stream_read_message(connection, &stop_processing)) {
+ goto shutdown;
+ }
+ }
+
+ /* Break out of loop if we can't process any more data */
+ if (stop_processing) {
+ break;
+ }
+ }
+
+ /* Increment connection window, if necessary */
+ if (s_update_connection_window(connection)) {
+ goto shutdown;
+ }
+
+ connection->thread_data.is_processing_read_messages = false;
+ return;
+
+shutdown:
+ s_shutdown_due_to_error(connection, aws_last_error());
+}
+
+/* Try to process the next queued aws_io_message as normal HTTP data for an aws_http_stream.
+ * This MUST NOT be called if the connection has switched protocols and become a midchannel handler. */
+static int s_try_process_next_stream_read_message(struct aws_h1_connection *connection, bool *out_stop_processing) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_ASSERT(!connection->thread_data.has_switched_protocols);
+ AWS_ASSERT(!connection->thread_data.is_reading_stopped);
+ AWS_ASSERT(!aws_linked_list_empty(&connection->thread_data.read_buffer.messages));
+
+ *out_stop_processing = false;
+
+ /* Ensure that an incoming stream exists to receive the data */
+ if (!connection->thread_data.incoming_stream) {
+ if (aws_http_connection_is_client(&connection->base)) {
+ /* Client side */
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot process message because no requests are currently awaiting response, closing "
+ "connection.",
+ (void *)&connection->base);
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+
+ } else {
+ /* Server side.
+ * Invoke on-incoming-request callback. The user MUST create a new stream from this callback.
+ * The new stream becomes the current incoming stream */
+ s_set_incoming_stream_ptr(connection, s_server_invoke_on_incoming_request(connection));
+ if (!connection->thread_data.incoming_stream) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Incoming request callback failed to provide a new stream, last error %d (%s). "
+ "Closing connection.",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ }
+ }
+
+ struct aws_h1_stream *incoming_stream = connection->thread_data.incoming_stream;
+
+ /* Stop processing if stream's window reaches 0. */
+ const uint64_t stream_window = incoming_stream->thread_data.stream_window;
+ if (stream_window == 0) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: HTTP-stream's window is 0, cannot process message now.",
+ (void *)&connection->base);
+ *out_stop_processing = true;
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_linked_list_node *queued_msg_node = aws_linked_list_front(&connection->thread_data.read_buffer.messages);
+ struct aws_io_message *queued_msg = AWS_CONTAINER_OF(queued_msg_node, struct aws_io_message, queueing_handle);
+
+ /* Note that copy_mark is used to mark the progress of partially decoded messages */
+ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&queued_msg->message_data);
+ aws_byte_cursor_advance(&message_cursor, queued_msg->copy_mark);
+
+ /* Don't process more data than the stream's window can accept.
+ *
+ * TODO: Let the decoder know about stream-window size so it can stop itself,
+ * instead of limiting the amount of data we feed into the decoder at a time.
+ * This would be more optimal, AND avoid an edge-case where the stream-window goes
+ * to 0 as the body ends, and the connection can't proceed to the trailing headers.
+ */
+ message_cursor.len = (size_t)aws_min_u64(message_cursor.len, stream_window);
+
+ const size_t prev_cursor_len = message_cursor.len;
+
+ /* Set some decoder state, based on current stream */
+ aws_h1_decoder_set_logging_id(connection->thread_data.incoming_stream_decoder, incoming_stream);
+
+ bool body_headers_ignored = incoming_stream->base.request_method == AWS_HTTP_METHOD_HEAD;
+ aws_h1_decoder_set_body_headers_ignored(connection->thread_data.incoming_stream_decoder, body_headers_ignored);
+
+ /* As decoder runs, it invokes the internal s_decoder_X callbacks, which in turn invoke user callbacks.
+ * The decoder will stop once it hits the end of the request/response OR the end of the message data. */
+ if (aws_h1_decode(connection->thread_data.incoming_stream_decoder, &message_cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Message processing failed, error %d (%s). Closing connection.",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+
+ size_t bytes_processed = prev_cursor_len - message_cursor.len;
+ queued_msg->copy_mark += bytes_processed;
+
+ AWS_ASSERT(connection->thread_data.read_buffer.pending_bytes >= bytes_processed);
+ connection->thread_data.read_buffer.pending_bytes -= bytes_processed;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Decoded %zu bytes of message, %zu bytes remain.",
+ (void *)&connection->base,
+ bytes_processed,
+ queued_msg->message_data.len - queued_msg->copy_mark);
+
+ /* If the last of queued_msg has been processed, it can be deleted now.
+ * Otherwise, it remains in the queue for further processing later. */
+ if (queued_msg->copy_mark == queued_msg->message_data.len) {
+ aws_linked_list_remove(&queued_msg->queueing_handle);
+ aws_mem_release(queued_msg->allocator, queued_msg);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ struct aws_h1_connection *connection = handler->impl;
+
+ if (connection->thread_data.is_writing_stopped) {
+ aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ goto error;
+ }
+
+ if (!connection->thread_data.has_switched_protocols) {
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ /* Pass the message right along. */
+ int err = aws_channel_slot_send_message(slot, message, AWS_CHANNEL_DIR_WRITE);
+ if (err) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Destroying write message without passing it along, error %d (%s)",
+ (void *)&connection->base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ if (message->on_completion) {
+ message->on_completion(connection->base.channel_slot->channel, message, aws_last_error(), message->user_data);
+ }
+ aws_mem_release(message->allocator, message);
+ s_shutdown_due_to_error(connection, aws_last_error());
+ return AWS_OP_SUCCESS;
+}
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size) {
+
+ (void)slot;
+ struct aws_h1_connection *connection = handler->impl;
+
+ if (!connection->thread_data.has_switched_protocols) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: HTTP connection cannot have a downstream handler without first switching protocols",
+ (void *)&connection->base);
+
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Handler in read direction incremented read window by %zu. Sending queued messages, if any.",
+ (void *)&connection->base,
+ size);
+
+ /* Send along any queued messages, and increment connection's window if necessary */
+ aws_h1_connection_try_process_read_messages(connection);
+ return AWS_OP_SUCCESS;
+
+error:
+ s_shutdown_due_to_error(connection, aws_last_error());
+ return AWS_OP_SUCCESS;
+}
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately) {
+
+ (void)free_scarce_resources_immediately;
+ struct aws_h1_connection *connection = handler->impl;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Channel shutting down in %s direction with error code %d (%s).",
+ (void *)&connection->base,
+ (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write",
+ error_code,
+ aws_error_name(error_code));
+
+ if (dir == AWS_CHANNEL_DIR_READ) {
+ /* This call ensures that no further streams will be created or worked on. */
+ s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, error_code);
+ } else /* dir == AWS_CHANNEL_DIR_WRITE */ {
+
+ s_stop(connection, false /*stop_reading*/, true /*stop_writing*/, false /*schedule_shutdown*/, error_code);
+
+ /* Mark all pending streams as complete. */
+ int stream_error_code = error_code == AWS_ERROR_SUCCESS ? AWS_ERROR_HTTP_CONNECTION_CLOSED : error_code;
+
+ while (!aws_linked_list_empty(&connection->thread_data.stream_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_front(&connection->thread_data.stream_list);
+ s_stream_complete(AWS_CONTAINER_OF(node, struct aws_h1_stream, node), stream_error_code);
+ }
+
+ /* It's OK to access synced_data.new_client_stream_list without holding the lock because
+ * no more streams can be added after s_stop() has been invoked. */
+ while (!aws_linked_list_empty(&connection->synced_data.new_client_stream_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_front(&connection->synced_data.new_client_stream_list);
+ s_stream_complete(AWS_CONTAINER_OF(node, struct aws_h1_stream, node), stream_error_code);
+ }
+ }
+
+ aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately);
+ return AWS_OP_SUCCESS;
+}
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
+ struct aws_h1_connection *connection = handler->impl;
+ return connection->thread_data.connection_window;
+}
+
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler) {
+ (void)handler;
+ return 0;
+}
+
+static void s_reset_statistics(struct aws_channel_handler *handler) {
+ struct aws_h1_connection *connection = handler->impl;
+
+ aws_crt_statistics_http1_channel_reset(&connection->thread_data.stats);
+}
+
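+/* Fold the time spent so far on the currently-active outgoing/incoming streams into the statistics,
+ * and restart their timestamps, so a stats-gathering pass also accounts for streams still in flight. */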
+static void s_pull_up_stats_timestamps(struct aws_h1_connection *connection) {
+ uint64_t now_ns = 0;
+ if (aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns)) {
+ return;
+ }
+
+ if (connection->thread_data.outgoing_stream) {
+ s_add_time_measurement_to_stats(
+ connection->thread_data.outgoing_stream_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_outgoing_stream_ms);
+
+ connection->thread_data.outgoing_stream_timestamp_ns = now_ns;
+
+ connection->thread_data.stats.current_outgoing_stream_id =
+ aws_http_stream_get_id(&connection->thread_data.outgoing_stream->base);
+ }
+
+ if (connection->thread_data.incoming_stream) {
+ s_add_time_measurement_to_stats(
+ connection->thread_data.incoming_stream_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_incoming_stream_ms);
+
+ connection->thread_data.incoming_stream_timestamp_ns = now_ns;
+
+ connection->thread_data.stats.current_incoming_stream_id =
+ aws_http_stream_get_id(&connection->thread_data.incoming_stream->base);
+ }
+}
+
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) {
+ struct aws_h1_connection *connection = handler->impl;
+
+ /* TODO: Need to update the way we calculate statistics to account for user-controlled pauses.
+ * If user is adding chunks 1 by 1, there can naturally be a gap in the upload.
+ * If the user lets the stream-window go to zero, there can naturally be a gap in the download. */
+ s_pull_up_stats_timestamps(connection);
+
+ void *stats_base = &connection->thread_data.stats;
+ aws_array_list_push_back(stats, &stats_base);
+}
+
+struct aws_crt_statistics_http1_channel *aws_h1_connection_get_statistics(struct aws_http_connection *connection) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->channel_slot->channel));
+
+ struct aws_h1_connection *h1_conn = (void *)connection;
+
+ return &h1_conn->thread_data.stats;
+}
+
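+/* Report a snapshot of the connection's flow-control state (connection window, buffered bytes,
+ * and the current incoming stream's window). recent_window_increments accumulates between calls
+ * and is reset each time this is queried. */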
+struct aws_h1_window_stats aws_h1_connection_window_stats(struct aws_http_connection *connection_base) {
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+ struct aws_h1_window_stats stats = {
+ .connection_window = connection->thread_data.connection_window,
+ .buffer_capacity = connection->thread_data.read_buffer.capacity,
+ .buffer_pending_bytes = connection->thread_data.read_buffer.pending_bytes,
+ .recent_window_increments = connection->thread_data.recent_window_increments,
+ .has_incoming_stream = connection->thread_data.incoming_stream != NULL,
+ .stream_window = connection->thread_data.incoming_stream
+ ? connection->thread_data.incoming_stream->thread_data.stream_window
+ : 0,
+ };
+
+ /* Resets each time it's queried */
+ connection->thread_data.recent_window_increments = 0;
+
+ return stats;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_decoder.c b/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
new file mode 100644
index 0000000000..68e5aa224a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_decoder.c
@@ -0,0 +1,761 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/string.h>
+#include <aws/http/private/h1_decoder.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_chunked, "chunked");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_compress, "compress");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_x_compress, "x-compress");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_deflate, "deflate");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_gzip, "gzip");
+AWS_STATIC_STRING_FROM_LITERAL(s_transfer_coding_x_gzip, "x-gzip");
+
+/* Decoder runs a state machine.
+ * Each state consumes data until it sets the next state.
+ * A common state is the "line state", which handles consuming one line ending in CRLF
+ * and feeding the line to a linestate_fn, which should process data and set the next state.
+ */
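+/* Typical flow: the request/response start-line state runs first, then the header line state repeats
+ * until an empty line, then either the unchunked-body state (Content-Length) or the chunk-size/chunk/
+ * chunk-terminator states (Transfer-Encoding: chunked, possibly followed by trailer header lines),
+ * and finally the on_done callback fires. */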
+typedef int(state_fn)(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input);
+typedef int(linestate_fn)(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+
+struct aws_h1_decoder {
+ /* Implementation data. */
+ struct aws_allocator *alloc;
+ struct aws_byte_buf scratch_space;
+ state_fn *run_state;
+ linestate_fn *process_line;
+ int transfer_encoding;
+ uint64_t content_processed;
+ uint64_t content_length;
+ uint64_t chunk_processed;
+ uint64_t chunk_size;
+ bool doing_trailers;
+ bool is_done;
+ bool body_headers_ignored;
+ bool body_headers_forbidden;
+ enum aws_http_header_block header_block;
+ const void *logging_id;
+
+ /* User callbacks and settings. */
+ struct aws_h1_decoder_vtable vtable;
+ bool is_decoding_requests;
+ void *user_data;
+};
+
+static int s_linestate_request(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+static int s_linestate_response(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+static int s_linestate_header(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byte_cursor input);
+
+static bool s_scan_for_crlf(struct aws_h1_decoder *decoder, struct aws_byte_cursor input, size_t *bytes_processed) {
+ AWS_ASSERT(input.len > 0);
+
+ /* In a loop, scan for "\n", then look one char back for "\r" */
+ uint8_t *ptr = input.ptr;
+ uint8_t *end = input.ptr + input.len;
+ while (ptr != end) {
+ uint8_t *newline = (uint8_t *)memchr(ptr, '\n', end - ptr);
+ if (!newline) {
+ break;
+ }
+
+ uint8_t prev_char;
+ if (newline == input.ptr) {
+ /* If "\n" is first character check scratch_space for previous character */
+ if (decoder->scratch_space.len > 0) {
+ prev_char = decoder->scratch_space.buffer[decoder->scratch_space.len - 1];
+ } else {
+ prev_char = 0;
+ }
+ } else {
+ prev_char = *(newline - 1);
+ }
+
+ if (prev_char == '\r') {
+ *bytes_processed = 1 + (newline - input.ptr);
+ return true;
+ }
+
+ ptr = newline + 1;
+ }
+
+ *bytes_processed = input.len;
+ return false;
+}
+
+/* This state consumes an entire line, then calls a linestate_fn to process the line. */
+static int s_state_getline(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) {
+ /* If preceding runs of this state failed to find CRLF, their data is stored in the scratch_space
+ * and new data needs to be combined with the old data for processing. */
+ bool has_prev_data = decoder->scratch_space.len;
+
+ size_t line_length = 0;
+ bool found_crlf = s_scan_for_crlf(decoder, *input, &line_length);
+
+ /* Found end of line! Run the line processor on it */
+ struct aws_byte_cursor line = aws_byte_cursor_advance(input, line_length);
+
+ bool use_scratch = !found_crlf | has_prev_data;
+ if (AWS_UNLIKELY(use_scratch)) {
+ if (aws_byte_buf_append_dynamic(&decoder->scratch_space, &line)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Internal buffer write failed with error code %d (%s)",
+ decoder->logging_id,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ /* Line is actually the entire scratch buffer now */
+ line = aws_byte_cursor_from_buf(&decoder->scratch_space);
+ }
+
+ if (AWS_LIKELY(found_crlf)) {
+ /* Backup so "\r\n" is not included. */
+ /* RFC-7230 section 3 Message Format */
+ AWS_ASSERT(line.len >= 2);
+ line.len -= 2;
+
+ return decoder->process_line(decoder, line);
+ }
+
+ /* Didn't find crlf, we'll continue scanning when more data comes in */
+ return AWS_OP_SUCCESS;
+}
+
+static int s_cursor_split_impl(
+ struct aws_byte_cursor input,
+ char split_on,
+ struct aws_byte_cursor *cursor_array,
+ size_t num_cursors,
+ bool error_if_more_splits_possible) {
+
+ struct aws_byte_cursor split;
+ AWS_ZERO_STRUCT(split);
+ for (size_t i = 0; i < num_cursors; ++i) {
+ if (!aws_byte_cursor_next_split(&input, split_on, &split)) {
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+ cursor_array[i] = split;
+ }
+
+ if (error_if_more_splits_possible) {
+ if (aws_byte_cursor_next_split(&input, split_on, &split)) {
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+ } else {
+ /* Otherwise, the last cursor will contain the remainder of the string */
+ struct aws_byte_cursor *last_cursor = &cursor_array[num_cursors - 1];
+ last_cursor->len = (input.ptr + input.len) - last_cursor->ptr;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Final cursor contains remainder of input. */
+static int s_cursor_split_first_n_times(
+ struct aws_byte_cursor input,
+ char split_on,
+ struct aws_byte_cursor *cursor_array,
+ size_t num_cursors) {
+
+ return s_cursor_split_impl(input, split_on, cursor_array, num_cursors, false);
+}
+
+/* Error if input could have been split more times */
+static int s_cursor_split_exactly_n_times(
+ struct aws_byte_cursor input,
+ char split_on,
+ struct aws_byte_cursor *cursor_array,
+ size_t num_cursors) {
+
+ return s_cursor_split_impl(input, split_on, cursor_array, num_cursors, true);
+}
+
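+/* Switch the decoder to a new state, clearing the scratch space and any pending line processor. */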
+static void s_set_state(struct aws_h1_decoder *decoder, state_fn *state) {
+ decoder->scratch_space.len = 0;
+ decoder->run_state = state;
+ decoder->process_line = NULL;
+}
+
+/* Set next state to capture a full line, then call the specified linestate_fn on it */
+static void s_set_line_state(struct aws_h1_decoder *decoder, linestate_fn *line_processor) {
+ s_set_state(decoder, s_state_getline);
+ decoder->process_line = line_processor;
+}
+
+static int s_mark_done(struct aws_h1_decoder *decoder) {
+ decoder->is_done = true;
+
+ return decoder->vtable.on_done(decoder->user_data);
+}
+
+/* Reset state, in preparation for processing a new message */
+static void s_reset_state(struct aws_h1_decoder *decoder) {
+ if (decoder->is_decoding_requests) {
+ s_set_line_state(decoder, s_linestate_request);
+ } else {
+ s_set_line_state(decoder, s_linestate_response);
+ }
+
+ decoder->transfer_encoding = 0;
+ decoder->content_processed = 0;
+ decoder->content_length = 0;
+ decoder->chunk_processed = 0;
+ decoder->chunk_size = 0;
+ decoder->doing_trailers = false;
+ decoder->is_done = false;
+ decoder->body_headers_ignored = false;
+ decoder->body_headers_forbidden = false;
+ /* set to normal by default */
+ decoder->header_block = AWS_HTTP_HEADER_BLOCK_MAIN;
+}
+
+static int s_state_unchunked_body(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) {
+
+ size_t processed_bytes = 0;
+ AWS_FATAL_ASSERT(decoder->content_processed < decoder->content_length); /* shouldn't be possible */
+
+ if (input->len > (decoder->content_length - decoder->content_processed)) {
+ processed_bytes = (size_t)(decoder->content_length - decoder->content_processed);
+ } else {
+ processed_bytes = input->len;
+ }
+
+ decoder->content_processed += processed_bytes;
+
+ bool finished = decoder->content_processed == decoder->content_length;
+ struct aws_byte_cursor body = aws_byte_cursor_advance(input, processed_bytes);
+ int err = decoder->vtable.on_body(&body, finished, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ if (AWS_LIKELY(finished)) {
+ err = s_mark_done(decoder);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_linestate_chunk_terminator(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+
+ /* Expecting CRLF at end of each chunk */
+ /* RFC-7230 section 4.1 Chunked Transfer Encoding */
+ if (AWS_UNLIKELY(input.len != 0)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Incoming chunk is invalid, does not end with CRLF.", decoder->logging_id);
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ s_set_line_state(decoder, s_linestate_chunk_size);
+
+ return AWS_OP_SUCCESS;
+}
+
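+/* Consume chunk-data, passing it to the on_body callback. Once the full chunk has been consumed,
+ * the next line must be the chunk's trailing CRLF, handled by s_linestate_chunk_terminator. */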
+static int s_state_chunk(struct aws_h1_decoder *decoder, struct aws_byte_cursor *input) {
+ size_t processed_bytes = 0;
+ AWS_ASSERT(decoder->chunk_processed < decoder->chunk_size);
+
+ if (input->len > (decoder->chunk_size - decoder->chunk_processed)) {
+ processed_bytes = (size_t)(decoder->chunk_size - decoder->chunk_processed);
+ } else {
+ processed_bytes = input->len;
+ }
+
+ decoder->chunk_processed += processed_bytes;
+
+ bool finished = decoder->chunk_processed == decoder->chunk_size;
+ struct aws_byte_cursor body = aws_byte_cursor_advance(input, processed_bytes);
+ int err = decoder->vtable.on_body(&body, false, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ if (AWS_LIKELY(finished)) {
+ s_set_line_state(decoder, s_linestate_chunk_terminator);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_linestate_chunk_size(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+ struct aws_byte_cursor size;
+ AWS_ZERO_STRUCT(size);
+ if (!aws_byte_cursor_next_split(&input, ';', &size)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Incoming chunk is invalid, first line is malformed.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad chunk line is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(input));
+
+ return AWS_OP_ERR;
+ }
+
+ int err = aws_byte_cursor_utf8_parse_u64_hex(size, &decoder->chunk_size);
+ if (err) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Failed to parse size of incoming chunk.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad chunk size is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(size));
+
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+ decoder->chunk_processed = 0;
+
+ /* Empty chunk signifies all chunks have been read. */
+ if (AWS_UNLIKELY(decoder->chunk_size == 0)) {
+ struct aws_byte_cursor cursor;
+ cursor.ptr = NULL;
+ cursor.len = 0;
+ err = decoder->vtable.on_body(&cursor, true, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ /* Expect optional trailer headers followed by an empty line to end the message. */
+ decoder->doing_trailers = true;
+ s_set_line_state(decoder, s_linestate_header);
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Skip all chunk extensions, as they are optional. */
+ /* RFC-7230 section 4.1.1 Chunk Extensions */
+
+ s_set_state(decoder, s_state_chunk);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_linestate_header(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+ int err;
+
+ /* The \r\n was just processed by `s_state_getline`. */
+ /* Empty line signifies end of headers, and beginning of body or end of trailers. */
+ /* RFC-7230 section 3 Message Format */
+ if (input.len == 0) {
+ if (AWS_LIKELY(!decoder->doing_trailers)) {
+ if (decoder->body_headers_ignored) {
+ err = s_mark_done(decoder);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+ } else if (decoder->transfer_encoding & AWS_HTTP_TRANSFER_ENCODING_CHUNKED) {
+ s_set_line_state(decoder, s_linestate_chunk_size);
+ } else if (decoder->content_length > 0) {
+ s_set_state(decoder, s_state_unchunked_body);
+ } else {
+ err = s_mark_done(decoder);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+ }
+ } else {
+ /* Empty line means end of message. */
+ err = s_mark_done(decoder);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Each header field consists of a case-insensitive field name followed by a colon (":"),
+ * optional leading whitespace, the field value, and optional trailing whitespace.
+ * RFC-7230 3.2 */
+ struct aws_byte_cursor splits[2];
+ err = s_cursor_split_first_n_times(input, ':', splits, 2); /* value may contain more colons */
+ if (err) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, missing colon.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ struct aws_byte_cursor name = splits[0];
+ if (!aws_strutil_is_http_token(name)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, bad name.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ struct aws_byte_cursor value = aws_strutil_trim_http_whitespace(splits[1]);
+ if (!aws_strutil_is_http_field_value(value)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Invalid incoming header, bad value.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM, "id=%p: Bad header is: '" PRInSTR "'", decoder->logging_id, AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ struct aws_h1_decoded_header header;
+ header.name = aws_http_str_to_header_name(name);
+ header.name_data = name;
+ header.value_data = value;
+ header.data = input;
+
+ switch (header.name) {
+ case AWS_HTTP_HEADER_CONTENT_LENGTH:
+ if (decoder->transfer_encoding) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming headers for both content-length and transfer-encoding received. This is illegal.",
+ decoder->logging_id);
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ if (aws_byte_cursor_utf8_parse_u64(header.value_data, &decoder->content_length)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming content-length header has invalid value.",
+ decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad content-length value is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(header.value_data));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ if (decoder->body_headers_forbidden && decoder->content_length != 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming headers for content-length received, but it is illegal for this message to have a "
+ "body",
+ decoder->logging_id);
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ break;
+
+ case AWS_HTTP_HEADER_TRANSFER_ENCODING: {
+ if (decoder->content_length) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming headers for both content-length and transfer-encoding received. This is illegal.",
+ decoder->logging_id);
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ if (decoder->body_headers_forbidden) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming headers for transfer-encoding received, but it is illegal for this message to "
+ "have a body",
+ decoder->logging_id);
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+ /* RFC-7230 section 3.3.1 Transfer-Encoding */
+ /* RFC-7230 section 4.2 Compression Codings */
+
+ /* Note that it's possible for multiple Transfer-Encoding headers to exist, in which case the values
+ * should be appended with those from any previously encountered Transfer-Encoding headers. */
+ struct aws_byte_cursor split;
+ AWS_ZERO_STRUCT(split);
+ while (aws_byte_cursor_next_split(&header.value_data, ',', &split)) {
+ struct aws_byte_cursor coding = aws_strutil_trim_http_whitespace(split);
+ int prev_flags = decoder->transfer_encoding;
+
+ if (aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_chunked, &coding)) {
+ decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_CHUNKED;
+
+ } else if (
+ aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_compress, &coding) ||
+ aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_x_compress, &coding)) {
+ /* A recipient SHOULD consider "x-compress" to be equivalent to "compress". RFC-7230 4.2.1 */
+ decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_DEPRECATED_COMPRESS;
+
+ } else if (aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_deflate, &coding)) {
+ decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_DEFLATE;
+
+ } else if (
+ aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_gzip, &coding) ||
+ aws_string_eq_byte_cursor_ignore_case(s_transfer_coding_x_gzip, &coding)) {
+ /* A recipient SHOULD consider "x-gzip" to be equivalent to "gzip". RFC-7230 4.2.3 */
+ decoder->transfer_encoding |= AWS_HTTP_TRANSFER_ENCODING_GZIP;
+
+ } else if (coding.len > 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming transfer-encoding header lists unrecognized coding.",
+ decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Unrecognized coding is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(coding));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ /* If any transfer coding other than chunked is applied to a request payload body, the sender MUST
+ * apply chunked as the final transfer coding to ensure that the message is properly framed.
+ * RFC-7230 3.3.1 */
+ if ((prev_flags & AWS_HTTP_TRANSFER_ENCODING_CHUNKED) && (decoder->transfer_encoding != prev_flags)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Incoming transfer-encoding header lists a coding after 'chunked', this is illegal.",
+ decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Misplaced coding is '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(coding));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+ }
+
+ /* TODO: deal with body of indeterminate length, marking it as successful when connection is closed:
+ *
+ * A response that has neither chunked transfer coding nor Content-Length is terminated by closure of
+ * the connection and, thus, is considered complete regardless of the number of message body octets
+ * received, provided that the header section was received intact.
+ * RFC-7230 3.4 */
+ } break;
+
+ default:
+ break;
+ }
+
+ err = decoder->vtable.on_header(&header, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ s_set_line_state(decoder, s_linestate_header);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_linestate_request(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+ struct aws_byte_cursor cursors[3];
+ int err = s_cursor_split_exactly_n_times(input, ' ', cursors, 3); /* extra spaces not allowed */
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Incoming request line has wrong number of spaces.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad request line is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ for (size_t i = 0; i < AWS_ARRAY_SIZE(cursors); ++i) {
+ if (cursors[i].len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request line has empty values.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad request line is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+ }
+
+ struct aws_byte_cursor method = cursors[0];
+ struct aws_byte_cursor uri = cursors[1];
+ struct aws_byte_cursor version = cursors[2];
+
+ if (!aws_strutil_is_http_token(method)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request has invalid method.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad request line is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ if (!aws_strutil_is_http_request_target(uri)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming request has invalid path.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad request line is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ struct aws_byte_cursor version_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+ if (!aws_byte_cursor_eq(&version, &version_expected)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Incoming request uses unsupported HTTP version.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Unsupported version is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(version));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ err = decoder->vtable.on_request(aws_http_str_to_method(method), &method, &uri, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ s_set_line_state(decoder, s_linestate_header);
+
+ return AWS_OP_SUCCESS;
+}
+
+static bool s_check_info_response_status_code(int code_val) {
+ return code_val >= 100 && code_val < 200;
+}
+
+static int s_linestate_response(struct aws_h1_decoder *decoder, struct aws_byte_cursor input) {
+ struct aws_byte_cursor cursors[3];
+ int err = s_cursor_split_first_n_times(input, ' ', cursors, 3); /* phrase may contain spaces */
+ if (err) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response status line is invalid.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad status line is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(input));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ struct aws_byte_cursor version = cursors[0];
+ struct aws_byte_cursor code = cursors[1];
+ struct aws_byte_cursor phrase = cursors[2];
+
+ struct aws_byte_cursor version_1_1_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+ struct aws_byte_cursor version_1_0_expected = aws_http_version_to_str(AWS_HTTP_VERSION_1_0);
+ if (!aws_byte_cursor_eq(&version, &version_1_1_expected) && !aws_byte_cursor_eq(&version, &version_1_0_expected)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Incoming response uses unsupported HTTP version.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Unsupported version is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(version));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ /* Validate phrase */
+ if (!aws_strutil_is_http_reason_phrase(phrase)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response has invalid reason phrase.", decoder->logging_id);
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ /* Status-code is a 3-digit integer. RFC7230 section 3.1.2 */
+ uint64_t code_val_u64;
+ err = aws_byte_cursor_utf8_parse_u64(code, &code_val_u64);
+ if (err || code.len != 3 || code_val_u64 > 999) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Incoming response has invalid status code.", decoder->logging_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Bad status code is: '" PRInSTR "'",
+ decoder->logging_id,
+ AWS_BYTE_CURSOR_PRI(code));
+ return aws_raise_error(AWS_ERROR_HTTP_PROTOCOL_ERROR);
+ }
+
+ int code_val = (int)code_val_u64;
+
+ /* RFC-7230 section 3.3 Message Body */
+ decoder->body_headers_ignored |= code_val == AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED;
+ decoder->body_headers_forbidden = code_val == AWS_HTTP_STATUS_CODE_204_NO_CONTENT || code_val / 100 == 1;
+
+ if (s_check_info_response_status_code(code_val)) {
+ decoder->header_block = AWS_HTTP_HEADER_BLOCK_INFORMATIONAL;
+ }
+
+ err = decoder->vtable.on_response(code_val, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ s_set_line_state(decoder, s_linestate_header);
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_h1_decoder *aws_h1_decoder_new(struct aws_h1_decoder_params *params) {
+ AWS_ASSERT(params);
+
+ struct aws_h1_decoder *decoder = aws_mem_acquire(params->alloc, sizeof(struct aws_h1_decoder));
+ if (!decoder) {
+ return NULL;
+ }
+ AWS_ZERO_STRUCT(*decoder);
+
+ decoder->alloc = params->alloc;
+ decoder->user_data = params->user_data;
+ decoder->vtable = params->vtable;
+ decoder->is_decoding_requests = params->is_decoding_requests;
+
+ aws_byte_buf_init(&decoder->scratch_space, params->alloc, params->scratch_space_initial_size);
+
+ s_reset_state(decoder);
+
+ return decoder;
+}
+
+void aws_h1_decoder_destroy(struct aws_h1_decoder *decoder) {
+ if (!decoder) {
+ return;
+ }
+ aws_byte_buf_clean_up(&decoder->scratch_space);
+ aws_mem_release(decoder->alloc, decoder);
+}
+
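+/* Run the decoder state machine over `data`, advancing the cursor past consumed bytes.
+ * On error the cursor is restored to its original position. When a complete message has been
+ * decoded, the decoder resets itself so the next call starts a fresh message. */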
+int aws_h1_decode(struct aws_h1_decoder *decoder, struct aws_byte_cursor *data) {
+ AWS_ASSERT(decoder);
+ AWS_ASSERT(data);
+
+ struct aws_byte_cursor backup = *data;
+
+ while (data->len && !decoder->is_done) {
+ int err = decoder->run_state(decoder, data);
+ if (err) {
+ /* Reset the data param to how we found it */
+ *data = backup;
+ return AWS_OP_ERR;
+ }
+ }
+
+ if (decoder->is_done) {
+ s_reset_state(decoder);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_h1_decoder_get_encoding_flags(const struct aws_h1_decoder *decoder) {
+ return decoder->transfer_encoding;
+}
+
+uint64_t aws_h1_decoder_get_content_length(const struct aws_h1_decoder *decoder) {
+ return decoder->content_length;
+}
+
+bool aws_h1_decoder_get_body_headers_ignored(const struct aws_h1_decoder *decoder) {
+ return decoder->body_headers_ignored;
+}
+
+enum aws_http_header_block aws_h1_decoder_get_header_block(const struct aws_h1_decoder *decoder) {
+ return decoder->header_block;
+}
+
+void aws_h1_decoder_set_logging_id(struct aws_h1_decoder *decoder, const void *id) {
+ decoder->logging_id = id;
+}
+
+void aws_h1_decoder_set_body_headers_ignored(struct aws_h1_decoder *decoder, bool body_headers_ignored) {
+ decoder->body_headers_ignored = body_headers_ignored;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_encoder.c b/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
new file mode 100644
index 0000000000..1899d2f402
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_encoder.c
@@ -0,0 +1,915 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/h1_encoder.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+#include <inttypes.h>
+
+#define ENCODER_LOGF(level, encoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_STREAM, "id=%p: " text, (void *)encoder->current_stream, __VA_ARGS__)
+#define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text)
+
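+/* A uint64_t chunk size prints as at most 16 ASCII hex characters, plus 1 for snprintf's null terminator. */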
+#define MAX_ASCII_HEX_CHUNK_STR_SIZE (sizeof(uint64_t) * 2 + 1)
+#define CRLF_SIZE 2
+
+/**
+ * Scan headers to detect errors and determine anything we'll need to know later (ex: total length).
+ */
+static int s_scan_outgoing_headers(
+ struct aws_h1_encoder_message *encoder_message,
+ const struct aws_http_message *message,
+ size_t *out_header_lines_len,
+ bool body_headers_ignored,
+ bool body_headers_forbidden) {
+
+ size_t total = 0;
+ bool has_body_stream = aws_http_message_get_body_stream(message);
+ bool has_content_length_header = false;
+ bool has_transfer_encoding_header = false;
+
+ const size_t num_headers = aws_http_message_get_header_count(message);
+ for (size_t i = 0; i < num_headers; ++i) {
+ struct aws_http_header header;
+ aws_http_message_get_header(message, &header, i);
+
+ /* Validate header field-name (RFC-7230 3.2): field-name = token */
+ if (!aws_strutil_is_http_token(header.name)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Header name is invalid");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME);
+ }
+
+ /* Validate header field-value.
+ * The value itself isn't supposed to have whitespace on either side,
+ * but we'll trim it off before validation so we don't start needlessly
+ * failing requests that used to work before we added validation.
+ * This should be OK because field-value can be sent with any amount
+ * of whitespace around it, which the other side will just ignore (RFC-7230 3.2):
+ * header-field = field-name ":" OWS field-value OWS */
+ struct aws_byte_cursor field_value = aws_strutil_trim_http_whitespace(header.value);
+ if (!aws_strutil_is_http_field_value(field_value)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=static: Header '" PRInSTR "' has invalid value",
+ AWS_BYTE_CURSOR_PRI(header.name));
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+
+ enum aws_http_header_name name_enum = aws_http_str_to_header_name(header.name);
+ switch (name_enum) {
+ case AWS_HTTP_HEADER_CONNECTION: {
+ if (aws_byte_cursor_eq_c_str(&field_value, "close")) {
+ encoder_message->has_connection_close_header = true;
+ }
+ } break;
+ case AWS_HTTP_HEADER_CONTENT_LENGTH: {
+ has_content_length_header = true;
+ if (aws_byte_cursor_utf8_parse_u64(field_value, &encoder_message->content_length)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Invalid Content-Length");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+ } break;
+ case AWS_HTTP_HEADER_TRANSFER_ENCODING: {
+ has_transfer_encoding_header = true;
+ if (0 == field_value.len) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding must include a valid value");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+ struct aws_byte_cursor substr;
+ AWS_ZERO_STRUCT(substr);
+ while (aws_byte_cursor_next_split(&field_value, ',', &substr)) {
+ struct aws_byte_cursor trimmed = aws_strutil_trim_http_whitespace(substr);
+ if (0 == trimmed.len) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=static: Transfer-Encoding header whitespace only "
+ "comma delimited header value");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+ if (encoder_message->has_chunked_encoding_header) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header must end with \"chunked\"");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+ if (aws_byte_cursor_eq_c_str(&trimmed, "chunked")) {
+ encoder_message->has_chunked_encoding_header = true;
+ }
+ }
+ } break;
+ default:
+ break;
+ }
+
+ /* header-line: "{name}: {value}\r\n" */
+ int err = 0;
+ err |= aws_add_size_checked(header.name.len, total, &total);
+ err |= aws_add_size_checked(header.value.len, total, &total);
+ err |= aws_add_size_checked(4, total, &total); /* ": " + "\r\n" */
+ if (err) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ if (!encoder_message->has_chunked_encoding_header && has_transfer_encoding_header) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Transfer-Encoding header must include \"chunked\"");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+
+ /* Per RFC 7230: A sender MUST NOT send a Content-Length header field in any message that contains a
+ * Transfer-Encoding header field. */
+ if (encoder_message->has_chunked_encoding_header && has_content_length_header) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=static: Both Content-Length and Transfer-Encoding are set. Only one may be used");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+
+ if (encoder_message->has_chunked_encoding_header && has_body_stream) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=static: Both Transfer-Encoding chunked header and body stream is set. "
+ "chunked data must use the chunk API to write the body stream.");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_BODY_STREAM);
+ }
+
+ if (body_headers_forbidden && (encoder_message->content_length > 0 || has_transfer_encoding_header)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=static: Transfer-Encoding or Content-Length headers may not be present in such a message");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_FIELD);
+ }
+
+ if (body_headers_ignored) {
+ /* Don't send body, no matter what the headers are */
+ encoder_message->content_length = 0;
+ encoder_message->has_chunked_encoding_header = false;
+ }
+
+ if (encoder_message->content_length > 0 && !has_body_stream) {
+ return aws_raise_error(AWS_ERROR_HTTP_MISSING_BODY_STREAM);
+ }
+
+ *out_header_lines_len = total;
+ return AWS_OP_SUCCESS;
+}
+
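+/* Validate trailing headers, rejecting fields that may not appear in a trailer (framing, routing,
+ * authentication, and similar headers per RFC-7230 section 4.1.2), and compute the byte size needed
+ * to serialize the trailer block including its final "\r\n". */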
+static int s_scan_outgoing_trailer(const struct aws_http_headers *headers, size_t *out_size) {
+ const size_t num_headers = aws_http_headers_count(headers);
+ size_t total = 0;
+ for (size_t i = 0; i < num_headers; i++) {
+ struct aws_http_header header;
+ aws_http_headers_get_index(headers, i, &header);
+ /* Validate header field-name (RFC-7230 3.2): field-name = token */
+ if (!aws_strutil_is_http_token(header.name)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Header name is invalid");
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME);
+ }
+
+ /* Validate header field-value.
+ * The value itself isn't supposed to have whitespace on either side,
+ * but we'll trim it off before validation so we don't start needlessly
+ * failing requests that used to work before we added validation.
+ * This should be OK because field-value can be sent with any amount
+ * of whitespace around it, which the other side will just ignore (RFC-7230 3.2):
+ * header-field = field-name ":" OWS field-value OWS */
+ struct aws_byte_cursor field_value = aws_strutil_trim_http_whitespace(header.value);
+ if (!aws_strutil_is_http_field_value(field_value)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=static: Header '" PRInSTR "' has invalid value",
+ AWS_BYTE_CURSOR_PRI(header.name));
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_VALUE);
+ }
+
+ enum aws_http_header_name name_enum = aws_http_str_to_header_name(header.name);
+ if (name_enum == AWS_HTTP_HEADER_TRANSFER_ENCODING || name_enum == AWS_HTTP_HEADER_CONTENT_LENGTH ||
+ name_enum == AWS_HTTP_HEADER_HOST || name_enum == AWS_HTTP_HEADER_EXPECT ||
+ name_enum == AWS_HTTP_HEADER_CACHE_CONTROL || name_enum == AWS_HTTP_HEADER_MAX_FORWARDS ||
+ name_enum == AWS_HTTP_HEADER_PRAGMA || name_enum == AWS_HTTP_HEADER_RANGE ||
+ name_enum == AWS_HTTP_HEADER_TE || name_enum == AWS_HTTP_HEADER_CONTENT_ENCODING ||
+ name_enum == AWS_HTTP_HEADER_CONTENT_TYPE || name_enum == AWS_HTTP_HEADER_CONTENT_RANGE ||
+ name_enum == AWS_HTTP_HEADER_TRAILER || name_enum == AWS_HTTP_HEADER_WWW_AUTHENTICATE ||
+ name_enum == AWS_HTTP_HEADER_AUTHORIZATION || name_enum == AWS_HTTP_HEADER_PROXY_AUTHENTICATE ||
+ name_enum == AWS_HTTP_HEADER_PROXY_AUTHORIZATION || name_enum == AWS_HTTP_HEADER_SET_COOKIE ||
+ name_enum == AWS_HTTP_HEADER_COOKIE || name_enum == AWS_HTTP_HEADER_AGE ||
+ name_enum == AWS_HTTP_HEADER_EXPIRES || name_enum == AWS_HTTP_HEADER_DATE ||
+ name_enum == AWS_HTTP_HEADER_LOCATION || name_enum == AWS_HTTP_HEADER_RETRY_AFTER ||
+ name_enum == AWS_HTTP_HEADER_VARY || name_enum == AWS_HTTP_HEADER_WARNING) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=static: Trailing Header '" PRInSTR "' has invalid value",
+ AWS_BYTE_CURSOR_PRI(header.name));
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_FIELD);
+ }
+
+ int err = 0;
+ err |= aws_add_size_checked(header.name.len, total, &total);
+ err |= aws_add_size_checked(header.value.len, total, &total);
+ err |= aws_add_size_checked(4, total, &total); /* ": " + "\r\n" */
+ if (err) {
+ return AWS_OP_ERR;
+ }
+ }
+ if (aws_add_size_checked(2, total, &total)) { /* "\r\n" */
+ return AWS_OP_ERR;
+ }
+ *out_size = total;
+ return AWS_OP_SUCCESS;
+}
+
+static bool s_write_crlf(struct aws_byte_buf *dst) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dst));
+ struct aws_byte_cursor crlf_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\r\n");
+ return aws_byte_buf_write_from_whole_cursor(dst, crlf_cursor);
+}
+
+static void s_write_headers(struct aws_byte_buf *dst, const struct aws_http_headers *headers) {
+
+ const size_t num_headers = aws_http_headers_count(headers);
+
+ bool wrote_all = true;
+ for (size_t i = 0; i < num_headers; ++i) {
+ struct aws_http_header header;
+ aws_http_headers_get_index(headers, i, &header);
+
+ /* header-line: "{name}: {value}\r\n" */
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, header.name);
+ wrote_all &= aws_byte_buf_write_u8(dst, ':');
+ wrote_all &= aws_byte_buf_write_u8(dst, ' ');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, header.value);
+ wrote_all &= s_write_crlf(dst);
+ }
+ AWS_ASSERT(wrote_all);
+ (void)wrote_all;
+}
+
+int aws_h1_encoder_message_init_from_request(
+ struct aws_h1_encoder_message *message,
+ struct aws_allocator *allocator,
+ const struct aws_http_message *request,
+ struct aws_linked_list *pending_chunk_list) {
+
+ AWS_PRECONDITION(aws_linked_list_is_valid(pending_chunk_list));
+
+ AWS_ZERO_STRUCT(*message);
+
+ message->body = aws_input_stream_acquire(aws_http_message_get_body_stream(request));
+ message->pending_chunk_list = pending_chunk_list;
+
+ struct aws_byte_cursor method;
+ int err = aws_http_message_get_request_method(request, &method);
+ if (err) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request method not set");
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD);
+ goto error;
+ }
+ /* RFC-7230 3.1.1: method = token */
+ if (!aws_strutil_is_http_token(method)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request method is invalid");
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD);
+ goto error;
+ }
+
+ struct aws_byte_cursor uri;
+ err = aws_http_message_get_request_path(request, &uri);
+ if (err) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request path not set");
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH);
+ goto error;
+ }
+ if (!aws_strutil_is_http_request_target(uri)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=static: Request path is invalid");
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH);
+ goto error;
+ }
+
+ struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+
+ /**
+ * Calculate total size needed for outgoing_head_buffer, then write to buffer.
+ */
+
+ size_t header_lines_len;
+ err = s_scan_outgoing_headers(
+ message, request, &header_lines_len, false /*body_headers_ignored*/, false /*body_headers_forbidden*/);
+ if (err) {
+ goto error;
+ }
+
+ /* request-line: "{method} {uri} {version}\r\n" */
+ size_t request_line_len = 4; /* 2 spaces + "\r\n" */
+ err |= aws_add_size_checked(method.len, request_line_len, &request_line_len);
+ err |= aws_add_size_checked(uri.len, request_line_len, &request_line_len);
+ err |= aws_add_size_checked(version.len, request_line_len, &request_line_len);
+
+ /* head-end: "\r\n" */
+ size_t head_end_len = 2;
+
+ size_t head_total_len = request_line_len;
+ err |= aws_add_size_checked(header_lines_len, head_total_len, &head_total_len);
+ err |= aws_add_size_checked(head_end_len, head_total_len, &head_total_len);
+ if (err) {
+ goto error;
+ }
+
+ err = aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len);
+ if (err) {
+ goto error;
+ }
+
+ bool wrote_all = true;
+
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, method);
+ wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, uri);
+ wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, version);
+ wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+
+ s_write_headers(&message->outgoing_head_buf, aws_http_message_get_const_headers(request));
+
+ wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+ (void)wrote_all;
+ AWS_ASSERT(wrote_all);
+
+ return AWS_OP_SUCCESS;
+error:
+ aws_h1_encoder_message_clean_up(message);
+ return AWS_OP_ERR;
+}
+
+int aws_h1_encoder_message_init_from_response(
+ struct aws_h1_encoder_message *message,
+ struct aws_allocator *allocator,
+ const struct aws_http_message *response,
+ bool body_headers_ignored,
+ struct aws_linked_list *pending_chunk_list) {
+
+ AWS_PRECONDITION(aws_linked_list_is_valid(pending_chunk_list));
+
+ AWS_ZERO_STRUCT(*message);
+
+ message->body = aws_input_stream_acquire(aws_http_message_get_body_stream(response));
+ message->pending_chunk_list = pending_chunk_list;
+
+ struct aws_byte_cursor version = aws_http_version_to_str(AWS_HTTP_VERSION_1_1);
+
+ int status_int;
+ int err = aws_http_message_get_response_status(response, &status_int);
+ if (err) {
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE);
+ }
+
+ /* Status code must fit in 3 digits */
+ AWS_ASSERT(status_int >= 0 && status_int <= 999); /* aws_http_message should have already checked this */
+ char status_code_str[4] = "XXX";
+ snprintf(status_code_str, sizeof(status_code_str), "%03d", status_int);
+ struct aws_byte_cursor status_code = aws_byte_cursor_from_c_str(status_code_str);
+
+ struct aws_byte_cursor status_text = aws_byte_cursor_from_c_str(aws_http_status_text(status_int));
+
+ /**
+ * Calculate total size needed for outgoing_head_buffer, then write to buffer.
+ */
+
+ size_t header_lines_len;
+ /**
+ * no body needed in the response
+ * RFC-7230 section 3.3 Message Body
+ */
+ body_headers_ignored |= status_int == AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED;
+ bool body_headers_forbidden = status_int == AWS_HTTP_STATUS_CODE_204_NO_CONTENT || status_int / 100 == 1;
+ err = s_scan_outgoing_headers(message, response, &header_lines_len, body_headers_ignored, body_headers_forbidden);
+ if (err) {
+ goto error;
+ }
+
+ /* A valid status is a three-digit code, already converted to a byte cursor above. */
+ /* response-line: "{version} {status} {status_text}\r\n" */
+ size_t response_line_len = 4; /* 2 spaces + "\r\n" */
+ err |= aws_add_size_checked(version.len, response_line_len, &response_line_len);
+ err |= aws_add_size_checked(status_code.len, response_line_len, &response_line_len);
+ err |= aws_add_size_checked(status_text.len, response_line_len, &response_line_len);
+
+ /* head-end: "\r\n" */
+ size_t head_end_len = 2;
+ size_t head_total_len = response_line_len;
+ err |= aws_add_size_checked(header_lines_len, head_total_len, &head_total_len);
+ err |= aws_add_size_checked(head_end_len, head_total_len, &head_total_len);
+ if (err) {
+ goto error;
+ }
+
+ err = aws_byte_buf_init(&message->outgoing_head_buf, allocator, head_total_len);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ bool wrote_all = true;
+
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, version);
+ wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, status_code);
+ wrote_all &= aws_byte_buf_write_u8(&message->outgoing_head_buf, ' ');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(&message->outgoing_head_buf, status_text);
+ wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+
+ s_write_headers(&message->outgoing_head_buf, aws_http_message_get_const_headers(response));
+
+ wrote_all &= s_write_crlf(&message->outgoing_head_buf);
+ (void)wrote_all;
+ AWS_ASSERT(wrote_all);
+
+ /* Success! */
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_h1_encoder_message_clean_up(message);
+ return AWS_OP_ERR;
+}
+
+void aws_h1_encoder_message_clean_up(struct aws_h1_encoder_message *message) {
+ aws_input_stream_release(message->body);
+ aws_byte_buf_clean_up(&message->outgoing_head_buf);
+ aws_h1_trailer_destroy(message->trailer);
+ AWS_ZERO_STRUCT(*message);
+}
+
+void aws_h1_encoder_init(struct aws_h1_encoder *encoder, struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*encoder);
+ encoder->allocator = allocator;
+}
+
+void aws_h1_encoder_clean_up(struct aws_h1_encoder *encoder) {
+ AWS_ZERO_STRUCT(*encoder);
+}
+
+int aws_h1_encoder_start_message(
+ struct aws_h1_encoder *encoder,
+ struct aws_h1_encoder_message *message,
+ struct aws_http_stream *stream) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(message);
+
+ if (encoder->message) {
+ ENCODER_LOG(ERROR, encoder, "Attempting to start new request while previous request is in progress.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ encoder->current_stream = stream;
+ encoder->message = message;
+
+ return AWS_OP_SUCCESS;
+}
+
+static bool s_write_chunk_size(struct aws_byte_buf *dst, uint64_t chunk_size) {
+ AWS_PRECONDITION(dst);
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dst));
+ char ascii_hex_chunk_size_str[MAX_ASCII_HEX_CHUNK_STR_SIZE] = {0};
+ snprintf(ascii_hex_chunk_size_str, sizeof(ascii_hex_chunk_size_str), "%" PRIX64, chunk_size);
+ return aws_byte_buf_write_from_whole_cursor(dst, aws_byte_cursor_from_c_str(ascii_hex_chunk_size_str));
+}
+
+static bool s_write_chunk_extension(struct aws_byte_buf *dst, struct aws_http1_chunk_extension *chunk_extension) {
+ AWS_PRECONDITION(chunk_extension);
+ AWS_PRECONDITION(aws_byte_buf_is_valid(dst));
+ bool wrote_all = true;
+ wrote_all &= aws_byte_buf_write_u8(dst, ';');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, chunk_extension->key);
+ wrote_all &= aws_byte_buf_write_u8(dst, '=');
+ wrote_all &= aws_byte_buf_write_from_whole_cursor(dst, chunk_extension->value);
+ return wrote_all;
+}
+
+static size_t s_calculate_chunk_line_size(const struct aws_http1_chunk_options *options) {
+ size_t chunk_line_size = MAX_ASCII_HEX_CHUNK_STR_SIZE + CRLF_SIZE;
+ for (size_t i = 0; i < options->num_extensions; ++i) {
+ struct aws_http1_chunk_extension *chunk_extension = options->extensions + i;
+ chunk_line_size += sizeof(';');
+ chunk_line_size += chunk_extension->key.len;
+ chunk_line_size += sizeof('=');
+ chunk_line_size += chunk_extension->value.len;
+ }
+ return chunk_line_size;
+}
+
+static void s_populate_chunk_line_buffer(
+ struct aws_byte_buf *chunk_line,
+ const struct aws_http1_chunk_options *options) {
+
+ bool wrote_chunk_line = true;
+ wrote_chunk_line &= s_write_chunk_size(chunk_line, options->chunk_data_size);
+ for (size_t i = 0; i < options->num_extensions; ++i) {
+ wrote_chunk_line &= s_write_chunk_extension(chunk_line, options->extensions + i);
+ }
+ wrote_chunk_line &= s_write_crlf(chunk_line);
+ AWS_ASSERT(wrote_chunk_line);
+ (void)wrote_chunk_line;
+}
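+
+/*
+ * Illustrative note (an addition for clarity, not upstream code): the two
+ * helpers above size and render the chunk line,
+ * "chunk-size [;ext-name=ext-value]... CRLF". For example, a 26-byte chunk
+ * with one extension {key = "checksum", value = "abc"} yields the line
+ * "1A;checksum=abc\r\n" (the size is written as uppercase hex).
+ */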
+
+struct aws_h1_trailer *aws_h1_trailer_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_headers *trailing_headers) {
+ /* Allocate the trailer and pre-encode the trailing headers into its buffer */
+ size_t trailer_size = 0;
+ if (s_scan_outgoing_trailer(trailing_headers, &trailer_size)) {
+ return NULL;
+ }
+
+ struct aws_h1_trailer *trailer = aws_mem_calloc(allocator, 1, sizeof(struct aws_h1_trailer));
+ trailer->allocator = allocator;
+
+ aws_byte_buf_init(&trailer->trailer_data, allocator, trailer_size); /* cannot fail */
+ s_write_headers(&trailer->trailer_data, trailing_headers);
+ s_write_crlf(&trailer->trailer_data); /* \r\n */
+ return trailer;
+}
+
+void aws_h1_trailer_destroy(struct aws_h1_trailer *trailer) {
+ if (trailer == NULL) {
+ return;
+ }
+ aws_byte_buf_clean_up(&trailer->trailer_data);
+ aws_mem_release(trailer->allocator, trailer);
+}
+
+struct aws_h1_chunk *aws_h1_chunk_new(struct aws_allocator *allocator, const struct aws_http1_chunk_options *options) {
+ /* Allocate chunk along with storage for the chunk-line */
+ struct aws_h1_chunk *chunk;
+ size_t chunk_line_size = s_calculate_chunk_line_size(options);
+ void *chunk_line_storage;
+ if (!aws_mem_acquire_many(
+ allocator, 2, &chunk, sizeof(struct aws_h1_chunk), &chunk_line_storage, chunk_line_size)) {
+ return NULL;
+ }
+
+ chunk->allocator = allocator;
+ chunk->data = aws_input_stream_acquire(options->chunk_data);
+ chunk->data_size = options->chunk_data_size;
+ chunk->on_complete = options->on_complete;
+ chunk->user_data = options->user_data;
+ chunk->chunk_line = aws_byte_buf_from_empty_array(chunk_line_storage, chunk_line_size);
+ s_populate_chunk_line_buffer(&chunk->chunk_line, options);
+ return chunk;
+}
+
+void aws_h1_chunk_destroy(struct aws_h1_chunk *chunk) {
+ AWS_PRECONDITION(chunk);
+ aws_input_stream_release(chunk->data);
+ aws_mem_release(chunk->allocator, chunk);
+}
+
+void aws_h1_chunk_complete_and_destroy(
+ struct aws_h1_chunk *chunk,
+ struct aws_http_stream *http_stream,
+ int error_code) {
+
+ AWS_PRECONDITION(chunk);
+
+ aws_http1_stream_write_chunk_complete_fn *on_complete = chunk->on_complete;
+ void *user_data = chunk->user_data;
+
+ /* Clean up before firing callback */
+ aws_h1_chunk_destroy(chunk);
+
+ if (NULL != on_complete) {
+ on_complete(http_stream, error_code, user_data);
+ }
+}
+
+static void s_clean_up_current_chunk(struct aws_h1_encoder *encoder, int error_code) {
+ AWS_PRECONDITION(encoder->current_chunk);
+ AWS_PRECONDITION(&encoder->current_chunk->node == aws_linked_list_front(encoder->message->pending_chunk_list));
+
+ aws_linked_list_remove(&encoder->current_chunk->node);
+ aws_h1_chunk_complete_and_destroy(encoder->current_chunk, encoder->current_stream, error_code);
+ encoder->current_chunk = NULL;
+}
+
+/* Write as much as possible from src to dst, using encoder->progress_bytes to track progress.
+ * Returns true if the entire src buffer has been copied */
+static bool s_encode_buf(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst, const struct aws_byte_buf *src) {
+
+ /* advance src_cursor to current position in src_buf */
+ struct aws_byte_cursor src_cursor = aws_byte_cursor_from_buf(src);
+ aws_byte_cursor_advance(&src_cursor, (size_t)encoder->progress_bytes);
+
+ /* write as much as possible to dst, src_cursor is advanced as write occurs */
+ struct aws_byte_cursor written = aws_byte_buf_write_to_capacity(dst, &src_cursor);
+ encoder->progress_bytes += written.len;
+
+ return src_cursor.len == 0;
+}
+
+/* Write as much body stream as possible into dst buffer.
+ * Increments encoder->progress_bytes to track progress */
+static int s_encode_stream(
+ struct aws_h1_encoder *encoder,
+ struct aws_byte_buf *dst,
+ struct aws_input_stream *stream,
+ uint64_t total_length,
+ bool *out_done) {
+
+ *out_done = false;
+
+ if (dst->capacity == dst->len) {
+ /* Return success because we want to try again later */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Read from stream */
+ ENCODER_LOG(TRACE, encoder, "Reading from body stream.");
+ const size_t prev_len = dst->len;
+ int err = aws_input_stream_read(stream, dst);
+ const size_t amount_read = dst->len - prev_len;
+
+ if (err) {
+ ENCODER_LOGF(
+ ERROR,
+ encoder,
+ "Failed to read body stream, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+
+ /* Increment progress_bytes, and make sure we haven't written too much */
+ int add_err = aws_add_u64_checked(encoder->progress_bytes, amount_read, &encoder->progress_bytes);
+ if (add_err || encoder->progress_bytes > total_length) {
+ ENCODER_LOGF(ERROR, encoder, "Body stream has exceeded expected length: %" PRIu64, total_length);
+ return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
+ }
+
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Sending %zu bytes of body, progress: %" PRIu64 "/%" PRIu64,
+ amount_read,
+ encoder->progress_bytes,
+ total_length);
+
+ /* Return if we're done sending stream */
+ if (encoder->progress_bytes == total_length) {
+ *out_done = true;
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Return if stream failed to write anything. Maybe the data isn't ready yet. */
+ if (amount_read == 0) {
+ /* Ensure we're not at end-of-stream too early */
+ struct aws_stream_status status;
+ err = aws_input_stream_get_status(stream, &status);
+ if (err) {
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Failed to query body stream status, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+ if (status.is_end_of_stream) {
+ ENCODER_LOGF(
+ ERROR,
+ encoder,
+ "Reached end of body stream but sent less than declared length %" PRIu64 "/%" PRIu64,
+ encoder->progress_bytes,
+ total_length);
+ return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
+ }
+ }
+
+ /* Not done streaming data out yet */
+ return AWS_OP_SUCCESS;
+}
+
+/* A state function should:
+ * - Raise an error only if an unrecoverable error occurs.
+ * - `return s_switch_state(...)` to switch states.
+ * - `return AWS_OP_SUCCESS` if it can't progress any further (waiting for more
+ * space to write into, waiting for more chunks, etc). */
+typedef int encoder_state_fn(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst);
+
+/* Switch state.
+ * The only reason this returns a value is so it can be called with `return` to conclude a state function */
+static int s_switch_state(struct aws_h1_encoder *encoder, enum aws_h1_encoder_state state) {
+ encoder->state = state;
+ encoder->progress_bytes = 0;
+ return AWS_OP_SUCCESS;
+}
+
+/* Initial state. Waits until a new message is set */
+static int s_state_fn_init(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ (void)dst;
+
+ if (!encoder->message) {
+ /* Remain in this state. */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Start encoding message */
+ ENCODER_LOG(TRACE, encoder, "Starting to send data.");
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_HEAD);
+}
+
+/* Write out first line of request/response, plus all the headers.
+ * These have been pre-encoded in aws_h1_encoder_message->outgoing_head_buf. */
+static int s_state_fn_head(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done = s_encode_buf(encoder, dst, &encoder->message->outgoing_head_buf);
+ if (!done) {
+ /* Remain in this state */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* This buffer isn't needed anymore, so free it now rather than waiting for message clean-up */
+ aws_byte_buf_clean_up(&encoder->message->outgoing_head_buf);
+
+ /* Pick next state */
+ if (encoder->message->body && encoder->message->content_length) {
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_UNCHUNKED_BODY);
+
+ } else if (encoder->message->has_chunked_encoding_header) {
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_NEXT);
+
+ } else {
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE);
+ }
+}
+
+/* Write out body (not using chunked encoding). */
+static int s_state_fn_unchunked_body(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done;
+ if (s_encode_stream(encoder, dst, encoder->message->body, encoder->message->content_length, &done)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!done) {
+ /* Remain in this state until we're done writing out body */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Message is done */
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE);
+}
+
+/* Select next chunk to work on.
+ * Encoder is essentially "paused" here if no chunks are available. */
+static int s_state_fn_chunk_next(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ (void)dst;
+
+ if (aws_linked_list_empty(encoder->message->pending_chunk_list)) {
+ /* Remain in this state until more chunks arrive */
+ ENCODER_LOG(TRACE, encoder, "No chunks ready to send, waiting for more...");
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Set next chunk and go to next state */
+ struct aws_linked_list_node *node = aws_linked_list_front(encoder->message->pending_chunk_list);
+ encoder->current_chunk = AWS_CONTAINER_OF(node, struct aws_h1_chunk, node);
+ encoder->chunk_count++;
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Begin sending chunk %zu with size %" PRIu64,
+ encoder->chunk_count,
+ encoder->current_chunk->data_size);
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_LINE);
+}
+
+/* Write out "chunk-size [chunk-ext] CRLF".
+ * This data is pre-encoded in the chunk's chunk_line buffer */
+static int s_state_fn_chunk_line(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done = s_encode_buf(encoder, dst, &encoder->current_chunk->chunk_line);
+ if (!done) {
+ /* Remain in state until done writing line */
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Pick next state */
+ if (encoder->current_chunk->data_size == 0) {
+ /* If data_size is 0, then this was the last chunk, which has no body.
+ * Mark it complete and move on to trailer. */
+ ENCODER_LOG(TRACE, encoder, "Final chunk complete");
+ s_clean_up_current_chunk(encoder, AWS_ERROR_SUCCESS);
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_TRAILER);
+ }
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_BODY);
+}
+
+/* Write out data for current chunk */
+static int s_state_fn_chunk_body(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done;
+ if (s_encode_stream(encoder, dst, encoder->current_chunk->data, encoder->current_chunk->data_size, &done)) {
+ int error_code = aws_last_error();
+
+ /* The error was caused by the chunk itself, report that specific error in its completion callback */
+ s_clean_up_current_chunk(encoder, error_code);
+
+ /* Re-raise error, in case it got cleared during user callback */
+ return aws_raise_error(error_code);
+ }
+ if (!done) {
+ /* Remain in this state until we're done writing out body */
+ return AWS_OP_SUCCESS;
+ }
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_END);
+}
+
+/* Write CRLF and mark chunk as complete */
+static int s_state_fn_chunk_end(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done = s_write_crlf(dst);
+ if (!done) {
+ /* Remain in this state until done writing out CRLF */
+ return AWS_OP_SUCCESS;
+ }
+
+ ENCODER_LOG(TRACE, encoder, "Chunk complete");
+ s_clean_up_current_chunk(encoder, AWS_ERROR_SUCCESS);
+
+ /* Pick next chunk to work on */
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_CHUNK_NEXT);
+}
+
+/* Write out trailer after last chunk */
+static int s_state_fn_chunk_trailer(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ bool done;
+ /* if a chunked trailer was set */
+ if (encoder->message->trailer) {
+ done = s_encode_buf(encoder, dst, &encoder->message->trailer->trailer_data);
+ } else {
+ done = s_write_crlf(dst);
+ }
+ if (!done) {
+ /* Remain in this state until we're done writing out trailer */
+ return AWS_OP_SUCCESS;
+ }
+
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_DONE);
+}
+
+/* Message is done, loop back to start of state machine */
+static int s_state_fn_done(struct aws_h1_encoder *encoder, struct aws_byte_buf *dst) {
+ (void)dst;
+
+ ENCODER_LOG(TRACE, encoder, "Done sending data.");
+ encoder->message = NULL;
+ return s_switch_state(encoder, AWS_H1_ENCODER_STATE_INIT);
+}
+
+struct encoder_state_def {
+ encoder_state_fn *fn;
+ const char *name;
+};
+
+static struct encoder_state_def s_encoder_states[] = {
+ [AWS_H1_ENCODER_STATE_INIT] = {.fn = s_state_fn_init, .name = "INIT"},
+ [AWS_H1_ENCODER_STATE_HEAD] = {.fn = s_state_fn_head, .name = "HEAD"},
+ [AWS_H1_ENCODER_STATE_UNCHUNKED_BODY] = {.fn = s_state_fn_unchunked_body, .name = "BODY"},
+ [AWS_H1_ENCODER_STATE_CHUNK_NEXT] = {.fn = s_state_fn_chunk_next, .name = "CHUNK_NEXT"},
+ [AWS_H1_ENCODER_STATE_CHUNK_LINE] = {.fn = s_state_fn_chunk_line, .name = "CHUNK_LINE"},
+ [AWS_H1_ENCODER_STATE_CHUNK_BODY] = {.fn = s_state_fn_chunk_body, .name = "CHUNK_BODY"},
+ [AWS_H1_ENCODER_STATE_CHUNK_END] = {.fn = s_state_fn_chunk_end, .name = "CHUNK_END"},
+ [AWS_H1_ENCODER_STATE_CHUNK_TRAILER] = {.fn = s_state_fn_chunk_trailer, .name = "CHUNK_TRAILER"},
+ [AWS_H1_ENCODER_STATE_DONE] = {.fn = s_state_fn_done, .name = "DONE"},
+};
+
+int aws_h1_encoder_process(struct aws_h1_encoder *encoder, struct aws_byte_buf *out_buf) {
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(out_buf);
+
+ if (!encoder->message) {
+ ENCODER_LOG(ERROR, encoder, "No message is currently set for encoding.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ /* Run the state machine until the state stops changing (due to out_buf running
+ * out of space, the input stream stalling, waiting for more chunks, etc.) */
+ enum aws_h1_encoder_state prev_state;
+ do {
+ prev_state = encoder->state;
+ if (s_encoder_states[encoder->state].fn(encoder, out_buf)) {
+ return AWS_OP_ERR;
+ }
+ } while (prev_state != encoder->state);
+
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_h1_encoder_is_message_in_progress(const struct aws_h1_encoder *encoder) {
+ return encoder->message;
+}
+
+bool aws_h1_encoder_is_waiting_for_chunks(const struct aws_h1_encoder *encoder) {
+ return encoder->state == AWS_H1_ENCODER_STATE_CHUNK_NEXT &&
+ aws_linked_list_empty(encoder->message->pending_chunk_list);
+}
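+
+/*
+ * Illustrative usage sketch (an addition for clarity, not upstream code): a
+ * hypothetical caller could drive the encoder roughly like this, assuming
+ * `msg` was filled in by aws_h1_encoder_message_init_from_request() or
+ * aws_h1_encoder_message_init_from_response() and `out_buf` is the buffer
+ * being flushed to the channel:
+ *
+ *     struct aws_h1_encoder encoder;
+ *     aws_h1_encoder_init(&encoder, allocator);
+ *     if (aws_h1_encoder_start_message(&encoder, &msg, stream)) {
+ *         // AWS_ERROR_INVALID_STATE: a previous message is still in progress
+ *     }
+ *     while (aws_h1_encoder_is_message_in_progress(&encoder)) {
+ *         if (aws_h1_encoder_process(&encoder, &out_buf)) {
+ *             break; // unrecoverable error, shut the connection down
+ *         }
+ *         // flush out_buf downstream and reset it; if
+ *         // aws_h1_encoder_is_waiting_for_chunks() returns true, wait for
+ *         // more chunks to be submitted before calling process again
+ *     }
+ *     aws_h1_encoder_clean_up(&encoder);
+ */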
diff --git a/contrib/restricted/aws/aws-c-http/source/h1_stream.c b/contrib/restricted/aws/aws-c-http/source/h1_stream.c
new file mode 100644
index 0000000000..a5d2f4782b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h1_stream.c
@@ -0,0 +1,535 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/h1_stream.h>
+
+#include <aws/http/private/h1_connection.h>
+#include <aws/http/private/h1_encoder.h>
+
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+#include <inttypes.h>
+
+static void s_stream_destroy(struct aws_http_stream *stream_base) {
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
+ AWS_ASSERT(
+ stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE &&
+ "Stream should be complete (or never-activated) when stream destroyed");
+ AWS_ASSERT(
+ aws_linked_list_empty(&stream->thread_data.pending_chunk_list) &&
+ aws_linked_list_empty(&stream->synced_data.pending_chunk_list) &&
+ "Chunks should be marked complete before stream destroyed");
+
+ aws_h1_encoder_message_clean_up(&stream->encoder_message);
+ aws_byte_buf_clean_up(&stream->incoming_storage_buf);
+ aws_mem_release(stream->base.alloc, stream);
+}
+
+static struct aws_h1_connection *s_get_h1_connection(const struct aws_h1_stream *stream) {
+ return AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h1_connection, base);
+}
+
+static void s_stream_lock_synced_data(struct aws_h1_stream *stream) {
+ aws_h1_connection_lock_synced_data(s_get_h1_connection(stream));
+}
+
+static void s_stream_unlock_synced_data(struct aws_h1_stream *stream) {
+ aws_h1_connection_unlock_synced_data(s_get_h1_connection(stream));
+}
+
+static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ struct aws_h1_stream *stream = arg;
+ struct aws_h1_connection *connection = s_get_h1_connection(stream);
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto done;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Running stream cross-thread work task.", (void *)&stream->base);
+
+ /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+
+ stream->synced_data.is_cross_thread_work_task_scheduled = false;
+
+ int api_state = stream->synced_data.api_state;
+
+ bool found_chunks = !aws_linked_list_empty(&stream->synced_data.pending_chunk_list);
+ aws_linked_list_move_all_back(&stream->thread_data.pending_chunk_list, &stream->synced_data.pending_chunk_list);
+
+ stream->encoder_message.trailer = stream->synced_data.pending_trailer;
+ stream->synced_data.pending_trailer = NULL;
+
+ bool has_outgoing_response = stream->synced_data.has_outgoing_response;
+
+ uint64_t pending_window_update = stream->synced_data.pending_window_update;
+ stream->synced_data.pending_window_update = 0;
+
+ s_stream_unlock_synced_data(stream);
+ /* END CRITICAL SECTION */
+
+ /* If we have any new outgoing data, prompt the connection to try and send it. */
+ bool new_outgoing_data = found_chunks;
+
+ /* If we JUST learned about having an outgoing response, that's a reason to try sending data */
+ if (has_outgoing_response && !stream->thread_data.has_outgoing_response) {
+ stream->thread_data.has_outgoing_response = true;
+ new_outgoing_data = true;
+ }
+
+ if (new_outgoing_data && (api_state == AWS_H1_STREAM_API_STATE_ACTIVE)) {
+ aws_h1_connection_try_write_outgoing_stream(connection);
+ }
+
+ /* Add to window size using saturated sum to prevent overflow.
+ * Saturating is fine because it's a u64, the stream could never receive that much data. */
+ stream->thread_data.stream_window =
+ aws_add_u64_saturating(stream->thread_data.stream_window, pending_window_update);
+ if ((pending_window_update > 0) && (api_state == AWS_H1_STREAM_API_STATE_ACTIVE)) {
+ /* Now that stream window is larger, connection might have buffered
+ * data to send, or might need to increment its own window */
+ aws_h1_connection_try_process_read_messages(connection);
+ }
+
+done:
+ /* Release reference that kept stream alive until task ran */
+ aws_http_stream_release(&stream->base);
+}
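+
+/*
+ * Descriptive note (an addition for clarity, not upstream code): the stream
+ * functions below all follow the same scheduling pattern: record the
+ * requested work in synced_data while holding the connection-wide lock, set
+ * is_cross_thread_work_task_scheduled at most once, take a reference on the
+ * stream, and schedule cross_thread_work_task on the channel thread. The task
+ * above then drains synced_data into thread_data and releases that reference.
+ */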
+
+/* Note the update in synced_data, and schedule the cross_thread_work_task if necessary */
+static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size) {
+ if (increment_size == 0) {
+ return;
+ }
+
+ if (!stream_base->owning_connection->stream_manual_window_management) {
+ return;
+ }
+
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
+ bool should_schedule_task = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+
+ /* Saturated sum. It's a u64. The stream could never receive that much data. */
+ stream->synced_data.pending_window_update =
+ aws_add_u64_saturating(stream->synced_data.pending_window_update, increment_size);
+
+ /* Don't alert the connection unless the stream is active */
+ if (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_ACTIVE) {
+ if (!stream->synced_data.is_cross_thread_work_task_scheduled) {
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+ should_schedule_task = true;
+ }
+ }
+
+ s_stream_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (should_schedule_task) {
+ /* Keep stream alive until task completes */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
+ aws_channel_schedule_task_now(
+ stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+ }
+}
+
+static int s_stream_write_chunk(struct aws_http_stream *stream_base, const struct aws_http1_chunk_options *options) {
+ AWS_PRECONDITION(stream_base);
+ AWS_PRECONDITION(options);
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
+
+ if (options->chunk_data == NULL && options->chunk_data_size > 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Chunk data cannot be NULL if data size is non-zero", (void *)stream_base);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_h1_chunk *chunk = aws_h1_chunk_new(stream_base->alloc, options);
+ if (AWS_UNLIKELY(NULL == chunk)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to initialize streamed chunk, error %d (%s).",
+ (void *)stream_base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ int error_code = 0;
+ bool should_schedule_task = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+
+ /* Can only add chunks while stream is active. */
+ if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) {
+ error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT)
+ ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
+ : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+ goto unlock;
+ }
+
+ /* Prevent the user from submitting chunks without having set the required headers.
+ * This check also prevents a server user from submitting chunks before the response has been submitted. */
+ if (!stream->synced_data.using_chunked_encoding) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Cannot write chunks without 'transfer-encoding: chunked' header.",
+ (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ if (stream->synced_data.has_final_chunk) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM, "id=%p: Cannot write additional chunk after final chunk.", (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ /* success */
+ if (chunk->data_size == 0) {
+ stream->synced_data.has_final_chunk = true;
+ }
+ aws_linked_list_push_back(&stream->synced_data.pending_chunk_list, &chunk->node);
+ should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+
+ unlock:
+ s_stream_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to add chunk, error %d (%s)",
+ (void *)stream_base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_h1_chunk_destroy(chunk);
+ return aws_raise_error(error_code);
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Adding chunk with size %" PRIu64 " to stream",
+ (void *)stream,
+ options->chunk_data_size);
+
+ if (should_schedule_task) {
+ /* Keep stream alive until task completes */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
+ aws_channel_schedule_task_now(
+ stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_stream_add_trailer(struct aws_http_stream *stream_base, const struct aws_http_headers *trailing_headers) {
+ AWS_PRECONDITION(stream_base);
+ AWS_PRECONDITION(trailing_headers);
+ struct aws_h1_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h1_stream, base);
+
+ struct aws_h1_trailer *trailer = aws_h1_trailer_new(stream_base->alloc, trailing_headers);
+ if (AWS_UNLIKELY(NULL == trailer)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to initialize streamed trailer, error %d (%s).",
+ (void *)stream_base,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ int error_code = 0;
+ bool should_schedule_task = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+ /* Can only add trailers while stream is active. */
+ if (stream->synced_data.api_state != AWS_H1_STREAM_API_STATE_ACTIVE) {
+ error_code = (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_INIT)
+ ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
+ : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+ goto unlock;
+ }
+
+ if (!stream->synced_data.using_chunked_encoding) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Cannot write trailers without 'transfer-encoding: chunked' header.",
+ (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ if (stream->synced_data.has_added_trailer) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers twice.", (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ if (stream->synced_data.has_final_chunk) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Cannot write trailers after final chunk.", (void *)stream_base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto unlock;
+ }
+
+ stream->synced_data.has_added_trailer = true;
+ stream->synced_data.pending_trailer = trailer;
+ should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+
+ unlock:
+ s_stream_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Failed to add trailer, error %d (%s)",
+ (void *)stream_base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_h1_trailer_destroy(trailer);
+ return aws_raise_error(error_code);
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Adding trailer to stream", (void *)stream);
+
+ if (should_schedule_task) {
+ /* Keep stream alive until task completes */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)stream_base);
+ aws_channel_schedule_task_now(
+ stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)stream_base);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static const struct aws_http_stream_vtable s_stream_vtable = {
+ .destroy = s_stream_destroy,
+ .update_window = s_stream_update_window,
+ .activate = aws_h1_stream_activate,
+ .http1_write_chunk = s_stream_write_chunk,
+ .http1_add_trailer = s_stream_add_trailer,
+ .http2_reset_stream = NULL,
+ .http2_get_received_error_code = NULL,
+ .http2_get_sent_error_code = NULL,
+};
+
+static struct aws_h1_stream *s_stream_new_common(
+ struct aws_http_connection *connection_base,
+ void *user_data,
+ aws_http_on_incoming_headers_fn *on_incoming_headers,
+ aws_http_on_incoming_header_block_done_fn *on_incoming_header_block_done,
+ aws_http_on_incoming_body_fn *on_incoming_body,
+ aws_http_on_stream_complete_fn *on_complete,
+ aws_http_on_stream_destroy_fn *on_destroy) {
+
+ struct aws_h1_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h1_connection, base);
+
+ struct aws_h1_stream *stream = aws_mem_calloc(connection_base->alloc, 1, sizeof(struct aws_h1_stream));
+ if (!stream) {
+ return NULL;
+ }
+
+ stream->base.vtable = &s_stream_vtable;
+ stream->base.alloc = connection_base->alloc;
+ stream->base.owning_connection = connection_base;
+ stream->base.user_data = user_data;
+ stream->base.on_incoming_headers = on_incoming_headers;
+ stream->base.on_incoming_header_block_done = on_incoming_header_block_done;
+ stream->base.on_incoming_body = on_incoming_body;
+ stream->base.on_complete = on_complete;
+ stream->base.on_destroy = on_destroy;
+
+ aws_channel_task_init(
+ &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "http1_stream_cross_thread_work");
+
+ aws_linked_list_init(&stream->thread_data.pending_chunk_list);
+ aws_linked_list_init(&stream->synced_data.pending_chunk_list);
+
+ stream->thread_data.stream_window = connection->initial_stream_window_size;
+
+ /* Stream refcount starts at 1 for user and is incremented upon activation for the connection */
+ aws_atomic_init_int(&stream->base.refcount, 1);
+
+ return stream;
+}
+
+struct aws_h1_stream *aws_h1_stream_new_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options) {
+
+ struct aws_h1_stream *stream = s_stream_new_common(
+ client_connection,
+ options->user_data,
+ options->on_response_headers,
+ options->on_response_header_block_done,
+ options->on_response_body,
+ options->on_complete,
+ options->on_destroy);
+ if (!stream) {
+ return NULL;
+ }
+
+ /* Transform request if necessary */
+ if (client_connection->proxy_request_transform) {
+ if (client_connection->proxy_request_transform(options->request, client_connection->user_data)) {
+ goto error;
+ }
+ }
+
+ stream->base.client_data = &stream->base.client_or_server_data.client;
+ stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+
+ /* Validate request and cache info that the encoder will eventually need */
+ if (aws_h1_encoder_message_init_from_request(
+ &stream->encoder_message,
+ client_connection->alloc,
+ options->request,
+ &stream->thread_data.pending_chunk_list)) {
+ goto error;
+ }
+
+ /* RFC-7230 Section 6.3: The "close" connection option is used to signal
+ * that a connection will not persist after the current request/response. */
+ if (stream->encoder_message.has_connection_close_header) {
+ stream->is_final_stream = true;
+ }
+
+ stream->synced_data.using_chunked_encoding = stream->encoder_message.has_chunked_encoding_header;
+
+ return stream;
+
+error:
+ s_stream_destroy(&stream->base);
+ return NULL;
+}
+
+struct aws_h1_stream *aws_h1_stream_new_request_handler(const struct aws_http_request_handler_options *options) {
+ struct aws_h1_stream *stream = s_stream_new_common(
+ options->server_connection,
+ options->user_data,
+ options->on_request_headers,
+ options->on_request_header_block_done,
+ options->on_request_body,
+ options->on_complete,
+ options->on_destroy);
+ if (!stream) {
+ return NULL;
+ }
+
+ /* This code is only executed in server mode and can only be invoked from the event-loop thread,
+ * so there is no need to take the lock here. */
+ stream->base.id = aws_http_connection_get_next_stream_id(options->server_connection);
+
+ /* Request-handler (server) streams don't need user to call activate() on them.
+ * Since these streams can only be created on the event-loop thread,
+ * it's not possible for callbacks to fire before the stream pointer is returned.
+ * (Clients must call stream.activate() because they might create a stream on any thread) */
+ stream->synced_data.api_state = AWS_H1_STREAM_API_STATE_ACTIVE;
+
+ stream->base.server_data = &stream->base.client_or_server_data.server;
+ stream->base.server_data->on_request_done = options->on_request_done;
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+
+ return stream;
+}
+
+int aws_h1_stream_send_response(struct aws_h1_stream *stream, struct aws_http_message *response) {
+ struct aws_h1_connection *connection = s_get_h1_connection(stream);
+ int error_code = 0;
+
+ /* Validate the response and cache info that the encoder will eventually need.
+ * The encoder_message object will be moved into the stream later while holding the lock. */
+ struct aws_h1_encoder_message encoder_message;
+ bool body_headers_ignored = stream->base.request_method == AWS_HTTP_METHOD_HEAD;
+ if (aws_h1_encoder_message_init_from_response(
+ &encoder_message,
+ stream->base.alloc,
+ response,
+ body_headers_ignored,
+ &stream->thread_data.pending_chunk_list)) {
+ error_code = aws_last_error();
+ goto error;
+ }
+
+ bool should_schedule_task = false;
+ { /* BEGIN CRITICAL SECTION */
+ s_stream_lock_synced_data(stream);
+ if (stream->synced_data.api_state == AWS_H1_STREAM_API_STATE_COMPLETE) {
+ error_code = AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+ } else if (stream->synced_data.has_outgoing_response) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Response already created on the stream", (void *)&stream->base);
+ error_code = AWS_ERROR_INVALID_STATE;
+ } else {
+ stream->synced_data.has_outgoing_response = true;
+ stream->encoder_message = encoder_message;
+ if (encoder_message.has_connection_close_header) {
+ /* This will be the last stream the connection will process; new streams will be rejected */
+ stream->is_final_stream = true;
+
+ /* Note: We're touching the connection's synced_data, which is OK
+ * because an h1_connection and all its h1_streams share a single lock. */
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ }
+ stream->synced_data.using_chunked_encoding = stream->encoder_message.has_chunked_encoding_header;
+
+ should_schedule_task = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+ }
+ s_stream_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (error_code) {
+ goto error;
+ }
+
+ /* Success! */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_STREAM, "id=%p: Created response on connection=%p: ", (void *)stream, (void *)connection);
+
+ if (should_schedule_task) {
+ /* Keep stream alive until task completes */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Scheduling stream cross-thread work task.", (void *)&stream->base);
+ aws_channel_schedule_task_now(
+ stream->base.owning_connection->channel_slot->channel, &stream->cross_thread_work_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Stream cross-thread work task was already scheduled.", (void *)&stream->base);
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: Sending response on the stream failed, error %d (%s)",
+ (void *)&stream->base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_h1_encoder_message_clean_up(&encoder_message);
+ return aws_raise_error(error_code);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_connection.c b/contrib/restricted/aws/aws-c-http/source/h2_connection.c
new file mode 100644
index 0000000000..15ea192f8a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_connection.c
@@ -0,0 +1,2850 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_connection.h>
+#include <aws/http/private/h2_stream.h>
+
+#include <aws/http/private/h2_decoder.h>
+#include <aws/http/private/h2_stream.h>
+#include <aws/http/private/strutil.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/logging.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* Apple toolchains such as Xcode and SwiftPM define the DEBUG symbol; undef it here so we can actually use the token */
+#undef DEBUG
+
+#define CONNECTION_LOGF(level, connection, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_CONNECTION, "id=%p: " text, (void *)(connection), __VA_ARGS__)
+#define CONNECTION_LOG(level, connection, text) CONNECTION_LOGF(level, connection, "%s", text)
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size);
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately);
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler);
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler);
+static void s_handler_destroy(struct aws_channel_handler *handler);
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot);
+static struct aws_http_stream *s_connection_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options);
+static void s_connection_close(struct aws_http_connection *connection_base);
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base);
+static bool s_connection_is_open(const struct aws_http_connection *connection_base);
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base);
+static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size);
+static int s_connection_change_settings(
+ struct aws_http_connection *connection_base,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+static int s_connection_send_ping(
+ struct aws_http_connection *connection_base,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data);
+static void s_connection_send_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+static int s_connection_get_sent_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+static int s_connection_get_received_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id);
+static void s_connection_get_local_settings(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+static void s_connection_get_remote_settings(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]);
+
+static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static int s_encode_outgoing_frames_queue(struct aws_h2_connection *connection, struct aws_byte_buf *output);
+static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connection, struct aws_byte_buf *output);
+static int s_record_closed_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ enum aws_h2_stream_closed_when closed_when);
+static void s_stream_complete(struct aws_h2_connection *connection, struct aws_h2_stream *stream, int error_code);
+static void s_write_outgoing_frames(struct aws_h2_connection *connection, bool first_try);
+static void s_finish_shutdown(struct aws_h2_connection *connection);
+static void s_send_goaway(
+ struct aws_h2_connection *connection,
+ uint32_t h2_error_code,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data);
+static struct aws_h2_pending_settings *s_new_pending_settings(
+ struct aws_allocator *allocator,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data);
+
+static struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata);
+static struct aws_h2err s_decoder_on_headers_i(
+ uint32_t stream_id,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type,
+ void *userdata);
+static struct aws_h2err s_decoder_on_headers_end(
+ uint32_t stream_id,
+ bool malformed,
+ enum aws_http_header_block block_type,
+ void *userdata);
+static struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised_stream_id, void *userdata);
+static struct aws_h2err s_decoder_on_data_begin(
+ uint32_t stream_id,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream,
+ void *userdata);
+static struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata);
+static struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata);
+static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t h2_error_code, void *userdata);
+static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata);
+static struct aws_h2err s_decoder_on_settings(
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ void *userdata);
+static struct aws_h2err s_decoder_on_settings_ack(void *userdata);
+static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata);
+struct aws_h2err s_decoder_on_goaway(
+ uint32_t last_stream,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data,
+ void *userdata);
+static void s_reset_statistics(struct aws_channel_handler *handler);
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats);
+
+static struct aws_http_connection_vtable s_h2_connection_vtable = {
+ .channel_handler_vtable =
+ {
+ .process_read_message = s_handler_process_read_message,
+ .process_write_message = s_handler_process_write_message,
+ .increment_read_window = s_handler_increment_read_window,
+ .shutdown = s_handler_shutdown,
+ .initial_window_size = s_handler_initial_window_size,
+ .message_overhead = s_handler_message_overhead,
+ .destroy = s_handler_destroy,
+ .reset_statistics = s_reset_statistics,
+ .gather_statistics = s_gather_statistics,
+ },
+
+ .on_channel_handler_installed = s_handler_installed,
+ .make_request = s_connection_make_request,
+ .new_server_request_handler_stream = NULL,
+ .stream_send_response = NULL,
+ .close = s_connection_close,
+ .stop_new_requests = s_connection_stop_new_request,
+ .is_open = s_connection_is_open,
+ .new_requests_allowed = s_connection_new_requests_allowed,
+ .update_window = s_connection_update_window,
+ .change_settings = s_connection_change_settings,
+ .send_ping = s_connection_send_ping,
+ .send_goaway = s_connection_send_goaway,
+ .get_sent_goaway = s_connection_get_sent_goaway,
+ .get_received_goaway = s_connection_get_received_goaway,
+ .get_local_settings = s_connection_get_local_settings,
+ .get_remote_settings = s_connection_get_remote_settings,
+};
+
+static const struct aws_h2_decoder_vtable s_h2_decoder_vtable = {
+ .on_headers_begin = s_decoder_on_headers_begin,
+ .on_headers_i = s_decoder_on_headers_i,
+ .on_headers_end = s_decoder_on_headers_end,
+ .on_push_promise_begin = s_decoder_on_push_promise,
+ .on_data_begin = s_decoder_on_data_begin,
+ .on_data_i = s_decoder_on_data_i,
+ .on_end_stream = s_decoder_on_end_stream,
+ .on_rst_stream = s_decoder_on_rst_stream,
+ .on_ping_ack = s_decoder_on_ping_ack,
+ .on_ping = s_decoder_on_ping,
+ .on_settings = s_decoder_on_settings,
+ .on_settings_ack = s_decoder_on_settings_ack,
+ .on_window_update = s_decoder_on_window_update,
+ .on_goaway = s_decoder_on_goaway,
+};
+
+static void s_lock_synced_data(struct aws_h2_connection *connection) {
+ int err = aws_mutex_lock(&connection->synced_data.lock);
+ AWS_ASSERT(!err && "lock failed");
+ (void)err;
+}
+
+static void s_unlock_synced_data(struct aws_h2_connection *connection) {
+ int err = aws_mutex_unlock(&connection->synced_data.lock);
+ AWS_ASSERT(!err && "unlock failed");
+ (void)err;
+}
+
+static void s_acquire_stream_and_connection_lock(struct aws_h2_stream *stream, struct aws_h2_connection *connection) {
+ int err = aws_mutex_lock(&stream->synced_data.lock);
+ err |= aws_mutex_lock(&connection->synced_data.lock);
+ AWS_ASSERT(!err && "lock connection and stream failed");
+ (void)err;
+}
+
+static void s_release_stream_and_connection_lock(struct aws_h2_stream *stream, struct aws_h2_connection *connection) {
+ int err = aws_mutex_unlock(&connection->synced_data.lock);
+ err |= aws_mutex_unlock(&stream->synced_data.lock);
+ AWS_ASSERT(!err && "unlock connection and stream failed");
+ (void)err;
+}
+
+static void s_add_time_measurement_to_stats(uint64_t start_ns, uint64_t end_ns, uint64_t *output_ms) {
+ if (end_ns > start_ns) {
+ *output_ms += aws_timestamp_convert(end_ns - start_ns, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_MILLIS, NULL);
+ } else {
+ *output_ms = 0;
+ }
+}
+
+/**
+ * Internal function for bringing connection to a stop.
+ * Invoked multiple times, including when:
+ * - Channel is shutting down in the read direction.
+ * - Channel is shutting down in the write direction.
+ * - An error occurs that will shutdown the channel.
+ * - User wishes to close the connection (this is the only case where the function may run off-thread).
+ */
+static void s_stop(
+ struct aws_h2_connection *connection,
+ bool stop_reading,
+ bool stop_writing,
+ bool schedule_shutdown,
+ int error_code) {
+
+ AWS_ASSERT(stop_reading || stop_writing || schedule_shutdown); /* You are required to stop at least 1 thing */
+
+ if (stop_reading) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ connection->thread_data.is_reading_stopped = true;
+ }
+
+ if (stop_writing) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ connection->thread_data.is_writing_stopped = true;
+ }
+
+ /* Even if we're not scheduling shutdown just yet (ex: sent final request but waiting to read final response)
+ * we don't consider the connection "open" anymore so user can't create more streams */
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ connection->synced_data.is_open = false;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (schedule_shutdown) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Shutting down connection with error code %d (%s).",
+ (void *)&connection->base,
+ error_code,
+ aws_error_name(error_code));
+
+ aws_channel_shutdown(connection->base.channel_slot->channel, error_code);
+ }
+}
+
+void aws_h2_connection_shutdown_due_to_write_err(struct aws_h2_connection *connection, int error_code) {
+ AWS_PRECONDITION(error_code);
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written) {
+ /* If shutdown is waiting for writes to complete, but writes are now broken,
+ * then we must finish shutdown now */
+ s_finish_shutdown(connection);
+ } else {
+ s_stop(connection, false /*stop_reading*/, true /*stop_writing*/, true /*schedule_shutdown*/, error_code);
+ }
+}
+
+/* Common new() logic for server & client */
+static struct aws_h2_connection *s_connection_new(
+ struct aws_allocator *alloc,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options,
+ bool server) {
+
+ AWS_PRECONDITION(http2_options);
+
+ struct aws_h2_connection *connection = aws_mem_calloc(alloc, 1, sizeof(struct aws_h2_connection));
+ if (!connection) {
+ return NULL;
+ }
+ connection->base.vtable = &s_h2_connection_vtable;
+ connection->base.alloc = alloc;
+ connection->base.channel_handler.vtable = &s_h2_connection_vtable.channel_handler_vtable;
+ connection->base.channel_handler.alloc = alloc;
+ connection->base.channel_handler.impl = connection;
+ connection->base.http_version = AWS_HTTP_VERSION_2;
+ /* Init the next stream id (server must use even ids, client odd [RFC 7540 5.1.1]) */
+ connection->base.next_stream_id = (server ? 2 : 1);
+ /* Stream window management */
+ connection->base.stream_manual_window_management = manual_window_management;
+
+ /* Connection window management */
+ connection->conn_manual_window_management = http2_options->conn_manual_window_management;
+ connection->on_goaway_received = http2_options->on_goaway_received;
+ connection->on_remote_settings_change = http2_options->on_remote_settings_change;
+
+ aws_channel_task_init(
+ &connection->cross_thread_work_task, s_cross_thread_work_task, connection, "HTTP/2 cross-thread work");
+
+ aws_channel_task_init(
+ &connection->outgoing_frames_task, s_outgoing_frames_task, connection, "HTTP/2 outgoing frames");
+
+ /* 1 refcount for user */
+ aws_atomic_init_int(&connection->base.refcount, 1);
+ uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX;
+ connection->synced_data.goaway_sent_last_stream_id = max_stream_id + 1;
+ connection->synced_data.goaway_received_last_stream_id = max_stream_id + 1;
+
+ aws_linked_list_init(&connection->synced_data.pending_stream_list);
+ aws_linked_list_init(&connection->synced_data.pending_frame_list);
+ aws_linked_list_init(&connection->synced_data.pending_settings_list);
+ aws_linked_list_init(&connection->synced_data.pending_ping_list);
+ aws_linked_list_init(&connection->synced_data.pending_goaway_list);
+
+ aws_linked_list_init(&connection->thread_data.outgoing_streams_list);
+ aws_linked_list_init(&connection->thread_data.pending_settings_queue);
+ aws_linked_list_init(&connection->thread_data.pending_ping_queue);
+ aws_linked_list_init(&connection->thread_data.stalled_window_streams_list);
+ aws_linked_list_init(&connection->thread_data.waiting_streams_list);
+ aws_linked_list_init(&connection->thread_data.outgoing_frames_queue);
+
+ if (aws_mutex_init(&connection->synced_data.lock)) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Mutex init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_hash_table_init(
+ &connection->thread_data.active_streams_map, alloc, 8, aws_hash_ptr, aws_ptr_eq, NULL, NULL)) {
+
+ CONNECTION_LOGF(
+ ERROR, connection, "Hashtable init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+ goto error;
+ }
+ size_t max_closed_streams = AWS_HTTP2_DEFAULT_MAX_CLOSED_STREAMS;
+ if (http2_options->max_closed_streams) {
+ max_closed_streams = http2_options->max_closed_streams;
+ }
+
+ connection->thread_data.closed_streams =
+ aws_cache_new_fifo(alloc, aws_hash_ptr, aws_ptr_eq, NULL, NULL, max_closed_streams);
+ if (!connection->thread_data.closed_streams) {
+ CONNECTION_LOGF(
+ ERROR, connection, "FIFO cache init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Initialize the value of settings */
+ memcpy(connection->thread_data.settings_peer, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+ memcpy(connection->thread_data.settings_self, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+
+ memcpy(connection->synced_data.settings_peer, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+ memcpy(connection->synced_data.settings_self, aws_h2_settings_initial, sizeof(aws_h2_settings_initial));
+
+ connection->thread_data.window_size_peer = AWS_H2_INIT_WINDOW_SIZE;
+ connection->thread_data.window_size_self = AWS_H2_INIT_WINDOW_SIZE;
+
+ connection->thread_data.goaway_received_last_stream_id = AWS_H2_STREAM_ID_MAX;
+ connection->thread_data.goaway_sent_last_stream_id = AWS_H2_STREAM_ID_MAX;
+
+ aws_crt_statistics_http2_channel_init(&connection->thread_data.stats);
+ connection->thread_data.stats.was_inactive = true; /* Start with no active streams */
+
+ connection->synced_data.is_open = true;
+ connection->synced_data.new_stream_error_code = AWS_ERROR_SUCCESS;
+
+ /* Create a new decoder */
+ struct aws_h2_decoder_params params = {
+ .alloc = alloc,
+ .vtable = &s_h2_decoder_vtable,
+ .userdata = connection,
+ .logging_id = connection,
+ .is_server = server,
+ };
+ connection->thread_data.decoder = aws_h2_decoder_new(&params);
+ if (!connection->thread_data.decoder) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Decoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_h2_frame_encoder_init(&connection->thread_data.encoder, alloc, &connection->base)) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Encoder init error %d (%s)", aws_last_error(), aws_error_name(aws_last_error()));
+ goto error;
+ }
+ /* User data from connection base is not ready until the handler is installed */
+ connection->thread_data.init_pending_settings = s_new_pending_settings(
+ connection->base.alloc,
+ http2_options->initial_settings_array,
+ http2_options->num_initial_settings,
+ http2_options->on_initial_settings_completed,
+ NULL /* user_data is set later... */);
+ if (!connection->thread_data.init_pending_settings) {
+ goto error;
+ }
+ /* We enqueue the initial settings when the handler gets installed */
+ return connection;
+
+error:
+ s_handler_destroy(&connection->base.channel_handler);
+
+ return NULL;
+}
+
+struct aws_http_connection *aws_http_connection_new_http2_server(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options) {
+
+ struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, true);
+ if (!connection) {
+ return NULL;
+ }
+
+ connection->base.server_data = &connection->base.client_or_server_data.server;
+
+ return &connection->base;
+}
+
+struct aws_http_connection *aws_http_connection_new_http2_client(
+ struct aws_allocator *allocator,
+ bool manual_window_management,
+ const struct aws_http2_connection_options *http2_options) {
+
+ struct aws_h2_connection *connection = s_connection_new(allocator, manual_window_management, http2_options, false);
+ if (!connection) {
+ return NULL;
+ }
+
+ connection->base.client_data = &connection->base.client_or_server_data.client;
+
+ return &connection->base;
+}
+
+static void s_handler_destroy(struct aws_channel_handler *handler) {
+ struct aws_h2_connection *connection = handler->impl;
+ CONNECTION_LOG(TRACE, connection, "Destroying connection");
+
+ /* No streams should be left in internal data structures */
+ AWS_ASSERT(
+ !aws_hash_table_is_valid(&connection->thread_data.active_streams_map) ||
+ aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0);
+
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.waiting_streams_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.stalled_window_streams_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.outgoing_streams_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_stream_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_frame_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_settings_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_ping_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->synced_data.pending_goaway_list));
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.pending_ping_queue));
+ AWS_ASSERT(aws_linked_list_empty(&connection->thread_data.pending_settings_queue));
+
+ /* Clean up any unsent frames and structures */
+ struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue;
+ while (!aws_linked_list_empty(outgoing_frames_queue)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_frames_queue);
+ struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node);
+ aws_h2_frame_destroy(frame);
+ }
+ if (connection->thread_data.init_pending_settings) {
+ /* If the initial settings were never sent, we need to free the memory here */
+ aws_mem_release(connection->base.alloc, connection->thread_data.init_pending_settings);
+ }
+ aws_h2_decoder_destroy(connection->thread_data.decoder);
+ aws_h2_frame_encoder_clean_up(&connection->thread_data.encoder);
+ aws_hash_table_clean_up(&connection->thread_data.active_streams_map);
+ aws_cache_destroy(connection->thread_data.closed_streams);
+ aws_mutex_clean_up(&connection->synced_data.lock);
+ aws_mem_release(connection->base.alloc, connection);
+}
+
+static struct aws_h2_pending_settings *s_new_pending_settings(
+ struct aws_allocator *allocator,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data) {
+
+ size_t settings_storage_size = sizeof(struct aws_http2_setting) * num_settings;
+ struct aws_h2_pending_settings *pending_settings;
+ void *settings_storage;
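+ /* Allocate the pending-settings struct and its copy of the settings array as one contiguous block */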
+ if (!aws_mem_acquire_many(
+ allocator,
+ 2,
+ &pending_settings,
+ sizeof(struct aws_h2_pending_settings),
+ &settings_storage,
+ settings_storage_size)) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*pending_settings);
+ /* We buffer the settings up, in case the caller has freed them by the time the ACK arrives */
+ pending_settings->settings_array = settings_storage;
+ if (settings_array) {
+ memcpy(pending_settings->settings_array, settings_array, num_settings * sizeof(struct aws_http2_setting));
+ }
+ pending_settings->num_settings = num_settings;
+ pending_settings->on_completed = on_completed;
+ pending_settings->user_data = user_data;
+
+ return pending_settings;
+}
+
+static struct aws_h2_pending_ping *s_new_pending_ping(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *optional_opaque_data,
+ const uint64_t started_time,
+ void *user_data,
+ aws_http2_on_ping_complete_fn *on_completed) {
+
+ struct aws_h2_pending_ping *pending_ping = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_pending_ping));
+ if (!pending_ping) {
+ return NULL;
+ }
+ if (optional_opaque_data) {
+ memcpy(pending_ping->opaque_data, optional_opaque_data->ptr, AWS_HTTP2_PING_DATA_SIZE);
+ }
+ pending_ping->started_time = started_time;
+ pending_ping->on_completed = on_completed;
+ pending_ping->user_data = user_data;
+ return pending_ping;
+}
+
+static struct aws_h2_pending_goaway *s_new_pending_goaway(
+ struct aws_allocator *allocator,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data) {
+
+ struct aws_byte_cursor debug_data;
+ AWS_ZERO_STRUCT(debug_data);
+ if (optional_debug_data) {
+ debug_data = *optional_debug_data;
+ }
+ struct aws_h2_pending_goaway *pending_goaway;
+ void *debug_data_storage;
+ /* aws_mem_acquire_many cannot fail anymore, so its return value is not checked */
+ aws_mem_acquire_many(
+ allocator, 2, &pending_goaway, sizeof(struct aws_h2_pending_goaway), &debug_data_storage, debug_data.len);
+ if (debug_data.len) {
+ memcpy(debug_data_storage, debug_data.ptr, debug_data.len);
+ debug_data.ptr = debug_data_storage;
+ }
+ pending_goaway->debug_data = debug_data;
+ pending_goaway->http2_error = http2_error;
+ pending_goaway->allow_more_streams = allow_more_streams;
+ return pending_goaway;
+}
+
+void aws_h2_connection_enqueue_outgoing_frame(struct aws_h2_connection *connection, struct aws_h2_frame *frame) {
+ AWS_PRECONDITION(frame->type != AWS_H2_FRAME_T_DATA);
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (frame->high_priority) {
+ /* Scan from the head of the queue, find the first normal-priority node, and insert before it */
+ struct aws_linked_list_node *iter = aws_linked_list_begin(&connection->thread_data.outgoing_frames_queue);
+ /* one past the last element */
+ const struct aws_linked_list_node *end = aws_linked_list_end(&connection->thread_data.outgoing_frames_queue);
+ while (iter != end) {
+ struct aws_h2_frame *frame_i = AWS_CONTAINER_OF(iter, struct aws_h2_frame, node);
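+ /* Never insert ahead of the frame that is currently being encoded */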
+ if (connection->thread_data.current_outgoing_frame == frame_i) {
+ iter = iter->next;
+ continue;
+ }
+ if (!frame_i->high_priority) {
+ break;
+ }
+ iter = iter->next;
+ }
+ aws_linked_list_insert_before(iter, &frame->node);
+ } else {
+ aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &frame->node);
+ }
+}
+
+static void s_on_channel_write_complete(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data) {
+
+ (void)message;
+ struct aws_h2_connection *connection = user_data;
+
+ if (err_code) {
+ CONNECTION_LOGF(ERROR, connection, "Message did not write to network, error %s", aws_error_name(err_code));
+ aws_h2_connection_shutdown_due_to_write_err(connection, err_code);
+ return;
+ }
+
+ CONNECTION_LOG(TRACE, connection, "Message finished writing to network. Rescheduling outgoing frame task");
+
+ /* To avoid wasting memory, we only want ONE of our written aws_io_messages in the channel at a time.
+ * Therefore, we wait until it's written to the network before trying to send another
+ * by running the outgoing-frame-task again.
+ *
+ * We also want to share the network with other channels.
+ * Therefore, when the write completes, we SCHEDULE the outgoing-frame-task
+ * to run again instead of calling the function directly.
+ * This way, if the message completes synchronously,
+ * we're not hogging the network by writing message after message in a tight loop */
+ aws_channel_schedule_task_now(channel, &connection->outgoing_frames_task);
+}
+
+static void s_outgoing_frames_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_h2_connection *connection = arg;
+ s_write_outgoing_frames(connection, false /*first_try*/);
+}
+
+static void s_write_outgoing_frames(struct aws_h2_connection *connection, bool first_try) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(connection->thread_data.is_outgoing_frames_task_active);
+
+ struct aws_channel_slot *channel_slot = connection->base.channel_slot;
+ struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue;
+ struct aws_linked_list *outgoing_streams_list = &connection->thread_data.outgoing_streams_list;
+
+ if (connection->thread_data.is_writing_stopped) {
+ return;
+ }
+
+ /* Determine whether there's work to do, and end task immediately if there's not.
+ * Note that we stop writing DATA frames if the channel is trying to shut down */
+ bool has_control_frames = !aws_linked_list_empty(outgoing_frames_queue);
+ bool has_data_frames = !aws_linked_list_empty(outgoing_streams_list);
+ bool may_write_data_frames = (connection->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE) &&
+ !connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written;
+ bool will_write = has_control_frames || (has_data_frames && may_write_data_frames);
+
+ if (!will_write) {
+ if (!first_try) {
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Outgoing frames task stopped. has_control_frames:%d has_data_frames:%d may_write_data_frames:%d",
+ has_control_frames,
+ has_data_frames,
+ may_write_data_frames);
+ }
+
+ connection->thread_data.is_outgoing_frames_task_active = false;
+
+ if (connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written) {
+ s_finish_shutdown(connection);
+ }
+
+ return;
+ }
+
+ if (first_try) {
+ CONNECTION_LOG(TRACE, connection, "Starting outgoing frames task");
+ }
+
+ /* Acquire an aws_io_message that we will attempt to fill up */
+ struct aws_io_message *msg = aws_channel_slot_acquire_max_message_for_write(channel_slot);
+ if (AWS_UNLIKELY(!msg)) {
+ CONNECTION_LOG(ERROR, connection, "Failed to acquire message from pool, closing connection.");
+ goto error;
+ }
+
+ /* Set up callback so we can send another message when this one completes */
+ msg->on_completion = s_on_channel_write_complete;
+ msg->user_data = connection;
+
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Outgoing frames task acquired message with %zu bytes available",
+ msg->message_data.capacity - msg->message_data.len);
+
+ /* Write as many frames from outgoing_frames_queue as possible. */
+ if (s_encode_outgoing_frames_queue(connection, &msg->message_data)) {
+ goto error;
+ }
+
+ /* If outgoing_frames_queue emptied, and connection is running normally,
+ * then write as many DATA frames from outgoing_streams_list as possible. */
+ if (aws_linked_list_empty(outgoing_frames_queue) && may_write_data_frames) {
+ if (s_encode_data_from_outgoing_streams(connection, &msg->message_data)) {
+ goto error;
+ }
+ }
+
+ if (msg->message_data.len) {
+ /* Write message to channel.
+ * outgoing_frames_task will resume when message completes. */
+ CONNECTION_LOGF(TRACE, connection, "Outgoing frames task sending message of size %zu", msg->message_data.len);
+
+ if (aws_channel_slot_send_message(channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to send channel message: %s. Closing connection.",
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+ } else {
+ /* Message is empty, warn that no work is being done and reschedule the task to try again next tick.
+ * It's likely that the body isn't ready, so the body streaming function has no data to write yet.
+ * If this scenario turns out to be common we should implement a "pause" feature. */
+ CONNECTION_LOG(WARN, connection, "Outgoing frames task sent no data, will try again next tick.");
+
+ aws_mem_release(msg->allocator, msg);
+
+ aws_channel_schedule_task_now(channel_slot->channel, &connection->outgoing_frames_task);
+ }
+ return;
+
+error:;
+ int error_code = aws_last_error();
+
+ if (msg) {
+ aws_mem_release(msg->allocator, msg);
+ }
+
+ aws_h2_connection_shutdown_due_to_write_err(connection, error_code);
+}
+
+/* Write as many frames from outgoing_frames_queue as possible (contains all non-DATA frames) */
+static int s_encode_outgoing_frames_queue(struct aws_h2_connection *connection, struct aws_byte_buf *output) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ struct aws_linked_list *outgoing_frames_queue = &connection->thread_data.outgoing_frames_queue;
+
+ /* Write as many frames from outgoing_frames_queue as possible. */
+ while (!aws_linked_list_empty(outgoing_frames_queue)) {
+ struct aws_linked_list_node *frame_node = aws_linked_list_front(outgoing_frames_queue);
+ struct aws_h2_frame *frame = AWS_CONTAINER_OF(frame_node, struct aws_h2_frame, node);
+ connection->thread_data.current_outgoing_frame = frame;
+ bool frame_complete;
+ if (aws_h2_encode_frame(&connection->thread_data.encoder, frame, output, &frame_complete)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Error encoding frame: type=%s stream=%" PRIu32 " error=%s",
+ aws_h2_frame_type_to_str(frame->type),
+ frame->stream_id,
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ if (!frame_complete) {
+ if (output->len == 0) {
+ /* We're in trouble if even an empty message isn't big enough for this frame to make any progress */
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Message is too small for encoder. frame-type=%s stream=%" PRIu32 " available-space=%zu",
+ aws_h2_frame_type_to_str(frame->type),
+ frame->stream_id,
+ output->capacity);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ return AWS_OP_ERR;
+ }
+
+ CONNECTION_LOG(TRACE, connection, "Outgoing frames task filled message, and has more frames to send later");
+ break;
+ }
+
+ /* Done encoding frame, pop it from the queue and clean up */
+ aws_linked_list_remove(frame_node);
+ aws_h2_frame_destroy(frame);
+ connection->thread_data.current_outgoing_frame = NULL;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Write as many DATA frames from outgoing_streams_list as possible. */
+static int s_encode_data_from_outgoing_streams(struct aws_h2_connection *connection, struct aws_byte_buf *output) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ struct aws_linked_list *outgoing_streams_list = &connection->thread_data.outgoing_streams_list;
+ if (aws_linked_list_empty(outgoing_streams_list)) {
+ return AWS_OP_SUCCESS;
+ }
+ struct aws_linked_list *stalled_window_streams_list = &connection->thread_data.stalled_window_streams_list;
+ struct aws_linked_list *waiting_streams_list = &connection->thread_data.waiting_streams_list;
+
+ /* If a stream stalls, put it in this list until the function ends so we don't keep trying to read from it.
+ * We put it back at the end of the function. */
+ struct aws_linked_list stalled_streams_list;
+ aws_linked_list_init(&stalled_streams_list);
+
+ int aws_error_code = 0;
+
+ /* We simply round-robin through streams, instead of using stream priority.
+ * Respecting priority is not required (RFC-7540 5.3), so we're ignoring it for now. This also keeps us safe
+ * from priority DOS attacks: https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513 */
+ while (!aws_linked_list_empty(outgoing_streams_list)) {
+ if (connection->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+ CONNECTION_LOGF(
+ DEBUG,
+ connection,
+ "Peer connection's flow-control window is too small now %zu. Connection will stop sending DATA until "
+ "WINDOW_UPDATE is received.",
+ connection->thread_data.window_size_peer);
+ goto done;
+ }
+
+ /* Stop looping if message is so full it's not worth the bother */
+ size_t space_available = output->capacity - output->len;
+ size_t worth_trying_threshold = AWS_H2_FRAME_PREFIX_SIZE * 2;
+ if (space_available < worth_trying_threshold) {
+ CONNECTION_LOG(TRACE, connection, "Outgoing frames task filled message, and has more frames to send later");
+ goto done;
+ }
+
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(outgoing_streams_list);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);
+
+ /* Ask stream to encode a data frame.
+ * Stream may complete itself as a result of encoding its data,
+ * in which case it will vanish from the connection's datastructures as a side-effect of this call.
+ * But if stream has more data to send, push it back into the appropriate list. */
+ int data_encode_status;
+ if (aws_h2_stream_encode_data_frame(stream, &connection->thread_data.encoder, output, &data_encode_status)) {
+
+ aws_error_code = aws_last_error();
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Connection error while encoding DATA on stream %" PRIu32 ", %s",
+ stream->base.id,
+ aws_error_name(aws_error_code));
+ goto done;
+ }
+
+ /* If stream has more data, push it into the appropriate list. */
+ switch (data_encode_status) {
+ case AWS_H2_DATA_ENCODE_COMPLETE:
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING:
+ aws_linked_list_push_back(outgoing_streams_list, node);
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED:
+ aws_linked_list_push_back(&stalled_streams_list, node);
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES:
+ stream->thread_data.waiting_for_writes = true;
+ aws_linked_list_push_back(waiting_streams_list, node);
+ break;
+ case AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED:
+ aws_linked_list_push_back(stalled_window_streams_list, node);
+ AWS_H2_STREAM_LOG(
+ DEBUG,
+ stream,
+ "Peer stream's flow-control window is too small. Data frames on this stream will not be sent until "
+ "WINDOW_UPDATE. ");
+ break;
+ default:
+ CONNECTION_LOG(ERROR, connection, "Data encode status is invalid.");
+ aws_error_code = AWS_ERROR_INVALID_STATE;
+ }
+ }
+
+done:
+ /* Return any stalled streams to outgoing_streams_list */
+ while (!aws_linked_list_empty(&stalled_streams_list)) {
+ aws_linked_list_push_back(outgoing_streams_list, aws_linked_list_pop_front(&stalled_streams_list));
+ }
+
+ if (aws_error_code) {
+ return aws_raise_error(aws_error_code);
+ }
+
+ if (aws_linked_list_empty(outgoing_streams_list)) {
+ /* transition from something to write -> nothing to write */
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ s_add_time_measurement_to_stats(
+ connection->thread_data.outgoing_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_outgoing_stream_ms);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* If the outgoing-frames-task isn't scheduled, run it immediately. */
+void aws_h2_try_write_outgoing_frames(struct aws_h2_connection *connection) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (connection->thread_data.is_outgoing_frames_task_active) {
+ return;
+ }
+
+ connection->thread_data.is_outgoing_frames_task_active = true;
+ s_write_outgoing_frames(connection, true /*first_try*/);
+}
+
+/**
+ * Returns successfully and sets `out_stream` if stream is currently active.
+ * Returns successfully and sets `out_stream` to NULL if the frame should be ignored.
+ * Returns failed aws_h2err if it is a connection error to receive this frame.
+ */
+struct aws_h2err s_get_active_stream_for_incoming_frame(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ enum aws_h2_frame_type frame_type,
+ struct aws_h2_stream **out_stream) {
+
+ *out_stream = NULL;
+
+ /* Check active streams */
+ struct aws_hash_element *found = NULL;
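+ /* The active-streams hash table uses the stream-id packed into a pointer-sized value as its key */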
+ const void *stream_id_key = (void *)(size_t)stream_id;
+ aws_hash_table_find(&connection->thread_data.active_streams_map, stream_id_key, &found);
+ if (found) {
+ /* Found it! return */
+ *out_stream = found->value;
+ return AWS_H2ERR_SUCCESS;
+ }
+
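+ /* Stream is not in the active map. Odd stream-ids are client-initiated (RFC-7540 5.1.1); use that to decide whether the stream is idle, closed, or safely ignorable */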
+ bool client_initiated = (stream_id % 2) == 1;
+ bool self_initiated_stream = client_initiated && (connection->base.client_data != NULL);
+ bool peer_initiated_stream = !self_initiated_stream;
+
+ if ((self_initiated_stream && stream_id >= connection->base.next_stream_id) ||
+ (peer_initiated_stream && stream_id > connection->thread_data.latest_peer_initiated_stream_id)) {
+ /* Illegal to receive frames for a stream in the idle state (stream doesn't exist yet)
+ * (except server receiving HEADERS to start a stream, but that's handled elsewhere) */
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Illegal to receive %s frame on stream id=%" PRIu32 " state=IDLE",
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+
+ if (peer_initiated_stream && stream_id > connection->thread_data.goaway_sent_last_stream_id) {
+ /* Once GOAWAY sent, ignore frames for peer-initiated streams whose id > last-stream-id */
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Ignoring %s frame on stream id=%" PRIu32 " because GOAWAY sent with last-stream-id=%" PRIu32,
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id,
+ connection->thread_data.goaway_sent_last_stream_id);
+
+ return AWS_H2ERR_SUCCESS;
+ }
+
+ void *cached_value = NULL;
+ /* Stream is closed, check whether it's legal for a few more frames to trickle in */
+ if (aws_cache_find(connection->thread_data.closed_streams, stream_id_key, &cached_value)) {
+ return aws_h2err_from_last_error();
+ }
+ if (cached_value) {
+ if (frame_type == AWS_H2_FRAME_T_PRIORITY) {
+ /* If we ever support PRIORITY, handle it here. Right now we just ignore it */
+ return AWS_H2ERR_SUCCESS;
+ }
+ enum aws_h2_stream_closed_when closed_when = (enum aws_h2_stream_closed_when)(size_t)cached_value;
+ switch (closed_when) {
+ case AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM:
+ /* WINDOW_UPDATE or RST_STREAM frames can be received ... for a short period after
+ * a DATA or HEADERS frame containing an END_STREAM flag is sent.
+ * Endpoints MUST ignore WINDOW_UPDATE or RST_STREAM frames received in this state */
+ if (frame_type == AWS_H2_FRAME_T_WINDOW_UPDATE || frame_type == AWS_H2_FRAME_T_RST_STREAM) {
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Ignoring %s frame on stream id=%" PRIu32 " because END_STREAM flag was recently sent.",
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id);
+
+ return AWS_H2ERR_SUCCESS;
+ } else {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Illegal to receive %s frame on stream id=%" PRIu32 " after END_STREAM has been received.",
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id);
+
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_STREAM_CLOSED);
+ }
+ break;
+ case AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED:
+ /* An endpoint that receives any frame other than PRIORITY after receiving a RST_STREAM
+ * MUST treat that as a stream error (Section 5.4.2) of type STREAM_CLOSED */
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Illegal to receive %s frame on stream id=%" PRIu32 " after RST_STREAM has been received",
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id);
+ struct aws_h2_frame *rst_stream =
+ aws_h2_frame_new_rst_stream(connection->base.alloc, stream_id, AWS_HTTP2_ERR_STREAM_CLOSED);
+ if (!rst_stream) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Error creating RST_STREAM frame, %s", aws_error_name(aws_last_error()));
+ return aws_h2err_from_last_error();
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream);
+ return AWS_H2ERR_SUCCESS;
+ case AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT:
+ /* An endpoint MUST ignore frames that it receives on closed streams after it has sent a RST_STREAM
+ * frame */
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Ignoring %s frame on stream id=%" PRIu32 " because RST_STREAM was recently sent.",
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id);
+
+ return AWS_H2ERR_SUCCESS;
+ break;
+ default:
+ CONNECTION_LOGF(
+ ERROR, connection, "Invalid state fo cached closed stream, stream id=%" PRIu32, stream_id);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_INTERNAL_ERROR);
+ break;
+ }
+ }
+ if (frame_type == AWS_H2_FRAME_T_PRIORITY) {
+ /* ignored if the stream has been removed from the dependency tree */
+ return AWS_H2ERR_SUCCESS;
+ }
+
+ /* Stream closed (purged from closed_streams, or implicitly closed when its ID was skipped) */
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Illegal to receive %s frame on stream id=%" PRIu32
+ ", no memory of closed stream (ID skipped, or removed from cache)",
+ aws_h2_frame_type_to_str(frame_type),
+ stream_id);
+
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+}
+
+/* Decoder callbacks */
+
+struct aws_h2err s_decoder_on_headers_begin(uint32_t stream_id, void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ if (connection->base.server_data) {
+ /* Server would create new request-handler stream... */
+ return aws_h2err_from_aws_code(AWS_ERROR_UNIMPLEMENTED);
+ }
+
+ struct aws_h2_stream *stream;
+ struct aws_h2err err =
+ s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_headers_begin(stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_headers_i(
+ uint32_t stream_id,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type,
+ void *userdata) {
+
+ struct aws_h2_connection *connection = userdata;
+ struct aws_h2_stream *stream;
+ struct aws_h2err err =
+ s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_headers_i(stream, header, name_enum, block_type);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_headers_end(
+ uint32_t stream_id,
+ bool malformed,
+ enum aws_http_header_block block_type,
+ void *userdata) {
+
+ struct aws_h2_connection *connection = userdata;
+ struct aws_h2_stream *stream;
+ struct aws_h2err err =
+ s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_HEADERS, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_headers_end(stream, malformed, block_type);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_push_promise(uint32_t stream_id, uint32_t promised_stream_id, void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+ AWS_ASSERT(connection->base.client_data); /* decoder has already enforced this */
+ AWS_ASSERT(promised_stream_id % 2 == 0); /* decoder has already enforced this */
+
+ /* The identifier of a newly established stream MUST be numerically greater
+ * than all streams that the initiating endpoint has opened or reserved (RFC-7540 5.1.1) */
+ if (promised_stream_id <= connection->thread_data.latest_peer_initiated_stream_id) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Newly promised stream ID %" PRIu32 " must be higher than previously established ID %" PRIu32,
+ promised_stream_id,
+ connection->thread_data.latest_peer_initiated_stream_id);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ connection->thread_data.latest_peer_initiated_stream_id = promised_stream_id;
+
+ /* If we ever fully support PUSH_PROMISE, this is where we'd add the
+ * promised_stream_id to some reserved_streams datastructure */
+
+ struct aws_h2_stream *stream;
+ struct aws_h2err err =
+ s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_PUSH_PROMISE, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_push_promise(stream, promised_stream_id);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+static int s_connection_send_update_window(struct aws_h2_connection *connection, uint32_t window_size) {
+ struct aws_h2_frame *connection_window_update_frame =
+ aws_h2_frame_new_window_update(connection->base.alloc, 0, window_size);
+ if (!connection_window_update_frame) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "WINDOW_UPDATE frame on connection failed to be sent, error %s",
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, connection_window_update_frame);
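+ /* Grow our window accounting immediately; the WINDOW_UPDATE frame itself is written later by the outgoing-frames task */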
+ connection->thread_data.window_size_self += window_size;
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_data_begin(
+ uint32_t stream_id,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream,
+ void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ /* A receiver that receives a flow-controlled frame MUST always account for its contribution against the connection
+ * flow-control window, unless the receiver treats this as a connection error */
+ if (aws_sub_size_checked(
+ connection->thread_data.window_size_self, payload_len, &connection->thread_data.window_size_self)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "DATA length %" PRIu32 " exceeds flow-control window %zu",
+ payload_len,
+ connection->thread_data.window_size_self);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+ }
+
+ struct aws_h2_stream *stream;
+ struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_DATA, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_data_begin(stream, payload_len, total_padding_bytes, end_stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+ /* Handle automatic updates of the connection flow-control window */
+ uint32_t auto_window_update;
+ if (connection->conn_manual_window_management) {
+ /* Automatically update the flow-window to account for padding, even though "manual window management"
+ * is enabled. We do this because the current API doesn't have any way to inform the user about padding,
+ * so we can't expect them to manage it themselves. */
+ auto_window_update = total_padding_bytes;
+ } else {
+ /* Automatically update the full amount we just received */
+ auto_window_update = payload_len;
+ }
+
+ if (auto_window_update != 0) {
+ if (s_connection_send_update_window(connection, auto_window_update)) {
+ return aws_h2err_from_last_error();
+ }
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Automatically updating connection window by %" PRIu32 "(%" PRIu32 " due to padding).",
+ auto_window_update,
+ total_padding_bytes);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_data_i(uint32_t stream_id, struct aws_byte_cursor data, void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ /* Pass data to stream */
+ struct aws_h2_stream *stream;
+ struct aws_h2err err = s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_DATA, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_data_i(stream, data);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_end_stream(uint32_t stream_id, void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ /* Not calling s_get_active_stream_for_incoming_frame() here because END_STREAM
+ * isn't an actual frame type. It's a flag on DATA or HEADERS frames, and we
+ * already checked the legality of those frames in their respective callbacks. */
+
+ struct aws_hash_element *found = NULL;
+ aws_hash_table_find(&connection->thread_data.active_streams_map, (void *)(size_t)stream_id, &found);
+ if (found) {
+ struct aws_h2_stream *stream = found->value;
+ struct aws_h2err err = aws_h2_stream_on_decoder_end_stream(stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+static struct aws_h2err s_decoder_on_rst_stream(uint32_t stream_id, uint32_t h2_error_code, void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ /* Pass RST_STREAM to stream */
+ struct aws_h2_stream *stream;
+ struct aws_h2err err =
+ s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_RST_STREAM, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ if (stream) {
+ err = aws_h2_stream_on_decoder_rst_stream(stream, h2_error_code);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+static struct aws_h2err s_decoder_on_ping_ack(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+ if (aws_linked_list_empty(&connection->thread_data.pending_ping_queue)) {
+ CONNECTION_LOG(ERROR, connection, "Received extraneous PING ACK.");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ struct aws_h2err err;
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_ping_queue);
+ struct aws_h2_pending_ping *pending_ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node);
+ /* Check the payload */
+ if (!aws_array_eq(opaque_data, AWS_HTTP2_PING_DATA_SIZE, pending_ping->opaque_data, AWS_HTTP2_PING_DATA_SIZE)) {
+ CONNECTION_LOG(ERROR, connection, "Received PING ACK with mismatched opaque-data.");
+ err = aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ goto error;
+ }
+ uint64_t time_stamp;
+ if (aws_high_res_clock_get_ticks(&time_stamp)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed getting the time stamp when PING ACK received, error %s",
+ aws_error_name(aws_last_error()));
+ err = aws_h2err_from_last_error();
+ goto error;
+ }
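+ /* Round-trip time = ACK arrival timestamp minus the PING's recorded start time */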
+ uint64_t rtt;
+ if (aws_sub_u64_checked(time_stamp, pending_ping->started_time, &rtt)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Overflow from time stamp when PING ACK received, error %s",
+ aws_error_name(aws_last_error()));
+ err = aws_h2err_from_last_error();
+ goto error;
+ }
+ CONNECTION_LOGF(TRACE, connection, "Round trip time is %lf ms, approximately", (double)rtt / 1000000);
+ /* fire the callback */
+ if (pending_ping->on_completed) {
+ pending_ping->on_completed(&connection->base, rtt, AWS_ERROR_SUCCESS, pending_ping->user_data);
+ }
+ aws_mem_release(connection->base.alloc, pending_ping);
+ return AWS_H2ERR_SUCCESS;
+error:
+ if (pending_ping->on_completed) {
+ pending_ping->on_completed(&connection->base, 0 /* fake rtt */, err.aws_code, pending_ping->user_data);
+ }
+ aws_mem_release(connection->base.alloc, pending_ping);
+ return err;
+}
+
+static struct aws_h2err s_decoder_on_ping(uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE], void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ /* send a PING frame with the ACK flag set in response, with an identical payload. */
+ struct aws_h2_frame *ping_ack_frame = aws_h2_frame_new_ping(connection->base.alloc, true, opaque_data);
+ if (!ping_ack_frame) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Ping ACK frame failed to be sent, error %s", aws_error_name(aws_last_error()));
+ return aws_h2err_from_last_error();
+ }
+
+ aws_h2_connection_enqueue_outgoing_frame(connection, ping_ack_frame);
+ return AWS_H2ERR_SUCCESS;
+}
+
+static struct aws_h2err s_decoder_on_settings(
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+ struct aws_h2err err;
+ /* Once all values have been processed, the recipient MUST immediately emit a SETTINGS frame with the ACK flag
+ * set (RFC-7540 6.5.3). */
+ CONNECTION_LOG(TRACE, connection, "Setting frame processing ends");
+ struct aws_h2_frame *settings_ack_frame = aws_h2_frame_new_settings(connection->base.alloc, NULL, 0, true);
+ if (!settings_ack_frame) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Settings ACK frame failed to be sent, error %s", aws_error_name(aws_last_error()));
+ return aws_h2err_from_last_error();
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, settings_ack_frame);
+
+ /* Allocate a block of memory for the settings_array passed to the callback, which only includes the settings that
+ * changed; it is freed once the callback finishes */
+ struct aws_http2_setting *callback_array = NULL;
+ if (num_settings) {
+ callback_array = aws_mem_acquire(connection->base.alloc, num_settings * sizeof(struct aws_http2_setting));
+ if (!callback_array) {
+ return aws_h2err_from_last_error();
+ }
+ }
+ size_t callback_array_num = 0;
+
+ /* Apply the changes to the encoder and connection */
+ struct aws_h2_frame_encoder *encoder = &connection->thread_data.encoder;
+ for (size_t i = 0; i < num_settings; i++) {
+ if (connection->thread_data.settings_peer[settings_array[i].id] == settings_array[i].value) {
+ /* No change, don't do any work */
+ continue;
+ }
+ switch (settings_array[i].id) {
+ case AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE: {
+ aws_h2_frame_encoder_set_setting_header_table_size(encoder, settings_array[i].value);
+ } break;
+ case AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE: {
+ /* When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream
+ * flow-control windows that it maintains by the difference between the new value and the old value. */
+ int32_t size_changed =
+ settings_array[i].value - connection->thread_data.settings_peer[settings_array[i].id];
+ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
+ while (!aws_hash_iter_done(&stream_iter)) {
+ struct aws_h2_stream *stream = stream_iter.element.value;
+ aws_hash_iter_next(&stream_iter);
+ err = aws_h2_stream_window_size_change(stream, size_changed, false /*self*/);
+ if (aws_h2err_failed(err)) {
+ CONNECTION_LOG(
+ ERROR,
+ connection,
+ "Connection error, change to SETTINGS_INITIAL_WINDOW_SIZE caused a stream's flow-control "
+ "window to exceed the maximum size");
+ goto error;
+ }
+ }
+ } break;
+ case AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE: {
+ aws_h2_frame_encoder_set_setting_max_frame_size(encoder, settings_array[i].value);
+ } break;
+ default:
+ break;
+ }
+ connection->thread_data.settings_peer[settings_array[i].id] = settings_array[i].value;
+ callback_array[callback_array_num++] = settings_array[i];
+ }
+ if (connection->on_remote_settings_change) {
+ connection->on_remote_settings_change(
+ &connection->base, callback_array, callback_array_num, connection->base.user_data);
+ }
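+ /* Publish the updated peer settings to the synced copy so other threads can read them under the lock */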
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ memcpy(
+ connection->synced_data.settings_peer,
+ connection->thread_data.settings_peer,
+ sizeof(connection->thread_data.settings_peer));
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ aws_mem_release(connection->base.alloc, callback_array);
+ return AWS_H2ERR_SUCCESS;
+error:
+ aws_mem_release(connection->base.alloc, callback_array);
+ return err;
+}
+
+static struct aws_h2err s_decoder_on_settings_ack(void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+ if (aws_linked_list_empty(&connection->thread_data.pending_settings_queue)) {
+ CONNECTION_LOG(ERROR, connection, "Received a malicious extra SETTINGS acknowledgment");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ struct aws_h2err err;
+ struct aws_h2_pending_settings *pending_settings = NULL;
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_settings_queue);
+ pending_settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node);
+
+ struct aws_http2_setting *settings_array = pending_settings->settings_array;
+ /* Apply the settings */
+ struct aws_h2_decoder *decoder = connection->thread_data.decoder;
+ for (size_t i = 0; i < pending_settings->num_settings; i++) {
+ if (connection->thread_data.settings_self[settings_array[i].id] == settings_array[i].value) {
+ /* No change, don't do any work */
+ continue;
+ }
+ switch (settings_array[i].id) {
+ case AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE: {
+ aws_h2_decoder_set_setting_header_table_size(decoder, settings_array[i].value);
+ } break;
+ case AWS_HTTP2_SETTINGS_ENABLE_PUSH: {
+ aws_h2_decoder_set_setting_enable_push(decoder, settings_array[i].value);
+ } break;
+ case AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE: {
+ /* When the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream
+ * flow-control windows that it maintains by the difference between the new value and the old value. */
+ int32_t size_changed =
+ settings_array[i].value - connection->thread_data.settings_self[settings_array[i].id];
+ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
+ while (!aws_hash_iter_done(&stream_iter)) {
+ struct aws_h2_stream *stream = stream_iter.element.value;
+ aws_hash_iter_next(&stream_iter);
+ err = aws_h2_stream_window_size_change(stream, size_changed, true /*self*/);
+ if (aws_h2err_failed(err)) {
+ CONNECTION_LOG(
+ ERROR,
+ connection,
+ "Connection error, change to SETTINGS_INITIAL_WINDOW_SIZE from internal caused a stream's "
+ "flow-control window to exceed the maximum size");
+ goto error;
+ }
+ }
+ } break;
+ case AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE: {
+ aws_h2_decoder_set_setting_max_frame_size(decoder, settings_array[i].value);
+ } break;
+ default:
+ break;
+ }
+ connection->thread_data.settings_self[settings_array[i].id] = settings_array[i].value;
+ }
+ /* Invoke the change-settings-completed user callback */
+ if (pending_settings->on_completed) {
+ pending_settings->on_completed(&connection->base, AWS_ERROR_SUCCESS, pending_settings->user_data);
+ }
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ memcpy(
+ connection->synced_data.settings_self,
+ connection->thread_data.settings_self,
+ sizeof(connection->thread_data.settings_self));
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ /* clean up the pending_settings */
+ aws_mem_release(connection->base.alloc, pending_settings);
+ return AWS_H2ERR_SUCCESS;
+error:
+ /* invoke the user callback with error code */
+ if (pending_settings->on_completed) {
+ pending_settings->on_completed(&connection->base, err.aws_code, pending_settings->user_data);
+ }
+ /* clean up the pending settings here */
+ aws_mem_release(connection->base.alloc, pending_settings);
+ return err;
+}
+
+static struct aws_h2err s_decoder_on_window_update(uint32_t stream_id, uint32_t window_size_increment, void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ if (stream_id == 0) {
+ /* Let's update the connection flow-control window size */
+ if (window_size_increment == 0) {
+ /* flow-control window increment of 0 MUST be treated as error (RFC7540 6.9.1) */
+ CONNECTION_LOG(ERROR, connection, "Window update frame with 0 increment size");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ if (connection->thread_data.window_size_peer + window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) {
+ /* We MUST NOT allow a flow-control window to exceed the max */
+ CONNECTION_LOG(
+ ERROR,
+ connection,
+ "Window update frame causes the connection flow-control window exceeding the maximum size");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+ }
+ if (connection->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+ CONNECTION_LOGF(
+ DEBUG,
+ connection,
+ "Peer connection's flow-control window is resumed from too small to %" PRIu32
+ ". Connection will resume sending DATA.",
+ window_size_increment);
+ }
+ connection->thread_data.window_size_peer += window_size_increment;
+ return AWS_H2ERR_SUCCESS;
+ } else {
+ /* Update the flow-control window size for stream */
+ struct aws_h2_stream *stream;
+ bool window_resume;
+ struct aws_h2err err =
+ s_get_active_stream_for_incoming_frame(connection, stream_id, AWS_H2_FRAME_T_WINDOW_UPDATE, &stream);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ if (stream) {
+ err = aws_h2_stream_on_decoder_window_update(stream, window_size_increment, &window_resume);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ if (window_resume) {
+ /* Set the stream free from stalled list */
+ AWS_H2_STREAM_LOGF(
+ DEBUG,
+ stream,
+ "Peer stream's flow-control window is resumed from 0 or negative to %" PRIu32
+ " Stream will resume sending data.",
+ stream->thread_data.window_size_peer);
+ aws_linked_list_remove(&stream->node);
+ aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
+ }
+ }
+ }
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err s_decoder_on_goaway(
+ uint32_t last_stream,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data,
+ void *userdata) {
+ struct aws_h2_connection *connection = userdata;
+
+ if (last_stream > connection->thread_data.goaway_received_last_stream_id) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Received GOAWAY with invalid last-stream-id=%" PRIu32 ", must not exceed previous last-stream-id=%" PRIu32,
+ last_stream,
+ connection->thread_data.goaway_received_last_stream_id);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ /* Stop creating new streams and making new requests */
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_GOAWAY_RECEIVED;
+ connection->synced_data.goaway_received_last_stream_id = last_stream;
+ connection->synced_data.goaway_received_http2_error_code = error_code;
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ connection->thread_data.goaway_received_last_stream_id = last_stream;
+ CONNECTION_LOGF(
+ DEBUG,
+ connection,
+ "Received GOAWAY error-code=%s(0x%x) last-stream-id=%" PRIu32,
+ aws_http2_error_code_to_str(error_code),
+ error_code,
+ last_stream);
+ /* Complete activated streams whose id is higher than last_stream, since they will not be processed by the peer.
+ * We should treat them as if they had never been created at all.
+ * This would be more efficient if we could iterate streams in reverse-id order */
+ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
+ while (!aws_hash_iter_done(&stream_iter)) {
+ struct aws_h2_stream *stream = stream_iter.element.value;
+ aws_hash_iter_next(&stream_iter);
+ if (stream->base.id > last_stream) {
+ AWS_H2_STREAM_LOG(
+ DEBUG,
+ stream,
+ "stream ID is higher than GOAWAY last stream ID, please retry this stream on a new connection.");
+ s_stream_complete(connection, stream, AWS_ERROR_HTTP_GOAWAY_RECEIVED);
+ }
+ }
+ if (connection->on_goaway_received) {
+ /* Inform user about goaway received and the error code. */
+ connection->on_goaway_received(
+ &connection->base, last_stream, error_code, debug_data, connection->base.user_data);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* End decoder callbacks */
+
+static int s_send_connection_preface_client_string(struct aws_h2_connection *connection) {
+
+ /* Just send the magic string on its own aws_io_message. */
+ struct aws_io_message *msg = aws_channel_acquire_message_from_pool(
+ connection->base.channel_slot->channel,
+ AWS_IO_MESSAGE_APPLICATION_DATA,
+ aws_h2_connection_preface_client_string.len);
+ if (!msg) {
+ goto error;
+ }
+
+ if (!aws_byte_buf_write_from_whole_cursor(&msg->message_data, aws_h2_connection_preface_client_string)) {
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ goto error;
+ }
+
+ if (aws_channel_slot_send_message(connection->base.channel_slot, msg, AWS_CHANNEL_DIR_WRITE)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ if (msg) {
+ aws_mem_release(msg->allocator, msg);
+ }
+ return AWS_OP_ERR;
+}
+
+static void s_handler_installed(struct aws_channel_handler *handler, struct aws_channel_slot *slot) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(slot->channel));
+ struct aws_h2_connection *connection = handler->impl;
+
+ connection->base.channel_slot = slot;
+
+ /* Acquire a hold on the channel to prevent its destruction until the user has
+ * given the go-ahead via aws_http_connection_release() */
+ aws_channel_acquire_hold(slot->channel);
+
+ /* Send HTTP/2 connection preface (RFC-7540 3.5)
+ * - clients must send magic string
+ * - both client and server must send SETTINGS frame */
+
+ if (connection->base.client_data) {
+ if (s_send_connection_preface_client_string(connection)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to send client connection preface string, %s",
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+ }
+ struct aws_h2_pending_settings *init_pending_settings = connection->thread_data.init_pending_settings;
+ aws_linked_list_push_back(&connection->thread_data.pending_settings_queue, &init_pending_settings->node);
+ connection->thread_data.init_pending_settings = NULL;
+ /* Set user_data here; it is valid now that the handler is installed */
+ init_pending_settings->user_data = connection->base.user_data;
+
+ struct aws_h2_frame *init_settings_frame = aws_h2_frame_new_settings(
+ connection->base.alloc,
+ init_pending_settings->settings_array,
+ init_pending_settings->num_settings,
+ false /*ACK*/);
+ if (!init_settings_frame) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to create the initial settings frame, error %s",
+ aws_error_name(aws_last_error()));
+ aws_mem_release(connection->base.alloc, init_pending_settings);
+ goto error;
+ }
+ /* enqueue the initial settings frame here */
+ aws_linked_list_push_back(&connection->thread_data.outgoing_frames_queue, &init_settings_frame->node);
+
+ /* If not manual connection window management, update the connection window to max. */
+ if (!connection->conn_manual_window_management) {
+ uint32_t initial_window_update_size = AWS_H2_WINDOW_UPDATE_MAX - AWS_H2_INIT_WINDOW_SIZE;
+ struct aws_h2_frame *connection_window_update_frame =
+ aws_h2_frame_new_window_update(connection->base.alloc, 0 /* stream_id */, initial_window_update_size);
+ AWS_ASSERT(connection_window_update_frame);
+ /* enqueue the window update frame here */
+ aws_linked_list_push_back(
+ &connection->thread_data.outgoing_frames_queue, &connection_window_update_frame->node);
+ connection->thread_data.window_size_self += initial_window_update_size;
+ }
+ aws_h2_try_write_outgoing_frames(connection);
+ return;
+
+error:
+ aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error());
+}
+
+static void s_stream_complete(struct aws_h2_connection *connection, struct aws_h2_stream *stream, int error_code) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ /* Nice logging */
+ if (error_code) {
+ AWS_H2_STREAM_LOGF(
+ ERROR, stream, "Stream completed with error %d (%s).", error_code, aws_error_name(error_code));
+ } else if (stream->base.client_data) {
+ int status = stream->base.client_data->response_status;
+ AWS_H2_STREAM_LOGF(
+ DEBUG, stream, "Client stream complete, response status %d (%s)", status, aws_http_status_text(status));
+ } else {
+ AWS_H2_STREAM_LOG(DEBUG, stream, "Server stream complete");
+ }
+
+ /* Remove stream from active_streams_map and outgoing_stream_list (if it was in them at all) */
+ aws_hash_table_remove(&connection->thread_data.active_streams_map, (void *)(size_t)stream->base.id, NULL, NULL);
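+ /* A non-NULL next pointer means the stream is still in one of the stream lists */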
+ if (stream->node.next) {
+ aws_linked_list_remove(&stream->node);
+ }
+
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0 &&
+ connection->thread_data.incoming_timestamp_ns != 0) {
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ /* transition from something to read -> nothing to read and nothing to write */
+ s_add_time_measurement_to_stats(
+ connection->thread_data.incoming_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_incoming_stream_ms);
+ connection->thread_data.stats.was_inactive = true;
+ connection->thread_data.incoming_timestamp_ns = 0;
+ }
+
+ aws_h2_stream_complete(stream, error_code);
+
+ /* release connection's hold on stream */
+ aws_http_stream_release(&stream->base);
+}
+
+int aws_h2_connection_on_stream_closed(
+ struct aws_h2_connection *connection,
+ struct aws_h2_stream *stream,
+ enum aws_h2_stream_closed_when closed_when,
+ int aws_error_code) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(stream->thread_data.state == AWS_H2_STREAM_STATE_CLOSED);
+ AWS_PRECONDITION(stream->base.id != 0);
+
+ uint32_t stream_id = stream->base.id;
+
+ /* Mark stream complete. This removes the stream from any "active" datastructures,
+ * invokes its completion callback, and releases its refcount. */
+ s_stream_complete(connection, stream, aws_error_code);
+ stream = NULL; /* Reference released, do not touch again */
+
+ if (s_record_closed_stream(connection, stream_id, closed_when)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_record_closed_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ enum aws_h2_stream_closed_when closed_when) {
+
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (aws_cache_put(connection->thread_data.closed_streams, (void *)(size_t)stream_id, (void *)(size_t)closed_when)) {
+ CONNECTION_LOG(ERROR, connection, "Failed inserting ID into cache of recently closed streams");
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_h2_connection_send_rst_and_close_reserved_stream(
+ struct aws_h2_connection *connection,
+ uint32_t stream_id,
+ uint32_t h2_error_code) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ struct aws_h2_frame *rst_stream = aws_h2_frame_new_rst_stream(connection->base.alloc, stream_id, h2_error_code);
+ if (!rst_stream) {
+ CONNECTION_LOGF(ERROR, connection, "Error creating RST_STREAM frame, %s", aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream);
+
+ /* If we ever fully support PUSH_PROMISE, this is where we'd remove the
+ * promised_stream_id from some reserved_streams datastructure */
+
+ return s_record_closed_stream(connection, stream_id, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT);
+}
+
+/* Move stream into "active" datastructures and notify stream that it can send frames now */
+static void s_move_stream_to_thread(
+ struct aws_h2_connection *connection,
+ struct aws_h2_stream *stream,
+ int new_stream_error_code) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ if (new_stream_error_code) {
+ aws_raise_error(new_stream_error_code);
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Failed activating stream, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
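+ /* The peer's SETTINGS_MAX_CONCURRENT_STREAMS caps how many streams we may have active at once */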
+ uint32_t max_concurrent_streams = connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS];
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) >= max_concurrent_streams) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Failed activating stream, max concurrent streams are reached");
+ aws_raise_error(AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED);
+ goto error;
+ }
+
+ if (aws_hash_table_put(
+ &connection->thread_data.active_streams_map, (void *)(size_t)stream->base.id, stream, NULL)) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Failed inserting stream into map");
+ goto error;
+ }
+
+ enum aws_h2_stream_body_state body_state = AWS_H2_STREAM_BODY_STATE_NONE;
+ if (aws_h2_stream_on_activated(stream, &body_state)) {
+ goto error;
+ }
+
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 1) {
+ /* transition from nothing to read -> something to read */
+ uint64_t now_ns = 0;
+ aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns);
+ connection->thread_data.incoming_timestamp_ns = now_ns;
+ }
+
+ switch (body_state) {
+ case AWS_H2_STREAM_BODY_STATE_WAITING_WRITES:
+ aws_linked_list_push_back(&connection->thread_data.waiting_streams_list, &stream->node);
+ break;
+ case AWS_H2_STREAM_BODY_STATE_ONGOING:
+ aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
+ break;
+ default:
+ break;
+ }
+ return;
+error:
+ /* If the stream got into any datastructures, s_stream_complete() will remove it */
+ s_stream_complete(connection, stream, aws_last_error());
+}
+
+/* Perform on-thread work that is triggered by calls to the connection/stream API */
+static void s_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_h2_connection *connection = arg;
+
+ struct aws_linked_list pending_frames;
+ aws_linked_list_init(&pending_frames);
+
+ struct aws_linked_list pending_streams;
+ aws_linked_list_init(&pending_streams);
+
+ struct aws_linked_list pending_settings;
+ aws_linked_list_init(&pending_settings);
+
+ struct aws_linked_list pending_ping;
+ aws_linked_list_init(&pending_ping);
+
+ struct aws_linked_list pending_goaway;
+ aws_linked_list_init(&pending_goaway);
+
+ size_t window_update_size;
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ connection->synced_data.is_cross_thread_work_task_scheduled = false;
+
+ aws_linked_list_swap_contents(&connection->synced_data.pending_frame_list, &pending_frames);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_stream_list, &pending_streams);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_settings_list, &pending_settings);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_ping_list, &pending_ping);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_goaway_list, &pending_goaway);
+ window_update_size = connection->synced_data.window_update_size;
+ connection->synced_data.window_update_size = 0;
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ /* Enqueue new pending control frames */
+ while (!aws_linked_list_empty(&pending_frames)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_frames);
+ struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node);
+ aws_h2_connection_enqueue_outgoing_frame(connection, frame);
+ }
+
+ /* We already enqueued the window_update frame; just apply the change and let our peer check this value,
+ * whether or not overflow happens. The peer will detect it for us. */
+ connection->thread_data.window_size_self =
+ aws_add_size_saturating(connection->thread_data.window_size_self, window_update_size);
+
+ /* Process new pending_streams */
+ while (!aws_linked_list_empty(&pending_streams)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_streams);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);
+ s_move_stream_to_thread(connection, stream, new_stream_error_code);
+ }
+
+ /* Move pending settings to thread data */
+ while (!aws_linked_list_empty(&pending_settings)) {
+ aws_linked_list_push_back(
+ &connection->thread_data.pending_settings_queue, aws_linked_list_pop_front(&pending_settings));
+ }
+
+ /* Move pending PING to thread data */
+ while (!aws_linked_list_empty(&pending_ping)) {
+ aws_linked_list_push_back(
+ &connection->thread_data.pending_ping_queue, aws_linked_list_pop_front(&pending_ping));
+ }
+
+ /* Send user requested goaways */
+ while (!aws_linked_list_empty(&pending_goaway)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&pending_goaway);
+ struct aws_h2_pending_goaway *goaway = AWS_CONTAINER_OF(node, struct aws_h2_pending_goaway, node);
+ s_send_goaway(connection, goaway->http2_error, goaway->allow_more_streams, &goaway->debug_data);
+ aws_mem_release(connection->base.alloc, goaway);
+ }
+
+ /* It's likely that frames were queued while processing cross-thread work.
+ * If so, try writing them now */
+ aws_h2_try_write_outgoing_frames(connection);
+}
+
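+/* Stream activation may be called from any thread. Under the lock, the stream is assigned its ID and pushed onto
+ * synced_data.pending_stream_list; the cross-thread work task is then scheduled (unless already scheduled), and
+ * s_cross_thread_work_task() later moves the stream into thread_data on the channel thread. */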
+int aws_h2_stream_activate(struct aws_http_stream *stream) {
+ struct aws_h2_stream *h2_stream = AWS_CONTAINER_OF(stream, struct aws_h2_stream, base);
+
+ struct aws_http_connection *base_connection = stream->owning_connection;
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(base_connection, struct aws_h2_connection, base);
+
+ int err;
+ bool was_cross_thread_work_scheduled = false;
+ { /* BEGIN CRITICAL SECTION */
+ s_acquire_stream_and_connection_lock(h2_stream, connection);
+
+ if (stream->id) {
+ /* stream has already been activated. */
+ s_release_stream_and_connection_lock(h2_stream, connection);
+ return AWS_OP_SUCCESS;
+ }
+
+ err = connection->synced_data.new_stream_error_code;
+ if (err) {
+ s_release_stream_and_connection_lock(h2_stream, connection);
+ goto error;
+ }
+
+ stream->id = aws_http_connection_get_next_stream_id(base_connection);
+
+ if (stream->id) {
+ /* success */
+ was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+
+ aws_linked_list_push_back(&connection->synced_data.pending_stream_list, &h2_stream->node);
+ h2_stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_ACTIVE;
+ }
+
+ s_release_stream_and_connection_lock(h2_stream, connection);
+ } /* END CRITICAL SECTION */
+
+ if (!stream->id) {
+ /* aws_http_connection_get_next_stream_id() raises its own error. */
+ return AWS_OP_ERR;
+ }
+
+ /* connection keeps activated stream alive until stream completes */
+ aws_atomic_fetch_add(&stream->refcount, 1);
+
+ if (!was_cross_thread_work_scheduled) {
+ CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to activate the stream id=%p, new streams are not allowed now. error %d (%s)",
+ (void *)stream,
+ err,
+ aws_error_name(err));
+ return aws_raise_error(err);
+}
+
+static struct aws_http_stream *s_connection_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(client_connection, struct aws_h2_connection, base);
+
+ /* #TODO: http/2-ify the request (ex: add ":method" header). Should we mutate a copy or the original? Validate?
+ * Or just pass pointer to headers struct and let encoder transform it while encoding? */
+
+ struct aws_h2_stream *stream = aws_h2_stream_new_request(client_connection, options);
+ if (!stream) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to create stream, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return NULL;
+ }
+
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ if (new_stream_error_code) {
+ aws_raise_error(new_stream_error_code);
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Cannot create request stream, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ AWS_H2_STREAM_LOG(DEBUG, stream, "Created HTTP/2 request stream"); /* #TODO: print method & path */
+ return &stream->base;
+
+error:
+ /* Force destruction of the stream, avoiding ref counting */
+ stream->base.vtable->destroy(&stream->base);
+ return NULL;
+}
+
+static void s_connection_close(struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+
+ /* Don't stop reading/writing immediately, let that happen naturally during the channel shutdown process. */
+ s_stop(connection, false /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, AWS_ERROR_SUCCESS);
+}
+
+static void s_connection_stop_new_request(struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ if (!connection->synced_data.new_stream_error_code) {
+ connection->synced_data.new_stream_error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+}
+
+static bool s_connection_is_open(const struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ bool is_open;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ is_open = connection->synced_data.is_open;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ return is_open;
+}
+
+static bool s_connection_new_requests_allowed(const struct aws_http_connection *connection_base) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ int new_stream_error_code;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ new_stream_error_code = connection->synced_data.new_stream_error_code;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ return new_stream_error_code == 0;
+}
+
+static void s_connection_update_window(struct aws_http_connection *connection_base, uint32_t increment_size) {
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ if (!increment_size) {
+ /* Silently do nothing. */
+ return;
+ }
+ if (!connection->conn_manual_window_management) {
+ /* auto-mode: manual window updates are not supported; do nothing besides logging. */
+ CONNECTION_LOG(
+ DEBUG,
+ connection,
+ "Connection manual window management is off, update window operations are not supported.");
+ return;
+ }
+ struct aws_h2_frame *connection_window_update_frame =
+ aws_h2_frame_new_window_update(connection->base.alloc, 0, increment_size);
+ if (!connection_window_update_frame) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed to create WINDOW_UPDATE frame on connection, error %s",
+ aws_error_name(aws_last_error()));
+ /* OOM should result in a crash. The only other failure case is an increment size that is too large, which
+ * results in overflow. */
+ goto overflow;
+ }
+
+ int err = 0;
+ bool cross_thread_work_should_schedule = false;
+ bool connection_open = false;
+ size_t sum_size = 0;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ err |= aws_add_size_checked(connection->synced_data.window_update_size, increment_size, &sum_size);
+ err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX;
+ connection_open = connection->synced_data.is_open;
+
+ if (!err && connection_open) {
+ cross_thread_work_should_schedule = !connection->synced_data.is_cross_thread_work_task_scheduled;
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ aws_linked_list_push_back(
+ &connection->synced_data.pending_frame_list, &connection_window_update_frame->node);
+ connection->synced_data.window_update_size = sum_size;
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ if (err) {
+ CONNECTION_LOG(
+ ERROR,
+ connection,
+ "The connection's flow-control windows has been incremented beyond 2**31 -1, the max for HTTP/2. The ");
+ aws_h2_frame_destroy(connection_window_update_frame);
+ goto overflow;
+ }
+
+ if (cross_thread_work_should_schedule) {
+ CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ }
+
+ if (!connection_open) {
+ /* connection already closed, just do nothing */
+ aws_h2_frame_destroy(connection_window_update_frame);
+ return;
+ }
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "User requested to update the HTTP/2 connection's flow-control windows by %" PRIu32 ".",
+ increment_size);
+ return;
+overflow:
+ /* Shutdown the connection as overflow detected */
+ s_stop(
+ connection,
+ false /*stop_reading*/,
+ false /*stop_writing*/,
+ true /*schedule_shutdown*/,
+ AWS_ERROR_OVERFLOW_DETECTED);
+}
+
+static int s_connection_change_settings(
+ struct aws_http_connection *connection_base,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ aws_http2_on_change_settings_complete_fn *on_completed,
+ void *user_data) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+
+ if (!settings_array && num_settings) {
+ CONNECTION_LOG(ERROR, connection, "Settings_array is NULL and num_settings is not zero.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_h2_pending_settings *pending_settings =
+ s_new_pending_settings(connection->base.alloc, settings_array, num_settings, on_completed, user_data);
+ if (!pending_settings) {
+ return AWS_OP_ERR;
+ }
+ struct aws_h2_frame *settings_frame =
+ aws_h2_frame_new_settings(connection->base.alloc, settings_array, num_settings, false /*ACK*/);
+ if (!settings_frame) {
+ CONNECTION_LOGF(
+ ERROR, connection, "Failed to create settings frame, error %s", aws_error_name(aws_last_error()));
+ aws_mem_release(connection->base.alloc, pending_settings);
+ return AWS_OP_ERR;
+ }
+
+ bool was_cross_thread_work_scheduled = false;
+ bool connection_open;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ connection_open = connection->synced_data.is_open;
+ if (!connection_open) {
+ s_unlock_synced_data(connection);
+ goto closed;
+ }
+ was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ aws_linked_list_push_back(&connection->synced_data.pending_frame_list, &settings_frame->node);
+ aws_linked_list_push_back(&connection->synced_data.pending_settings_list, &pending_settings->node);
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (!was_cross_thread_work_scheduled) {
+ CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ }
+
+ return AWS_OP_SUCCESS;
+closed:
+ CONNECTION_LOG(ERROR, connection, "Failed to change settings, connection is closed or closing.");
+ aws_h2_frame_destroy(settings_frame);
+ aws_mem_release(connection->base.alloc, pending_settings);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+static int s_connection_send_ping(
+ struct aws_http_connection *connection_base,
+ const struct aws_byte_cursor *optional_opaque_data,
+ aws_http2_on_ping_complete_fn *on_completed,
+ void *user_data) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ if (optional_opaque_data && optional_opaque_data->len != 8) {
+ CONNECTION_LOG(ERROR, connection, "Only 8 bytes opaque data supported for PING in HTTP/2");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ uint64_t time_stamp;
+ if (aws_high_res_clock_get_ticks(&time_stamp)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failed getting the time stamp to start PING, error %s",
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ struct aws_h2_pending_ping *pending_ping =
+ s_new_pending_ping(connection->base.alloc, optional_opaque_data, time_stamp, user_data, on_completed);
+ if (!pending_ping) {
+ return AWS_OP_ERR;
+ }
+ struct aws_h2_frame *ping_frame =
+ aws_h2_frame_new_ping(connection->base.alloc, false /*ACK*/, pending_ping->opaque_data);
+ if (!ping_frame) {
+ CONNECTION_LOGF(ERROR, connection, "Failed to create PING frame, error %s", aws_error_name(aws_last_error()));
+ aws_mem_release(connection->base.alloc, pending_ping);
+ return AWS_OP_ERR;
+ }
+
+ bool was_cross_thread_work_scheduled = false;
+ bool connection_open;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ connection_open = connection->synced_data.is_open;
+ if (!connection_open) {
+ s_unlock_synced_data(connection);
+ goto closed;
+ }
+ was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ aws_linked_list_push_back(&connection->synced_data.pending_frame_list, &ping_frame->node);
+ aws_linked_list_push_back(&connection->synced_data.pending_ping_list, &pending_ping->node);
+
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (!was_cross_thread_work_scheduled) {
+ CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ }
+
+ return AWS_OP_SUCCESS;
+
+closed:
+ CONNECTION_LOG(ERROR, connection, "Failed to send ping, connection is closed or closing.");
+ aws_h2_frame_destroy(ping_frame);
+ aws_mem_release(connection->base.alloc, pending_ping);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+static void s_connection_send_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t http2_error,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ struct aws_h2_pending_goaway *pending_goaway =
+ s_new_pending_goaway(connection->base.alloc, http2_error, allow_more_streams, optional_debug_data);
+
+ bool was_cross_thread_work_scheduled = false;
+ bool connection_open;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+
+ connection_open = connection->synced_data.is_open;
+ if (!connection_open) {
+ s_unlock_synced_data(connection);
+ CONNECTION_LOG(DEBUG, connection, "Goaway not sent, connection is closed or closing.");
+ aws_mem_release(connection->base.alloc, pending_goaway);
+ return;
+ }
+ was_cross_thread_work_scheduled = connection->synced_data.is_cross_thread_work_task_scheduled;
+ connection->synced_data.is_cross_thread_work_task_scheduled = true;
+ aws_linked_list_push_back(&connection->synced_data.pending_goaway_list, &pending_goaway->node);
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (allow_more_streams && (http2_error != AWS_HTTP2_ERR_NO_ERROR)) {
+ CONNECTION_LOGF(
+ DEBUG,
+ connection,
+ "Send goaway with allow more streams on and non-zero error code %s(0x%x)",
+ aws_http2_error_code_to_str(http2_error),
+ http2_error);
+ }
+
+ if (!was_cross_thread_work_scheduled) {
+ CONNECTION_LOG(TRACE, connection, "Scheduling cross-thread work task");
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &connection->cross_thread_work_task);
+ }
+}
+
+static void s_get_settings_general(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT],
+ bool local) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ uint32_t synced_settings[AWS_HTTP2_SETTINGS_END_RANGE];
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ if (local) {
+ memcpy(
+ synced_settings, connection->synced_data.settings_self, sizeof(connection->synced_data.settings_self));
+ } else {
+ memcpy(
+ synced_settings, connection->synced_data.settings_peer, sizeof(connection->synced_data.settings_peer));
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ for (int i = AWS_HTTP2_SETTINGS_BEGIN_RANGE; i < AWS_HTTP2_SETTINGS_END_RANGE; i++) {
+ /* setting IDs begin at 1; store them into the 0-based array of aws_http2_setting */
+ out_settings[i - 1].id = i;
+ out_settings[i - 1].value = synced_settings[i];
+ }
+ return;
+}
+
+static void s_connection_get_local_settings(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
+ s_get_settings_general(connection_base, out_settings, true /*local*/);
+}
+
+static void s_connection_get_remote_settings(
+ const struct aws_http_connection *connection_base,
+ struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT]) {
+ s_get_settings_general(connection_base, out_settings, false /*local*/);
+}
+
+/* Send a GOAWAY with the lowest possible last-stream-id or graceful shutdown warning */
+static void s_send_goaway(
+ struct aws_h2_connection *connection,
+ uint32_t h2_error_code,
+ bool allow_more_streams,
+ const struct aws_byte_cursor *optional_debug_data) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ uint32_t last_stream_id = allow_more_streams ? AWS_H2_STREAM_ID_MAX
+ : aws_min_u32(
+ connection->thread_data.latest_peer_initiated_stream_id,
+ connection->thread_data.goaway_sent_last_stream_id);
+
+ if (last_stream_id > connection->thread_data.goaway_sent_last_stream_id) {
+ CONNECTION_LOG(
+ DEBUG,
+ connection,
+ "GOAWAY frame with lower last stream id has been sent, ignoring sending graceful shutdown warning.");
+ return;
+ }
+
+ struct aws_byte_cursor debug_data;
+ AWS_ZERO_STRUCT(debug_data);
+ if (optional_debug_data) {
+ debug_data = *optional_debug_data;
+ }
+
+ struct aws_h2_frame *goaway =
+ aws_h2_frame_new_goaway(connection->base.alloc, last_stream_id, h2_error_code, debug_data);
+ if (!goaway) {
+ CONNECTION_LOGF(ERROR, connection, "Error creating GOAWAY frame, %s", aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ connection->thread_data.goaway_sent_last_stream_id = last_stream_id;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ connection->synced_data.goaway_sent_last_stream_id = last_stream_id;
+ connection->synced_data.goaway_sent_http2_error_code = h2_error_code;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ aws_h2_connection_enqueue_outgoing_frame(connection, goaway);
+ return;
+
+error:
+ aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error());
+}
+
+static int s_connection_get_sent_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ uint32_t sent_last_stream_id;
+ uint32_t sent_http2_error;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ sent_last_stream_id = connection->synced_data.goaway_sent_last_stream_id;
+ sent_http2_error = connection->synced_data.goaway_sent_http2_error_code;
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX;
+ if (sent_last_stream_id == max_stream_id + 1) {
+ CONNECTION_LOG(ERROR, connection, "No GOAWAY has been sent so far.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ *out_http2_error = sent_http2_error;
+ *out_last_stream_id = sent_last_stream_id;
+ return AWS_OP_SUCCESS;
+}
+
+static int s_connection_get_received_goaway(
+ struct aws_http_connection *connection_base,
+ uint32_t *out_http2_error,
+ uint32_t *out_last_stream_id) {
+
+ struct aws_h2_connection *connection = AWS_CONTAINER_OF(connection_base, struct aws_h2_connection, base);
+ uint32_t received_last_stream_id = 0;
+ uint32_t received_http2_error = 0;
+ bool goaway_not_ready = false;
+ uint32_t max_stream_id = AWS_H2_STREAM_ID_MAX;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(connection);
+ if (connection->synced_data.goaway_received_last_stream_id == max_stream_id + 1) {
+ goaway_not_ready = true;
+ } else {
+ received_last_stream_id = connection->synced_data.goaway_received_last_stream_id;
+ received_http2_error = connection->synced_data.goaway_received_http2_error_code;
+ }
+ s_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (goaway_not_ready) {
+ CONNECTION_LOG(ERROR, connection, "No GOAWAY has been received so far.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ *out_http2_error = received_http2_error;
+ *out_last_stream_id = received_last_stream_id;
+ return AWS_OP_SUCCESS;
+}
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+ (void)slot;
+ struct aws_h2_connection *connection = handler->impl;
+
+ CONNECTION_LOGF(TRACE, connection, "Begin processing message of size %zu.", message->message_data.len);
+
+ if (connection->thread_data.is_reading_stopped) {
+ CONNECTION_LOG(ERROR, connection, "Cannot process message because connection is shutting down.");
+ goto clean_up;
+ }
+
+ /* Any error that bubbles up from the decoder or its callbacks is treated as
+ * a Connection Error (a GOAWAY frame is sent, and the connection is closed) */
+ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data);
+ struct aws_h2err err = aws_h2_decode(connection->thread_data.decoder, &message_cursor);
+ if (aws_h2err_failed(err)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Failure while receiving frames, %s. Sending GOAWAY %s(0x%x) and closing connection",
+ aws_error_name(err.aws_code),
+ aws_http2_error_code_to_str(err.h2_code),
+ err.h2_code);
+ goto shutdown;
+ }
+
+ /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer,
+ * so we can just keep the aws_channel's read-window wide open */
+ if (aws_channel_slot_increment_read_window(slot, message->message_data.len)) {
+ CONNECTION_LOGF(
+ ERROR,
+ connection,
+ "Incrementing read window failed, error %d (%s). Closing connection",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ err = aws_h2err_from_last_error();
+ goto shutdown;
+ }
+
+ goto clean_up;
+
+shutdown:
+ s_send_goaway(connection, err.h2_code, false /*allow_more_streams*/, NULL /*optional_debug_data*/);
+ aws_h2_try_write_outgoing_frames(connection);
+ s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, true /*schedule_shutdown*/, err.aws_code);
+
+clean_up:
+ aws_mem_release(message->allocator, message);
+
+ /* Flush any outgoing frames that might have been queued as a result of decoder callbacks. */
+ aws_h2_try_write_outgoing_frames(connection);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ (void)handler;
+ (void)slot;
+ (void)message;
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+}
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size) {
+
+ (void)handler;
+ (void)slot;
+ (void)size;
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+}
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately) {
+
+ struct aws_h2_connection *connection = handler->impl;
+ CONNECTION_LOGF(
+ TRACE,
+ connection,
+ "Channel shutting down in %s direction with error code %d (%s).",
+ (dir == AWS_CHANNEL_DIR_READ) ? "read" : "write",
+ error_code,
+ aws_error_name(error_code));
+
+ if (dir == AWS_CHANNEL_DIR_READ) {
+ /* This call ensures that no further streams will be created. */
+ s_stop(connection, true /*stop_reading*/, false /*stop_writing*/, false /*schedule_shutdown*/, error_code);
+ /* Send user-requested GOAWAY frames, if they haven't been sent yet. It's OK to access
+ * synced_data.pending_goaway_list without holding the lock because no more user-requested GOAWAYs can be added
+ * after s_stop() has been invoked. */
+ if (!aws_linked_list_empty(&connection->synced_data.pending_goaway_list)) {
+ while (!aws_linked_list_empty(&connection->synced_data.pending_goaway_list)) {
+ struct aws_linked_list_node *node =
+ aws_linked_list_pop_front(&connection->synced_data.pending_goaway_list);
+ struct aws_h2_pending_goaway *goaway = AWS_CONTAINER_OF(node, struct aws_h2_pending_goaway, node);
+ s_send_goaway(connection, goaway->http2_error, goaway->allow_more_streams, &goaway->debug_data);
+ aws_mem_release(connection->base.alloc, goaway);
+ }
+ aws_h2_try_write_outgoing_frames(connection);
+ }
+
+ /* Send GOAWAY if none have been sent so far,
+ * or if we've only sent a "graceful shutdown warning" that didn't name a last-stream-id */
+ if (connection->thread_data.goaway_sent_last_stream_id == AWS_H2_STREAM_ID_MAX) {
+ s_send_goaway(
+ connection,
+ error_code ? AWS_HTTP2_ERR_INTERNAL_ERROR : AWS_HTTP2_ERR_NO_ERROR,
+ false /*allow_more_streams*/,
+ NULL /*optional_debug_data*/);
+ aws_h2_try_write_outgoing_frames(connection);
+ }
+ aws_channel_slot_on_handler_shutdown_complete(
+ slot, AWS_CHANNEL_DIR_READ, error_code, free_scarce_resources_immediately);
+
+ } else /* AWS_CHANNEL_DIR_WRITE */ {
+ connection->thread_data.channel_shutdown_error_code = error_code;
+ connection->thread_data.channel_shutdown_immediately = free_scarce_resources_immediately;
+ connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written = true;
+
+ /* We'd prefer to wait until we know GOAWAY has been written, but don't wait if... */
+ if (free_scarce_resources_immediately /* we must finish ASAP */ ||
+ connection->thread_data.is_writing_stopped /* write will never complete */ ||
+ !connection->thread_data.is_outgoing_frames_task_active /* write is already complete */) {
+
+ s_finish_shutdown(connection);
+ } else {
+ CONNECTION_LOG(TRACE, connection, "HTTP/2 handler will finish shutdown once GOAWAY frame is written");
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_finish_shutdown(struct aws_h2_connection *connection) {
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+ AWS_PRECONDITION(connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written);
+
+ CONNECTION_LOG(TRACE, connection, "Finishing HTTP/2 handler shutdown");
+
+ connection->thread_data.channel_shutdown_waiting_for_goaway_to_be_written = false;
+
+ s_stop(
+ connection,
+ false /*stop_reading*/,
+ true /*stop_writing*/,
+ false /*schedule_shutdown*/,
+ connection->thread_data.channel_shutdown_error_code);
+
+ /* Remove remaining streams from internal data structures and mark them as complete. */
+
+ struct aws_hash_iter stream_iter = aws_hash_iter_begin(&connection->thread_data.active_streams_map);
+ while (!aws_hash_iter_done(&stream_iter)) {
+ struct aws_h2_stream *stream = stream_iter.element.value;
+ aws_hash_iter_delete(&stream_iter, true);
+ aws_hash_iter_next(&stream_iter);
+
+ s_stream_complete(connection, stream, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ }
+
+ /* It's OK to access synced_data without holding the lock because
+ * no more streams or user-requested control frames can be added after s_stop() has been invoked. */
+ while (!aws_linked_list_empty(&connection->synced_data.pending_stream_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_stream_list);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(node, struct aws_h2_stream, node);
+ s_stream_complete(connection, stream, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ }
+
+ while (!aws_linked_list_empty(&connection->synced_data.pending_frame_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_frame_list);
+ struct aws_h2_frame *frame = AWS_CONTAINER_OF(node, struct aws_h2_frame, node);
+ aws_h2_frame_destroy(frame);
+ }
+
+ /* Invoke the pending callbacks that haven't been moved into thread data yet, and clean up */
+ while (!aws_linked_list_empty(&connection->synced_data.pending_settings_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_settings_list);
+ struct aws_h2_pending_settings *settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node);
+ if (settings->on_completed) {
+ settings->on_completed(&connection->base, AWS_ERROR_HTTP_CONNECTION_CLOSED, settings->user_data);
+ }
+ aws_mem_release(connection->base.alloc, settings);
+ }
+ while (!aws_linked_list_empty(&connection->synced_data.pending_ping_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_ping_list);
+ struct aws_h2_pending_ping *ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node);
+ if (ping->on_completed) {
+ ping->on_completed(&connection->base, 0 /*fake rtt*/, AWS_ERROR_HTTP_CONNECTION_CLOSED, ping->user_data);
+ }
+ aws_mem_release(connection->base.alloc, ping);
+ }
+
+ /* Invoke the pending callbacks that were already moved into thread data, and clean up */
+ while (!aws_linked_list_empty(&connection->thread_data.pending_settings_queue)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_settings_queue);
+ struct aws_h2_pending_settings *pending_settings = AWS_CONTAINER_OF(node, struct aws_h2_pending_settings, node);
+ /* fire the user callback with error */
+ if (pending_settings->on_completed) {
+ pending_settings->on_completed(
+ &connection->base, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_settings->user_data);
+ }
+ aws_mem_release(connection->base.alloc, pending_settings);
+ }
+ while (!aws_linked_list_empty(&connection->thread_data.pending_ping_queue)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->thread_data.pending_ping_queue);
+ struct aws_h2_pending_ping *pending_ping = AWS_CONTAINER_OF(node, struct aws_h2_pending_ping, node);
+ /* fire the user callback with error */
+ if (pending_ping->on_completed) {
+ pending_ping->on_completed(
+ &connection->base, 0 /*fake rtt*/, AWS_ERROR_HTTP_CONNECTION_CLOSED, pending_ping->user_data);
+ }
+ aws_mem_release(connection->base.alloc, pending_ping);
+ }
+ aws_channel_slot_on_handler_shutdown_complete(
+ connection->base.channel_slot,
+ AWS_CHANNEL_DIR_WRITE,
+ connection->thread_data.channel_shutdown_error_code,
+ connection->thread_data.channel_shutdown_immediately);
+}
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
+ (void)handler;
+
+ /* HTTP/2 protocol uses WINDOW_UPDATE frames to coordinate data rates with peer,
+ * so we can just keep the aws_channel's read-window wide open */
+ return SIZE_MAX;
+}
+
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler) {
+ (void)handler;
+
+ /* "All frames begin with a fixed 9-octet header followed by a variable-length payload" (RFC-7540 4.1) */
+ return 9;
+}
+
+static void s_reset_statistics(struct aws_channel_handler *handler) {
+ struct aws_h2_connection *connection = handler->impl;
+ aws_crt_statistics_http2_channel_reset(&connection->thread_data.stats);
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) == 0) {
+ /* Check the current state */
+ connection->thread_data.stats.was_inactive = true;
+ }
+ return;
+}
+
+static void s_gather_statistics(struct aws_channel_handler *handler, struct aws_array_list *stats) {
+
+ struct aws_h2_connection *connection = handler->impl;
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(connection->base.channel_slot->channel));
+
+ /* TODO: Need to update the way we calculate statistics, to account for user-controlled pauses.
+ * If user is adding chunks 1 by 1, there can naturally be a gap in the upload.
+ * If the user lets the stream-window go to zero, there can naturally be a gap in the download. */
+ uint64_t now_ns = 0;
+ if (aws_channel_current_clock_time(connection->base.channel_slot->channel, &now_ns)) {
+ return;
+ }
+
+ if (!aws_linked_list_empty(&connection->thread_data.outgoing_streams_list)) {
+ s_add_time_measurement_to_stats(
+ connection->thread_data.outgoing_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_outgoing_stream_ms);
+
+ connection->thread_data.outgoing_timestamp_ns = now_ns;
+ }
+ if (aws_hash_table_get_entry_count(&connection->thread_data.active_streams_map) != 0) {
+ s_add_time_measurement_to_stats(
+ connection->thread_data.incoming_timestamp_ns,
+ now_ns,
+ &connection->thread_data.stats.pending_incoming_stream_ms);
+
+ connection->thread_data.incoming_timestamp_ns = now_ns;
+ } else {
+ connection->thread_data.stats.was_inactive = true;
+ }
+
+ void *stats_base = &connection->thread_data.stats;
+ aws_array_list_push_back(stats, &stats_base);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_decoder.c b/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
new file mode 100644
index 0000000000..5c8b7ab7b2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_decoder.c
@@ -0,0 +1,1592 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/h2_decoder.h>
+
+#include <aws/http/private/hpack.h>
+#include <aws/http/private/strutil.h>
+
+#include <aws/common/string.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* Declared initializers */
+#endif
+
+/***********************************************************************************************************************
+ * Constants
+ **********************************************************************************************************************/
+
+/* The scratch buffer holds data for states with bytes_required > 0. Must be big enough for the largest state */
+static const size_t s_scratch_space_size = 9;
+
+/* Stream ids & dependencies should only write the bottom 31 bits */
+static const uint32_t s_31_bit_mask = UINT32_MAX >> 1;
+
+/* initial size for cookie buffer, buffer will grow if needed */
+static const size_t s_decoder_cookie_buffer_initial_size = 512;
+
+#define DECODER_LOGF(level, decoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_DECODER, "id=%p " text, (decoder)->logging_id, __VA_ARGS__)
+#define DECODER_LOG(level, decoder, text) DECODER_LOGF(level, decoder, "%s", text)
+
+#define DECODER_CALL_VTABLE(decoder, fn) \
+ do { \
+ if ((decoder)->vtable->fn) { \
+ DECODER_LOG(TRACE, decoder, "Invoking callback " #fn); \
+ struct aws_h2err vtable_err = (decoder)->vtable->fn((decoder)->userdata); \
+ if (aws_h2err_failed(vtable_err)) { \
+ DECODER_LOGF( \
+ ERROR, \
+ decoder, \
+ "Error from callback " #fn ", %s->%s", \
+ aws_http2_error_code_to_str(vtable_err.h2_code), \
+ aws_error_name(vtable_err.aws_code)); \
+ return vtable_err; \
+ } \
+ } \
+ } while (false)
+#define DECODER_CALL_VTABLE_ARGS(decoder, fn, ...) \
+ do { \
+ if ((decoder)->vtable->fn) { \
+ DECODER_LOG(TRACE, decoder, "Invoking callback " #fn); \
+ struct aws_h2err vtable_err = (decoder)->vtable->fn(__VA_ARGS__, (decoder)->userdata); \
+ if (aws_h2err_failed(vtable_err)) { \
+ DECODER_LOGF( \
+ ERROR, \
+ decoder, \
+ "Error from callback " #fn ", %s->%s", \
+ aws_http2_error_code_to_str(vtable_err.h2_code), \
+ aws_error_name(vtable_err.aws_code)); \
+ return vtable_err; \
+ } \
+ } \
+ } while (false)
+#define DECODER_CALL_VTABLE_STREAM(decoder, fn) \
+ DECODER_CALL_VTABLE_ARGS(decoder, fn, (decoder)->frame_in_progress.stream_id)
+#define DECODER_CALL_VTABLE_STREAM_ARGS(decoder, fn, ...) \
+ DECODER_CALL_VTABLE_ARGS(decoder, fn, (decoder)->frame_in_progress.stream_id, __VA_ARGS__)
+
+/* for storing things in array without worrying about the specific values of the other AWS_HTTP_HEADER_XYZ enums */
+enum pseudoheader_name {
+ PSEUDOHEADER_UNKNOWN = -1, /* Unrecognized value */
+
+ /* Request pseudo-headers */
+ PSEUDOHEADER_METHOD,
+ PSEUDOHEADER_SCHEME,
+ PSEUDOHEADER_AUTHORITY,
+ PSEUDOHEADER_PATH,
+ /* Response pseudo-headers */
+ PSEUDOHEADER_STATUS,
+
+ PSEUDOHEADER_COUNT, /* Number of valid enums */
+};
+
+static const struct aws_byte_cursor *s_pseudoheader_name_to_cursor[PSEUDOHEADER_COUNT] = {
+ [PSEUDOHEADER_METHOD] = &aws_http_header_method,
+ [PSEUDOHEADER_SCHEME] = &aws_http_header_scheme,
+ [PSEUDOHEADER_AUTHORITY] = &aws_http_header_authority,
+ [PSEUDOHEADER_PATH] = &aws_http_header_path,
+ [PSEUDOHEADER_STATUS] = &aws_http_header_status,
+};
+
+static const enum aws_http_header_name s_pseudoheader_to_header_name[PSEUDOHEADER_COUNT] = {
+ [PSEUDOHEADER_METHOD] = AWS_HTTP_HEADER_METHOD,
+ [PSEUDOHEADER_SCHEME] = AWS_HTTP_HEADER_SCHEME,
+ [PSEUDOHEADER_AUTHORITY] = AWS_HTTP_HEADER_AUTHORITY,
+ [PSEUDOHEADER_PATH] = AWS_HTTP_HEADER_PATH,
+ [PSEUDOHEADER_STATUS] = AWS_HTTP_HEADER_STATUS,
+};
+
+static enum pseudoheader_name s_header_to_pseudoheader_name(enum aws_http_header_name name) {
+ /* The compiled switch statement is actually faster than array lookup with bounds-checking.
+ * (the lookup arrays above don't need to do bounds-checking) */
+ switch (name) {
+ case AWS_HTTP_HEADER_METHOD:
+ return PSEUDOHEADER_METHOD;
+ case AWS_HTTP_HEADER_SCHEME:
+ return PSEUDOHEADER_SCHEME;
+ case AWS_HTTP_HEADER_AUTHORITY:
+ return PSEUDOHEADER_AUTHORITY;
+ case AWS_HTTP_HEADER_PATH:
+ return PSEUDOHEADER_PATH;
+ case AWS_HTTP_HEADER_STATUS:
+ return PSEUDOHEADER_STATUS;
+ default:
+ return PSEUDOHEADER_UNKNOWN;
+ }
+}
+
+/***********************************************************************************************************************
+ * State Machine
+ **********************************************************************************************************************/
+
+typedef struct aws_h2err(state_fn)(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input);
+struct h2_decoder_state {
+ state_fn *fn;
+ uint32_t bytes_required;
+ const char *name;
+};
+
+#define DEFINE_STATE(_name, _bytes_required) \
+ static state_fn s_state_fn_##_name; \
+ enum { s_state_##_name##_requires_##_bytes_required##_bytes = _bytes_required }; \
+ static const struct h2_decoder_state s_state_##_name = { \
+ .fn = s_state_fn_##_name, \
+ .bytes_required = s_state_##_name##_requires_##_bytes_required##_bytes, \
+ .name = #_name, \
+ }
+
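+/* For reference, DEFINE_STATE(prefix, 9); below expands to roughly:
+ *
+ *   static state_fn s_state_fn_prefix;
+ *   enum { s_state_prefix_requires_9_bytes = 9 };
+ *   static const struct h2_decoder_state s_state_prefix = {
+ *       .fn = s_state_fn_prefix,
+ *       .bytes_required = s_state_prefix_requires_9_bytes,
+ *       .name = "prefix",
+ *   };
+ */
+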
+/* Common states */
+DEFINE_STATE(prefix, 9);
+DEFINE_STATE(padding_len, 1);
+DEFINE_STATE(padding, 0);
+
+DEFINE_STATE(priority_block, 5);
+
+DEFINE_STATE(header_block_loop, 0);
+DEFINE_STATE(header_block_entry, 1); /* requires 1 byte, but may consume more */
+
+/* Frame-specific states */
+DEFINE_STATE(frame_data, 0);
+DEFINE_STATE(frame_headers, 0);
+DEFINE_STATE(frame_priority, 0);
+DEFINE_STATE(frame_rst_stream, 4);
+DEFINE_STATE(frame_settings_begin, 0);
+DEFINE_STATE(frame_settings_loop, 0);
+DEFINE_STATE(frame_settings_i, 6);
+DEFINE_STATE(frame_push_promise, 4);
+DEFINE_STATE(frame_ping, 8);
+DEFINE_STATE(frame_goaway, 8);
+DEFINE_STATE(frame_goaway_debug_data, 0);
+DEFINE_STATE(frame_window_update, 4);
+DEFINE_STATE(frame_continuation, 0);
+DEFINE_STATE(frame_unknown, 0);
+
+/* States that have nothing to do with frames */
+DEFINE_STATE(connection_preface_string, 1); /* requires 1 byte but may consume more */
+
+/* Helper for states that need to transition to frame-type states */
+static const struct h2_decoder_state *s_state_frames[AWS_H2_FRAME_TYPE_COUNT] = {
+ [AWS_H2_FRAME_T_DATA] = &s_state_frame_data,
+ [AWS_H2_FRAME_T_HEADERS] = &s_state_frame_headers,
+ [AWS_H2_FRAME_T_PRIORITY] = &s_state_frame_priority,
+ [AWS_H2_FRAME_T_RST_STREAM] = &s_state_frame_rst_stream,
+ [AWS_H2_FRAME_T_SETTINGS] = &s_state_frame_settings_begin,
+ [AWS_H2_FRAME_T_PUSH_PROMISE] = &s_state_frame_push_promise,
+ [AWS_H2_FRAME_T_PING] = &s_state_frame_ping,
+ [AWS_H2_FRAME_T_GOAWAY] = &s_state_frame_goaway,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = &s_state_frame_window_update,
+ [AWS_H2_FRAME_T_CONTINUATION] = &s_state_frame_continuation,
+ [AWS_H2_FRAME_T_UNKNOWN] = &s_state_frame_unknown,
+};
+
+/***********************************************************************************************************************
+ * Struct
+ **********************************************************************************************************************/
+
+struct aws_h2_decoder {
+ /* Implementation data. */
+ struct aws_allocator *alloc;
+ const void *logging_id;
+ struct aws_hpack_decoder hpack;
+ bool is_server;
+ struct aws_byte_buf scratch;
+ const struct h2_decoder_state *state;
+ bool state_changed;
+
+ /* HTTP/2 connection preface must be first thing received (RFC-7540 3.5):
+ * Server must receive (client must send): magic string, then SETTINGS frame.
+ * Client must receive (server must send): SETTINGS frame. */
+ bool connection_preface_complete;
+
+ /* Cursor over the canonical client connection preface string */
+ struct aws_byte_cursor connection_preface_cursor;
+
+ /* Frame-in-progress */
+ struct aws_frame_in_progress {
+ enum aws_h2_frame_type type;
+ uint32_t stream_id;
+ uint32_t payload_len;
+ uint8_t padding_len;
+
+ struct {
+ bool ack;
+ bool end_stream;
+ bool end_headers;
+ bool priority;
+ } flags;
+ } frame_in_progress;
+
+ /* GOAWAY buffer */
+ struct aws_goaway_in_progress {
+ uint32_t last_stream;
+ uint32_t error_code;
+ /* Buffer of the received debug data in the latest goaway frame */
+ struct aws_byte_buf debug_data;
+ } goaway_in_progress;
+
+ /* A header-block starts with a HEADERS or PUSH_PROMISE frame, followed by 0 or more CONTINUATION frames.
+ * It's an error for any other frame-type or stream ID to arrive while a header-block is in progress.
+ * The header-block ends when a frame has the END_HEADERS flag set. (RFC-7540 4.3) */
+ struct aws_header_block_in_progress {
+ /* If 0, then no header-block in progress */
+ uint32_t stream_id;
+
+ /* Whether these are informational (1xx), normal, or trailing headers */
+ enum aws_http_header_block block_type;
+
+ /* Buffer up pseudo-headers and deliver them once they're all validated */
+ struct aws_string *pseudoheader_values[PSEUDOHEADER_COUNT];
+ enum aws_http_header_compression pseudoheader_compression[PSEUDOHEADER_COUNT];
+
+ /* All pseudo-header fields MUST appear in the header block before regular header fields. */
+ bool pseudoheaders_done;
+
+ /* T: PUSH_PROMISE header-block
+ * F: HEADERS header-block */
+ bool is_push_promise;
+
+ /* If frame that starts header-block has END_STREAM flag,
+ * then frame that ends header-block also ends the stream. */
+ bool ends_stream;
+
+ /* True if something occurs that makes the header-block malformed (ex: invalid header name).
+ * A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2).
+ * We continue decoding and report that it's malformed in on_headers_end(). */
+ bool malformed;
+
+ bool body_headers_forbidden;
+
+ /* Buffer up cookie header fields to concatenate separate ones */
+ struct aws_byte_buf cookies;
+ /* If separate cookie fields have different compression types, the concatenated cookie uses the strictest type.
+ */
+ enum aws_http_header_compression cookie_header_compression_type;
+ } header_block_in_progress;
+
+ /* Settings for decoder, which is based on the settings sent to the peer and ACKed by peer */
+ struct {
+ /* enable/disable server push */
+ uint32_t enable_push;
+ /* the size of the largest frame payload */
+ uint32_t max_frame_size;
+ } settings;
+
+ struct aws_array_list settings_buffer_list;
+
+ /* User callbacks and settings. */
+ const struct aws_h2_decoder_vtable *vtable;
+ void *userdata;
+
+ /* If this is set to true, decode may no longer be called */
+ bool has_errored;
+};
+
+/***********************************************************************************************************************/
+
+struct aws_h2_decoder *aws_h2_decoder_new(struct aws_h2_decoder_params *params) {
+ AWS_PRECONDITION(params);
+ AWS_PRECONDITION(params->alloc);
+ AWS_PRECONDITION(params->vtable);
+
+ struct aws_h2_decoder *decoder = NULL;
+ void *scratch_buf = NULL;
+
+ void *allocation = aws_mem_acquire_many(
+ params->alloc, 2, &decoder, sizeof(struct aws_h2_decoder), &scratch_buf, s_scratch_space_size);
+ if (!allocation) {
+ goto error;
+ }
+
+ AWS_ZERO_STRUCT(*decoder);
+ decoder->alloc = params->alloc;
+ decoder->vtable = params->vtable;
+ decoder->userdata = params->userdata;
+ decoder->logging_id = params->logging_id;
+ decoder->is_server = params->is_server;
+ decoder->connection_preface_complete = params->skip_connection_preface;
+
+ decoder->scratch = aws_byte_buf_from_empty_array(scratch_buf, s_scratch_space_size);
+
+ aws_hpack_decoder_init(&decoder->hpack, params->alloc, decoder);
+
+ if (decoder->is_server && !params->skip_connection_preface) {
+ decoder->state = &s_state_connection_preface_string;
+ decoder->connection_preface_cursor = aws_h2_connection_preface_client_string;
+ } else {
+ decoder->state = &s_state_prefix;
+ }
+
+ decoder->settings.enable_push = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_ENABLE_PUSH];
+ decoder->settings.max_frame_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE];
+
+ if (aws_array_list_init_dynamic(
+ &decoder->settings_buffer_list, decoder->alloc, 0, sizeof(struct aws_http2_setting))) {
+ goto error;
+ }
+
+ if (aws_byte_buf_init(
+ &decoder->header_block_in_progress.cookies, decoder->alloc, s_decoder_cookie_buffer_initial_size)) {
+ goto error;
+ }
+
+ return decoder;
+
+error:
+ if (decoder) {
+ aws_hpack_decoder_clean_up(&decoder->hpack);
+ aws_array_list_clean_up(&decoder->settings_buffer_list);
+ aws_byte_buf_clean_up(&decoder->header_block_in_progress.cookies);
+ }
+ aws_mem_release(params->alloc, allocation);
+ return NULL;
+}
+
+static void s_reset_header_block_in_progress(struct aws_h2_decoder *decoder) {
+ for (size_t i = 0; i < PSEUDOHEADER_COUNT; ++i) {
+ aws_string_destroy(decoder->header_block_in_progress.pseudoheader_values[i]);
+ }
+ struct aws_byte_buf cookie_backup = decoder->header_block_in_progress.cookies;
+ AWS_ZERO_STRUCT(decoder->header_block_in_progress);
+ decoder->header_block_in_progress.cookies = cookie_backup;
+ aws_byte_buf_reset(&decoder->header_block_in_progress.cookies, false);
+}
+
+void aws_h2_decoder_destroy(struct aws_h2_decoder *decoder) {
+ if (!decoder) {
+ return;
+ }
+ aws_array_list_clean_up(&decoder->settings_buffer_list);
+ aws_hpack_decoder_clean_up(&decoder->hpack);
+ s_reset_header_block_in_progress(decoder);
+ aws_byte_buf_clean_up(&decoder->header_block_in_progress.cookies);
+ aws_byte_buf_clean_up(&decoder->goaway_in_progress.debug_data);
+ aws_mem_release(decoder->alloc, decoder);
+}
+
+struct aws_h2err aws_h2_decode(struct aws_h2_decoder *decoder, struct aws_byte_cursor *data) {
+ AWS_PRECONDITION(decoder);
+ AWS_PRECONDITION(data);
+
+ AWS_FATAL_ASSERT(!decoder->has_errored);
+
+ struct aws_h2err err = AWS_H2ERR_SUCCESS;
+
+ /* Run decoder state machine until we're no longer changing states.
+ * We don't simply loop `while(data->len)` because some states consume no data,
+ * and these states should run even when there is no data left. */
+ do {
+ decoder->state_changed = false;
+
+ const uint32_t bytes_required = decoder->state->bytes_required;
+ AWS_ASSERT(bytes_required <= decoder->scratch.capacity);
+ const char *current_state_name = decoder->state->name;
+ const size_t prev_data_len = data->len;
+ (void)prev_data_len;
+
+ if (!decoder->scratch.len && data->len >= bytes_required) {
+ /* Easy case, there is no scratch and we have enough data, so just send it to the state */
+
+ DECODER_LOGF(TRACE, decoder, "Running state '%s' with %zu bytes available", current_state_name, data->len);
+
+ err = decoder->state->fn(decoder, data);
+ if (aws_h2err_failed(err)) {
+ goto handle_error;
+ }
+
+ AWS_ASSERT(prev_data_len - data->len >= bytes_required && "Decoder state requested more data than it used");
+ } else {
+ /* Otherwise, state requires a minimum amount of data and we have to use the scratch */
+ size_t bytes_to_read = bytes_required - decoder->scratch.len;
+ bool will_finish_state = true;
+
+ if (bytes_to_read > data->len) {
+ /* Not enough in this cursor, need to read as much as possible and then come back */
+ bytes_to_read = data->len;
+ will_finish_state = false;
+ }
+
+ if (AWS_LIKELY(bytes_to_read)) {
+ /* Read the appropriate number of bytes into scratch */
+ struct aws_byte_cursor to_read = aws_byte_cursor_advance(data, bytes_to_read);
+ bool succ = aws_byte_buf_write_from_whole_cursor(&decoder->scratch, to_read);
+ AWS_ASSERT(succ);
+ (void)succ;
+ }
+
+ /* If we have the correct number of bytes, call the state */
+ if (will_finish_state) {
+
+ DECODER_LOGF(TRACE, decoder, "Running state '%s' (using scratch)", current_state_name);
+
+ struct aws_byte_cursor state_data = aws_byte_cursor_from_buf(&decoder->scratch);
+ err = decoder->state->fn(decoder, &state_data);
+ if (aws_h2err_failed(err)) {
+ goto handle_error;
+ }
+
+ AWS_ASSERT(state_data.len == 0 && "Decoder state requested more data than it used");
+ } else {
+ DECODER_LOGF(
+ TRACE,
+ decoder,
+ "State '%s' requires %" PRIu32 " bytes, but only %zu available, trying again later",
+ current_state_name,
+ bytes_required,
+ decoder->scratch.len);
+ }
+ }
+ } while (decoder->state_changed);
+
+ return AWS_H2ERR_SUCCESS;
+
+handle_error:
+ decoder->has_errored = true;
+ return err;
+}
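+
+/* Typical usage (mirroring s_handler_process_read_message() in h2_connection.c): wrap the received bytes in a
+ * cursor and pass it to aws_h2_decode(); on failure, send a GOAWAY with err.h2_code and shut the connection down:
+ *
+ *   struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&message->message_data);
+ *   struct aws_h2err err = aws_h2_decode(decoder, &cursor);
+ *   if (aws_h2err_failed(err)) {
+ *       // send GOAWAY(err.h2_code), then close the connection
+ *   }
+ */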
+
+/***********************************************************************************************************************
+ * State functions
+ **********************************************************************************************************************/
+
+static struct aws_h2err s_decoder_switch_state(struct aws_h2_decoder *decoder, const struct h2_decoder_state *state) {
+ /* Ensure payload is big enough to enter next state.
+ * If this fails, then the payload length we received is too small for this frame type.
+ * (ex: a RST_STREAM frame with < 4 bytes) */
+ if (decoder->frame_in_progress.payload_len < state->bytes_required) {
+ DECODER_LOGF(
+ ERROR, decoder, "%s payload is too small", aws_h2_frame_type_to_str(decoder->frame_in_progress.type));
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+ }
+
+ DECODER_LOGF(TRACE, decoder, "Moving from state '%s' to '%s'", decoder->state->name, state->name);
+ decoder->scratch.len = 0;
+ decoder->state = state;
+ decoder->state_changed = true;
+ return AWS_H2ERR_SUCCESS;
+}
+
+static struct aws_h2err s_decoder_switch_to_frame_state(struct aws_h2_decoder *decoder) {
+ AWS_ASSERT(decoder->frame_in_progress.type < AWS_H2_FRAME_TYPE_COUNT);
+ return s_decoder_switch_state(decoder, s_state_frames[decoder->frame_in_progress.type]);
+}
+
+static struct aws_h2err s_decoder_reset_state(struct aws_h2_decoder *decoder) {
+ /* Ensure we've consumed all payload (and padding) when state machine finishes this frame.
+ * If this fails, the payload length we received is too large for this frame type.
+ * (ex: a RST_STREAM frame with > 4 bytes) */
+ if (decoder->frame_in_progress.payload_len > 0 || decoder->frame_in_progress.padding_len > 0) {
+ DECODER_LOGF(
+ ERROR, decoder, "%s frame payload is too large", aws_h2_frame_type_to_str(decoder->frame_in_progress.type));
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+ }
+
+ DECODER_LOGF(TRACE, decoder, "%s frame complete", aws_h2_frame_type_to_str(decoder->frame_in_progress.type));
+
+ decoder->scratch.len = 0;
+ decoder->state = &s_state_prefix;
+ decoder->state_changed = true;
+
+ AWS_ZERO_STRUCT(decoder->frame_in_progress);
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* Returns as much of the current frame's payload as possible, and updates payload_len */
+static struct aws_byte_cursor s_decoder_get_payload(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ struct aws_byte_cursor result;
+
+ const uint32_t remaining_length = decoder->frame_in_progress.payload_len;
+ if (input->len < remaining_length) {
+ AWS_ASSERT(input->len <= UINT32_MAX);
+ result = aws_byte_cursor_advance(input, input->len);
+ } else {
+ result = aws_byte_cursor_advance(input, remaining_length);
+ }
+
+ decoder->frame_in_progress.payload_len -= (uint32_t)result.len;
+
+ return result;
+}
+
+/* clang-format off */
+
+/* Mask of flags supported by each frame type.
+ * Frames not listed have mask of 0, which means all flags will be ignored. */
+static const uint8_t s_acceptable_flags_for_frame[AWS_H2_FRAME_TYPE_COUNT] = {
+ [AWS_H2_FRAME_T_DATA] = AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_PADDED,
+ [AWS_H2_FRAME_T_HEADERS] = AWS_H2_FRAME_F_END_STREAM | AWS_H2_FRAME_F_END_HEADERS |
+ AWS_H2_FRAME_F_PADDED | AWS_H2_FRAME_F_PRIORITY,
+ [AWS_H2_FRAME_T_PRIORITY] = 0,
+ [AWS_H2_FRAME_T_RST_STREAM] = 0,
+ [AWS_H2_FRAME_T_SETTINGS] = AWS_H2_FRAME_F_ACK,
+ [AWS_H2_FRAME_T_PUSH_PROMISE] = AWS_H2_FRAME_F_END_HEADERS | AWS_H2_FRAME_F_PADDED,
+ [AWS_H2_FRAME_T_PING] = AWS_H2_FRAME_F_ACK,
+ [AWS_H2_FRAME_T_GOAWAY] = 0,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = 0,
+ [AWS_H2_FRAME_T_CONTINUATION] = AWS_H2_FRAME_F_END_HEADERS,
+ [AWS_H2_FRAME_T_UNKNOWN] = 0,
+};
+
+enum stream_id_rules {
+ STREAM_ID_REQUIRED,
+ STREAM_ID_FORBIDDEN,
+ STREAM_ID_EITHER_WAY,
+};
+
+/* Frame-types generally either require a stream-id, or require that it be zero. */
+static const enum stream_id_rules s_stream_id_rules_for_frame[AWS_H2_FRAME_TYPE_COUNT] = {
+ [AWS_H2_FRAME_T_DATA] = STREAM_ID_REQUIRED,
+ [AWS_H2_FRAME_T_HEADERS] = STREAM_ID_REQUIRED,
+ [AWS_H2_FRAME_T_PRIORITY] = STREAM_ID_REQUIRED,
+ [AWS_H2_FRAME_T_RST_STREAM] = STREAM_ID_REQUIRED,
+ [AWS_H2_FRAME_T_SETTINGS] = STREAM_ID_FORBIDDEN,
+ [AWS_H2_FRAME_T_PUSH_PROMISE] = STREAM_ID_REQUIRED,
+ [AWS_H2_FRAME_T_PING] = STREAM_ID_FORBIDDEN,
+ [AWS_H2_FRAME_T_GOAWAY] = STREAM_ID_FORBIDDEN,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = STREAM_ID_EITHER_WAY, /* WINDOW_UPDATE is special and can do either */
+ [AWS_H2_FRAME_T_CONTINUATION] = STREAM_ID_REQUIRED,
+ [AWS_H2_FRAME_T_UNKNOWN] = STREAM_ID_EITHER_WAY, /* Everything in an UNKNOWN frame type is ignored */
+};
+/* clang-format on */
+
+/* All frames begin with a fixed 9-octet header followed by a variable-length payload. (RFC-7540 4.1)
+ * This function processes everything preceding Frame Payload in the following diagram:
+ * +-----------------------------------------------+
+ * | Length (24) |
+ * +---------------+---------------+---------------+
+ * | Type (8) | Flags (8) |
+ * +-+-------------+---------------+-------------------------------+
+ * |R| Stream Identifier (31) |
+ * +=+=============================================================+
+ * | Frame Payload (0...) ...
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_prefix(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_prefix_requires_9_bytes);
+
+ struct aws_frame_in_progress *frame = &decoder->frame_in_progress;
+ uint8_t raw_type = 0;
+ uint8_t raw_flags = 0;
+
+ /* Read the raw values from the first 9 bytes */
+ bool all_read = true;
+ all_read &= aws_byte_cursor_read_be24(input, &frame->payload_len);
+ all_read &= aws_byte_cursor_read_u8(input, &raw_type);
+ all_read &= aws_byte_cursor_read_u8(input, &raw_flags);
+ all_read &= aws_byte_cursor_read_be32(input, &frame->stream_id);
+ AWS_ASSERT(all_read);
+ (void)all_read;
+
+ /* Validate frame type */
+ frame->type = raw_type < AWS_H2_FRAME_T_UNKNOWN ? raw_type : AWS_H2_FRAME_T_UNKNOWN;
+
+ /* Validate the frame's flags
+ * Flags that have no defined semantics for a particular frame type MUST be ignored (RFC-7540 4.1) */
+ const uint8_t flags = raw_flags & s_acceptable_flags_for_frame[decoder->frame_in_progress.type];
+
+ bool is_padded = flags & AWS_H2_FRAME_F_PADDED;
+ decoder->frame_in_progress.flags.ack = flags & AWS_H2_FRAME_F_ACK;
+ decoder->frame_in_progress.flags.end_stream = flags & AWS_H2_FRAME_F_END_STREAM;
+ decoder->frame_in_progress.flags.end_headers = flags & AWS_H2_FRAME_F_END_HEADERS;
+ decoder->frame_in_progress.flags.priority =
+ flags & AWS_H2_FRAME_F_PRIORITY || decoder->frame_in_progress.type == AWS_H2_FRAME_T_PRIORITY;
+
+ /* Connection preface requires that SETTINGS be sent first (RFC-7540 3.5).
+ * This should be the first error we check for, so that a connection sending
+ * total garbage data is likely to trigger this PROTOCOL_ERROR */
+ if (!decoder->connection_preface_complete) {
+ if (frame->type == AWS_H2_FRAME_T_SETTINGS && !frame->flags.ack) {
+ DECODER_LOG(TRACE, decoder, "Connection preface satisfied.");
+ decoder->connection_preface_complete = true;
+ } else {
+ DECODER_LOG(ERROR, decoder, "First frame must be SETTINGS");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ }
+
+ /* Validate the frame's stream ID. */
+
+ /* Reserved bit (1st bit) MUST be ignored when receiving (RFC-7540 4.1) */
+ frame->stream_id &= s_31_bit_mask;
+
+ /* Some frame types require a stream ID, some frame types require that stream ID be zero. */
+ const enum stream_id_rules stream_id_rules = s_stream_id_rules_for_frame[frame->type];
+ if (frame->stream_id) {
+ if (stream_id_rules == STREAM_ID_FORBIDDEN) {
+ DECODER_LOGF(ERROR, decoder, "Stream ID for %s frame must be 0.", aws_h2_frame_type_to_str(frame->type));
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ } else {
+ if (stream_id_rules == STREAM_ID_REQUIRED) {
+ DECODER_LOGF(ERROR, decoder, "Stream ID for %s frame cannot be 0.", aws_h2_frame_type_to_str(frame->type));
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ }
+
+ /* A header-block starts with a HEADERS or PUSH_PROMISE frame, followed by 0 or more CONTINUATION frames.
+ * It's an error for any other frame-type or stream ID to arrive while a header-block is in progress.
+ * (RFC-7540 4.3) */
+ if (frame->type == AWS_H2_FRAME_T_CONTINUATION) {
+ if (decoder->header_block_in_progress.stream_id != frame->stream_id) {
+ DECODER_LOG(ERROR, decoder, "Unexpected CONTINUATION frame.");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ } else {
+ if (decoder->header_block_in_progress.stream_id) {
+ DECODER_LOG(ERROR, decoder, "Expected CONTINUATION frame.");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ }
+
+ /* Validate payload length. */
+ uint32_t max_frame_size = decoder->settings.max_frame_size;
+ if (frame->payload_len > max_frame_size) {
+ DECODER_LOGF(
+ ERROR,
+ decoder,
+ "Decoder's max frame size is %" PRIu32 ", but frame of size %" PRIu32 " was received.",
+ max_frame_size,
+ frame->payload_len);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+ }
+
+ DECODER_LOGF(
+ TRACE,
+ decoder,
+ "Done decoding frame prefix (type=%s stream-id=%" PRIu32 " payload-len=%" PRIu32 "), moving on to payload",
+ aws_h2_frame_type_to_str(frame->type),
+ frame->stream_id,
+ frame->payload_len);
+
+ if (is_padded) {
+ /* Read padding length if necessary */
+ return s_decoder_switch_state(decoder, &s_state_padding_len);
+ }
+ if (decoder->frame_in_progress.type == AWS_H2_FRAME_T_DATA) {
+ /* We invoke the on_data_begin here to report the whole payload size */
+ DECODER_CALL_VTABLE_STREAM_ARGS(
+ decoder, on_data_begin, frame->payload_len, 0 /*padding_len*/, frame->flags.end_stream);
+ }
+ if (decoder->frame_in_progress.flags.priority) {
+ /* Read the stream dependency and weight if PRIORITY is set */
+ return s_decoder_switch_state(decoder, &s_state_priority_block);
+ }
+
+ /* Set the state to the appropriate frame's state */
+ return s_decoder_switch_to_frame_state(decoder);
+}
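+
+/* Worked example (illustrative bytes, not taken from any capture): the 9-byte prefix
+ * 00 00 08 00 01 00 00 00 01 decodes as payload_len=8 (0x000008), type=DATA (0x00),
+ * flags=END_STREAM (0x01), and stream-id=1 with the reserved bit clear. A SETTINGS ACK
+ * would instead carry type=0x04, flags=0x01, stream-id=0, and payload_len=0. */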
+
+/* Frames that support padding, and have the PADDED flag set, begin with a 1-byte Pad Length.
+ * (Actual padding comes later at the very end of the frame)
+ * +---------------+
+ * |Pad Length? (8)|
+ * +---------------+
+ */
+static struct aws_h2err s_state_fn_padding_len(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_padding_len_requires_1_bytes);
+
+ struct aws_frame_in_progress *frame = &decoder->frame_in_progress;
+ /* Read the padding length */
+ bool succ = aws_byte_cursor_read_u8(input, &frame->padding_len);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ /* Adjust payload size so it doesn't include padding (or the 1-byte padding length) */
+ uint32_t reduce_payload = s_state_padding_len_requires_1_bytes + frame->padding_len;
+ if (reduce_payload > decoder->frame_in_progress.payload_len) {
+ DECODER_LOG(ERROR, decoder, "Padding length exceeds payload length");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+
+ if (frame->type == AWS_H2_FRAME_T_DATA) {
+ /* We invoke the on_data_begin here to report the whole payload size and the padding size */
+ DECODER_CALL_VTABLE_STREAM_ARGS(
+ decoder, on_data_begin, frame->payload_len, frame->padding_len + 1, frame->flags.end_stream);
+ }
+
+ frame->payload_len -= reduce_payload;
+
+ DECODER_LOGF(TRACE, decoder, "Padding length of frame: %" PRIu32, frame->padding_len);
+ if (frame->flags.priority) {
+ /* Read the stream dependency and weight if PRIORITY is set */
+ return s_decoder_switch_state(decoder, &s_state_priority_block);
+ }
+
+ /* Set the state to the appropriate frame's state */
+ return s_decoder_switch_to_frame_state(decoder);
+}
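+
+/* Worked example (illustrative sizes): for a padded DATA frame whose prefix declared
+ * payload_len=10 and whose Pad Length byte is 3, this state leaves payload_len=6
+ * (10 - 1 - 3), so 6 bytes of actual data are delivered and the trailing 3 padding
+ * bytes are later discarded by s_state_fn_padding. */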
+
+static struct aws_h2err s_state_fn_padding(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ const uint8_t remaining_len = decoder->frame_in_progress.padding_len;
+ const uint8_t consuming_len = input->len < remaining_len ? (uint8_t)input->len : remaining_len;
+ aws_byte_cursor_advance(input, consuming_len);
+ decoder->frame_in_progress.padding_len -= consuming_len;
+
+ if (remaining_len == consuming_len) {
+ /* Done with the frame! */
+ return s_decoder_reset_state(decoder);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* Shared code for:
+ * PRIORITY frame (RFC-7540 6.3)
+ * Start of HEADERS frame IF the priority flag is set (RFC-7540 6.2)
+ * +-+-------------+-----------------------------------------------+
+ * |E| Stream Dependency (31) |
+ * +-+-------------+-----------------------------------------------+
+ * | Weight (8) |
+ * +-+-------------+-----------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_priority_block(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_priority_block_requires_5_bytes);
+
+ /* #NOTE: Priority data is discarded on purpose; acting on it would leave us vulnerable to
+ * DDoS attacks such as CVE-2019-9513.
+ * https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2019-9513
+ */
+ aws_byte_cursor_advance(input, s_state_priority_block_requires_5_bytes);
+
+ decoder->frame_in_progress.payload_len -= s_state_priority_block_requires_5_bytes;
+
+ return s_decoder_switch_to_frame_state(decoder);
+}
+
+static struct aws_h2err s_state_fn_frame_data(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ const struct aws_byte_cursor body_data = s_decoder_get_payload(decoder, input);
+
+ if (body_data.len) {
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_data_i, body_data);
+ }
+
+ if (decoder->frame_in_progress.payload_len == 0) {
+ DECODER_CALL_VTABLE_STREAM(decoder, on_data_end);
+ /* If frame had END_STREAM flag, alert user now */
+ if (decoder->frame_in_progress.flags.end_stream) {
+ DECODER_CALL_VTABLE_STREAM(decoder, on_end_stream);
+ }
+
+ /* Process padding if necessary, otherwise we're done! */
+ return s_decoder_switch_state(decoder, &s_state_padding);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+static struct aws_h2err s_state_fn_frame_headers(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ (void)input;
+
+ /* Start header-block and alert the user */
+ decoder->header_block_in_progress.stream_id = decoder->frame_in_progress.stream_id;
+ decoder->header_block_in_progress.is_push_promise = false;
+ decoder->header_block_in_progress.ends_stream = decoder->frame_in_progress.flags.end_stream;
+
+ DECODER_CALL_VTABLE_STREAM(decoder, on_headers_begin);
+
+ /* Read the header-block fragment */
+ return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+static struct aws_h2err s_state_fn_frame_priority(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ (void)input;
+
+ /* We already processed this data in the shared priority_block state, so we're done! */
+ return s_decoder_reset_state(decoder);
+}
+
+/* RST_STREAM is just a 4-byte error code.
+ * +---------------------------------------------------------------+
+ * | Error Code (32) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_rst_stream(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_frame_rst_stream_requires_4_bytes);
+
+ uint32_t error_code = 0;
+ bool succ = aws_byte_cursor_read_be32(input, &error_code);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ decoder->frame_in_progress.payload_len -= s_state_frame_rst_stream_requires_4_bytes;
+
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_rst_stream, error_code);
+
+ return s_decoder_reset_state(decoder);
+}
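+
+/* Worked example: an RST_STREAM payload carrying error code CANCEL (0x8) is encoded as
+ * the 4 bytes 00 00 00 08; the on_rst_stream callback above receives it as the integer 8. */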
+
+/* A SETTINGS frame may contain any number of 6-byte entries.
+ * This state consumes no data, but sends us into the appropriate next state */
+static struct aws_h2err s_state_fn_frame_settings_begin(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ (void)input;
+
+ /* If ack is set, report and we're done */
+ if (decoder->frame_in_progress.flags.ack) {
+ /* Receipt of a SETTINGS frame with the ACK flag set and a length field value other
+ * than 0 MUST be treated as a connection error of type FRAME_SIZE_ERROR */
+ if (decoder->frame_in_progress.payload_len) {
+ DECODER_LOGF(
+ ERROR,
+ decoder,
+ "SETTINGS ACK frame received, but it has non-0 payload length %" PRIu32,
+ decoder->frame_in_progress.payload_len);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+ }
+
+ DECODER_CALL_VTABLE(decoder, on_settings_ack);
+ return s_decoder_reset_state(decoder);
+ }
+
+ if (decoder->frame_in_progress.payload_len % s_state_frame_settings_i_requires_6_bytes != 0) {
+ /* A SETTINGS frame with a length other than a multiple of 6 octets MUST be
+ * treated as a connection error (Section 5.4.1) of type FRAME_SIZE_ERROR */
+ DECODER_LOGF(
+ ERROR,
+ decoder,
+ "Settings frame payload length is %" PRIu32 ", but it must be divisible by %" PRIu32,
+ decoder->frame_in_progress.payload_len,
+ s_state_frame_settings_i_requires_6_bytes);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FRAME_SIZE_ERROR);
+ }
+
+ /* Enter looping states until all entries are consumed. */
+ return s_decoder_switch_state(decoder, &s_state_frame_settings_loop);
+}
+
+/* Check if we're done consuming settings */
+static struct aws_h2err s_state_fn_frame_settings_loop(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ (void)input;
+
+ if (decoder->frame_in_progress.payload_len == 0) {
+ /* Huzzah, done with the frame, fire the callback */
+ struct aws_array_list *buffer = &decoder->settings_buffer_list;
+ DECODER_CALL_VTABLE_ARGS(
+ decoder, on_settings, buffer->data, aws_array_list_length(&decoder->settings_buffer_list));
+ /* clean up the buffer */
+ aws_array_list_clear(&decoder->settings_buffer_list);
+ return s_decoder_reset_state(decoder);
+ }
+
+ return s_decoder_switch_state(decoder, &s_state_frame_settings_i);
+}
+
+/* Each run through this state consumes one 6-byte setting.
+ * There may be multiple settings in a SETTINGS frame.
+ * +-------------------------------+
+ * | Identifier (16) |
+ * +-------------------------------+-------------------------------+
+ * | Value (32) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_settings_i(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_frame_settings_i_requires_6_bytes);
+
+ uint16_t id = 0;
+ uint32_t value = 0;
+
+ bool succ = aws_byte_cursor_read_be16(input, &id);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ succ = aws_byte_cursor_read_be32(input, &value);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ /* An endpoint that receives a SETTINGS frame with any unknown or unsupported identifier MUST ignore that setting.
+ * RFC-7540 6.5.2 */
+ if (id >= AWS_HTTP2_SETTINGS_BEGIN_RANGE && id < AWS_HTTP2_SETTINGS_END_RANGE) {
+ /* check the value meets the settings bounds */
+ if (value < aws_h2_settings_bounds[id][0] || value > aws_h2_settings_bounds[id][1]) {
+ DECODER_LOGF(
+ ERROR, decoder, "A value of SETTING frame is invalid, id: %" PRIu16 ", value: %" PRIu32, id, value);
+ if (id == AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE) {
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+ } else {
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+ }
+ struct aws_http2_setting setting;
+ setting.id = id;
+ setting.value = value;
+ /* array_list will keep a copy of setting, it is fine to be a local variable */
+ if (aws_array_list_push_back(&decoder->settings_buffer_list, &setting)) {
+ DECODER_LOGF(ERROR, decoder, "Writing setting to buffer failed, %s", aws_error_name(aws_last_error()));
+ return aws_h2err_from_last_error();
+ }
+ }
+
+ /* Update payload len */
+ decoder->frame_in_progress.payload_len -= s_state_frame_settings_i_requires_6_bytes;
+
+ return s_decoder_switch_state(decoder, &s_state_frame_settings_loop);
+}
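+
+/* Worked example (illustrative setting): the entry identifier 0x0004 (INITIAL_WINDOW_SIZE)
+ * with value 65535 occupies 6 bytes on the wire: 00 04 00 00 ff ff. An entry with an
+ * unknown identifier (e.g. 0x00ff) falls outside the range check above and is simply ignored. */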
+
+/* Read 4-byte Promised Stream ID
+ * The rest of the frame is just like HEADERS, so move on to shared states...
+ * +-+-------------------------------------------------------------+
+ * |R| Promised Stream ID (31) |
+ * +-+-----------------------------+-------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_push_promise(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ if (decoder->settings.enable_push == 0) {
+ /* treat the receipt of a PUSH_PROMISE frame as a connection error of type PROTOCOL_ERROR.(RFC-7540 6.5.2) */
+ DECODER_LOG(ERROR, decoder, "PUSH_PROMISE is invalid, the seting for enable push is 0");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+
+ AWS_ASSERT(input->len >= s_state_frame_push_promise_requires_4_bytes);
+
+ uint32_t promised_stream_id = 0;
+ bool succ = aws_byte_cursor_read_be32(input, &promised_stream_id);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ decoder->frame_in_progress.payload_len -= s_state_frame_push_promise_requires_4_bytes;
+
+ /* Reserved bit (top bit) must be ignored when receiving (RFC-7540 4.1) */
+ promised_stream_id &= s_31_bit_mask;
+
+ /* Promised stream ID must not be 0 (RFC-7540 6.6).
+ * Promised stream ID (server-initiated) must be even-numbered (RFC-7540 5.1.1). */
+ if ((promised_stream_id == 0) || (promised_stream_id % 2) != 0) {
+ DECODER_LOGF(ERROR, decoder, "PUSH_PROMISE is promising invalid stream ID %" PRIu32, promised_stream_id);
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+
+ /* Server cannot receive PUSH_PROMISE frames */
+ if (decoder->is_server) {
+ DECODER_LOG(ERROR, decoder, "Server cannot receive PUSH_PROMISE frames");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+
+ /* Start header-block and alert the user. */
+ decoder->header_block_in_progress.stream_id = decoder->frame_in_progress.stream_id;
+ decoder->header_block_in_progress.is_push_promise = true;
+ decoder->header_block_in_progress.ends_stream = false;
+
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_begin, promised_stream_id);
+
+ /* Read the header-block fragment */
+ return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+
+/* PING frame is just 8-bytes of opaque data.
+ * +---------------------------------------------------------------+
+ * | |
+ * | Opaque Data (64) |
+ * | |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_ping(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_frame_ping_requires_8_bytes);
+
+ uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE] = {0};
+ bool succ = aws_byte_cursor_read(input, &opaque_data, AWS_HTTP2_PING_DATA_SIZE);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ decoder->frame_in_progress.payload_len -= s_state_frame_ping_requires_8_bytes;
+
+ if (decoder->frame_in_progress.flags.ack) {
+ DECODER_CALL_VTABLE_ARGS(decoder, on_ping_ack, opaque_data);
+ } else {
+ DECODER_CALL_VTABLE_ARGS(decoder, on_ping, opaque_data);
+ }
+
+ return s_decoder_reset_state(decoder);
+}
+
+/* Read first 8 bytes of GOAWAY.
+ * This may be followed by N bytes of debug data.
+ * +-+-------------------------------------------------------------+
+ * |R| Last-Stream-ID (31) |
+ * +-+-------------------------------------------------------------+
+ * | Error Code (32) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_goaway(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_frame_goaway_requires_8_bytes);
+
+ uint32_t last_stream = 0;
+ uint32_t error_code = AWS_HTTP2_ERR_NO_ERROR;
+
+ bool succ = aws_byte_cursor_read_be32(input, &last_stream);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ last_stream &= s_31_bit_mask;
+
+ succ = aws_byte_cursor_read_be32(input, &error_code);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ decoder->frame_in_progress.payload_len -= s_state_frame_goaway_requires_8_bytes;
+ uint32_t debug_data_length = decoder->frame_in_progress.payload_len;
+ /* Received a new GOAWAY, which replaces any previous one. Buffer it up and invoke the callback
+ * once the debug data is fully decoded. */
+ decoder->goaway_in_progress.error_code = error_code;
+ decoder->goaway_in_progress.last_stream = last_stream;
+ int init_result = aws_byte_buf_init(&decoder->goaway_in_progress.debug_data, decoder->alloc, debug_data_length);
+ AWS_ASSERT(init_result == 0);
+ (void)init_result;
+
+ return s_decoder_switch_state(decoder, &s_state_frame_goaway_debug_data);
+}
+
+/* Optional remainder of GOAWAY frame.
+ * +---------------------------------------------------------------+
+ * | Additional Debug Data (*) |
+ * +---------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_goaway_debug_data(
+ struct aws_h2_decoder *decoder,
+ struct aws_byte_cursor *input) {
+
+ struct aws_byte_cursor debug_data = s_decoder_get_payload(decoder, input);
+ if (debug_data.len > 0) {
+ /* As we initialized the buffer to the size of debug data, we can safely append here */
+ aws_byte_buf_append(&decoder->goaway_in_progress.debug_data, &debug_data);
+ }
+
+ /* If this is the last data in the frame, reset decoder */
+ if (decoder->frame_in_progress.payload_len == 0) {
+ struct aws_byte_cursor debug_cursor = aws_byte_cursor_from_buf(&decoder->goaway_in_progress.debug_data);
+
+ DECODER_CALL_VTABLE_ARGS(
+ decoder,
+ on_goaway,
+ decoder->goaway_in_progress.last_stream,
+ decoder->goaway_in_progress.error_code,
+ debug_cursor);
+ aws_byte_buf_clean_up(&decoder->goaway_in_progress.debug_data);
+ return s_decoder_reset_state(decoder);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* WINDOW_UPDATE frame.
+ * +-+-------------------------------------------------------------+
+ * |R| Window Size Increment (31) |
+ * +-+-------------------------------------------------------------+
+ */
+static struct aws_h2err s_state_fn_frame_window_update(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ AWS_ASSERT(input->len >= s_state_frame_window_update_requires_4_bytes);
+
+ uint32_t window_increment = 0;
+ bool succ = aws_byte_cursor_read_be32(input, &window_increment);
+ AWS_ASSERT(succ);
+ (void)succ;
+
+ decoder->frame_in_progress.payload_len -= s_state_frame_window_update_requires_4_bytes;
+
+ window_increment &= s_31_bit_mask;
+
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_window_update, window_increment);
+
+ return s_decoder_reset_state(decoder);
+}
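+
+/* Worked example: a WINDOW_UPDATE payload of 00 00 ff ff (reserved bit clear) yields a
+ * window increment of 65535, which is passed unmodified to the on_window_update callback;
+ * this state itself performs no further validation of the increment. */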
+
+/* CONTINUATION is a lot like HEADERS, so it uses shared states. */
+static struct aws_h2err s_state_fn_frame_continuation(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ (void)input;
+
+ /* Read the header-block fragment */
+ return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+
+/* Implementations MUST ignore and discard any frame that has a type that is unknown. */
+static struct aws_h2err s_state_fn_frame_unknown(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+
+ /* Read all data possible, and throw it on the floor */
+ s_decoder_get_payload(decoder, input);
+
+ /* If there's no more data expected, end the frame */
+ if (decoder->frame_in_progress.payload_len == 0) {
+ return s_decoder_reset_state(decoder);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* Perform analysis that can't be done until all pseudo-headers are received.
+ * Then deliver buffered pseudoheaders via callback */
+static struct aws_h2err s_flush_pseudoheaders(struct aws_h2_decoder *decoder) {
+ struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress;
+
+ if (current_block->malformed) {
+ goto already_malformed;
+ }
+
+ if (current_block->pseudoheaders_done) {
+ return AWS_H2ERR_SUCCESS;
+ }
+ current_block->pseudoheaders_done = true;
+
+ /* s_process_header_field() already checked that we're not mixing request & response pseudoheaders */
+ bool has_request_pseudoheaders = false;
+ for (int i = PSEUDOHEADER_METHOD; i <= PSEUDOHEADER_PATH; ++i) {
+ if (current_block->pseudoheader_values[i] != NULL) {
+ has_request_pseudoheaders = true;
+ break;
+ }
+ }
+
+ bool has_response_pseudoheaders = current_block->pseudoheader_values[PSEUDOHEADER_STATUS] != NULL;
+
+ if (current_block->is_push_promise && !has_request_pseudoheaders) {
+ DECODER_LOG(ERROR, decoder, "PUSH_PROMISE is missing :method");
+ goto malformed;
+ }
+
+ if (has_request_pseudoheaders) {
+ /* Request header-block. */
+ current_block->block_type = AWS_HTTP_HEADER_BLOCK_MAIN;
+
+ } else if (has_response_pseudoheaders) {
+ /* Response header block. */
+
+ /* Determine whether this is an Informational (1xx) response */
+ struct aws_byte_cursor status_value =
+ aws_byte_cursor_from_string(current_block->pseudoheader_values[PSEUDOHEADER_STATUS]);
+ uint64_t status_code;
+ if (status_value.len != 3 || aws_byte_cursor_utf8_parse_u64(status_value, &status_code)) {
+ DECODER_LOG(ERROR, decoder, ":status header has invalid value");
+ DECODER_LOGF(DEBUG, decoder, "Bad :status value is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(status_value));
+ goto malformed;
+ }
+
+ if (status_code / 100 == 1) {
+ current_block->block_type = AWS_HTTP_HEADER_BLOCK_INFORMATIONAL;
+
+ if (current_block->ends_stream) {
+ /* Informational headers do not constitute a full response (RFC-7540 8.1) */
+ DECODER_LOG(ERROR, decoder, "Informational (1xx) response cannot END_STREAM");
+ goto malformed;
+ }
+ current_block->body_headers_forbidden = true;
+ } else {
+ current_block->block_type = AWS_HTTP_HEADER_BLOCK_MAIN;
+ }
+ /**
+ * RFC-9110 8.6.
+ * A server MUST NOT send a Content-Length header field in any response with a status code of 1xx
+ * (Informational) or 204 (No Content).
+ */
+ current_block->body_headers_forbidden |= status_code == AWS_HTTP_STATUS_CODE_204_NO_CONTENT;
+
+ } else {
+ /* Trailing header block. */
+ if (!current_block->ends_stream) {
+ DECODER_LOG(ERROR, decoder, "HEADERS appear to be trailer, but lack END_STREAM");
+ goto malformed;
+ }
+
+ current_block->block_type = AWS_HTTP_HEADER_BLOCK_TRAILING;
+ }
+
+ /* #TODO RFC-7540 8.1.2.3 & 8.3 Validate request has correct pseudoheaders. Note different rules for CONNECT */
+ /* #TODO validate pseudoheader values. each one has its own special rules */
+
+ /* Finally, deliver header-fields via callback */
+ for (size_t i = 0; i < PSEUDOHEADER_COUNT; ++i) {
+ const struct aws_string *value_string = current_block->pseudoheader_values[i];
+ if (value_string) {
+
+ struct aws_http_header header_field = {
+ .name = *s_pseudoheader_name_to_cursor[i],
+ .value = aws_byte_cursor_from_string(value_string),
+ .compression = current_block->pseudoheader_compression[i],
+ };
+
+ enum aws_http_header_name name_enum = s_pseudoheader_to_header_name[i];
+
+ if (current_block->is_push_promise) {
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, &header_field, name_enum);
+ } else {
+ DECODER_CALL_VTABLE_STREAM_ARGS(
+ decoder, on_headers_i, &header_field, name_enum, current_block->block_type);
+ }
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+
+malformed:
+ /* A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2).
+ * We continue decoding and report that it's malformed in on_headers_end(). */
+ current_block->malformed = true;
+ return AWS_H2ERR_SUCCESS;
+already_malformed:
+ return AWS_H2ERR_SUCCESS;
+}
+
+/* Process single header-field.
+ * If it's invalid, mark the header-block as malformed.
+ * If it's valid, and header-block is not malformed, deliver via callback. */
+static struct aws_h2err s_process_header_field(
+ struct aws_h2_decoder *decoder,
+ const struct aws_http_header *header_field) {
+
+ struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress;
+ if (current_block->malformed) {
+ goto already_malformed;
+ }
+
+ const struct aws_byte_cursor name = header_field->name;
+ if (name.len == 0) {
+ DECODER_LOG(ERROR, decoder, "Header name is blank");
+ goto malformed;
+ }
+
+ enum aws_http_header_name name_enum = aws_http_lowercase_str_to_header_name(name);
+
+ bool is_pseudoheader = name.ptr[0] == ':';
+ if (is_pseudoheader) {
+ if (current_block->pseudoheaders_done) {
+ /* Note: being careful not to leak possibly sensitive data except at DEBUG level and lower */
+ DECODER_LOG(ERROR, decoder, "Pseudo-headers must appear before regular fields.");
+ DECODER_LOGF(DEBUG, decoder, "Misplaced pseudo-header is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name));
+ goto malformed;
+ }
+
+ enum pseudoheader_name pseudoheader_enum = s_header_to_pseudoheader_name(name_enum);
+ if (pseudoheader_enum == PSEUDOHEADER_UNKNOWN) {
+ DECODER_LOG(ERROR, decoder, "Unrecognized pseudo-header");
+ DECODER_LOGF(DEBUG, decoder, "Unrecognized pseudo-header is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name));
+ goto malformed;
+ }
+
+ /* Ensure request pseudo-headers vs response pseudoheaders were sent appropriately.
+ * This also ensures that request and response pseudoheaders aren't being mixed. */
+ bool expect_request_pseudoheader = decoder->is_server || current_block->is_push_promise;
+ bool is_request_pseudoheader = pseudoheader_enum != PSEUDOHEADER_STATUS;
+ if (expect_request_pseudoheader != is_request_pseudoheader) {
+ DECODER_LOGF(
+ ERROR, /* ok to log name of recognized pseudo-header at ERROR level */
+ decoder,
+ "'" PRInSTR "' pseudo-header cannot be in %s header-block to %s",
+ AWS_BYTE_CURSOR_PRI(name),
+ current_block->is_push_promise ? "PUSH_PROMISE" : "HEADERS",
+ decoder->is_server ? "server" : "client");
+ goto malformed;
+ }
+
+ /* Protect against duplicates. */
+ if (current_block->pseudoheader_values[pseudoheader_enum] != NULL) {
+ /* ok to log name of recognized pseudo-header at ERROR level */
+ DECODER_LOGF(
+ ERROR, decoder, "'" PRInSTR "' pseudo-header occurred multiple times", AWS_BYTE_CURSOR_PRI(name));
+ goto malformed;
+ }
+
+ /* Buffer up pseudo-headers, we'll deliver them later once they're all validated. */
+ current_block->pseudoheader_compression[pseudoheader_enum] = header_field->compression;
+ current_block->pseudoheader_values[pseudoheader_enum] =
+ aws_string_new_from_cursor(decoder->alloc, &header_field->value);
+ if (!current_block->pseudoheader_values[pseudoheader_enum]) {
+ return aws_h2err_from_last_error();
+ }
+
+ } else { /* Else regular header-field. */
+
+ /* Regular header-fields come after pseudo-headers, so make sure pseudo-headers are flushed */
+ if (!current_block->pseudoheaders_done) {
+ struct aws_h2err err = s_flush_pseudoheaders(decoder);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ /* might have realized that header-block is malformed during flush */
+ if (current_block->malformed) {
+ goto already_malformed;
+ }
+ }
+
+ /* Validate header name (not necessary if string already matched against a known enum) */
+ if (name_enum == AWS_HTTP_HEADER_UNKNOWN) {
+ if (!aws_strutil_is_lowercase_http_token(name)) {
+ DECODER_LOG(ERROR, decoder, "Header name contains invalid characters");
+ DECODER_LOGF(DEBUG, decoder, "Bad header name is '" PRInSTR "'", AWS_BYTE_CURSOR_PRI(name));
+ goto malformed;
+ }
+ }
+
+ /* #TODO Validate characters used in header_field->value */
+
+ switch (name_enum) {
+ case AWS_HTTP_HEADER_COOKIE:
+ /* For cookie headers, don't fire the callback until they have all been concatenated;
+ * store this one in the buffer for now */
+ if (header_field->compression > current_block->cookie_header_compression_type) {
+ current_block->cookie_header_compression_type = header_field->compression;
+ }
+
+ if (current_block->cookies.len) {
+ /* add a delimiter */
+ struct aws_byte_cursor delimiter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("; ");
+ if (aws_byte_buf_append_dynamic(&current_block->cookies, &delimiter)) {
+ return aws_h2err_from_last_error();
+ }
+ }
+ if (aws_byte_buf_append_dynamic(&current_block->cookies, &header_field->value)) {
+ return aws_h2err_from_last_error();
+ }
+ /* Early return */
+ return AWS_H2ERR_SUCCESS;
+ case AWS_HTTP_HEADER_TRANSFER_ENCODING:
+ case AWS_HTTP_HEADER_UPGRADE:
+ case AWS_HTTP_HEADER_KEEP_ALIVE:
+ case AWS_HTTP_HEADER_PROXY_CONNECTION: {
+ /* Connection-specific header fields are treated as malformed (RFC-9113 8.2.2) */
+ DECODER_LOGF(
+ ERROR,
+ decoder,
+ "Connection-specific header ('" PRInSTR "') found, not allowed in HTTP/2",
+ AWS_BYTE_CURSOR_PRI(name));
+ goto malformed;
+ } break;
+
+ case AWS_HTTP_HEADER_CONTENT_LENGTH:
+ if (current_block->body_headers_forbidden) {
+ /* Content-Length is forbidden for this response */
+ DECODER_LOG(ERROR, decoder, "Unexpected Content-Length header found");
+ goto malformed;
+ }
+ break;
+ default:
+ break;
+ }
+ /* Deliver header-field via callback */
+ if (current_block->is_push_promise) {
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, header_field, name_enum);
+ } else {
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_headers_i, header_field, name_enum, current_block->block_type);
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+
+malformed:
+ /* A malformed header-block is not a connection error, it's a Stream Error (RFC-7540 5.4.2).
+ * We continue decoding and report that it's malformed in on_headers_end(). */
+ current_block->malformed = true;
+ return AWS_H2ERR_SUCCESS;
+already_malformed:
+ return AWS_H2ERR_SUCCESS;
+}
+
+static struct aws_h2err s_flush_cookie_header(struct aws_h2_decoder *decoder) {
+ struct aws_header_block_in_progress *current_block = &decoder->header_block_in_progress;
+ if (current_block->malformed) {
+ return AWS_H2ERR_SUCCESS;
+ }
+ if (current_block->cookies.len == 0) {
+ /* Nothing to flush */
+ return AWS_H2ERR_SUCCESS;
+ }
+ struct aws_http_header concatenated_cookie;
+ struct aws_byte_cursor header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("cookie");
+ concatenated_cookie.name = header_name;
+ concatenated_cookie.value = aws_byte_cursor_from_buf(&current_block->cookies);
+ concatenated_cookie.compression = current_block->cookie_header_compression_type;
+ if (current_block->is_push_promise) {
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_i, &concatenated_cookie, AWS_HTTP_HEADER_COOKIE);
+ } else {
+ DECODER_CALL_VTABLE_STREAM_ARGS(
+ decoder, on_headers_i, &concatenated_cookie, AWS_HTTP_HEADER_COOKIE, current_block->block_type);
+ }
+ return AWS_H2ERR_SUCCESS;
+}
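+
+/* Worked example (illustrative values): if the header-block carried "cookie: a=1" followed
+ * by "cookie: b=2", the buffered values are joined with "; " and delivered once as
+ * "cookie: a=1; b=2", matching the cookie-field handling of RFC-7540 8.1.2.5. */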
+
+/* This state checks whether we've consumed the current frame's entire header-block fragment.
+ * We revisit this state after each entry is decoded.
+ * This state consumes no data. */
+static struct aws_h2err s_state_fn_header_block_loop(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ (void)input;
+
+ /* If we're out of payload data, handle frame complete */
+ if (decoder->frame_in_progress.payload_len == 0) {
+
+ /* If this is the end of the header-block, invoke callback and clear header_block_in_progress */
+ if (decoder->frame_in_progress.flags.end_headers) {
+ /* Ensure pseudo-headers have been flushed */
+ struct aws_h2err err = s_flush_pseudoheaders(decoder);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ /* flush the concatenated cookie header */
+ err = s_flush_cookie_header(decoder);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+
+ bool malformed = decoder->header_block_in_progress.malformed;
+ DECODER_LOGF(TRACE, decoder, "Done decoding header-block, malformed=%d", malformed);
+
+ if (decoder->header_block_in_progress.is_push_promise) {
+ DECODER_CALL_VTABLE_STREAM_ARGS(decoder, on_push_promise_end, malformed);
+ } else {
+ DECODER_CALL_VTABLE_STREAM_ARGS(
+ decoder, on_headers_end, malformed, decoder->header_block_in_progress.block_type);
+ }
+
+ /* If header-block began with END_STREAM flag, alert user now */
+ if (decoder->header_block_in_progress.ends_stream) {
+ DECODER_CALL_VTABLE_STREAM(decoder, on_end_stream);
+ }
+
+ s_reset_header_block_in_progress(decoder);
+
+ } else {
+ DECODER_LOG(TRACE, decoder, "Done decoding header-block fragment, expecting CONTINUATION frames");
+ }
+
+ /* Finish this frame */
+ return s_decoder_switch_state(decoder, &s_state_padding);
+ }
+
+ DECODER_LOGF(
+ TRACE,
+ decoder,
+ "Decoding header-block entry, %" PRIu32 " bytes remaining in payload",
+ decoder->frame_in_progress.payload_len);
+
+ return s_decoder_switch_state(decoder, &s_state_header_block_entry);
+}
+
+/* We stay in this state until a single "entry" is decoded from the header-block fragment.
+ * Then we return to the header_block_loop state */
+static struct aws_h2err s_state_fn_header_block_entry(struct aws_h2_decoder *decoder, struct aws_byte_cursor *input) {
+ /* This state requires at least 1 byte, but will likely consume more */
+ AWS_ASSERT(input->len >= s_state_header_block_entry_requires_1_bytes);
+
+ /* Feed header-block fragment to HPACK decoder.
+ * Don't let decoder consume anything beyond payload_len. */
+ struct aws_byte_cursor fragment = *input;
+ if (fragment.len > decoder->frame_in_progress.payload_len) {
+ fragment.len = decoder->frame_in_progress.payload_len;
+ }
+
+ const size_t prev_fragment_len = fragment.len;
+
+ struct aws_hpack_decode_result result;
+ if (aws_hpack_decode(&decoder->hpack, &fragment, &result)) {
+ DECODER_LOGF(ERROR, decoder, "Error decoding header-block fragment: %s", aws_error_name(aws_last_error()));
+
+ /* Any possible error from HPACK decoder (except OOM) is treated as a COMPRESSION error. */
+ if (aws_last_error() == AWS_ERROR_OOM) {
+ return aws_h2err_from_last_error();
+ } else {
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_COMPRESSION_ERROR);
+ }
+ }
+
+ /* HPACK decoder returns when it reaches the end of an entry, or when it's consumed the whole fragment.
+ * Update input & payload_len to reflect the number of bytes consumed. */
+ const size_t bytes_consumed = prev_fragment_len - fragment.len;
+ aws_byte_cursor_advance(input, bytes_consumed);
+ decoder->frame_in_progress.payload_len -= (uint32_t)bytes_consumed;
+
+ if (result.type == AWS_HPACK_DECODE_T_ONGOING) {
+ /* HPACK decoder hasn't finished entry */
+
+ if (decoder->frame_in_progress.payload_len > 0) {
+ /* More payload is coming. Remain in state until it arrives */
+ DECODER_LOG(TRACE, decoder, "Header-block entry partially decoded, waiting for more data.");
+ return AWS_H2ERR_SUCCESS;
+ }
+
+ if (decoder->frame_in_progress.flags.end_headers) {
+ /* Reached end of the frame's payload, and this frame ends the header-block.
+ * Error if we ended up with a partially decoded entry. */
+ DECODER_LOG(ERROR, decoder, "Compression error: incomplete entry at end of header-block");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_COMPRESSION_ERROR);
+ }
+
+ /* Reached end of this frame's payload, but CONTINUATION frames are expected to arrive.
+ * We'll resume decoding this entry when we get them. */
+ DECODER_LOG(TRACE, decoder, "Header-block entry partially decoded, resumes in CONTINUATION frame");
+ return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+ }
+
+ /* Finished decoding HPACK entry! */
+
+ /* #TODO Enforce dynamic table resize rules from RFC-7541 4.2
+ * If dynamic table size changed via SETTINGS frame, next header-block must start with DYNAMIC_TABLE_RESIZE entry.
+ * Is it illegal to receive a resize entry at other times? */
+
+ /* #TODO The TE header field ... MUST NOT contain any value other than "trailers" */
+
+ if (result.type == AWS_HPACK_DECODE_T_HEADER_FIELD) {
+ const struct aws_http_header *header_field = &result.data.header_field;
+
+ DECODER_LOGF(
+ TRACE,
+ decoder,
+ "Decoded header field: \"" PRInSTR ": " PRInSTR "\"",
+ AWS_BYTE_CURSOR_PRI(header_field->name),
+ AWS_BYTE_CURSOR_PRI(header_field->value));
+
+ struct aws_h2err err = s_process_header_field(decoder, header_field);
+ if (aws_h2err_failed(err)) {
+ return err;
+ }
+ }
+
+ return s_decoder_switch_state(decoder, &s_state_header_block_loop);
+}
+
+/* The first thing a client sends on a connection is a 24 byte magic string (RFC-7540 3.5).
+ * Note that this state doesn't "require" the full 24 bytes; it runs as data arrives.
+ * This avoids hanging if fewer than 24 bytes have arrived. */
+static struct aws_h2err s_state_fn_connection_preface_string(
+ struct aws_h2_decoder *decoder,
+ struct aws_byte_cursor *input) {
+ size_t remaining_len = decoder->connection_preface_cursor.len;
+ size_t consuming_len = input->len < remaining_len ? input->len : remaining_len;
+
+ struct aws_byte_cursor expected = aws_byte_cursor_advance(&decoder->connection_preface_cursor, consuming_len);
+
+ struct aws_byte_cursor received = aws_byte_cursor_advance(input, consuming_len);
+
+ if (!aws_byte_cursor_eq(&expected, &received)) {
+ DECODER_LOG(ERROR, decoder, "Client connection preface is invalid");
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR);
+ }
+
+ if (decoder->connection_preface_cursor.len == 0) {
+ /* Done receiving connection preface string, proceed to decoding normal frames. */
+ return s_decoder_reset_state(decoder);
+ }
+
+ /* Remain in state until more data arrives */
+ return AWS_H2ERR_SUCCESS;
+}
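+
+/* Worked example: the expected preface is the 24-byte string
+ * "PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n" (see aws_h2_connection_preface_client_string).
+ * If only "PRI * HT" has arrived so far, those 8 bytes are compared and this state waits
+ * for the remaining 16 before normal frame decoding begins. */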
+
+void aws_h2_decoder_set_setting_header_table_size(struct aws_h2_decoder *decoder, uint32_t data) {
+ /* Set the protocol_max_size_setting for hpack. */
+ aws_hpack_decoder_update_max_table_size(&decoder->hpack, data);
+}
+
+void aws_h2_decoder_set_setting_enable_push(struct aws_h2_decoder *decoder, uint32_t data) {
+ decoder->settings.enable_push = data;
+}
+
+void aws_h2_decoder_set_setting_max_frame_size(struct aws_h2_decoder *decoder, uint32_t data) {
+ decoder->settings.max_frame_size = data;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_frames.c b/contrib/restricted/aws/aws-c-http/source/h2_frames.c
new file mode 100644
index 0000000000..12b5ca0849
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_frames.c
@@ -0,0 +1,1233 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_frames.h>
+
+#include <aws/compression/huffman.h>
+
+#include <aws/common/logging.h>
+
+#include <aws/io/stream.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+#define ENCODER_LOGF(level, encoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p " text, (encoder)->logging_id, __VA_ARGS__)
+
+#define ENCODER_LOG(level, encoder, text) ENCODER_LOGF(level, encoder, "%s", text)
+
+const struct aws_byte_cursor aws_h2_connection_preface_client_string =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n");
+
+/* Initial values and bounds are from RFC-7540 6.5.2 */
+const uint32_t aws_h2_settings_initial[AWS_HTTP2_SETTINGS_END_RANGE] = {
+ [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE] = 4096,
+ [AWS_HTTP2_SETTINGS_ENABLE_PUSH] = 1,
+ [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS] = UINT32_MAX, /* "Initially there is no limit to this value" */
+ [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE] = AWS_H2_INIT_WINDOW_SIZE,
+ [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE] = 16384,
+ [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE] = UINT32_MAX, /* "The initial value of this setting is unlimited" */
+};
+
+const uint32_t aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_END_RANGE][2] = {
+ [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE][0] = 0,
+ [AWS_HTTP2_SETTINGS_HEADER_TABLE_SIZE][1] = UINT32_MAX,
+
+ [AWS_HTTP2_SETTINGS_ENABLE_PUSH][0] = 0,
+ [AWS_HTTP2_SETTINGS_ENABLE_PUSH][1] = 1,
+
+ [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS][0] = 0,
+ [AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS][1] = UINT32_MAX,
+
+ [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE][0] = 0,
+ [AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE][1] = AWS_H2_WINDOW_UPDATE_MAX,
+
+ [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][0] = 16384,
+ [AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][1] = AWS_H2_PAYLOAD_MAX,
+
+ [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE][0] = 0,
+ [AWS_HTTP2_SETTINGS_MAX_HEADER_LIST_SIZE][1] = UINT32_MAX,
+};
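+
+/* Illustrative use of these bounds: the decoder's SETTINGS handling checks each received
+ * value against aws_h2_settings_bounds[id], so e.g. ENABLE_PUSH=2 (outside [0, 1]) is
+ * rejected as a PROTOCOL_ERROR, while an INITIAL_WINDOW_SIZE above AWS_H2_WINDOW_UPDATE_MAX
+ * is rejected as a FLOW_CONTROL_ERROR. */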
+
+/* Stream ids & dependencies should only write the bottom 31 bits */
+static const uint32_t s_u32_top_bit_mask = UINT32_MAX << 31;
+
+/* Bytes to initially reserve for encoding of an entire header block. Buffer will grow if necessary. */
+static const size_t s_encoded_header_block_reserve = 128; /* Value pulled from thin air */
+
+#define DEFINE_FRAME_VTABLE(NAME) \
+ static aws_h2_frame_destroy_fn s_frame_##NAME##_destroy; \
+ static aws_h2_frame_encode_fn s_frame_##NAME##_encode; \
+ static const struct aws_h2_frame_vtable s_frame_##NAME##_vtable = { \
+ .destroy = s_frame_##NAME##_destroy, \
+ .encode = s_frame_##NAME##_encode, \
+ }
+
+const char *aws_h2_frame_type_to_str(enum aws_h2_frame_type type) {
+ switch (type) {
+ case AWS_H2_FRAME_T_DATA:
+ return "DATA";
+ case AWS_H2_FRAME_T_HEADERS:
+ return "HEADERS";
+ case AWS_H2_FRAME_T_PRIORITY:
+ return "PRIORITY";
+ case AWS_H2_FRAME_T_RST_STREAM:
+ return "RST_STREAM";
+ case AWS_H2_FRAME_T_SETTINGS:
+ return "SETTINGS";
+ case AWS_H2_FRAME_T_PUSH_PROMISE:
+ return "PUSH_PROMISE";
+ case AWS_H2_FRAME_T_PING:
+ return "PING";
+ case AWS_H2_FRAME_T_GOAWAY:
+ return "GOAWAY";
+ case AWS_H2_FRAME_T_WINDOW_UPDATE:
+ return "WINDOW_UPDATE";
+ case AWS_H2_FRAME_T_CONTINUATION:
+ return "CONTINUATION";
+ default:
+ return "**UNKNOWN**";
+ }
+}
+
+const char *aws_http2_error_code_to_str(enum aws_http2_error_code h2_error_code) {
+ switch (h2_error_code) {
+ case AWS_HTTP2_ERR_NO_ERROR:
+ return "NO_ERROR";
+ case AWS_HTTP2_ERR_PROTOCOL_ERROR:
+ return "PROTOCOL_ERROR";
+ case AWS_HTTP2_ERR_INTERNAL_ERROR:
+ return "INTERNAL_ERROR";
+ case AWS_HTTP2_ERR_FLOW_CONTROL_ERROR:
+ return "FLOW_CONTROL_ERROR";
+ case AWS_HTTP2_ERR_SETTINGS_TIMEOUT:
+ return "SETTINGS_TIMEOUT";
+ case AWS_HTTP2_ERR_STREAM_CLOSED:
+ return "STREAM_CLOSED";
+ case AWS_HTTP2_ERR_FRAME_SIZE_ERROR:
+ return "FRAME_SIZE_ERROR";
+ case AWS_HTTP2_ERR_REFUSED_STREAM:
+ return "REFUSED_STREAM";
+ case AWS_HTTP2_ERR_CANCEL:
+ return "CANCEL";
+ case AWS_HTTP2_ERR_COMPRESSION_ERROR:
+ return "COMPRESSION_ERROR";
+ case AWS_HTTP2_ERR_CONNECT_ERROR:
+ return "CONNECT_ERROR";
+ case AWS_HTTP2_ERR_ENHANCE_YOUR_CALM:
+ return "ENHANCE_YOUR_CALM";
+ case AWS_HTTP2_ERR_INADEQUATE_SECURITY:
+ return "INADEQUATE_SECURITY";
+ case AWS_HTTP2_ERR_HTTP_1_1_REQUIRED:
+ return "HTTP_1_1_REQUIRED";
+ default:
+ return "UNKNOWN_ERROR";
+ }
+}
+
+struct aws_h2err aws_h2err_from_h2_code(enum aws_http2_error_code h2_error_code) {
+ AWS_PRECONDITION(h2_error_code > AWS_HTTP2_ERR_NO_ERROR && h2_error_code < AWS_HTTP2_ERR_COUNT);
+
+ return (struct aws_h2err){
+ .h2_code = h2_error_code,
+ .aws_code = AWS_ERROR_HTTP_PROTOCOL_ERROR,
+ };
+}
+
+struct aws_h2err aws_h2err_from_aws_code(int aws_error_code) {
+ AWS_PRECONDITION(aws_error_code != 0);
+
+ return (struct aws_h2err){
+ .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR,
+ .aws_code = aws_error_code,
+ };
+}
+
+struct aws_h2err aws_h2err_from_last_error(void) {
+ return aws_h2err_from_aws_code(aws_last_error());
+}
+
+bool aws_h2err_success(struct aws_h2err err) {
+ return err.h2_code == 0 && err.aws_code == 0;
+}
+
+bool aws_h2err_failed(struct aws_h2err err) {
+ return err.h2_code != 0 || err.aws_code != 0;
+}
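+
+/* Minimal usage sketch (illustrative, mirroring how the decoder consumes these helpers):
+ * callers propagate struct aws_h2err by value and branch on aws_h2err_failed():
+ *
+ *     struct aws_h2err err = some_decode_step();
+ *     if (aws_h2err_failed(err)) {
+ *         return err;  // carries both the HTTP/2 code and the aws error code upward
+ *     }
+ *     return AWS_H2ERR_SUCCESS;
+ *
+ * where some_decode_step() stands in for any state function returning struct aws_h2err. */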
+
+int aws_h2_validate_stream_id(uint32_t stream_id) {
+ if (stream_id == 0 || stream_id > AWS_H2_STREAM_ID_MAX) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Determine max frame payload length that will:
+ * 1) fit in output's available space
+ * 2) obey the encoder's current MAX_FRAME_SIZE
+ *
+ * Assumes no part of the frame has been written yet to output.
+ * The total length of the frame would be: returned-payload-len + AWS_H2_FRAME_PREFIX_SIZE
+ *
+ * Raises error if there is not enough space available for even a frame prefix.
+ */
+static int s_get_max_contiguous_payload_length(
+ const struct aws_h2_frame_encoder *encoder,
+ const struct aws_byte_buf *output,
+ size_t *max_payload_length) {
+
+ const size_t space_available = output->capacity - output->len;
+
+ size_t max_payload_given_space_available;
+ if (aws_sub_size_checked(space_available, AWS_H2_FRAME_PREFIX_SIZE, &max_payload_given_space_available)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ size_t max_payload_given_settings = encoder->settings.max_frame_size;
+
+ *max_payload_length = aws_min_size(max_payload_given_space_available, max_payload_given_settings);
+ return AWS_OP_SUCCESS;
+}
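+
+/* Worked example (illustrative sizes): with output->capacity=1024, output->len=900 and the
+ * default max_frame_size of 16384, space_available is 124, so the payload is capped at
+ * 124 - AWS_H2_FRAME_PREFIX_SIZE (the 9-byte frame header) = 115 bytes. If fewer than 9
+ * bytes remain, the checked subtraction fails and AWS_ERROR_SHORT_BUFFER is raised instead. */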
+
+/***********************************************************************************************************************
+ * Priority
+ **********************************************************************************************************************/
+static size_t s_frame_priority_settings_size = 5;
+
+static void s_frame_priority_settings_encode(
+ const struct aws_h2_frame_priority_settings *priority,
+ struct aws_byte_buf *output) {
+ AWS_PRECONDITION(priority);
+ AWS_PRECONDITION(output);
+ AWS_PRECONDITION((priority->stream_dependency & s_u32_top_bit_mask) == 0);
+ (void)s_u32_top_bit_mask;
+
+ /* PRIORITY is encoded as (RFC-7540 6.3):
+ * +-+-------------------------------------------------------------+
+ * |E| Stream Dependency (31) |
+ * +-+-------------+-----------------------------------------------+
+ * | Weight (8) |
+ * +-+-------------+
+ */
+ bool writes_ok = true;
+
+ /* Write the top 4 bytes */
+ uint32_t top_bytes = priority->stream_dependency | ((uint32_t)priority->stream_dependency_exclusive << 31);
+ writes_ok &= aws_byte_buf_write_be32(output, top_bytes);
+
+ /* Write the priority weight */
+ writes_ok &= aws_byte_buf_write_u8(output, priority->weight);
+
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+}
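+
+/* Worked example (illustrative values): stream_dependency=3 with the exclusive bit set and
+ * a stored weight byte of 15 encodes as the 5 bytes 80 00 00 03 0f, i.e. 0x80000003
+ * followed by 0x0f. */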
+
+/***********************************************************************************************************************
+ * Common Frame Prefix
+ **********************************************************************************************************************/
+static void s_init_frame_base(
+ struct aws_h2_frame *frame_base,
+ struct aws_allocator *alloc,
+ enum aws_h2_frame_type type,
+ const struct aws_h2_frame_vtable *vtable,
+ uint32_t stream_id) {
+
+ frame_base->vtable = vtable;
+ frame_base->alloc = alloc;
+ frame_base->type = type;
+ frame_base->stream_id = stream_id;
+}
+
+static void s_frame_prefix_encode(
+ enum aws_h2_frame_type type,
+ uint32_t stream_id,
+ size_t length,
+ uint8_t flags,
+ struct aws_byte_buf *output) {
+ AWS_PRECONDITION(output);
+ AWS_PRECONDITION(!(stream_id & s_u32_top_bit_mask), "Invalid stream ID");
+ AWS_PRECONDITION(length <= AWS_H2_PAYLOAD_MAX);
+
+ /* Frame prefix is encoded like this (RFC-7540 4.1):
+ * +-----------------------------------------------+
+ * | Length (24) |
+ * +---------------+---------------+---------------+
+ * | Type (8) | Flags (8) |
+ * +-+-------------+---------------+-------------------------------+
+ * |R| Stream Identifier (31) |
+ * +=+=============================================================+
+ */
+ bool writes_ok = true;
+
+ /* Write length */
+ writes_ok &= aws_byte_buf_write_be24(output, (uint32_t)length);
+
+ /* Write type */
+ writes_ok &= aws_byte_buf_write_u8(output, type);
+
+ /* Write flags */
+ writes_ok &= aws_byte_buf_write_u8(output, flags);
+
+ /* Write stream id (with reserved first bit) */
+ writes_ok &= aws_byte_buf_write_be32(output, stream_id);
+
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+}
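+
+/* Worked example (illustrative values): a HEADERS frame on stream 5 with END_HEADERS (0x04)
+ * and a 20-byte payload gets the prefix 00 00 14 01 04 00 00 00 05, the mirror image of
+ * what s_state_fn_prefix in the decoder consumes. */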
+
+/***********************************************************************************************************************
+ * Encoder
+ **********************************************************************************************************************/
+int aws_h2_frame_encoder_init(
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_allocator *allocator,
+ const void *logging_id) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(allocator);
+
+ AWS_ZERO_STRUCT(*encoder);
+ encoder->allocator = allocator;
+ encoder->logging_id = logging_id;
+
+ aws_hpack_encoder_init(&encoder->hpack, allocator, logging_id);
+
+ encoder->settings.max_frame_size = aws_h2_settings_initial[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE];
+ return AWS_OP_SUCCESS;
+}
+void aws_h2_frame_encoder_clean_up(struct aws_h2_frame_encoder *encoder) {
+ AWS_PRECONDITION(encoder);
+
+ aws_hpack_encoder_clean_up(&encoder->hpack);
+}
+
+/***********************************************************************************************************************
+ * DATA
+ **********************************************************************************************************************/
+int aws_h2_encode_data_frame(
+ struct aws_h2_frame_encoder *encoder,
+ uint32_t stream_id,
+ struct aws_input_stream *body_stream,
+ bool body_ends_stream,
+ uint8_t pad_length,
+ int32_t *stream_window_size_peer,
+ size_t *connection_window_size_peer,
+ struct aws_byte_buf *output,
+ bool *body_complete,
+ bool *body_stalled) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(body_stream);
+ AWS_PRECONDITION(output);
+ AWS_PRECONDITION(body_complete);
+ AWS_PRECONDITION(body_stalled);
+ AWS_PRECONDITION(*stream_window_size_peer > 0);
+
+ if (aws_h2_validate_stream_id(stream_id)) {
+ return AWS_OP_ERR;
+ }
+
+ *body_complete = false;
+ *body_stalled = false;
+ uint8_t flags = 0;
+
+ /*
+ * Payload-length is the first thing encoded in a frame, but we don't know how
+ * much data we'll get from the body-stream until we actually read it.
+ * Therefore, we determine the exact location that the body data should go,
+ * then stream the body directly into that part of the output buffer.
+ * Then we will go and write the other parts of the frame in around it.
+ */
+
+ size_t bytes_preceding_body = AWS_H2_FRAME_PREFIX_SIZE;
+ size_t payload_overhead = 0; /* Amount of "payload" that will not contain body (padding) */
+ if (pad_length > 0) {
+ flags |= AWS_H2_FRAME_F_PADDED;
+
+ /* Padding len is 1st byte of payload (padding itself goes at end of payload) */
+ bytes_preceding_body += 1;
+ payload_overhead = 1 + pad_length;
+ }
+
+ /* Max amount allowed by stream and connection flow-control window */
+ size_t min_window_size = aws_min_size(*stream_window_size_peer, *connection_window_size_peer);
+
+ /* Max amount of payload we can do right now */
+ size_t max_payload;
+ if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) {
+ goto handle_waiting_for_more_space;
+ }
+ /* The flow-control window further limits max_payload for a flow-controlled frame */
+ max_payload = aws_min_size(max_payload, min_window_size);
+ /* Max amount of body we can fit in the payload */
+ size_t max_body;
+ if (aws_sub_size_checked(max_payload, payload_overhead, &max_body) || max_body == 0) {
+ goto handle_waiting_for_more_space;
+ }
+
+ /* Use a sub-buffer to limit where body can go */
+ struct aws_byte_buf body_sub_buf =
+ aws_byte_buf_from_empty_array(output->buffer + output->len + bytes_preceding_body, max_body);
+
+ /* Read body into sub-buffer */
+ if (aws_input_stream_read(body_stream, &body_sub_buf)) {
+ goto error;
+ }
+
+ /* Check if we've reached the end of the body */
+ struct aws_stream_status body_status;
+ if (aws_input_stream_get_status(body_stream, &body_status)) {
+ goto error;
+ }
+
+ if (body_status.is_end_of_stream) {
+ *body_complete = true;
+ if (body_ends_stream) {
+ flags |= AWS_H2_FRAME_F_END_STREAM;
+ }
+ } else {
+ if (body_sub_buf.len < body_sub_buf.capacity) {
+ /* Body stream was unable to provide as much data as it could have */
+ *body_stalled = true;
+
+ if (body_sub_buf.len == 0) {
+ /* This frame would have no useful information, don't even bother sending it */
+ goto handle_nothing_to_send_right_now;
+ }
+ }
+ }
+
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Encoding frame type=DATA stream_id=%" PRIu32 " data_len=%zu stalled=%d%s",
+ stream_id,
+ body_sub_buf.len,
+ *body_stalled,
+ (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : "");
+
+ /*
+ * Write in the other parts of the frame.
+ */
+ bool writes_ok = true;
+
+ /* Write the frame prefix */
+ const size_t payload_len = body_sub_buf.len + payload_overhead;
+ s_frame_prefix_encode(AWS_H2_FRAME_T_DATA, stream_id, payload_len, flags, output);
+
+ /* Write pad length */
+ if (flags & AWS_H2_FRAME_F_PADDED) {
+ writes_ok &= aws_byte_buf_write_u8(output, pad_length);
+ }
+
+ /* Increment output->len to jump over the body that we already wrote in */
+ AWS_ASSERT(output->buffer + output->len == body_sub_buf.buffer && "Streamed DATA to wrong position");
+ output->len += body_sub_buf.len;
+
+ /* Write padding */
+ if (flags & AWS_H2_FRAME_F_PADDED) {
+ writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length);
+ }
+
+ /* Update the connection window size now; the stream window size is updated when this function returns */
+ AWS_ASSERT(payload_len <= min_window_size);
+ *connection_window_size_peer -= payload_len;
+ *stream_window_size_peer -= (int32_t)payload_len;
+
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+ return AWS_OP_SUCCESS;
+
+handle_waiting_for_more_space:
+ ENCODER_LOGF(TRACE, encoder, "Insufficient space to encode DATA for stream %" PRIu32 " right now", stream_id);
+ return AWS_OP_SUCCESS;
+
+handle_nothing_to_send_right_now:
+ ENCODER_LOGF(INFO, encoder, "Stream %" PRIu32 " produced 0 bytes of body data", stream_id);
+ return AWS_OP_SUCCESS;
+
+error:
+ return AWS_OP_ERR;
+}
+
+/***********************************************************************************************************************
+ * HEADERS / PUSH_PROMISE
+ **********************************************************************************************************************/
+DEFINE_FRAME_VTABLE(headers);
+
+/* Represents a HEADERS or PUSH_PROMISE frame (followed by zero or more CONTINUATION frames) */
+struct aws_h2_frame_headers {
+ struct aws_h2_frame base;
+
+ /* Common data */
+ const struct aws_http_headers *headers;
+ uint8_t pad_length; /* Set to 0 to disable AWS_H2_FRAME_F_PADDED */
+
+ /* HEADERS-only data */
+ bool end_stream; /* AWS_H2_FRAME_F_END_STREAM */
+ bool has_priority; /* AWS_H2_FRAME_F_PRIORITY */
+ struct aws_h2_frame_priority_settings priority;
+
+ /* PUSH_PROMISE-only data */
+ uint32_t promised_stream_id;
+
+ /* State */
+ enum {
+ AWS_H2_HEADERS_STATE_INIT,
+ AWS_H2_HEADERS_STATE_FIRST_FRAME, /* header-block pre-encoded, no frames written yet */
+ AWS_H2_HEADERS_STATE_CONTINUATION, /* first frame written, need to write CONTINUATION frames now */
+ AWS_H2_HEADERS_STATE_COMPLETE,
+ } state;
+
+ struct aws_byte_buf whole_encoded_header_block;
+ struct aws_byte_cursor header_block_cursor; /* tracks progress sending encoded header-block in fragments */
+};
+
+static struct aws_h2_frame *s_frame_new_headers_or_push_promise(
+ struct aws_allocator *allocator,
+ enum aws_h2_frame_type frame_type,
+ uint32_t stream_id,
+ const struct aws_http_headers *headers,
+ uint8_t pad_length,
+ bool end_stream,
+ const struct aws_h2_frame_priority_settings *optional_priority,
+ uint32_t promised_stream_id) {
+
+ /* TODO: Host and ":authority" are no longer permitted to disagree. Should we enforce that here, or send it as
+ * requested and let the server side reject the request? */
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(frame_type == AWS_H2_FRAME_T_HEADERS || frame_type == AWS_H2_FRAME_T_PUSH_PROMISE);
+ AWS_PRECONDITION(headers);
+
+ /* Validate args */
+
+ if (aws_h2_validate_stream_id(stream_id)) {
+ return NULL;
+ }
+
+ if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) {
+ if (aws_h2_validate_stream_id(promised_stream_id)) {
+ return NULL;
+ }
+ }
+
+ if (optional_priority && aws_h2_validate_stream_id(optional_priority->stream_dependency)) {
+ return NULL;
+ }
+
+ /* Create */
+
+ struct aws_h2_frame_headers *frame = aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_frame_headers));
+ if (!frame) {
+ return NULL;
+ }
+
+ if (aws_byte_buf_init(&frame->whole_encoded_header_block, allocator, s_encoded_header_block_reserve)) {
+ goto error;
+ }
+
+ if (frame_type == AWS_H2_FRAME_T_HEADERS) {
+ frame->end_stream = end_stream;
+ if (optional_priority) {
+ frame->has_priority = true;
+ frame->priority = *optional_priority;
+ }
+ } else {
+ frame->promised_stream_id = promised_stream_id;
+ }
+
+ s_init_frame_base(&frame->base, allocator, frame_type, &s_frame_headers_vtable, stream_id);
+
+ aws_http_headers_acquire((struct aws_http_headers *)headers);
+ frame->headers = headers;
+ frame->pad_length = pad_length;
+
+ return &frame->base;
+
+error:
+ s_frame_headers_destroy(&frame->base);
+ return NULL;
+}
+
+struct aws_h2_frame *aws_h2_frame_new_headers(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ const struct aws_http_headers *headers,
+ bool end_stream,
+ uint8_t pad_length,
+ const struct aws_h2_frame_priority_settings *optional_priority) {
+
+ return s_frame_new_headers_or_push_promise(
+ allocator,
+ AWS_H2_FRAME_T_HEADERS,
+ stream_id,
+ headers,
+ pad_length,
+ end_stream,
+ optional_priority,
+ 0 /* HEADERS doesn't have promised_stream_id */);
+}
+
+struct aws_h2_frame *aws_h2_frame_new_push_promise(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t promised_stream_id,
+ const struct aws_http_headers *headers,
+ uint8_t pad_length) {
+
+ return s_frame_new_headers_or_push_promise(
+ allocator,
+ AWS_H2_FRAME_T_PUSH_PROMISE,
+ stream_id,
+ headers,
+ pad_length,
+ false /* PUSH_PROMISE doesn't have end_stream flag */,
+ NULL /* PUSH_PROMISE doesn't have priority_settings */,
+ promised_stream_id);
+}
+
+static void s_frame_headers_destroy(struct aws_h2_frame *frame_base) {
+ struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base);
+ aws_http_headers_release((struct aws_http_headers *)frame->headers);
+ aws_byte_buf_clean_up(&frame->whole_encoded_header_block);
+ aws_mem_release(frame->base.alloc, frame);
+}
+
+/* Encode the next frame for this header-block (or encode nothing if output buffer is too small). */
+static void s_encode_single_header_block_frame(
+ struct aws_h2_frame_headers *frame,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ bool *waiting_for_more_space) {
+
+ /*
+ * Figure out the details of the next frame to encode.
+ * The first frame will be either HEADERS or PUSH_PROMISE.
+ * All subsequent frames will be CONTINUATION
+ */
+
+ enum aws_h2_frame_type frame_type;
+ uint8_t flags = 0;
+ uint8_t pad_length = 0;
+ const struct aws_h2_frame_priority_settings *priority_settings = NULL;
+ const uint32_t *promised_stream_id = NULL;
+ size_t payload_overhead = 0; /* Amount of payload holding things other than header-block (padding, etc) */
+
+ if (frame->state == AWS_H2_HEADERS_STATE_FIRST_FRAME) {
+ frame_type = frame->base.type;
+
+ if (frame->pad_length > 0) {
+ flags |= AWS_H2_FRAME_F_PADDED;
+ pad_length = frame->pad_length;
+ payload_overhead += 1 + pad_length;
+ }
+
+ if (frame->has_priority) {
+ priority_settings = &frame->priority;
+ flags |= AWS_H2_FRAME_F_PRIORITY;
+ payload_overhead += s_frame_priority_settings_size;
+ }
+
+ if (frame->end_stream) {
+ flags |= AWS_H2_FRAME_F_END_STREAM;
+ }
+
+ if (frame_type == AWS_H2_FRAME_T_PUSH_PROMISE) {
+ promised_stream_id = &frame->promised_stream_id;
+ payload_overhead += 4;
+ }
+
+ } else /* CONTINUATION */ {
+ frame_type = AWS_H2_FRAME_T_CONTINUATION;
+ }
+
+ /*
+ * Figure out what size header-block fragment should go in this frame.
+ */
+
+ size_t max_payload;
+ if (s_get_max_contiguous_payload_length(encoder, output, &max_payload)) {
+ goto handle_waiting_for_more_space;
+ }
+
+ size_t max_fragment;
+ if (aws_sub_size_checked(max_payload, payload_overhead, &max_fragment)) {
+ goto handle_waiting_for_more_space;
+ }
+
+ const size_t fragment_len = aws_min_size(max_fragment, frame->header_block_cursor.len);
+ if (fragment_len == frame->header_block_cursor.len) {
+ /* This will finish the header-block */
+ flags |= AWS_H2_FRAME_F_END_HEADERS;
+ } else {
+ /* If we're not finishing the header-block, is it even worth trying to send this frame now? */
+ const size_t even_worth_sending_threshold = AWS_H2_FRAME_PREFIX_SIZE + payload_overhead;
+ if (fragment_len < even_worth_sending_threshold) {
+ goto handle_waiting_for_more_space;
+ }
+ }
+
+ /*
+ * Ok, it fits! Write the frame
+ */
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Encoding frame type=%s stream_id=%" PRIu32 "%s%s",
+ aws_h2_frame_type_to_str(frame_type),
+ frame->base.stream_id,
+ (flags & AWS_H2_FRAME_F_END_HEADERS) ? " END_HEADERS" : "",
+ (flags & AWS_H2_FRAME_F_END_STREAM) ? " END_STREAM" : "");
+
+ bool writes_ok = true;
+
+ /* Write the frame prefix */
+ const size_t payload_len = fragment_len + payload_overhead;
+ s_frame_prefix_encode(frame_type, frame->base.stream_id, payload_len, flags, output);
+
+ /* Write pad length */
+ if (flags & AWS_H2_FRAME_F_PADDED) {
+ AWS_ASSERT(frame_type != AWS_H2_FRAME_T_CONTINUATION);
+ writes_ok &= aws_byte_buf_write_u8(output, pad_length);
+ }
+
+ /* Write priority */
+ if (flags & AWS_H2_FRAME_F_PRIORITY) {
+ AWS_ASSERT(frame_type == AWS_H2_FRAME_T_HEADERS);
+ s_frame_priority_settings_encode(priority_settings, output);
+ }
+
+ /* Write promised stream ID */
+ if (promised_stream_id) {
+ AWS_ASSERT(frame_type == AWS_H2_FRAME_T_PUSH_PROMISE);
+ writes_ok &= aws_byte_buf_write_be32(output, *promised_stream_id);
+ }
+
+ /* Write header-block fragment */
+ if (fragment_len > 0) {
+ struct aws_byte_cursor fragment = aws_byte_cursor_advance(&frame->header_block_cursor, fragment_len);
+ writes_ok &= aws_byte_buf_write_from_whole_cursor(output, fragment);
+ }
+
+ /* Write padding */
+ if (flags & AWS_H2_FRAME_F_PADDED) {
+ writes_ok &= aws_byte_buf_write_u8_n(output, 0, pad_length);
+ }
+
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ /* Success! Wrote entire frame. It's safe to change state now */
+ frame->state =
+ flags & AWS_H2_FRAME_F_END_HEADERS ? AWS_H2_HEADERS_STATE_COMPLETE : AWS_H2_HEADERS_STATE_CONTINUATION;
+ *waiting_for_more_space = false;
+ return;
+
+handle_waiting_for_more_space:
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Insufficient space to encode %s for stream %" PRIu32 " right now",
+ aws_h2_frame_type_to_str(frame->base.type),
+ frame->base.stream_id);
+ *waiting_for_more_space = true;
+}
+
+static int s_frame_headers_encode(
+ struct aws_h2_frame *frame_base,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ bool *complete) {
+
+ struct aws_h2_frame_headers *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_headers, base);
+
+ /* Pre-encode the entire header-block into another buffer
+ * the first time we're called. */
+ if (frame->state == AWS_H2_HEADERS_STATE_INIT) {
+ if (aws_hpack_encode_header_block(&encoder->hpack, frame->headers, &frame->whole_encoded_header_block)) {
+ ENCODER_LOGF(
+ ERROR,
+ encoder,
+ "Error doing HPACK encoding on %s of stream %" PRIu32 ": %s",
+ aws_h2_frame_type_to_str(frame->base.type),
+ frame->base.stream_id,
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ frame->header_block_cursor = aws_byte_cursor_from_buf(&frame->whole_encoded_header_block);
+ frame->state = AWS_H2_HEADERS_STATE_FIRST_FRAME;
+ }
+
+ /* Write frames (HEADER or PUSH_PROMISE, followed by N CONTINUATION frames)
+ * until we're done writing header-block or the buffer is too full to continue */
+ bool waiting_for_more_space = false;
+ while (frame->state < AWS_H2_HEADERS_STATE_COMPLETE && !waiting_for_more_space) {
+ s_encode_single_header_block_frame(frame, encoder, output, &waiting_for_more_space);
+ }
+
+ *complete = frame->state == AWS_H2_HEADERS_STATE_COMPLETE;
+ return AWS_OP_SUCCESS;
+
+error:
+ return AWS_OP_ERR;
+}
+
+/***********************************************************************************************************************
+ * aws_h2_frame_prebuilt - Used by small simple frame types that we can pre-encode at the time of creation.
+ * The pre-encoded buffer is then just copied bit-by-bit during the actual "encode()" function.
+ *
+ * It's safe to pre-encode a frame if it doesn't query/mutate any external state. So PING is totally great
+ * to pre-encode, but HEADERS (which queries MAX_FRAME_SIZE and mutates the HPACK table) would be a bad candidate.
+ **********************************************************************************************************************/
+struct aws_h2_frame_prebuilt {
+ struct aws_h2_frame base;
+
+ /* The entire frame is pre-encoded to this buffer during construction.
+ * The buffer has the exact capacity necessary to hold the frame */
+ struct aws_byte_buf encoded_buf;
+
+ /* After construction, this cursor points to the full contents of encoded_buf.
+ * As encode() is called, we copy the contents to output and advance the cursor.*/
+ struct aws_byte_cursor cursor;
+};
+
+DEFINE_FRAME_VTABLE(prebuilt);
+
+/* Can't pre-encode a frame unless it's guaranteed to fit, regardless of current settings. */
+static size_t s_prebuilt_payload_max(void) {
+ return aws_h2_settings_bounds[AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE][0];
+}
+
+/* Create aws_h2_frame_prebuilt and encode frame prefix into frame->encoded_buf.
+ * Caller must encode the payload to fill the rest of the encoded_buf. */
+static struct aws_h2_frame_prebuilt *s_h2_frame_new_prebuilt(
+ struct aws_allocator *allocator,
+ enum aws_h2_frame_type type,
+ uint32_t stream_id,
+ size_t payload_len,
+ uint8_t flags) {
+
+ AWS_PRECONDITION(payload_len <= s_prebuilt_payload_max());
+
+ const size_t encoded_frame_len = AWS_H2_FRAME_PREFIX_SIZE + payload_len;
+
+ /* Use single allocation for frame and buffer storage */
+ struct aws_h2_frame_prebuilt *frame;
+ void *storage;
+ if (!aws_mem_acquire_many(
+ allocator, 2, &frame, sizeof(struct aws_h2_frame_prebuilt), &storage, encoded_frame_len)) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*frame);
+ s_init_frame_base(&frame->base, allocator, type, &s_frame_prebuilt_vtable, stream_id);
+
+ /* encoded_buf has the exact amount of space necessary for the full encoded frame.
+ * The constructor of our subclass must finish filling up encoded_buf with the payload. */
+ frame->encoded_buf = aws_byte_buf_from_empty_array(storage, encoded_frame_len);
+
+ /* cursor points to full capacity of encoded_buf.
+ * Our subclass's constructor will finish writing the payload and fill encoded_buf to capacity.
+ * When encode() is called, we'll copy cursor's contents into available output space and advance the cursor. */
+ frame->cursor = aws_byte_cursor_from_array(storage, encoded_frame_len);
+
+ /* Write frame prefix */
+ s_frame_prefix_encode(type, stream_id, payload_len, flags, &frame->encoded_buf);
+
+ return frame;
+}
+
+static void s_frame_prebuilt_destroy(struct aws_h2_frame *frame_base) {
+ aws_mem_release(frame_base->alloc, frame_base);
+}
+
+static int s_frame_prebuilt_encode(
+ struct aws_h2_frame *frame_base,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ bool *complete) {
+
+ (void)encoder;
+ struct aws_h2_frame_prebuilt *frame = AWS_CONTAINER_OF(frame_base, struct aws_h2_frame_prebuilt, base);
+
+ /* encoded_buf should have been filled to capacity during construction */
+ AWS_ASSERT(frame->encoded_buf.len == frame->encoded_buf.capacity);
+
+ /* After construction, cursor points to the full contents of encoded_buf.
+ * As encode() is called, we copy the contents to output and advance the cursor. */
+ if (frame->cursor.len == frame->encoded_buf.len) {
+ /* We haven't sent anything yet, announce start of frame */
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Encoding frame type=%s stream_id=%" PRIu32,
+ aws_h2_frame_type_to_str(frame->base.type),
+ frame->base.stream_id);
+ } else {
+ /* We've already sent a bit, announce that we're resuming */
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Resume encoding frame type=%s stream_id=%" PRIu32,
+ aws_h2_frame_type_to_str(frame->base.type),
+ frame->base.stream_id);
+ }
+
+ bool writes_ok = true;
+
+ /* Copy as much as we can from cursor (pre-encoded frame contents) to output.
+ * Advance the cursor to mark our progress. */
+ size_t chunk_len = aws_min_size(frame->cursor.len, output->capacity - output->len);
+ struct aws_byte_cursor chunk = aws_byte_cursor_advance(&frame->cursor, chunk_len);
+ writes_ok &= aws_byte_buf_write_from_whole_cursor(output, chunk);
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ if (frame->cursor.len == 0) {
+ *complete = true;
+ } else {
+ ENCODER_LOGF(
+ TRACE,
+ encoder,
+ "Incomplete encoding of frame type=%s stream_id=%" PRIu32 ", will resume later...",
+ aws_h2_frame_type_to_str(frame->base.type),
+ frame->base.stream_id);
+
+ *complete = false;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/***********************************************************************************************************************
+ * PRIORITY
+ **********************************************************************************************************************/
+struct aws_h2_frame *aws_h2_frame_new_priority(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ const struct aws_h2_frame_priority_settings *priority) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(priority);
+
+ if (aws_h2_validate_stream_id(stream_id) || aws_h2_validate_stream_id(priority->stream_dependency)) {
+ return NULL;
+ }
+
+ /* PRIORITY can be pre-encoded */
+ const uint8_t flags = 0;
+ const size_t payload_len = s_frame_priority_settings_size;
+
+ struct aws_h2_frame_prebuilt *frame =
+ s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PRIORITY, stream_id, payload_len, flags);
+ if (!frame) {
+ return NULL;
+ }
+
+ /* Write the priority settings */
+ s_frame_priority_settings_encode(priority, &frame->encoded_buf);
+
+ return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * RST_STREAM
+ **********************************************************************************************************************/
+static const size_t s_frame_rst_stream_length = 4;
+
+struct aws_h2_frame *aws_h2_frame_new_rst_stream(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t error_code) {
+
+ if (aws_h2_validate_stream_id(stream_id)) {
+ return NULL;
+ }
+
+ /* RST_STREAM can be pre-encoded */
+ const uint8_t flags = 0;
+ const size_t payload_len = s_frame_rst_stream_length;
+
+ struct aws_h2_frame_prebuilt *frame =
+ s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_RST_STREAM, stream_id, payload_len, flags);
+ if (!frame) {
+ return NULL;
+ }
+
+ /* Write RST_STREAM payload (RFC-7540 6.4):
+ * +---------------------------------------------------------------+
+ * | Error Code (32) |
+ * +---------------------------------------------------------------+
+ */
+ bool writes_ok = true;
+ writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code);
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * SETTINGS
+ **********************************************************************************************************************/
+static const size_t s_frame_setting_length = 6;
+
+struct aws_h2_frame *aws_h2_frame_new_settings(
+ struct aws_allocator *allocator,
+ const struct aws_http2_setting *settings_array,
+ size_t num_settings,
+ bool ack) {
+
+ AWS_PRECONDITION(settings_array || num_settings == 0);
+
+ /* Cannot send settings in an ACK frame */
+ if (ack && num_settings > 0) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ /* Check against insane edge case of too many settings to fit in a frame. */
+ const size_t max_settings = s_prebuilt_payload_max() / s_frame_setting_length;
+ if (num_settings > max_settings) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_ENCODER,
+ "Cannot create SETTINGS frame with %zu settings, the limit is %zu.",
+ num_settings,
+ max_settings);
+
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ /* SETTINGS can be pre-encoded */
+ const uint8_t flags = ack ? AWS_H2_FRAME_F_ACK : 0;
+ const size_t payload_len = num_settings * s_frame_setting_length;
+ const uint32_t stream_id = 0;
+
+ struct aws_h2_frame_prebuilt *frame =
+ s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_SETTINGS, stream_id, payload_len, flags);
+ if (!frame) {
+ return NULL;
+ }
+
+ /* Write the settings, each one is encoded like (RFC-7540 6.5.1):
+ * +-------------------------------+
+ * | Identifier (16) |
+ * +-------------------------------+-------------------------------+
+ * | Value (32) |
+ * +---------------------------------------------------------------+
+ */
+ bool writes_ok = true;
+ for (size_t i = 0; i < num_settings; ++i) {
+ writes_ok &= aws_byte_buf_write_be16(&frame->encoded_buf, settings_array[i].id);
+ writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, settings_array[i].value);
+ }
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ return &frame->base;
+}
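+
+/*
+ * Illustrative caller-side sketch (hypothetical names such as `alloc`; the chosen value is only an
+ * example): building a non-ACK SETTINGS frame from an array of aws_http2_setting entries, and an
+ * empty ACK frame.
+ *
+ *     struct aws_http2_setting my_settings[] = {
+ *         {.id = AWS_HTTP2_SETTINGS_MAX_FRAME_SIZE, .value = 16384},
+ *     };
+ *     struct aws_h2_frame *settings_frame =
+ *         aws_h2_frame_new_settings(alloc, my_settings, 1, false);
+ *
+ *     // A SETTINGS ACK must carry no settings:
+ *     struct aws_h2_frame *settings_ack = aws_h2_frame_new_settings(alloc, NULL, 0, true);
+ */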
+
+/***********************************************************************************************************************
+ * PING
+ **********************************************************************************************************************/
+struct aws_h2_frame *aws_h2_frame_new_ping(
+ struct aws_allocator *allocator,
+ bool ack,
+ const uint8_t opaque_data[AWS_HTTP2_PING_DATA_SIZE]) {
+
+ /* PING can be pre-encoded */
+ const uint8_t flags = ack ? AWS_H2_FRAME_F_ACK : 0;
+ const size_t payload_len = AWS_HTTP2_PING_DATA_SIZE;
+ const uint32_t stream_id = 0;
+
+ struct aws_h2_frame_prebuilt *frame =
+ s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_PING, stream_id, payload_len, flags);
+ if (!frame) {
+ return NULL;
+ }
+
+ /* Write the PING payload (RFC-7540 6.7):
+ * +---------------------------------------------------------------+
+ * | |
+ * | Opaque Data (64) |
+ * | |
+ * +---------------------------------------------------------------+
+ */
+ bool writes_ok = true;
+ writes_ok &= aws_byte_buf_write(&frame->encoded_buf, opaque_data, AWS_HTTP2_PING_DATA_SIZE);
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ /* PING responses SHOULD be given higher priority than any other frame */
+ frame->base.high_priority = ack;
+ return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * GOAWAY
+ **********************************************************************************************************************/
+static const size_t s_frame_goaway_length_min = 8;
+
+struct aws_h2_frame *aws_h2_frame_new_goaway(
+ struct aws_allocator *allocator,
+ uint32_t last_stream_id,
+ uint32_t error_code,
+ struct aws_byte_cursor debug_data) {
+
+ /* If debug_data is too long, don't send it.
+ * It's more important that the GOAWAY frame gets sent. */
+ const size_t debug_data_max = s_prebuilt_payload_max() - s_frame_goaway_length_min;
+ if (debug_data.len > debug_data_max) {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_ENCODER,
+ "Sending GOAWAY without debug-data. Debug-data size %zu exceeds internal limit of %zu",
+ debug_data.len,
+ debug_data_max);
+
+ debug_data.len = 0;
+ }
+
+ /* It would be illegal to send a lower value; this is unrecoverable */
+ AWS_FATAL_ASSERT(last_stream_id <= AWS_H2_STREAM_ID_MAX);
+
+ /* GOAWAY can be pre-encoded */
+ const uint8_t flags = 0;
+ const size_t payload_len = debug_data.len + s_frame_goaway_length_min;
+ const uint32_t stream_id = 0;
+
+ struct aws_h2_frame_prebuilt *frame =
+ s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_GOAWAY, stream_id, payload_len, flags);
+ if (!frame) {
+ return NULL;
+ }
+
+ /* Write the GOAWAY payload (RFC-7540 6.8):
+ * +-+-------------------------------------------------------------+
+ * |R| Last-Stream-ID (31) |
+ * +-+-------------------------------------------------------------+
+ * | Error Code (32) |
+ * +---------------------------------------------------------------+
+ * | Additional Debug Data (*) |
+ * +---------------------------------------------------------------+
+ */
+ bool writes_ok = true;
+ writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, last_stream_id);
+ writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, error_code);
+ writes_ok &= aws_byte_buf_write_from_whole_cursor(&frame->encoded_buf, debug_data);
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ return &frame->base;
+}
+
+/***********************************************************************************************************************
+ * WINDOW_UPDATE
+ **********************************************************************************************************************/
+static const size_t s_frame_window_update_length = 4;
+
+struct aws_h2_frame *aws_h2_frame_new_window_update(
+ struct aws_allocator *allocator,
+ uint32_t stream_id,
+ uint32_t window_size_increment) {
+
+ /* Note: stream_id may be zero or non-zero */
+ if (stream_id > AWS_H2_STREAM_ID_MAX) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (window_size_increment > AWS_H2_WINDOW_UPDATE_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_ENCODER,
+ "Window increment size %" PRIu32 " exceeds HTTP/2 max %" PRIu32,
+ window_size_increment,
+ AWS_H2_WINDOW_UPDATE_MAX);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ /* WINDOW_UPDATE can be pre-encoded */
+ const uint8_t flags = 0;
+ const size_t payload_len = s_frame_window_update_length;
+
+ struct aws_h2_frame_prebuilt *frame =
+ s_h2_frame_new_prebuilt(allocator, AWS_H2_FRAME_T_WINDOW_UPDATE, stream_id, payload_len, flags);
+ if (!frame) {
+ return NULL;
+ }
+
+ /* Write the WINDOW_UPDATE payload (RFC-7540 6.9):
+ * +-+-------------------------------------------------------------+
+ * |R| Window Size Increment (31) |
+ * +-+-------------------------------------------------------------+
+ */
+ bool writes_ok = true;
+ writes_ok &= aws_byte_buf_write_be32(&frame->encoded_buf, window_size_increment);
+ AWS_ASSERT(writes_ok);
+ (void)writes_ok;
+
+ return &frame->base;
+}
+
+void aws_h2_frame_destroy(struct aws_h2_frame *frame) {
+ if (frame) {
+ frame->vtable->destroy(frame);
+ }
+}
+
+int aws_h2_encode_frame(
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_h2_frame *frame,
+ struct aws_byte_buf *output,
+ bool *frame_complete) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(frame);
+ AWS_PRECONDITION(output);
+ AWS_PRECONDITION(frame_complete);
+
+ if (encoder->has_errored) {
+ ENCODER_LOG(ERROR, encoder, "Encoder cannot be used again after an error");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ if (encoder->current_frame && (encoder->current_frame != frame)) {
+ ENCODER_LOG(ERROR, encoder, "Cannot encode new frame until previous frame completes");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ *frame_complete = false;
+
+ if (frame->vtable->encode(frame, encoder, output, frame_complete)) {
+ ENCODER_LOGF(
+ ERROR,
+ encoder,
+ "Failed to encode frame type=%s stream_id=%" PRIu32 ", %s",
+ aws_h2_frame_type_to_str(frame->type),
+ frame->stream_id,
+ aws_error_name(aws_last_error()));
+ encoder->has_errored = true;
+ return AWS_OP_ERR;
+ }
+
+ encoder->current_frame = *frame_complete ? NULL : frame;
+ return AWS_OP_SUCCESS;
+}
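+
+/*
+ * Illustrative caller-side sketch (hypothetical names; `encoder`, `output`, `alloc`, and
+ * `opaque_data` are assumed to be set up elsewhere): a frame is encoded by calling
+ * aws_h2_encode_frame() on the same frame until *frame_complete becomes true, draining `output`
+ * in between so the next pass has space to write.
+ *
+ *     struct aws_h2_frame *frame = aws_h2_frame_new_ping(alloc, false, opaque_data);
+ *     bool frame_complete = false;
+ *     while (!frame_complete) {
+ *         if (aws_h2_encode_frame(encoder, frame, output, &frame_complete)) {
+ *             break; // encoder has errored and cannot be used again
+ *         }
+ *         // flush `output` to the channel here
+ *     }
+ *     aws_h2_frame_destroy(frame);
+ */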
+
+void aws_h2_frame_encoder_set_setting_header_table_size(struct aws_h2_frame_encoder *encoder, uint32_t data) {
+ /* The peer changed the setting for dynamic table size; we will update the dynamic table size when we encode the
+ * next header block */
+ aws_hpack_encoder_update_max_table_size(&encoder->hpack, data);
+}
+
+void aws_h2_frame_encoder_set_setting_max_frame_size(struct aws_h2_frame_encoder *encoder, uint32_t data) {
+ encoder->settings.max_frame_size = data;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/h2_stream.c b/contrib/restricted/aws/aws-c-http/source/h2_stream.c
new file mode 100644
index 0000000000..85232db006
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/h2_stream.c
@@ -0,0 +1,1321 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/h2_stream.h>
+
+#include <aws/http/private/h2_connection.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/status_code.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+/* Apple toolchains such as Xcode and SwiftPM define the DEBUG symbol. Undef it here so we can actually use the token */
+#undef DEBUG
+
+static void s_stream_destroy(struct aws_http_stream *stream_base);
+static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size);
+static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t http2_error);
+static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error);
+static int s_stream_get_sent_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error);
+static int s_stream_write_data(
+ struct aws_http_stream *stream_base,
+ const struct aws_http2_stream_write_data_options *options);
+
+static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error);
+static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, struct aws_h2err stream_error);
+
+struct aws_http_stream_vtable s_h2_stream_vtable = {
+ .destroy = s_stream_destroy,
+ .update_window = s_stream_update_window,
+ .activate = aws_h2_stream_activate,
+ .http1_write_chunk = NULL,
+ .http2_reset_stream = s_stream_reset_stream,
+ .http2_get_received_error_code = s_stream_get_received_error_code,
+ .http2_get_sent_error_code = s_stream_get_sent_error_code,
+ .http2_write_data = s_stream_write_data,
+};
+
+const char *aws_h2_stream_state_to_str(enum aws_h2_stream_state state) {
+ switch (state) {
+ case AWS_H2_STREAM_STATE_IDLE:
+ return "IDLE";
+ case AWS_H2_STREAM_STATE_RESERVED_LOCAL:
+ return "RESERVED_LOCAL";
+ case AWS_H2_STREAM_STATE_RESERVED_REMOTE:
+ return "RESERVED_REMOTE";
+ case AWS_H2_STREAM_STATE_OPEN:
+ return "OPEN";
+ case AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL:
+ return "HALF_CLOSED_LOCAL";
+ case AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE:
+ return "HALF_CLOSED_REMOTE";
+ case AWS_H2_STREAM_STATE_CLOSED:
+ return "CLOSED";
+ default:
+ /* unreachable */
+ AWS_ASSERT(0);
+ return "*** UNKNOWN ***";
+ }
+}
+
+static struct aws_h2_connection *s_get_h2_connection(const struct aws_h2_stream *stream) {
+ return AWS_CONTAINER_OF(stream->base.owning_connection, struct aws_h2_connection, base);
+}
+
+static void s_lock_synced_data(struct aws_h2_stream *stream) {
+ int err = aws_mutex_lock(&stream->synced_data.lock);
+ AWS_ASSERT(!err && "lock failed");
+ (void)err;
+}
+
+static void s_unlock_synced_data(struct aws_h2_stream *stream) {
+ int err = aws_mutex_unlock(&stream->synced_data.lock);
+ AWS_ASSERT(!err && "unlock failed");
+ (void)err;
+}
+
+#define AWS_PRECONDITION_ON_CHANNEL_THREAD(STREAM) \
+ AWS_PRECONDITION(aws_channel_thread_is_callers_thread(s_get_h2_connection(STREAM)->base.channel_slot->channel))
+
+static bool s_client_state_allows_frame_type[AWS_H2_STREAM_STATE_COUNT][AWS_H2_FRAME_TYPE_COUNT] = {
+ /* State before anything is sent or received */
+ [AWS_H2_STREAM_STATE_IDLE] = {0},
+ /* Client streams are never in reserved (local) state */
+ [AWS_H2_STREAM_STATE_RESERVED_LOCAL] = {0},
+ /* Client received push-request via PUSH_PROMISE on another stream.
+ * Waiting for push-response to start arriving on this server-initiated stream. */
+ [AWS_H2_STREAM_STATE_RESERVED_REMOTE] =
+ {
+ [AWS_H2_FRAME_T_HEADERS] = true,
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ },
+ /* Client is sending request and has not received full response yet. */
+ [AWS_H2_STREAM_STATE_OPEN] =
+ {
+ [AWS_H2_FRAME_T_DATA] = true,
+ [AWS_H2_FRAME_T_HEADERS] = true,
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_PUSH_PROMISE] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Client has sent full request (END_STREAM), but has not received full response yet. */
+ [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] =
+ {
+ [AWS_H2_FRAME_T_DATA] = true,
+ [AWS_H2_FRAME_T_HEADERS] = true,
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_PUSH_PROMISE] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Client has received full response (END_STREAM), but is still sending request (uncommon). */
+ [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] =
+ {
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Full request sent (END_STREAM) and full response received (END_STREAM).
+ * OR sent RST_STREAM. OR received RST_STREAM. */
+ [AWS_H2_STREAM_STATE_CLOSED] = {0},
+};
+
+static bool s_server_state_allows_frame_type[AWS_H2_STREAM_STATE_COUNT][AWS_H2_FRAME_TYPE_COUNT] = {
+ /* State before anything is sent or received, waiting for request headers to arrive and start things off */
+ [AWS_H2_STREAM_STATE_IDLE] =
+ {
+ [AWS_H2_FRAME_T_HEADERS] = true,
+ },
+ /* Server sent push-request via PUSH_PROMISE on a client-initiated stream,
+ * but hasn't started sending the push-response on this server-initiated stream yet. */
+ [AWS_H2_STREAM_STATE_RESERVED_LOCAL] =
+ {
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Server streams are never in reserved (remote) state */
+ [AWS_H2_STREAM_STATE_RESERVED_REMOTE] = {0},
+ /* Server is receiving request, and has not sent full response yet. */
+ [AWS_H2_STREAM_STATE_OPEN] =
+ {
+ [AWS_H2_FRAME_T_HEADERS] = true,
+ [AWS_H2_FRAME_T_DATA] = true,
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Server has sent full response (END_STREAM), but has not received full request yet (uncommon). */
+ [AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL] =
+ {
+ [AWS_H2_FRAME_T_HEADERS] = true,
+ [AWS_H2_FRAME_T_DATA] = true,
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Server has received full request (END_STREAM), and is still sending response. */
+ [AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE] =
+ {
+ [AWS_H2_FRAME_T_RST_STREAM] = true,
+ [AWS_H2_FRAME_T_WINDOW_UPDATE] = true,
+ },
+ /* Full request received (END_STREAM) and full response sent (END_STREAM).
+ * OR sent RST_STREAM. OR received RST_STREAM. */
+ [AWS_H2_STREAM_STATE_CLOSED] = {0},
+};
+
+/* Returns the appropriate Stream Error if given frame not allowed in current state */
+static struct aws_h2err s_check_state_allows_frame_type(
+ const struct aws_h2_stream *stream,
+ enum aws_h2_frame_type frame_type) {
+
+ AWS_PRECONDITION(frame_type < AWS_H2_FRAME_T_UNKNOWN); /* Decoder won't invoke callbacks for unknown frame types */
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ const enum aws_h2_stream_state state = stream->thread_data.state;
+
+ bool allowed;
+ if (stream->base.server_data) {
+ allowed = s_server_state_allows_frame_type[state][frame_type];
+ } else {
+ allowed = s_client_state_allows_frame_type[state][frame_type];
+ }
+
+ if (allowed) {
+ return AWS_H2ERR_SUCCESS;
+ }
+
+ /* Determine specific error code */
+ enum aws_http2_error_code h2_error_code = AWS_HTTP2_ERR_PROTOCOL_ERROR;
+
+ /* If peer knows the state is closed, then it's a STREAM_CLOSED error */
+ if (state == AWS_H2_STREAM_STATE_CLOSED || state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) {
+ h2_error_code = AWS_HTTP2_ERR_STREAM_CLOSED;
+ }
+
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Malformed message, cannot receive %s frame in %s state",
+ aws_h2_frame_type_to_str(frame_type),
+ aws_h2_stream_state_to_str(state));
+
+ return aws_h2err_from_h2_code(h2_error_code);
+}
+
+static int s_stream_send_update_window_frame(struct aws_h2_stream *stream, size_t increment_size) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+ AWS_PRECONDITION(increment_size <= AWS_H2_WINDOW_UPDATE_MAX);
+
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+ struct aws_h2_frame *stream_window_update_frame =
+ aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, (uint32_t)increment_size);
+
+ if (!stream_window_update_frame) {
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Failed to create WINDOW_UPDATE frame on connection, error %s",
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, stream_window_update_frame);
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_h2_stream *aws_h2_stream_new_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options) {
+ AWS_PRECONDITION(client_connection);
+ AWS_PRECONDITION(options);
+
+ struct aws_h2_stream *stream = aws_mem_calloc(client_connection->alloc, 1, sizeof(struct aws_h2_stream));
+
+ /* Initialize base stream */
+ stream->base.vtable = &s_h2_stream_vtable;
+ stream->base.alloc = client_connection->alloc;
+ stream->base.owning_connection = client_connection;
+ stream->base.user_data = options->user_data;
+ stream->base.on_incoming_headers = options->on_response_headers;
+ stream->base.on_incoming_header_block_done = options->on_response_header_block_done;
+ stream->base.on_incoming_body = options->on_response_body;
+ stream->base.on_complete = options->on_complete;
+ stream->base.on_destroy = options->on_destroy;
+ stream->base.client_data = &stream->base.client_or_server_data.client;
+ stream->base.client_data->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ aws_linked_list_init(&stream->thread_data.outgoing_writes);
+ aws_linked_list_init(&stream->synced_data.pending_write_list);
+
+ /* Stream refcount starts at 1, and gets incremented again for the connection upon a call to activate() */
+ aws_atomic_init_int(&stream->base.refcount, 1);
+
+ enum aws_http_version message_version = aws_http_message_get_protocol_version(options->request);
+ switch (message_version) {
+ case AWS_HTTP_VERSION_1_1:
+ /* TODO: don't automatically transform the HTTP/1 message. Let the user explicitly pass in an HTTP/2 request */
+ stream->thread_data.outgoing_message =
+ aws_http2_message_new_from_http1(stream->base.alloc, options->request);
+ if (!stream->thread_data.outgoing_message) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Stream failed to create the HTTP/2 message from HTTP/1.1 message");
+ goto error;
+ }
+ break;
+ case AWS_HTTP_VERSION_2:
+ stream->thread_data.outgoing_message = options->request;
+ aws_http_message_acquire(stream->thread_data.outgoing_message);
+ break;
+ default:
+ /* Not supported */
+ aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL);
+ goto error;
+ }
+ struct aws_byte_cursor method;
+ AWS_ZERO_STRUCT(method);
+ if (aws_http_message_get_request_method(options->request, &method)) {
+ goto error;
+ }
+ stream->base.request_method = aws_http_str_to_method(method);
+
+ /* Init H2 specific stuff */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_IDLE;
+ /* stream end is implicit if the request isn't using manual data writes */
+ stream->synced_data.manual_write_ended = !options->http2_use_manual_data_writes;
+ stream->manual_write = options->http2_use_manual_data_writes;
+
+ /* if there's a request body to write, add it as the first outgoing write */
+ struct aws_input_stream *body_stream = aws_http_message_get_body_stream(options->request);
+ if (body_stream) {
+ struct aws_h2_stream_data_write *body_write =
+ aws_mem_calloc(stream->base.alloc, 1, sizeof(struct aws_h2_stream_data_write));
+ body_write->data_stream = aws_input_stream_acquire(body_stream);
+ body_write->end_stream = !stream->manual_write;
+ aws_linked_list_push_back(&stream->thread_data.outgoing_writes, &body_write->node);
+ }
+
+ stream->sent_reset_error_code = -1;
+ stream->received_reset_error_code = -1;
+ stream->synced_data.reset_error.h2_code = AWS_HTTP2_ERR_COUNT;
+ stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_INIT;
+ if (aws_mutex_init(&stream->synced_data.lock)) {
+ AWS_H2_STREAM_LOGF(
+ ERROR, stream, "Mutex init error %d (%s).", aws_last_error(), aws_error_name(aws_last_error()));
+ goto error;
+ }
+ aws_channel_task_init(
+ &stream->cross_thread_work_task, s_stream_cross_thread_work_task, stream, "HTTP/2 stream cross-thread work");
+ return stream;
+error:
+ s_stream_destroy(&stream->base);
+ return NULL;
+}
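+
+/*
+ * Illustrative caller-side sketch (hypothetical callback and variable names; only option fields
+ * referenced in this file are shown): a client fills aws_http_make_request_options and creates
+ * the stream, which starts in AWS_H2_STREAM_API_STATE_INIT until it is activated.
+ *
+ *     struct aws_http_make_request_options opts = {
+ *         .request = request,                    // aws_http_message, HTTP/2 or HTTP/1.1
+ *         .user_data = my_ctx,
+ *         .on_response_headers = s_on_headers,
+ *         .on_response_body = s_on_body,
+ *         .on_complete = s_on_complete,
+ *         .http2_use_manual_data_writes = false, // body comes from the message's body stream
+ *     };
+ *     struct aws_h2_stream *stream = aws_h2_stream_new_request(client_connection, &opts);
+ */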
+
+static void s_stream_cross_thread_work_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ struct aws_h2_stream *stream = arg;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto end;
+ }
+
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+ if (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_CLOSED) {
+ /* stream is closed, silently ignore the requests from the user */
+ AWS_H2_STREAM_LOG(
+ TRACE, stream, "Stream closed before cross-thread work task runs, ignoring everything sent by the user.");
+ goto end;
+ }
+
+ /* Not sending window update at half closed remote state */
+ bool ignore_window_update = (aws_h2_stream_get_state(stream) == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE);
+ bool reset_called;
+ size_t window_update_size;
+ struct aws_h2err reset_error;
+
+ struct aws_linked_list pending_writes;
+ aws_linked_list_init(&pending_writes);
+
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream);
+ stream->synced_data.is_cross_thread_work_task_scheduled = false;
+
+ /* window_update_size is ensured to be not greater than AWS_H2_WINDOW_UPDATE_MAX */
+ window_update_size = stream->synced_data.window_update_size;
+ stream->synced_data.window_update_size = 0;
+ reset_called = stream->synced_data.reset_called;
+ reset_error = stream->synced_data.reset_error;
+
+ /* copy out pending writes */
+ aws_linked_list_swap_contents(&pending_writes, &stream->synced_data.pending_write_list);
+
+ s_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (window_update_size > 0 && !ignore_window_update) {
+ if (s_stream_send_update_window_frame(stream, window_update_size)) {
+ /* Treat this as a connection error */
+ aws_h2_connection_shutdown_due_to_write_err(connection, aws_last_error());
+ }
+ }
+
+ /* The largest legal value is 2 * max window size, which is far less than INT64_MAX, so if window_size_self
+ * overflows, the remote peer will detect it. Just apply the change and ignore the possible overflow. */
+ stream->thread_data.window_size_self += window_update_size;
+
+ if (reset_called) {
+ struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, reset_error);
+ if (aws_h2err_failed(returned_h2err)) {
+ aws_h2_connection_shutdown_due_to_write_err(connection, returned_h2err.aws_code);
+ }
+ }
+
+ if (stream->thread_data.waiting_for_writes && !aws_linked_list_empty(&pending_writes)) {
+ /* Got more to write, move the stream back to outgoing list */
+ aws_linked_list_remove(&stream->node);
+ aws_linked_list_push_back(&connection->thread_data.outgoing_streams_list, &stream->node);
+ stream->thread_data.waiting_for_writes = false;
+ }
+ /* move any pending writes to the outgoing write queue */
+ aws_linked_list_move_all_back(&stream->thread_data.outgoing_writes, &pending_writes);
+
+ /* It's likely that frames were queued while processing cross-thread work.
+ * If so, try writing them now */
+ aws_h2_try_write_outgoing_frames(connection);
+
+end:
+ aws_http_stream_release(&stream->base);
+}
+
+static void s_stream_data_write_destroy(
+ struct aws_h2_stream *stream,
+ struct aws_h2_stream_data_write *write,
+ int error_code) {
+
+ AWS_PRECONDITION(stream);
+ AWS_PRECONDITION(write);
+ if (write->on_complete) {
+ write->on_complete(&stream->base, error_code, write->user_data);
+ }
+ if (write->data_stream) {
+ aws_input_stream_release(write->data_stream);
+ }
+ aws_mem_release(stream->base.alloc, write);
+}
+
+static void s_h2_stream_destroy_pending_writes(struct aws_h2_stream *stream) {
+ /**
+ * Only called when the stream is not active and will never be active afterward (it is being destroyed).
+ * Under this assumption, we can safely touch `stream->synced_data.pending_write_list` without the
+ * lock, as the user can only add writes to the list while the stream is ACTIVE.
+ */
+ AWS_ASSERT(stream->synced_data.api_state != AWS_H2_STREAM_API_STATE_ACTIVE);
+ aws_linked_list_move_all_back(
+ &stream->thread_data.outgoing_writes,
+ &stream->synced_data.pending_write_list); /* clean up any outgoing writes */
+ while (!aws_linked_list_empty(&stream->thread_data.outgoing_writes)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.outgoing_writes);
+ struct aws_h2_stream_data_write *write = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node);
+ AWS_LOGF_DEBUG(AWS_LS_HTTP_STREAM, "Stream closing, cancelling write of stream %p", (void *)write->data_stream);
+ s_stream_data_write_destroy(stream, write, AWS_ERROR_HTTP_STREAM_HAS_COMPLETED);
+ }
+}
+
+static void s_stream_destroy(struct aws_http_stream *stream_base) {
+ AWS_PRECONDITION(stream_base);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+
+ s_h2_stream_destroy_pending_writes(stream);
+
+ AWS_H2_STREAM_LOG(DEBUG, stream, "Destroying stream");
+ aws_mutex_clean_up(&stream->synced_data.lock);
+ aws_http_message_release(stream->thread_data.outgoing_message);
+
+ aws_mem_release(stream->base.alloc, stream);
+}
+
+void aws_h2_stream_complete(struct aws_h2_stream *stream, int error_code) {
+ { /* BEGIN CRITICAL SECTION */
+ /* clean up any pending writes */
+ s_lock_synced_data(stream);
+ /* The stream is complete now, this will prevent further writes from being queued */
+ stream->synced_data.api_state = AWS_H2_STREAM_API_STATE_COMPLETE;
+ s_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ s_h2_stream_destroy_pending_writes(stream);
+
+ /* Invoke callback */
+ if (stream->base.on_complete) {
+ stream->base.on_complete(&stream->base, error_code, stream->base.user_data);
+ }
+}
+
+static void s_stream_update_window(struct aws_http_stream *stream_base, size_t increment_size) {
+ AWS_PRECONDITION(stream_base);
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+ if (!increment_size) {
+ return;
+ }
+ if (!connection->base.stream_manual_window_management) {
+ /* auto-mode, manual update window is not supported */
+ AWS_H2_STREAM_LOG(
+ DEBUG, stream, "Manual window management is off, update window operations are not supported.");
+ return;
+ }
+
+ int err = 0;
+ bool stream_is_init;
+ bool cross_thread_work_should_schedule = false;
+ size_t sum_size;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream);
+
+ err |= aws_add_size_checked(stream->synced_data.window_update_size, increment_size, &sum_size);
+ err |= sum_size > AWS_H2_WINDOW_UPDATE_MAX;
+ stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT;
+
+ if (!err && !stream_is_init) {
+ cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+ stream->synced_data.window_update_size = sum_size;
+ }
+ s_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (cross_thread_work_should_schedule) {
+ AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task");
+ /* increment the refcount of stream to keep it alive until the task runs */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task);
+ return;
+ }
+
+ if (stream_is_init) {
+ AWS_H2_STREAM_LOG(
+ ERROR,
+ stream,
+ "Stream update window failed. Stream is in initialized state, please activate the stream first.");
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ return;
+ }
+
+ if (err) {
+ /* The increment_size is still not 100% safe, since we cannot control the incoming data frames. So we just
+ * rule out values that are obviously wrong. */
+ AWS_H2_STREAM_LOG(
+ ERROR,
+ stream,
+ "The stream's flow-control window has been incremented beyond 2**31 -1, the max for HTTP/2. The stream "
+ "will close.");
+ aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ struct aws_h2err stream_error = {
+ .aws_code = AWS_ERROR_OVERFLOW_DETECTED,
+ .h2_code = AWS_HTTP2_ERR_INTERNAL_ERROR,
+ };
+ /* Reset can only fail when the stream is not initialized, so we can assert that it succeeds. */
+ AWS_FATAL_ASSERT(s_stream_reset_stream_internal(stream_base, stream_error) == AWS_OP_SUCCESS);
+ }
+ return;
+}
+
+static int s_stream_reset_stream_internal(struct aws_http_stream *stream_base, struct aws_h2err stream_error) {
+
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+ bool reset_called;
+ bool stream_is_init;
+ bool cross_thread_work_should_schedule = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream);
+
+ reset_called = stream->synced_data.reset_called;
+ stream_is_init = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT;
+ if (!reset_called && !stream_is_init) {
+ cross_thread_work_should_schedule = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.reset_called = true;
+ stream->synced_data.reset_error = stream_error;
+ }
+ s_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (stream_is_init) {
+ AWS_H2_STREAM_LOG(
+ ERROR, stream, "Reset stream failed. Stream is in initialized state, please activate the stream first.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ if (cross_thread_work_should_schedule) {
+ AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task");
+ /* increment the refcount of stream to keep it alive until the task runs */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task);
+ return AWS_OP_SUCCESS;
+ }
+ if (reset_called) {
+ AWS_H2_STREAM_LOG(DEBUG, stream, "Reset stream ignored. Reset stream has been called already.");
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_stream_reset_stream(struct aws_http_stream *stream_base, uint32_t http2_error) {
+ struct aws_h2err stream_error = {
+ .aws_code = AWS_ERROR_HTTP_RST_STREAM_SENT,
+ .h2_code = http2_error,
+ };
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: User requested RST_STREAM with error code %s (0x%x)",
+ (void *)stream_base,
+ aws_http2_error_code_to_str(http2_error),
+ http2_error);
+ return s_stream_reset_stream_internal(stream_base, stream_error);
+}
+
+static int s_stream_get_received_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) {
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ if (stream->received_reset_error_code == -1) {
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+ *out_http2_error = (uint32_t)stream->received_reset_error_code;
+ return AWS_OP_SUCCESS;
+}
+
+static int s_stream_get_sent_error_code(struct aws_http_stream *stream_base, uint32_t *out_http2_error) {
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ if (stream->sent_reset_error_code == -1) {
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+ *out_http2_error = (uint32_t)stream->sent_reset_error_code;
+ return AWS_OP_SUCCESS;
+}
+
+enum aws_h2_stream_state aws_h2_stream_get_state(const struct aws_h2_stream *stream) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+ return stream->thread_data.state;
+}
+
+/* Given a Stream Error, send RST_STREAM frame and close stream.
+ * A Connection Error is returned if something goes catastrophically wrong */
+static struct aws_h2err s_send_rst_and_close_stream(struct aws_h2_stream *stream, struct aws_h2err stream_error) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+ AWS_PRECONDITION(stream->thread_data.state != AWS_H2_STREAM_STATE_CLOSED);
+
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+ AWS_H2_STREAM_LOGF(
+ DEBUG,
+ stream,
+ "Sending RST_STREAM with error code %s (0x%x). State -> CLOSED",
+ aws_http2_error_code_to_str(stream_error.h2_code),
+ stream_error.h2_code);
+
+ /* Send RST_STREAM */
+ struct aws_h2_frame *rst_stream_frame =
+ aws_h2_frame_new_rst_stream(stream->base.alloc, stream->base.id, stream_error.h2_code);
+ AWS_FATAL_ASSERT(rst_stream_frame != NULL);
+ aws_h2_connection_enqueue_outgoing_frame(connection, rst_stream_frame); /* connection takes ownership of frame */
+ stream->sent_reset_error_code = stream_error.h2_code;
+
+ /* Tell connection that stream is now closed */
+ if (aws_h2_connection_on_stream_closed(
+ connection, stream, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_SENT, stream_error.aws_code)) {
+ return aws_h2err_from_last_error();
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err aws_h2_stream_window_size_change(struct aws_h2_stream *stream, int32_t size_changed, bool self) {
+ if (self) {
+ if (stream->thread_data.window_size_self + size_changed > AWS_H2_WINDOW_UPDATE_MAX) {
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+ }
+ stream->thread_data.window_size_self += size_changed;
+ } else {
+ if ((int64_t)stream->thread_data.window_size_peer + size_changed > AWS_H2_WINDOW_UPDATE_MAX) {
+ return aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR);
+ }
+ stream->thread_data.window_size_peer += size_changed;
+ }
+ return AWS_H2ERR_SUCCESS;
+}
+
+static inline bool s_h2_stream_has_outgoing_writes(struct aws_h2_stream *stream) {
+ return !aws_linked_list_empty(&stream->thread_data.outgoing_writes);
+}
+
+static void s_h2_stream_write_data_complete(struct aws_h2_stream *stream, bool *waiting_writes) {
+ AWS_PRECONDITION(waiting_writes);
+ AWS_PRECONDITION(s_h2_stream_has_outgoing_writes(stream));
+
+ /* finish/clean up the current write operation */
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&stream->thread_data.outgoing_writes);
+ struct aws_h2_stream_data_write *write_op = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node);
+ const bool ending_stream = write_op->end_stream;
+ s_stream_data_write_destroy(stream, write_op, AWS_OP_SUCCESS);
+
+ /* check to see if there are more queued writes or stream_end was called */
+ *waiting_writes = !ending_stream && !s_h2_stream_has_outgoing_writes(stream);
+}
+
+static struct aws_h2_stream_data_write *s_h2_stream_get_current_write(struct aws_h2_stream *stream) {
+ AWS_PRECONDITION(s_h2_stream_has_outgoing_writes(stream));
+ struct aws_linked_list_node *node = aws_linked_list_front(&stream->thread_data.outgoing_writes);
+ struct aws_h2_stream_data_write *write = AWS_CONTAINER_OF(node, struct aws_h2_stream_data_write, node);
+ return write;
+}
+
+static struct aws_input_stream *s_h2_stream_get_data_stream(struct aws_h2_stream *stream) {
+ struct aws_h2_stream_data_write *write = s_h2_stream_get_current_write(stream);
+ return write->data_stream;
+}
+
+static bool s_h2_stream_does_current_write_end_stream(struct aws_h2_stream *stream) {
+ struct aws_h2_stream_data_write *write = s_h2_stream_get_current_write(stream);
+ return write->end_stream;
+}
+
+int aws_h2_stream_on_activated(struct aws_h2_stream *stream, enum aws_h2_stream_body_state *body_state) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+ /* Create HEADERS frame */
+ struct aws_http_message *msg = stream->thread_data.outgoing_message;
+ /* Should be ensured when the stream is created */
+ AWS_ASSERT(aws_http_message_get_protocol_version(msg) == AWS_HTTP_VERSION_2);
+ /* With manual writes, there is always data to be sent. */
+ bool with_data = aws_http_message_get_body_stream(msg) != NULL || stream->manual_write;
+
+ struct aws_http_headers *h2_headers = aws_http_message_get_headers(msg);
+
+ struct aws_h2_frame *headers_frame = aws_h2_frame_new_headers(
+ stream->base.alloc,
+ stream->base.id,
+ h2_headers,
+ !with_data /* end_stream */,
+ 0 /* padding - not currently configurable via public API */,
+ NULL /* priority - not currently configurable via public API */);
+
+ if (!headers_frame) {
+ AWS_H2_STREAM_LOGF(ERROR, stream, "Failed to create HEADERS frame: %s", aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Initialize the flow-control window size */
+ stream->thread_data.window_size_peer =
+ connection->thread_data.settings_peer[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+ stream->thread_data.window_size_self =
+ connection->thread_data.settings_self[AWS_HTTP2_SETTINGS_INITIAL_WINDOW_SIZE];
+
+ if (with_data) {
+ /* If stream has DATA to send, put it in the outgoing_streams_list, and we'll send data later */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_OPEN;
+ AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS. State -> OPEN");
+ } else {
+ /* If stream has no body, then HEADERS frame marks the end of outgoing data */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL;
+ AWS_H2_STREAM_LOG(TRACE, stream, "Sending HEADERS with END_STREAM. State -> HALF_CLOSED_LOCAL");
+ }
+
+ if (s_h2_stream_has_outgoing_writes(stream)) {
+ *body_state = AWS_H2_STREAM_BODY_STATE_ONGOING;
+ } else {
+ if (stream->manual_write) {
+ stream->thread_data.waiting_for_writes = true;
+ *body_state = AWS_H2_STREAM_BODY_STATE_WAITING_WRITES;
+ } else {
+ *body_state = AWS_H2_STREAM_BODY_STATE_NONE;
+ }
+ }
+ aws_h2_connection_enqueue_outgoing_frame(connection, headers_frame);
+ return AWS_OP_SUCCESS;
+
+error:
+ return AWS_OP_ERR;
+}
+
+int aws_h2_stream_encode_data_frame(
+ struct aws_h2_stream *stream,
+ struct aws_h2_frame_encoder *encoder,
+ struct aws_byte_buf *output,
+ int *data_encode_status) {
+
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+ AWS_PRECONDITION(
+ stream->thread_data.state == AWS_H2_STREAM_STATE_OPEN ||
+ stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE);
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+ AWS_PRECONDITION(connection->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE);
+
+ if (stream->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+ /* The stream is stalled now */
+ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED;
+ return AWS_OP_SUCCESS;
+ }
+
+ *data_encode_status = AWS_H2_DATA_ENCODE_COMPLETE;
+ struct aws_input_stream *input_stream = s_h2_stream_get_data_stream(stream);
+ AWS_ASSERT(input_stream);
+
+ bool input_stream_complete = false;
+ bool input_stream_stalled = false;
+ bool ends_stream = s_h2_stream_does_current_write_end_stream(stream);
+ if (aws_h2_encode_data_frame(
+ encoder,
+ stream->base.id,
+ input_stream,
+ ends_stream,
+ 0 /*pad_length*/,
+ &stream->thread_data.window_size_peer,
+ &connection->thread_data.window_size_peer,
+ output,
+ &input_stream_complete,
+ &input_stream_stalled)) {
+
+ /* Failed to write DATA, treat it as a Stream Error */
+ AWS_H2_STREAM_LOGF(ERROR, stream, "Error encoding stream DATA, %s", aws_error_name(aws_last_error()));
+ struct aws_h2err returned_h2err = s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+ if (aws_h2err_failed(returned_h2err)) {
+ aws_h2_connection_shutdown_due_to_write_err(connection, returned_h2err.aws_code);
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ bool waiting_writes = false;
+ if (input_stream_complete) {
+ s_h2_stream_write_data_complete(stream, &waiting_writes);
+ }
+
+ /*
+ * For manual writes, input_stream_complete just means the current outgoing_write is complete. The body is not
+ * truly complete until the stream is told to close.
+ */
+ if (input_stream_complete && ends_stream) {
+ /* Done sending data. No more data will be sent. */
+ if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE) {
+ /* Both sides have sent END_STREAM */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+ AWS_H2_STREAM_LOG(TRACE, stream, "Sent END_STREAM. State -> CLOSED");
+ /* Tell connection that stream is now closed */
+ if (aws_h2_connection_on_stream_closed(
+ connection, stream, AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM, AWS_ERROR_SUCCESS)) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ /* Else can't close until we receive END_STREAM */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL;
+ AWS_H2_STREAM_LOG(TRACE, stream, "Sent END_STREAM. State -> HALF_CLOSED_LOCAL");
+ }
+ } else {
+ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING;
+ if (input_stream_stalled) {
+ AWS_ASSERT(!input_stream_complete);
+ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED;
+ }
+ if (stream->thread_data.window_size_peer <= AWS_H2_MIN_WINDOW_SIZE) {
+            /* If both the body and the window are stalled, take the window-stalled status, which takes the stream
+             * out of the outgoing list */
+ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED;
+ }
+ if (waiting_writes) {
+            /* If the window is stalled and we are waiting for manual writes, take the waiting-writes status, which
+             * is handled properly if more writes arrive while the window is still stalled. But not the other way
+             * around. */
+ AWS_ASSERT(input_stream_complete);
+ *data_encode_status = AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
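+
+/* Summary of the *data_encode_status outcomes produced above:
+ *  - AWS_H2_DATA_ENCODE_COMPLETE: the body finished with END_STREAM; the stream moved to HALF_CLOSED_LOCAL or CLOSED.
+ *  - AWS_H2_DATA_ENCODE_ONGOING: more body data remains; the stream stays in the outgoing list.
+ *  - AWS_H2_DATA_ENCODE_ONGOING_BODY_STREAM_STALLED: the input stream had no data ready yet.
+ *  - AWS_H2_DATA_ENCODE_ONGOING_WINDOW_STALLED: the peer's stream window fell to AWS_H2_MIN_WINDOW_SIZE or below;
+ *    the stream leaves the outgoing list until a WINDOW_UPDATE raises the window again.
+ *  - AWS_H2_DATA_ENCODE_ONGOING_WAITING_FOR_WRITES: a manual-write stream ran out of queued writes and waits for
+ *    the application to submit more. */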
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_begin(struct aws_h2_stream *stream) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_HEADERS);
+ if (aws_h2err_failed(stream_err)) {
+ return s_send_rst_and_close_stream(stream, stream_err);
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_i(
+ struct aws_h2_stream *stream,
+ const struct aws_http_header *header,
+ enum aws_http_header_name name_enum,
+ enum aws_http_header_block block_type) {
+
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ /* Not calling s_check_state_allows_frame_type() here because we already checked
+ * at start of HEADERS frame in aws_h2_stream_on_decoder_headers_begin() */
+
+ bool is_server = stream->base.server_data;
+
+ /* RFC-7540 8.1 - Message consists of:
+ * - 0+ Informational 1xx headers (response-only, decoder validates that this only occurs in responses)
+ * - 1 main headers with normal request or response.
+ * - 0 or 1 trailing headers with no pseudo-headers */
+ switch (block_type) {
+ case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL:
+ if (stream->thread_data.received_main_headers) {
+ AWS_H2_STREAM_LOG(
+ ERROR, stream, "Malformed message, received informational (1xx) response after main response");
+ goto malformed;
+ }
+ break;
+ case AWS_HTTP_HEADER_BLOCK_MAIN:
+ if (stream->thread_data.received_main_headers) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Malformed message, received second set of headers");
+ goto malformed;
+ }
+ break;
+ case AWS_HTTP_HEADER_BLOCK_TRAILING:
+ if (!stream->thread_data.received_main_headers) {
+ /* A HEADERS frame without any pseudo-headers looks like trailing headers to the decoder */
+ AWS_H2_STREAM_LOG(ERROR, stream, "Malformed headers lack required pseudo-header fields.");
+ goto malformed;
+ }
+ break;
+ default:
+ AWS_ASSERT(0);
+ }
+
+ if (is_server) {
+ return aws_h2err_from_aws_code(AWS_ERROR_UNIMPLEMENTED);
+
+ } else {
+ /* Client */
+ switch (name_enum) {
+ case AWS_HTTP_HEADER_STATUS: {
+ uint64_t status_code = 0;
+ int err = aws_byte_cursor_utf8_parse_u64(header->value, &status_code);
+ AWS_ASSERT(!err && "Invalid :status value. Decoder should have already validated this");
+ (void)err;
+
+ stream->base.client_data->response_status = (int)status_code;
+ } break;
+ case AWS_HTTP_HEADER_CONTENT_LENGTH: {
+ if (stream->thread_data.content_length_received) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Duplicate content-length value");
+ goto malformed;
+ }
+ if (aws_byte_cursor_utf8_parse_u64(header->value, &stream->thread_data.incoming_content_length)) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Invalid content-length value");
+ goto malformed;
+ }
+ stream->thread_data.content_length_received = true;
+ } break;
+ default:
+ break;
+ }
+ }
+
+ if (stream->base.on_incoming_headers) {
+ if (stream->base.on_incoming_headers(&stream->base, block_type, header, 1, stream->base.user_data)) {
+ AWS_H2_STREAM_LOGF(
+ ERROR, stream, "Incoming header callback raised error, %s", aws_error_name(aws_last_error()));
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+
+malformed:
+ /* RFC-9113 8.1.1 Malformed requests or responses that are detected MUST be treated as a stream error
+ * (Section 5.4.2) of type PROTOCOL_ERROR.*/
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+}
+
+struct aws_h2err aws_h2_stream_on_decoder_headers_end(
+ struct aws_h2_stream *stream,
+ bool malformed,
+ enum aws_http_header_block block_type) {
+
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ /* Not calling s_check_state_allows_frame_type() here because we already checked
+ * at start of HEADERS frame in aws_h2_stream_on_decoder_headers_begin() */
+
+ if (malformed) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Headers are malformed");
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+ }
+
+ switch (block_type) {
+ case AWS_HTTP_HEADER_BLOCK_INFORMATIONAL:
+ AWS_H2_STREAM_LOG(TRACE, stream, "Informational 1xx header-block done.");
+ break;
+ case AWS_HTTP_HEADER_BLOCK_MAIN:
+ AWS_H2_STREAM_LOG(TRACE, stream, "Main header-block done.");
+ stream->thread_data.received_main_headers = true;
+ break;
+ case AWS_HTTP_HEADER_BLOCK_TRAILING:
+            AWS_H2_STREAM_LOG(TRACE, stream, "Trailing header-block done.");
+ break;
+ default:
+ AWS_ASSERT(0);
+ }
+
+ if (stream->base.on_incoming_header_block_done) {
+ if (stream->base.on_incoming_header_block_done(&stream->base, block_type, stream->base.user_data)) {
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Incoming-header-block-done callback raised error, %s",
+ aws_error_name(aws_last_error()));
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err aws_h2_stream_on_decoder_push_promise(struct aws_h2_stream *stream, uint32_t promised_stream_id) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_PUSH_PROMISE);
+ if (aws_h2err_failed(stream_err)) {
+ return s_send_rst_and_close_stream(stream, stream_err);
+ }
+
+ /* Note: Until we have a need for it, PUSH_PROMISE is not a fully supported feature.
+ * Promised streams are automatically rejected in a manner compliant with RFC-7540. */
+ AWS_H2_STREAM_LOG(DEBUG, stream, "Automatically rejecting promised stream, PUSH_PROMISE is not fully supported");
+ if (aws_h2_connection_send_rst_and_close_reserved_stream(
+ s_get_h2_connection(stream), promised_stream_id, AWS_HTTP2_ERR_REFUSED_STREAM)) {
+ return aws_h2err_from_last_error();
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+static int s_stream_send_update_window(struct aws_h2_stream *stream, uint32_t window_size) {
+ struct aws_h2_frame *stream_window_update_frame =
+ aws_h2_frame_new_window_update(stream->base.alloc, stream->base.id, window_size);
+ if (!stream_window_update_frame) {
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+            stream,
+            "Failed to create WINDOW_UPDATE frame for stream, error %s",
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ aws_h2_connection_enqueue_outgoing_frame(s_get_h2_connection(stream), stream_window_update_frame);
+ stream->thread_data.window_size_self += window_size;
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_h2err aws_h2_stream_on_decoder_data_begin(
+ struct aws_h2_stream *stream,
+ uint32_t payload_len,
+ uint32_t total_padding_bytes,
+ bool end_stream) {
+
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_DATA);
+ if (aws_h2err_failed(stream_err)) {
+ return s_send_rst_and_close_stream(stream, stream_err);
+ }
+
+ if (!stream->thread_data.received_main_headers) {
+ AWS_H2_STREAM_LOG(ERROR, stream, "Malformed message, received DATA before main HEADERS");
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+ }
+
+ if (stream->thread_data.content_length_received) {
+ uint64_t data_len = payload_len - total_padding_bytes;
+ if (aws_add_u64_checked(
+ stream->thread_data.incoming_data_length, data_len, &stream->thread_data.incoming_data_length)) {
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_aws_code(AWS_ERROR_OVERFLOW_DETECTED));
+ }
+
+ if (stream->thread_data.incoming_data_length > stream->thread_data.incoming_content_length) {
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+                stream,
+                "Total received data payload=%" PRIu64 " has exceeded the received content-length header, which=%" PRIi64
+ ". Closing malformed stream",
+ stream->thread_data.incoming_data_length,
+ stream->thread_data.incoming_content_length);
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+ }
+ }
+
+ /* RFC-7540 6.9.1:
+ * The sender MUST NOT send a flow-controlled frame with a length that exceeds
+ * the space available in either of the flow-control windows advertised by the receiver.
+ * Frames with zero length with the END_STREAM flag set (that is, an empty DATA frame)
+ * MAY be sent if there is no available space in either flow-control window. */
+ if ((int32_t)payload_len > stream->thread_data.window_size_self && payload_len != 0) {
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "DATA length=%" PRIu32 " exceeds flow-control window=%" PRIi64,
+ payload_len,
+ stream->thread_data.window_size_self);
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_FLOW_CONTROL_ERROR));
+ }
+ stream->thread_data.window_size_self -= payload_len;
+
+ /* If stream isn't over, we may need to send automatic window updates to keep data flowing */
+ if (!end_stream) {
+ uint32_t auto_window_update;
+ if (stream->base.owning_connection->stream_manual_window_management) {
+ /* Automatically update the flow-window to account for padding, even though "manual window management"
+ * is enabled, because the current API doesn't have any way to inform the user about padding,
+ * so we can't expect them to manage it themselves. */
+ auto_window_update = total_padding_bytes;
+ } else {
+ /* Automatically update the full amount we just received */
+ auto_window_update = payload_len;
+ }
+
+ if (auto_window_update != 0) {
+ if (s_stream_send_update_window(stream, auto_window_update)) {
+ return aws_h2err_from_last_error();
+ }
+ AWS_H2_STREAM_LOGF(
+ TRACE,
+                stream,
+                "Automatically updating stream window by %" PRIu32 " (%" PRIu32 " due to padding).",
+ auto_window_update,
+ total_padding_bytes);
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
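+
+/* Worked example of the flow-control accounting above (hypothetical numbers): a DATA frame arrives with
+ * payload_len=1000, of which total_padding_bytes=10. window_size_self shrinks by the full 1000. With automatic
+ * window management a WINDOW_UPDATE of 1000 is queued right away; with stream_manual_window_management enabled
+ * only the 10 padding bytes are restored automatically, and the application is expected to update the window for
+ * the remaining 990 bytes itself. */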
+
+struct aws_h2err aws_h2_stream_on_decoder_data_i(struct aws_h2_stream *stream, struct aws_byte_cursor data) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ /* Not calling s_check_state_allows_frame_type() here because we already checked at start of DATA frame in
+ * aws_h2_stream_on_decoder_data_begin() */
+
+ if (stream->base.on_incoming_body) {
+ if (stream->base.on_incoming_body(&stream->base, &data, stream->base.user_data)) {
+ AWS_H2_STREAM_LOGF(
+ ERROR, stream, "Incoming body callback raised error, %s", aws_error_name(aws_last_error()));
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_last_error());
+ }
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err aws_h2_stream_on_decoder_window_update(
+ struct aws_h2_stream *stream,
+ uint32_t window_size_increment,
+ bool *window_resume) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ *window_resume = false;
+
+ struct aws_h2err stream_err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_WINDOW_UPDATE);
+ if (aws_h2err_failed(stream_err)) {
+ return s_send_rst_and_close_stream(stream, stream_err);
+ }
+ if (window_size_increment == 0) {
+ /* flow-control window increment of 0 MUST be treated as error (RFC7540 6.9.1) */
+ AWS_H2_STREAM_LOG(ERROR, stream, "Window update frame with 0 increment size");
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+ }
+ int32_t old_window_size = stream->thread_data.window_size_peer;
+ stream_err = (aws_h2_stream_window_size_change(stream, window_size_increment, false /*self*/));
+ if (aws_h2err_failed(stream_err)) {
+ /* We MUST NOT allow a flow-control window to exceed the max */
+ AWS_H2_STREAM_LOG(
+ ERROR, stream, "Window update frame causes the stream flow-control window to exceed the maximum size");
+ return s_send_rst_and_close_stream(stream, stream_err);
+ }
+ if (stream->thread_data.window_size_peer > AWS_H2_MIN_WINDOW_SIZE && old_window_size <= AWS_H2_MIN_WINDOW_SIZE) {
+ *window_resume = true;
+ }
+ return AWS_H2ERR_SUCCESS;
+}
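+
+/* Note: a stream counts as stalled once its peer window is at or below AWS_H2_MIN_WINDOW_SIZE (see
+ * aws_h2_stream_encode_data_frame() above). *window_resume is only set when this WINDOW_UPDATE pushes the window
+ * back above that threshold, signalling the connection to resume sending DATA on this stream. */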
+
+struct aws_h2err aws_h2_stream_on_decoder_end_stream(struct aws_h2_stream *stream) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ /* Not calling s_check_state_allows_frame_type() here because END_STREAM isn't
+ * an actual frame type. It's a flag on DATA or HEADERS frames, and we
+ * already checked the legality of those frames in their respective callbacks. */
+
+ if (stream->thread_data.content_length_received) {
+ if (stream->base.request_method != AWS_HTTP_METHOD_HEAD &&
+ stream->base.client_data->response_status != AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED) {
+ /**
+ * RFC-9110 8.6.
+ * A server MAY send a Content-Length header field in a response to a HEAD request.
+ * A server MAY send a Content-Length header field in a 304 (Not Modified) response.
+             * But in both of these cases, no body is received.
+ */
+ if (stream->thread_data.incoming_data_length != stream->thread_data.incoming_content_length) {
+ /**
+ * RFC-9113 8.1.1:
+ * A request or response is also malformed if the value of a content-length header field does not equal
+ * the sum of the DATA frame payload lengths that form the content, unless the message is defined as
+ * having no content.
+ *
+ * Clients MUST NOT accept a malformed response.
+ */
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Total received data payload=%" PRIu64
+ " does not match the received content-length header, which=%" PRIi64 ". Closing malformed stream",
+ stream->thread_data.incoming_data_length,
+ stream->thread_data.incoming_content_length);
+ return s_send_rst_and_close_stream(stream, aws_h2err_from_h2_code(AWS_HTTP2_ERR_PROTOCOL_ERROR));
+ }
+ }
+ }
+
+ if (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_LOCAL) {
+ /* Both sides have sent END_STREAM */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+ AWS_H2_STREAM_LOG(TRACE, stream, "Received END_STREAM. State -> CLOSED");
+ /* Tell connection that stream is now closed */
+ if (aws_h2_connection_on_stream_closed(
+ s_get_h2_connection(stream),
+ stream,
+ AWS_H2_STREAM_CLOSED_WHEN_BOTH_SIDES_END_STREAM,
+ AWS_ERROR_SUCCESS)) {
+ return aws_h2err_from_last_error();
+ }
+
+ } else {
+ /* Else can't close until our side sends END_STREAM */
+ stream->thread_data.state = AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE;
+ AWS_H2_STREAM_LOG(TRACE, stream, "Received END_STREAM. State -> HALF_CLOSED_REMOTE");
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+struct aws_h2err aws_h2_stream_on_decoder_rst_stream(struct aws_h2_stream *stream, uint32_t h2_error_code) {
+ AWS_PRECONDITION_ON_CHANNEL_THREAD(stream);
+
+ /* Check that this state allows RST_STREAM. */
+ struct aws_h2err err = s_check_state_allows_frame_type(stream, AWS_H2_FRAME_T_RST_STREAM);
+ if (aws_h2err_failed(err)) {
+ /* Usually we send a RST_STREAM when the state doesn't allow a frame type, but RFC-7540 5.4.2 says:
+ * "To avoid looping, an endpoint MUST NOT send a RST_STREAM in response to a RST_STREAM frame." */
+ return err;
+ }
+
+ /* RFC-7540 8.1 - a server MAY request that the client abort transmission of a request without error by sending a
+ * RST_STREAM with an error code of NO_ERROR after sending a complete response (i.e., a frame with the END_STREAM
+ * flag). Clients MUST NOT discard responses as a result of receiving such a RST_STREAM */
+ int aws_error_code;
+ if (stream->base.client_data && (h2_error_code == AWS_HTTP2_ERR_NO_ERROR) &&
+ (stream->thread_data.state == AWS_H2_STREAM_STATE_HALF_CLOSED_REMOTE)) {
+
+ aws_error_code = AWS_ERROR_SUCCESS;
+
+ } else {
+ aws_error_code = AWS_ERROR_HTTP_RST_STREAM_RECEIVED;
+ AWS_H2_STREAM_LOGF(
+ ERROR,
+ stream,
+ "Peer terminated stream with HTTP/2 RST_STREAM frame, error-code=0x%x(%s)",
+ h2_error_code,
+ aws_http2_error_code_to_str(h2_error_code));
+ }
+
+ stream->thread_data.state = AWS_H2_STREAM_STATE_CLOSED;
+ stream->received_reset_error_code = h2_error_code;
+
+ AWS_H2_STREAM_LOGF(
+ TRACE,
+ stream,
+ "Received RST_STREAM code=0x%x(%s). State -> CLOSED",
+ h2_error_code,
+ aws_http2_error_code_to_str(h2_error_code));
+
+ if (aws_h2_connection_on_stream_closed(
+ s_get_h2_connection(stream), stream, AWS_H2_STREAM_CLOSED_WHEN_RST_STREAM_RECEIVED, aws_error_code)) {
+ return aws_h2err_from_last_error();
+ }
+
+ return AWS_H2ERR_SUCCESS;
+}
+
+static int s_stream_write_data(
+ struct aws_http_stream *stream_base,
+ const struct aws_http2_stream_write_data_options *options) {
+ struct aws_h2_stream *stream = AWS_CONTAINER_OF(stream_base, struct aws_h2_stream, base);
+ if (!stream->manual_write) {
+ AWS_H2_STREAM_LOG(
+ ERROR,
+            stream,
+            "Manual writes are not enabled. You need to enable manual writes by setting "
+ "'http2_use_manual_data_writes' to true in 'aws_http_make_request_options'");
+ return aws_raise_error(AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED);
+ }
+ struct aws_h2_connection *connection = s_get_h2_connection(stream);
+
+ /* queue this new write into the pending write list for the stream */
+ struct aws_h2_stream_data_write *pending_write =
+ aws_mem_calloc(stream->base.alloc, 1, sizeof(struct aws_h2_stream_data_write));
+ if (options->data) {
+ pending_write->data_stream = aws_input_stream_acquire(options->data);
+ } else {
+ struct aws_byte_cursor empty_cursor;
+ AWS_ZERO_STRUCT(empty_cursor);
+ pending_write->data_stream = aws_input_stream_new_from_cursor(stream->base.alloc, &empty_cursor);
+ }
+ bool schedule_cross_thread_work = false;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream);
+ {
+ if (stream->synced_data.api_state != AWS_H2_STREAM_API_STATE_ACTIVE) {
+ s_unlock_synced_data(stream);
+ int error_code = stream->synced_data.api_state == AWS_H2_STREAM_API_STATE_INIT
+ ? AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED
+ : AWS_ERROR_HTTP_STREAM_HAS_COMPLETED;
+ s_stream_data_write_destroy(stream, pending_write, error_code);
+ AWS_H2_STREAM_LOG(ERROR, stream, "Cannot write DATA frames to an inactive or closed stream");
+ return aws_raise_error(error_code);
+ }
+
+ if (stream->synced_data.manual_write_ended) {
+ s_unlock_synced_data(stream);
+ s_stream_data_write_destroy(stream, pending_write, AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED);
+ AWS_H2_STREAM_LOG(ERROR, stream, "Cannot write DATA frames to a stream after manual write ended");
+                /* Fail with an error; otherwise callers could wait forever for an on_complete callback that will
+                 * never be invoked. */
+ return aws_raise_error(AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED);
+ }
+ /* Not setting this until we're sure we succeeded, so that callback doesn't fire on cleanup if we fail */
+ if (options->end_stream) {
+ stream->synced_data.manual_write_ended = true;
+ }
+ pending_write->end_stream = options->end_stream;
+ pending_write->on_complete = options->on_complete;
+ pending_write->user_data = options->user_data;
+
+ aws_linked_list_push_back(&stream->synced_data.pending_write_list, &pending_write->node);
+ schedule_cross_thread_work = !stream->synced_data.is_cross_thread_work_task_scheduled;
+ stream->synced_data.is_cross_thread_work_task_scheduled = true;
+ }
+ s_unlock_synced_data(stream);
+ } /* END CRITICAL SECTION */
+
+ if (schedule_cross_thread_work) {
+ AWS_H2_STREAM_LOG(TRACE, stream, "Scheduling stream cross-thread work task");
+ /* increment the refcount of stream to keep it alive until the task runs */
+ aws_atomic_fetch_add(&stream->base.refcount, 1);
+ aws_channel_schedule_task_now(connection->base.channel_slot->channel, &stream->cross_thread_work_task);
+ }
+
+ return AWS_OP_SUCCESS;
+}
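+
+/* Rough usage sketch for manual data writes. This assumes the stream was created with
+ * 'http2_use_manual_data_writes' set to true and that the public wrapper routing into s_stream_write_data() is
+ * aws_http2_stream_write_data() (names outside this file are assumptions):
+ *
+ *     struct aws_http2_stream_write_data_options write_options = {
+ *         .data = body_chunk,          // aws_input_stream with this chunk's bytes, or NULL for an empty write
+ *         .end_stream = is_last_chunk, // true on the final write; later writes are rejected
+ *         .on_complete = on_write_done,
+ *         .user_data = app_ctx,
+ *     };
+ *     aws_http2_stream_write_data(stream, &write_options);
+ */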
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack.c b/contrib/restricted/aws/aws-c-http/source/hpack.c
new file mode 100644
index 0000000000..ef3d0b3dcf
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack.c
@@ -0,0 +1,525 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/hpack.h>
+
+/* #TODO test empty strings */
+
+/* #TODO remove all OOM error handling in HTTP/2 & HPACK. make functions void if possible */
+
+/* RFC-7540 6.5.2 */
+const size_t s_hpack_dynamic_table_initial_size = 4096;
+const size_t s_hpack_dynamic_table_initial_elements = 512;
+/* TODO: shouldn't be a hardcoded max_size, it should be driven by SETTINGS_HEADER_TABLE_SIZE */
+const size_t s_hpack_dynamic_table_max_size = 16 * 1024 * 1024;
+
+/* Used for growing the dynamic table buffer when it fills up */
+const float s_hpack_dynamic_table_buffer_growth_rate = 1.5F;
+
+struct aws_http_header s_static_header_table[] = {
+#define HEADER(_index, _name) \
+ [_index] = { \
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), \
+ },
+
+#define HEADER_WITH_VALUE(_index, _name, _value) \
+ [_index] = { \
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name), \
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_value), \
+ },
+
+#include <aws/http/private/hpack_header_static_table.def>
+
+#undef HEADER
+#undef HEADER_WITH_VALUE
+};
+static const size_t s_static_header_table_size = AWS_ARRAY_SIZE(s_static_header_table);
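+
+/* The entries generated from hpack_header_static_table.def correspond to the HPACK static table of RFC-7541
+ * Appendix A: 61 fixed entries with 1-based indices, e.g. index 2 is ":method: GET" and index 8 is ":status: 200".
+ * Slot 0 of the array is left unused so that array indices line up with HPACK indices. */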
+
+struct aws_byte_cursor s_static_header_table_name_only[] = {
+#define HEADER(_index, _name) [_index] = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(_name),
+#define HEADER_WITH_VALUE(_index, _name, _value) HEADER(_index, _name)
+
+#include <aws/http/private/hpack_header_static_table.def>
+
+#undef HEADER
+#undef HEADER_WITH_VALUE
+};
+
+/* aws_http_header * -> size_t */
+static struct aws_hash_table s_static_header_reverse_lookup;
+/* aws_byte_cursor * -> size_t */
+static struct aws_hash_table s_static_header_reverse_lookup_name_only;
+
+static uint64_t s_header_hash(const void *key) {
+ const struct aws_http_header *header = key;
+
+ return aws_hash_combine(aws_hash_byte_cursor_ptr(&header->name), aws_hash_byte_cursor_ptr(&header->value));
+}
+
+static bool s_header_eq(const void *a, const void *b) {
+ const struct aws_http_header *left = a;
+ const struct aws_http_header *right = b;
+
+ if (!aws_byte_cursor_eq(&left->name, &right->name)) {
+ return false;
+ }
+
+    /* Names match; the entry is a full match only if the values are also equal */
+ return aws_byte_cursor_eq(&left->value, &right->value);
+}
+
+void aws_hpack_static_table_init(struct aws_allocator *allocator) {
+
+ int result = aws_hash_table_init(
+ &s_static_header_reverse_lookup,
+ allocator,
+ s_static_header_table_size - 1,
+ s_header_hash,
+ s_header_eq,
+ NULL,
+ NULL);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+
+ result = aws_hash_table_init(
+ &s_static_header_reverse_lookup_name_only,
+ allocator,
+ s_static_header_table_size - 1,
+ aws_hash_byte_cursor_ptr,
+ (aws_hash_callback_eq_fn *)aws_byte_cursor_eq,
+ NULL,
+ NULL);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+
+ /* Process in reverse so that name_only prefers lower indices */
+ for (size_t i = s_static_header_table_size - 1; i > 0; --i) {
+        /* the lookup tables use 1-based indexing, so slot 0 is skipped */
+ result = aws_hash_table_put(&s_static_header_reverse_lookup, &s_static_header_table[i], (void *)i, NULL);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+
+ result = aws_hash_table_put(
+ &s_static_header_reverse_lookup_name_only, &s_static_header_table_name_only[i], (void *)(i), NULL);
+ AWS_FATAL_ASSERT(AWS_OP_SUCCESS == result);
+ }
+}
+
+void aws_hpack_static_table_clean_up() {
+ aws_hash_table_clean_up(&s_static_header_reverse_lookup);
+ aws_hash_table_clean_up(&s_static_header_reverse_lookup_name_only);
+}
+
+#define HPACK_LOGF(level, hpack, text, ...) \
+ AWS_LOGF_##level((hpack)->log_subject, "id=%p [HPACK]: " text, (hpack)->log_id, __VA_ARGS__)
+#define HPACK_LOG(level, hpack, text) HPACK_LOGF(level, hpack, "%s", text)
+
+void aws_hpack_context_init(
+ struct aws_hpack_context *context,
+ struct aws_allocator *allocator,
+ enum aws_http_log_subject log_subject,
+ const void *log_id) {
+
+ AWS_ZERO_STRUCT(*context);
+ context->allocator = allocator;
+ context->log_subject = log_subject;
+ context->log_id = log_id;
+
+ /* Initialize dynamic table */
+ context->dynamic_table.max_size = s_hpack_dynamic_table_initial_size;
+ context->dynamic_table.buffer_capacity = s_hpack_dynamic_table_initial_elements;
+ context->dynamic_table.buffer =
+ aws_mem_calloc(allocator, context->dynamic_table.buffer_capacity, sizeof(struct aws_http_header));
+
+ aws_hash_table_init(
+ &context->dynamic_table.reverse_lookup,
+ allocator,
+ s_hpack_dynamic_table_initial_elements,
+ s_header_hash,
+ s_header_eq,
+ NULL,
+ NULL);
+
+ aws_hash_table_init(
+ &context->dynamic_table.reverse_lookup_name_only,
+ allocator,
+ s_hpack_dynamic_table_initial_elements,
+ aws_hash_byte_cursor_ptr,
+ (aws_hash_callback_eq_fn *)aws_byte_cursor_eq,
+ NULL,
+ NULL);
+}
+
+static struct aws_http_header *s_dynamic_table_get(const struct aws_hpack_context *context, size_t index);
+
+static void s_clean_up_dynamic_table_buffer(struct aws_hpack_context *context) {
+ while (context->dynamic_table.num_elements > 0) {
+ struct aws_http_header *back = s_dynamic_table_get(context, context->dynamic_table.num_elements - 1);
+ context->dynamic_table.num_elements -= 1;
+ /* clean-up the memory we allocate for it */
+ aws_mem_release(context->allocator, back->name.ptr);
+ }
+ aws_mem_release(context->allocator, context->dynamic_table.buffer);
+}
+
+void aws_hpack_context_clean_up(struct aws_hpack_context *context) {
+ if (context->dynamic_table.buffer) {
+ s_clean_up_dynamic_table_buffer(context);
+ }
+ aws_hash_table_clean_up(&context->dynamic_table.reverse_lookup);
+ aws_hash_table_clean_up(&context->dynamic_table.reverse_lookup_name_only);
+ AWS_ZERO_STRUCT(*context);
+}
+
+size_t aws_hpack_get_header_size(const struct aws_http_header *header) {
+ return header->name.len + header->value.len + 32;
+}
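+
+/* Per RFC-7541 4.1 an entry's size is its name length plus its value length plus 32 octets of per-entry overhead.
+ * Example: ":authority: www.example.com" occupies 10 + 15 + 32 = 57 octets of dynamic-table space. */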
+
+size_t aws_hpack_get_dynamic_table_num_elements(const struct aws_hpack_context *context) {
+ return context->dynamic_table.num_elements;
+}
+
+size_t aws_hpack_get_dynamic_table_max_size(const struct aws_hpack_context *context) {
+ return context->dynamic_table.max_size;
+}
+
+/*
+ * Gets the header from the dynamic table.
+ * NOTE: This function only bounds checks on the buffer size, not the number of elements.
+ */
+static struct aws_http_header *s_dynamic_table_get(const struct aws_hpack_context *context, size_t index) {
+
+ AWS_ASSERT(index < context->dynamic_table.buffer_capacity);
+
+ return &context->dynamic_table
+ .buffer[(context->dynamic_table.index_0 + index) % context->dynamic_table.buffer_capacity];
+}
+
+const struct aws_http_header *aws_hpack_get_header(const struct aws_hpack_context *context, size_t index) {
+ if (index == 0 || index >= s_static_header_table_size + context->dynamic_table.num_elements) {
+ aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ return NULL;
+ }
+
+ /* Check static table */
+ if (index < s_static_header_table_size) {
+ return &s_static_header_table[index];
+ }
+
+ /* Check dynamic table */
+ return s_dynamic_table_get(context, index - s_static_header_table_size);
+}
+
+/* TODO: remove `bool search_value`, this option has no reason to exist */
+size_t aws_hpack_find_index(
+ const struct aws_hpack_context *context,
+ const struct aws_http_header *header,
+ bool search_value,
+ bool *found_value) {
+
+ *found_value = false;
+
+ struct aws_hash_element *elem = NULL;
+ if (search_value) {
+ /* Check name-and-value first in static table */
+ aws_hash_table_find(&s_static_header_reverse_lookup, header, &elem);
+ if (elem) {
+ /* TODO: Maybe always set found_value to true? Who cares that the value is empty if they matched? */
+ /* If an element was found, check if it has a value */
+ *found_value = ((const struct aws_http_header *)elem->key)->value.len;
+ return (size_t)elem->value;
+ }
+ /* Check name-and-value in dynamic table */
+ aws_hash_table_find(&context->dynamic_table.reverse_lookup, header, &elem);
+ if (elem) {
+ /* TODO: Maybe always set found_value to true? Who cares that the value is empty if they matched? */
+ *found_value = ((const struct aws_http_header *)elem->key)->value.len;
+ goto trans_index_from_dynamic_table;
+ }
+ }
+    /* Check the name-only tables. Note: even when searching by value, if the name-and-value search fails, we still
+     * check the name-only tables */
+ aws_hash_table_find(&s_static_header_reverse_lookup_name_only, &header->name, &elem);
+ if (elem) {
+ return (size_t)elem->value;
+ }
+ aws_hash_table_find(&context->dynamic_table.reverse_lookup_name_only, &header->name, &elem);
+ if (elem) {
+ goto trans_index_from_dynamic_table;
+ }
+ return 0;
+
+trans_index_from_dynamic_table:
+ AWS_ASSERT(elem);
+ size_t index;
+ const size_t absolute_index = (size_t)elem->value;
+ if (absolute_index >= context->dynamic_table.index_0) {
+ index = absolute_index - context->dynamic_table.index_0;
+ } else {
+ index = (context->dynamic_table.buffer_capacity - context->dynamic_table.index_0) + absolute_index;
+ }
+    /* Need to add the static table size to re-base indices */
+ index += s_static_header_table_size;
+ return index;
+}
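+
+/* Index translation example (hypothetical values): with buffer_capacity=8 and index_0=6, the newest entry sits at
+ * absolute slot 6 and maps to relative index 0, the entry at slot 7 maps to 1, and a wrapped entry at slot 0 maps
+ * to (8 - 6) + 0 = 2. Adding s_static_header_table_size then yields the HPACK index returned to the caller, since
+ * dynamic-table indices follow the static table (RFC-7541 2.3.3). */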
+
+/* Remove elements from the dynamic table until it fits in max_size bytes */
+static int s_dynamic_table_shrink(struct aws_hpack_context *context, size_t max_size) {
+ while (context->dynamic_table.size > max_size && context->dynamic_table.num_elements > 0) {
+ struct aws_http_header *back = s_dynamic_table_get(context, context->dynamic_table.num_elements - 1);
+
+ /* "Remove" the header from the table */
+ context->dynamic_table.size -= aws_hpack_get_header_size(back);
+ context->dynamic_table.num_elements -= 1;
+
+ /* Remove old header from hash tables */
+ if (aws_hash_table_remove(&context->dynamic_table.reverse_lookup, back, NULL, NULL)) {
+ HPACK_LOG(ERROR, context, "Failed to remove header from the reverse lookup table");
+ goto error;
+ }
+
+ /* If the name-only lookup is pointing to the element we're removing, it needs to go.
+ * If not, it's pointing to a younger, sexier element. */
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&context->dynamic_table.reverse_lookup_name_only, &back->name, &elem);
+ if (elem && elem->key == back) {
+ if (aws_hash_table_remove_element(&context->dynamic_table.reverse_lookup_name_only, elem)) {
+ HPACK_LOG(ERROR, context, "Failed to remove header from the reverse lookup (name-only) table");
+ goto error;
+ }
+ }
+
+ /* clean up the memory we allocated to hold the name and value string*/
+ aws_mem_release(context->allocator, back->name.ptr);
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ return AWS_OP_ERR;
+}
+
+/*
+ * Resizes the dynamic table storage buffer to new_max_elements.
+ * Useful when inserting over capacity, or when downsizing.
+ * Shrink first if you want to remove elements, or a memory leak will happen.
+ */
+static int s_dynamic_table_resize_buffer(struct aws_hpack_context *context, size_t new_max_elements) {
+
+ /* Clear the old hash tables */
+ aws_hash_table_clear(&context->dynamic_table.reverse_lookup);
+ aws_hash_table_clear(&context->dynamic_table.reverse_lookup_name_only);
+
+ struct aws_http_header *new_buffer = NULL;
+
+ if (AWS_UNLIKELY(new_max_elements == 0)) {
+        /* If the new buffer is of size 0, don't bother initializing it, just clean up the old one. */
+ goto cleanup_old_buffer;
+ }
+
+ /* Allocate the new buffer */
+ new_buffer = aws_mem_calloc(context->allocator, new_max_elements, sizeof(struct aws_http_header));
+ if (!new_buffer) {
+ return AWS_OP_ERR;
+ }
+
+ /* Don't bother copying data if old buffer was of size 0 */
+ if (AWS_UNLIKELY(context->dynamic_table.num_elements == 0)) {
+ goto reset_dyn_table_state;
+ }
+
+ /*
+ * Take a buffer that looks like this:
+ *
+ * Index 0
+ * ^
+ * +---------------------------+
+ * | Below Block | Above Block |
+ * +---------------------------+
+ * And make it look like this:
+ *
+ * Index 0
+ * ^
+ * +-------------+-------------+
+ * | Above Block | Below Block |
+ * +-------------+-------------+
+ */
+
+    /* Copy as much of the above block as possible */
+ size_t above_block_size = context->dynamic_table.buffer_capacity - context->dynamic_table.index_0;
+ if (above_block_size > new_max_elements) {
+ above_block_size = new_max_elements;
+ }
+ memcpy(
+ new_buffer,
+ context->dynamic_table.buffer + context->dynamic_table.index_0,
+ above_block_size * sizeof(struct aws_http_header));
+
+ /* Copy as much of below block as possible */
+ const size_t free_blocks_available = new_max_elements - above_block_size;
+ const size_t old_blocks_to_copy = context->dynamic_table.buffer_capacity - above_block_size;
+ const size_t below_block_size = aws_min_size(free_blocks_available, old_blocks_to_copy);
+ if (below_block_size) {
+ memcpy(
+ new_buffer + above_block_size,
+ context->dynamic_table.buffer,
+ below_block_size * sizeof(struct aws_http_header));
+ }
+
+ /* Free the old memory */
+cleanup_old_buffer:
+ aws_mem_release(context->allocator, context->dynamic_table.buffer);
+
+ /* Reset state */
+reset_dyn_table_state:
+ if (context->dynamic_table.num_elements > new_max_elements) {
+ context->dynamic_table.num_elements = new_max_elements;
+ }
+ context->dynamic_table.buffer_capacity = new_max_elements;
+ context->dynamic_table.index_0 = 0;
+ context->dynamic_table.buffer = new_buffer;
+
+ /* Re-insert all of the reverse lookup elements */
+ for (size_t i = 0; i < context->dynamic_table.num_elements; ++i) {
+ if (aws_hash_table_put(
+ &context->dynamic_table.reverse_lookup, &context->dynamic_table.buffer[i], (void *)i, NULL)) {
+ return AWS_OP_ERR;
+ }
+ if (aws_hash_table_put(
+ &context->dynamic_table.reverse_lookup_name_only,
+ &context->dynamic_table.buffer[i].name,
+ (void *)i,
+ NULL)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_hpack_insert_header(struct aws_hpack_context *context, const struct aws_http_header *header) {
+
+ /* Don't move forward if no elements allowed in the dynamic table */
+ if (AWS_UNLIKELY(context->dynamic_table.max_size == 0)) {
+ return AWS_OP_SUCCESS;
+ }
+
+ const size_t header_size = aws_hpack_get_header_size(header);
+
+ /* If for whatever reason this new header is bigger than the total table size, burn everything to the ground. */
+ if (AWS_UNLIKELY(header_size > context->dynamic_table.max_size)) {
+ /* #TODO handle this. It's not an error. It should simply result in an empty table RFC-7541 4.4 */
+ goto error;
+ }
+
+ /* Rotate out headers until there's room for the new header (this function will return immediately if nothing needs
+ * to be evicted) */
+ if (s_dynamic_table_shrink(context, context->dynamic_table.max_size - header_size)) {
+ goto error;
+ }
+
+ /* If we're out of space in the buffer, grow it */
+ if (context->dynamic_table.num_elements == context->dynamic_table.buffer_capacity) {
+ /* If the buffer is currently of 0 size, reset it back to its initial size */
+ const size_t new_size =
+ context->dynamic_table.buffer_capacity
+ ? (size_t)(context->dynamic_table.buffer_capacity * s_hpack_dynamic_table_buffer_growth_rate)
+ : s_hpack_dynamic_table_initial_elements;
+
+ if (s_dynamic_table_resize_buffer(context, new_size)) {
+ goto error;
+ }
+ }
+
+ /* Decrement index 0, wrapping if necessary */
+ if (context->dynamic_table.index_0 == 0) {
+ context->dynamic_table.index_0 = context->dynamic_table.buffer_capacity - 1;
+ } else {
+ context->dynamic_table.index_0--;
+ }
+
+ /* Increment num_elements */
+ context->dynamic_table.num_elements++;
+ /* Increment the size */
+ context->dynamic_table.size += header_size;
+
+ /* Put the header at the "front" of the table */
+ struct aws_http_header *table_header = s_dynamic_table_get(context, 0);
+
+    /* TODO: We can optimize this with a ring buffer. */
+    /* Allocate memory for the name and value, which will be deallocated whenever the entry is evicted from the table
+     * or the table is cleaned up. We keep the pointer in the name field of each entry. */
+ const size_t buf_memory_size = header->name.len + header->value.len;
+
+ if (buf_memory_size) {
+ uint8_t *buf_memory = aws_mem_acquire(context->allocator, buf_memory_size);
+ if (!buf_memory) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_buf buf = aws_byte_buf_from_empty_array(buf_memory, buf_memory_size);
+ /* Copy header, then backup strings into our own allocation */
+ *table_header = *header;
+ aws_byte_buf_append_and_update(&buf, &table_header->name);
+ aws_byte_buf_append_and_update(&buf, &table_header->value);
+ } else {
+ /* if buf_memory_size is 0, no memory needed, we will insert the empty header into dynamic table */
+ *table_header = *header;
+ table_header->name.ptr = NULL;
+ table_header->value.ptr = NULL;
+ }
+ /* Write the new header to the look up tables */
+ if (aws_hash_table_put(
+ &context->dynamic_table.reverse_lookup, table_header, (void *)context->dynamic_table.index_0, NULL)) {
+ goto error;
+ }
+    /* Note that we can just blindly put here; we want to overwrite any older entry so it isn't accidentally removed. */
+ if (aws_hash_table_put(
+ &context->dynamic_table.reverse_lookup_name_only,
+ &table_header->name,
+ (void *)context->dynamic_table.index_0,
+ NULL)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ /* Do not attempt to handle the error, if something goes wrong, close the connection */
+ return AWS_OP_ERR;
+}
+
+int aws_hpack_resize_dynamic_table(struct aws_hpack_context *context, size_t new_max_size) {
+
+ /* Nothing to see here! */
+ if (new_max_size == context->dynamic_table.max_size) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (new_max_size > s_hpack_dynamic_table_max_size) {
+
+ HPACK_LOGF(
+ ERROR,
+ context,
+ "New dynamic table max size %zu is greater than the supported max size (%zu)",
+ new_max_size,
+ s_hpack_dynamic_table_max_size);
+ aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ goto error;
+ }
+
+ /* If downsizing, remove elements until we're within the new size constraints */
+ if (s_dynamic_table_shrink(context, new_max_size)) {
+ goto error;
+ }
+
+ /* Resize the buffer to the current size */
+ if (s_dynamic_table_resize_buffer(context, context->dynamic_table.num_elements)) {
+ goto error;
+ }
+
+ /* Update the max size */
+ context->dynamic_table.max_size = new_max_size;
+
+ return AWS_OP_SUCCESS;
+
+error:
+ return AWS_OP_ERR;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c b/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
new file mode 100644
index 0000000000..936cd8d4f5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack_decoder.c
@@ -0,0 +1,446 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/hpack.h>
+
+#define HPACK_LOGF(level, decoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_DECODER, "id=%p [HPACK]: " text, (decoder)->log_id, __VA_ARGS__)
+#define HPACK_LOG(level, decoder, text) HPACK_LOGF(level, decoder, "%s", text)
+
+struct aws_huffman_symbol_coder *hpack_get_coder(void);
+
+/* Used while decoding the header name & value, grows if necessary */
+const size_t s_hpack_decoder_scratch_initial_size = 512;
+
+void aws_hpack_decoder_init(struct aws_hpack_decoder *decoder, struct aws_allocator *allocator, const void *log_id) {
+ AWS_ZERO_STRUCT(*decoder);
+ decoder->log_id = log_id;
+
+ aws_huffman_decoder_init(&decoder->huffman_decoder, hpack_get_coder());
+ aws_huffman_decoder_allow_growth(&decoder->huffman_decoder, true);
+
+ aws_hpack_context_init(&decoder->context, allocator, AWS_LS_HTTP_DECODER, log_id);
+
+ aws_byte_buf_init(&decoder->progress_entry.scratch, allocator, s_hpack_decoder_scratch_initial_size);
+
+ decoder->dynamic_table_protocol_max_size_setting = aws_hpack_get_dynamic_table_max_size(&decoder->context);
+}
+
+void aws_hpack_decoder_clean_up(struct aws_hpack_decoder *decoder) {
+ aws_hpack_context_clean_up(&decoder->context);
+ aws_byte_buf_clean_up(&decoder->progress_entry.scratch);
+ AWS_ZERO_STRUCT(*decoder);
+}
+
+static const struct aws_http_header *s_get_header_u64(const struct aws_hpack_decoder *decoder, uint64_t index) {
+ if (index > SIZE_MAX) {
+ HPACK_LOG(ERROR, decoder, "Header index is absurdly large");
+ aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ return NULL;
+ }
+
+ return aws_hpack_get_header(&decoder->context, (size_t)index);
+}
+
+void aws_hpack_decoder_update_max_table_size(struct aws_hpack_decoder *decoder, uint32_t setting_max_size) {
+ decoder->dynamic_table_protocol_max_size_setting = setting_max_size;
+}
+
+/* Return a byte with the N right-most bits masked.
+ * Ex: 2 -> 00000011 */
+static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) {
+ AWS_ASSERT(num_masked_bits <= 8);
+ const uint8_t cut_bits = 8 - num_masked_bits;
+ return UINT8_MAX >> cut_bits;
+}
+
+int aws_hpack_decode_integer(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ uint8_t prefix_size,
+ uint64_t *integer,
+ bool *complete) {
+
+ AWS_PRECONDITION(decoder);
+ AWS_PRECONDITION(to_decode);
+ AWS_PRECONDITION(prefix_size <= 8);
+ AWS_PRECONDITION(integer);
+
+ const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size);
+
+ struct hpack_progress_integer *progress = &decoder->progress_integer;
+
+ while (to_decode->len) {
+ switch (progress->state) {
+ case HPACK_INTEGER_STATE_INIT: {
+ /* Read the first byte, and check whether this is it, or we need to continue */
+ uint8_t byte = 0;
+ bool succ = aws_byte_cursor_read_u8(to_decode, &byte);
+ AWS_FATAL_ASSERT(succ);
+
+ /* Cut the prefix */
+ byte &= prefix_mask;
+
+ /* No matter what, the first byte's value is always added to the integer */
+ *integer = byte;
+
+ if (byte != prefix_mask) {
+ goto handle_complete;
+ }
+
+ progress->state = HPACK_INTEGER_STATE_VALUE;
+ } break;
+
+ case HPACK_INTEGER_STATE_VALUE: {
+ uint8_t byte = 0;
+ bool succ = aws_byte_cursor_read_u8(to_decode, &byte);
+ AWS_FATAL_ASSERT(succ);
+
+ uint64_t new_byte_value = (uint64_t)(byte & 127) << progress->bit_count;
+ if (*integer + new_byte_value < *integer) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ *integer += new_byte_value;
+
+ /* Check if we're done */
+ if ((byte & 128) == 0) {
+ goto handle_complete;
+ }
+
+ /* Increment the bit count */
+ progress->bit_count += 7;
+
+                /* Each continuation byte contributes 7 bits, so if any of those bits would land beyond the 64-bit
+                 * integer, it's a decoding error */
+ if (progress->bit_count > 64 - 7) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+ } break;
+ }
+ }
+
+ /* Fell out of data loop, must need more data */
+ *complete = false;
+ return AWS_OP_SUCCESS;
+
+handle_complete:
+ AWS_ZERO_STRUCT(decoder->progress_integer);
+ *complete = true;
+ return AWS_OP_SUCCESS;
+}
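+
+/* Worked example (RFC-7541 C.1.2): decoding 1337 with a 5-bit prefix from the bytes 1F 9A 0A.
+ * Byte 1: 0x1F masked by the 5-bit prefix equals the prefix mask, so *integer starts at 31 and decoding continues.
+ * Byte 2: 0x9A -> (0x9A & 127) = 26 added at bit_count 0 gives 57; the continuation bit is set.
+ * Byte 3: 0x0A -> 10 << 7 = 1280 added gives 1337; the continuation bit is clear, so *complete is set. */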
+
+int aws_hpack_decode_string(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_byte_buf *output,
+ bool *complete) {
+
+ AWS_PRECONDITION(decoder);
+ AWS_PRECONDITION(to_decode);
+ AWS_PRECONDITION(output);
+ AWS_PRECONDITION(complete);
+
+ struct hpack_progress_string *progress = &decoder->progress_string;
+
+ while (to_decode->len) {
+ switch (progress->state) {
+ case HPACK_STRING_STATE_INIT: {
+                /* Peek at the first byte to determine whether the string is Huffman-coded, then read the length */
+ progress->state = HPACK_STRING_STATE_LENGTH;
+ progress->use_huffman = *to_decode->ptr >> 7;
+ aws_huffman_decoder_reset(&decoder->huffman_decoder);
+ /* fallthrough, since we didn't consume any data */
+ }
+ /* FALLTHRU */
+ case HPACK_STRING_STATE_LENGTH: {
+ bool length_complete = false;
+ if (aws_hpack_decode_integer(decoder, to_decode, 7, &progress->length, &length_complete)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!length_complete) {
+ goto handle_ongoing;
+ }
+
+ if (progress->length == 0) {
+ goto handle_complete;
+ }
+
+ if (progress->length > SIZE_MAX) {
+ return aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ }
+
+ progress->state = HPACK_STRING_STATE_VALUE;
+ } break;
+
+ case HPACK_STRING_STATE_VALUE: {
+ /* Take either as much data as we need, or as much as we can */
+ size_t to_process = aws_min_size((size_t)progress->length, to_decode->len);
+ progress->length -= to_process;
+
+ struct aws_byte_cursor chunk = aws_byte_cursor_advance(to_decode, to_process);
+
+ if (progress->use_huffman) {
+ if (aws_huffman_decode(&decoder->huffman_decoder, &chunk, output)) {
+ HPACK_LOGF(ERROR, decoder, "Error from Huffman decoder: %s", aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ /* Decoder should consume all bytes we feed it.
+ * EOS (end-of-string) symbol could stop it early, but HPACK says to treat EOS as error. */
+ if (chunk.len != 0) {
+ HPACK_LOG(ERROR, decoder, "Huffman encoded end-of-string symbol is illegal");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ } else {
+ if (aws_byte_buf_append_dynamic(output, &chunk)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* If whole length consumed, we're done */
+ if (progress->length == 0) {
+ /* #TODO Validate any padding bits left over in final byte of string.
+ * "A padding not corresponding to the most significant bits of the
+ * code for the EOS symbol MUST be treated as a decoding error" */
+
+ /* #TODO impose limits on string length */
+
+ goto handle_complete;
+ }
+ } break;
+ }
+ }
+
+handle_ongoing:
+ /* Fell out of to_decode loop, must still be in progress */
+ AWS_ASSERT(to_decode->len == 0);
+ *complete = false;
+ return AWS_OP_SUCCESS;
+
+handle_complete:
+ AWS_ASSERT(decoder->progress_string.length == 0);
+ AWS_ZERO_STRUCT(decoder->progress_string);
+ *complete = true;
+ return AWS_OP_SUCCESS;
+}
+
+/* Implements RFC-7541 Section 6 - Binary Format */
+int aws_hpack_decode(
+ struct aws_hpack_decoder *decoder,
+ struct aws_byte_cursor *to_decode,
+ struct aws_hpack_decode_result *result) {
+
+ AWS_PRECONDITION(decoder);
+ AWS_PRECONDITION(to_decode);
+ AWS_PRECONDITION(result);
+
+ /* Run state machine until we decode a complete entry.
+ * Every state requires data, so we can simply loop until no more data available. */
+ while (to_decode->len) {
+ switch (decoder->progress_entry.state) {
+
+ case HPACK_ENTRY_STATE_INIT: {
+ /* Reset entry */
+ AWS_ZERO_STRUCT(decoder->progress_entry.u);
+ decoder->progress_entry.scratch.len = 0;
+
+ /* Determine next state by looking at first few bits of the next byte:
+ * 1xxxxxxx: Indexed Header Field Representation
+ * 01xxxxxx: Literal Header Field with Incremental Indexing
+ * 001xxxxx: Dynamic Table Size Update
+ * 0001xxxx: Literal Header Field Never Indexed
+ * 0000xxxx: Literal Header Field without Indexing */
+ uint8_t first_byte = to_decode->ptr[0];
+ if (first_byte & (1 << 7)) {
+ /* 1xxxxxxx: Indexed Header Field Representation */
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_INDEXED;
+
+ } else if (first_byte & (1 << 6)) {
+ /* 01xxxxxx: Literal Header Field with Incremental Indexing */
+ decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_USE_CACHE;
+ decoder->progress_entry.u.literal.prefix_size = 6;
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN;
+
+ } else if (first_byte & (1 << 5)) {
+ /* 001xxxxx: Dynamic Table Size Update */
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE;
+
+ } else if (first_byte & (1 << 4)) {
+ /* 0001xxxx: Literal Header Field Never Indexed */
+ decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE;
+ decoder->progress_entry.u.literal.prefix_size = 4;
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN;
+ } else {
+ /* 0000xxxx: Literal Header Field without Indexing */
+ decoder->progress_entry.u.literal.compression = AWS_HTTP_HEADER_COMPRESSION_NO_CACHE;
+ decoder->progress_entry.u.literal.prefix_size = 4;
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_BEGIN;
+ }
+ } break;
+
+ /* RFC-7541 6.1. Indexed Header Field Representation.
+ * Decode one integer, which is an index into the table.
+ * Result is the header name and value stored there. */
+ case HPACK_ENTRY_STATE_INDEXED: {
+ bool complete = false;
+ uint64_t *index = &decoder->progress_entry.u.indexed.index;
+ if (aws_hpack_decode_integer(decoder, to_decode, 7, index, &complete)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!complete) {
+ break;
+ }
+
+ const struct aws_http_header *header = s_get_header_u64(decoder, *index);
+ if (!header) {
+ return AWS_OP_ERR;
+ }
+
+ result->type = AWS_HPACK_DECODE_T_HEADER_FIELD;
+ result->data.header_field = *header;
+ goto handle_complete;
+ } break;
+
+ /* RFC-7541 6.2. Literal Header Field Representation.
+ * We use multiple states to decode a literal...
+ * The header-name MAY come from the table and MAY be encoded as a string.
+ * The header-value is ALWAYS encoded as a string.
+ *
+ * This BEGIN state decodes one integer.
+ * If it's non-zero, then it's the index in the table where we'll get the header-name from.
+ * If it's zero, then we move to the HEADER_NAME state and decode header-name as a string instead */
+ case HPACK_ENTRY_STATE_LITERAL_BEGIN: {
+ struct hpack_progress_literal *literal = &decoder->progress_entry.u.literal;
+
+ bool index_complete = false;
+ if (aws_hpack_decode_integer(
+ decoder, to_decode, literal->prefix_size, &literal->name_index, &index_complete)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!index_complete) {
+ break;
+ }
+
+ if (literal->name_index == 0) {
+ /* Index 0 means header-name is not in table. Need to decode header-name as a string instead */
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_NAME_STRING;
+ break;
+ }
+
+ /* Otherwise we found index of header-name in table. */
+ const struct aws_http_header *header = s_get_header_u64(decoder, literal->name_index);
+ if (!header) {
+ return AWS_OP_ERR;
+ }
+
+ /* Store the name in scratch. We don't just keep a pointer to it because it could be
+ * evicted from the dynamic table later, when we save the literal. */
+ if (aws_byte_buf_append_dynamic(&decoder->progress_entry.scratch, &header->name)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Move on to decoding header-value.
+ * Value will also decode into the scratch, so save where name ends. */
+ literal->name_length = header->name.len;
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_VALUE_STRING;
+ } break;
+
+ /* We only end up in this state if header-name is encoded as string. */
+ case HPACK_ENTRY_STATE_LITERAL_NAME_STRING: {
+ bool string_complete = false;
+ if (aws_hpack_decode_string(decoder, to_decode, &decoder->progress_entry.scratch, &string_complete)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!string_complete) {
+ break;
+ }
+
+ /* Done decoding name string! Move on to decoding the value string.
+ * Value will also decode into the scratch, so save where name ends. */
+ decoder->progress_entry.u.literal.name_length = decoder->progress_entry.scratch.len;
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_LITERAL_VALUE_STRING;
+ } break;
+
+ /* Final state for "literal" entries.
+ * Decode the header-value string, then deliver the results. */
+ case HPACK_ENTRY_STATE_LITERAL_VALUE_STRING: {
+ bool string_complete = false;
+ if (aws_hpack_decode_string(decoder, to_decode, &decoder->progress_entry.scratch, &string_complete)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!string_complete) {
+ break;
+ }
+
+ /* Done decoding value string. Done decoding entry. */
+ struct hpack_progress_literal *literal = &decoder->progress_entry.u.literal;
+
+ /* Set up a header with name and value (which are packed one after the other in scratch) */
+ struct aws_http_header header;
+ header.value = aws_byte_cursor_from_buf(&decoder->progress_entry.scratch);
+ header.name = aws_byte_cursor_advance(&header.value, literal->name_length);
+ header.compression = literal->compression;
+
+ /* Save to table if necessary */
+ if (literal->compression == AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) {
+ if (aws_hpack_insert_header(&decoder->context, &header)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ result->type = AWS_HPACK_DECODE_T_HEADER_FIELD;
+ result->data.header_field = header;
+ goto handle_complete;
+ } break;
+
+ /* RFC-7541 6.3. Dynamic Table Size Update
+ * Read one integer, which is the new maximum size for the dynamic table. */
+ case HPACK_ENTRY_STATE_DYNAMIC_TABLE_RESIZE: {
+ uint64_t *size64 = &decoder->progress_entry.u.dynamic_table_resize.size;
+ bool size_complete = false;
+ if (aws_hpack_decode_integer(decoder, to_decode, 5, size64, &size_complete)) {
+ return AWS_OP_ERR;
+ }
+
+ if (!size_complete) {
+ break;
+ }
+ /* The new maximum size MUST be lower than or equal to the limit determined by the protocol using HPACK.
+ * A value that exceeds this limit MUST be treated as a decoding error. */
+ if (*size64 > decoder->dynamic_table_protocol_max_size_setting) {
+                    HPACK_LOG(ERROR, decoder, "Dynamic table update size is larger than the protocol setting");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ size_t size = (size_t)*size64;
+
+ HPACK_LOGF(TRACE, decoder, "Dynamic table size update %zu", size);
+ if (aws_hpack_resize_dynamic_table(&decoder->context, size)) {
+ return AWS_OP_ERR;
+ }
+
+ result->type = AWS_HPACK_DECODE_T_DYNAMIC_TABLE_RESIZE;
+ result->data.dynamic_table_resize = size;
+ goto handle_complete;
+ } break;
+
+ default: {
+ AWS_ASSERT(0 && "invalid state");
+ } break;
+ }
+ }
+
+ AWS_ASSERT(to_decode->len == 0);
+ result->type = AWS_HPACK_DECODE_T_ONGOING;
+ return AWS_OP_SUCCESS;
+
+handle_complete:
+ AWS_ASSERT(result->type != AWS_HPACK_DECODE_T_ONGOING);
+ decoder->progress_entry.state = HPACK_ENTRY_STATE_INIT;
+ return AWS_OP_SUCCESS;
+}
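+
+/* Worked example (RFC-7541 C.2.1): feeding the bytes
+ *     40 0A 63 75 73 74 6F 6D 2D 6B 65 79 0D 63 75 73 74 6F 6D 2D 68 65 61 64 65 72
+ * drives the state machine as follows: 0x40 selects "literal with incremental indexing" (6-bit prefix) with name
+ * index 0, so the name is read as a 10-byte literal string "custom-key", then the value as a 13-byte literal
+ * string "custom-header". Because the compression mode is USE_CACHE, the header is inserted into the dynamic
+ * table and returned in result->data.header_field. */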
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c b/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
new file mode 100644
index 0000000000..6d792c14c5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack_encoder.c
@@ -0,0 +1,418 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/hpack.h>
+
+#define HPACK_LOGF(level, encoder, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_ENCODER, "id=%p [HPACK]: " text, (encoder)->log_id, __VA_ARGS__)
+#define HPACK_LOG(level, encoder, text) HPACK_LOGF(level, encoder, "%s", text)
+
+struct aws_huffman_symbol_coder *hpack_get_coder(void);
+
+void aws_hpack_encoder_init(struct aws_hpack_encoder *encoder, struct aws_allocator *allocator, const void *log_id) {
+
+ AWS_ZERO_STRUCT(*encoder);
+ encoder->log_id = log_id;
+
+ aws_huffman_encoder_init(&encoder->huffman_encoder, hpack_get_coder());
+
+ aws_hpack_context_init(&encoder->context, allocator, AWS_LS_HTTP_ENCODER, log_id);
+
+ encoder->dynamic_table_size_update.pending = false;
+ encoder->dynamic_table_size_update.latest_value = SIZE_MAX;
+ encoder->dynamic_table_size_update.smallest_value = SIZE_MAX;
+}
+
+void aws_hpack_encoder_clean_up(struct aws_hpack_encoder *encoder) {
+ aws_hpack_context_clean_up(&encoder->context);
+ AWS_ZERO_STRUCT(*encoder);
+}
+
+void aws_hpack_encoder_set_huffman_mode(struct aws_hpack_encoder *encoder, enum aws_hpack_huffman_mode mode) {
+ encoder->huffman_mode = mode;
+}
+
+void aws_hpack_encoder_update_max_table_size(struct aws_hpack_encoder *encoder, uint32_t new_max_size) {
+
+ if (!encoder->dynamic_table_size_update.pending) {
+ encoder->dynamic_table_size_update.pending = true;
+ }
+ encoder->dynamic_table_size_update.smallest_value =
+ aws_min_size(new_max_size, encoder->dynamic_table_size_update.smallest_value);
+
+ /* TODO: don't necessarily go as high as possible. The peer said the encoder's
+ * dynamic table COULD get this big, but it's not required to.
+ * It's probably not a good idea to let the peer decide how much memory we allocate.
+ * Not sure how to cap it though... Use a hardcoded number?
+ * Match whatever SETTINGS_HEADER_TABLE_SIZE this side sends? */
+ encoder->dynamic_table_size_update.latest_value = new_max_size;
+}
+
+/* Return a byte with the N right-most bits masked.
+ * Ex: 2 -> 00000011 */
+static uint8_t s_masked_right_bits_u8(uint8_t num_masked_bits) {
+ AWS_ASSERT(num_masked_bits <= 8);
+ const uint8_t cut_bits = 8 - num_masked_bits;
+ return UINT8_MAX >> cut_bits;
+}
+
+/* If buffer isn't big enough, grow it intelligently */
+static int s_ensure_space(struct aws_byte_buf *output, size_t required_space) {
+ size_t available_space = output->capacity - output->len;
+ if (required_space <= available_space) {
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Capacity must grow to at least this size */
+ size_t required_capacity;
+ if (aws_add_size_checked(output->len, required_space, &required_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Prefer to double capacity, but if that's not enough grow to exactly required_capacity */
+ size_t double_capacity = aws_add_size_saturating(output->capacity, output->capacity);
+ size_t reserve = aws_max_size(required_capacity, double_capacity);
+ return aws_byte_buf_reserve(output, reserve);
+}
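+
+/* For instance, s_ensure_space() on a buffer with capacity 64 and len 60 asked for 10 more
+ * bytes has only 4 bytes free, so it grows: required_capacity is 70, doubling gives 128,
+ * and aws_byte_buf_reserve() is called with max(70, 128) = 128. */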
+
+int aws_hpack_encode_integer(
+ uint64_t integer,
+ uint8_t starting_bits,
+ uint8_t prefix_size,
+ struct aws_byte_buf *output) {
+ AWS_ASSERT(prefix_size <= 8);
+
+ const uint8_t prefix_mask = s_masked_right_bits_u8(prefix_size);
+ AWS_ASSERT((starting_bits & prefix_mask) == 0);
+
+ const size_t original_len = output->len;
+
+ if (integer < prefix_mask) {
+ /* If the integer fits inside the specified number of bits but won't be all 1's, just write it */
+
+ /* Just write out the bits we care about */
+ uint8_t first_byte = starting_bits | (uint8_t)integer;
+ if (aws_byte_buf_append_byte_dynamic(output, first_byte)) {
+ goto error;
+ }
+ } else {
+ /* Set all of the bits in the first octet to 1 */
+ uint8_t first_byte = starting_bits | prefix_mask;
+ if (aws_byte_buf_append_byte_dynamic(output, first_byte)) {
+ goto error;
+ }
+
+ integer -= prefix_mask;
+
+ const uint64_t hi_57bit_mask = UINT64_MAX - (UINT8_MAX >> 1);
+
+ do {
+            /* Take the lowest 7 bits of the integer */
+ uint8_t this_octet = integer % 128;
+ if (integer & hi_57bit_mask) {
+ /* If there's more after this octet, set the hi bit */
+ this_octet += 128;
+ }
+
+ if (aws_byte_buf_append_byte_dynamic(output, this_octet)) {
+ goto error;
+ }
+
+ /* Remove the written bits */
+ integer >>= 7;
+ } while (integer);
+ }
+
+ return AWS_OP_SUCCESS;
+error:
+ output->len = original_len;
+ return AWS_OP_ERR;
+}
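+
+/* Worked examples of the prefix-integer encoding in aws_hpack_encode_integer() above,
+ * taken from RFC-7541 Appendix C.1:
+ * - Encoding 10 with a 5-bit prefix: 10 < 31, so it fits in the prefix and the single
+ *   octet is starting_bits | 10.
+ * - Encoding 1337 with a 5-bit prefix: the prefix is filled with 31, leaving
+ *   1337 - 31 = 1306, which is emitted in 7-bit groups, low bits first: 0x9A (26 with
+ *   the continuation bit set), then 0x0A (10). Octets: (starting_bits | 0x1F), 0x9A, 0x0A. */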
+
+int aws_hpack_encode_string(
+ struct aws_hpack_encoder *encoder,
+ struct aws_byte_cursor to_encode,
+ struct aws_byte_buf *output) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&to_encode));
+ AWS_PRECONDITION(output);
+
+ const size_t original_len = output->len;
+
+ /* Determine length of encoded string (and whether or not to use huffman) */
+ uint8_t use_huffman;
+ size_t str_length;
+ switch (encoder->huffman_mode) {
+ case AWS_HPACK_HUFFMAN_NEVER:
+ use_huffman = 0;
+ str_length = to_encode.len;
+ break;
+
+ case AWS_HPACK_HUFFMAN_ALWAYS:
+ use_huffman = 1;
+ str_length = aws_huffman_get_encoded_length(&encoder->huffman_encoder, to_encode);
+ break;
+
+ case AWS_HPACK_HUFFMAN_SMALLEST:
+ str_length = aws_huffman_get_encoded_length(&encoder->huffman_encoder, to_encode);
+ if (str_length < to_encode.len) {
+ use_huffman = 1;
+ } else {
+ str_length = to_encode.len;
+ use_huffman = 0;
+ }
+ break;
+
+ default:
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ /*
+ * String literals are encoded like so (RFC-7541 5.2):
+ * H is whether or not data is huffman-encoded.
+ *
+ * 0 1 2 3 4 5 6 7
+ * +---+---+---+---+---+---+---+---+
+ * | H | String Length (7+) |
+ * +---+---------------------------+
+ * | String Data (Length octets) |
+ * +-------------------------------+
+ */
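+
+    /* For example (RFC-7541 C.4.1): the value "www.example.com" Huffman-encodes to
+     * 12 octets, so it is emitted as the length octet 0x8C (H set, length 12) followed
+     * by those 12 Huffman-coded octets; without Huffman coding it would be 0x0F
+     * (H clear, length 15) followed by the 15 raw octets. */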
+
+ /* Encode string length */
+ uint8_t starting_bits = use_huffman << 7;
+ if (aws_hpack_encode_integer(str_length, starting_bits, 7, output)) {
+ HPACK_LOGF(ERROR, encoder, "Error encoding HPACK integer: %s", aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Encode string data */
+ if (str_length > 0) {
+ if (use_huffman) {
+ /* Huffman encoder doesn't grow buffer, so we ensure it's big enough here */
+ if (s_ensure_space(output, str_length)) {
+ goto error;
+ }
+
+ if (aws_huffman_encode(&encoder->huffman_encoder, &to_encode, output)) {
+ HPACK_LOGF(ERROR, encoder, "Error from Huffman encoder: %s", aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ } else {
+ if (aws_byte_buf_append_dynamic(output, &to_encode)) {
+ goto error;
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ output->len = original_len;
+ aws_huffman_encoder_reset(&encoder->huffman_encoder);
+ return AWS_OP_ERR;
+}
+
+/* All types that HPACK might encode/decode (RFC-7541 6 - Binary Format) */
+enum aws_hpack_entry_type {
+ AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD, /* RFC-7541 6.1 */
+ AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING, /* RFC-7541 6.2.1 */
+ AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING, /* RFC-7541 6.2.2 */
+ AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED, /* RFC-7541 6.2.3 */
+ AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE, /* RFC-7541 6.3 */
+ AWS_HPACK_ENTRY_TYPE_COUNT,
+};
+
+/**
+ * The first byte of each entry type looks like this (RFC-7541 6):
+ * The "xxxxx" part is the "N-bit prefix" of the entry's first encoded integer.
+ *
+ * 1xxxxxxx: Indexed Header Field Representation
+ * 01xxxxxx: Literal Header Field with Incremental Indexing
+ * 001xxxxx: Dynamic Table Size Update
+ * 0001xxxx: Literal Header Field Never Indexed
+ * 0000xxxx: Literal Header Field without Indexing
+ */
+static const uint8_t s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_TYPE_COUNT] = {
+ [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 1 << 7,
+ [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 1 << 6,
+ [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 1 << 5,
+ [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 1 << 4,
+ [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 0 << 4,
+};
+
+static const uint8_t s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_TYPE_COUNT] = {
+ [AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD] = 7,
+ [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING] = 6,
+ [AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE] = 5,
+ [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED] = 4,
+ [AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING] = 4,
+};
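+
+/* Example of how the two tables above combine (RFC-7541 C.2.4): an Indexed Header Field
+ * for static-table index 2 (":method: GET") uses starting bits 1xxxxxxx with a 7-bit
+ * prefix, so aws_hpack_encode_integer(2, 0x80, 7, output) produces the single octet 0x82. */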
+
+static int s_convert_http_compression_to_literal_entry_type(
+ enum aws_http_header_compression compression,
+ enum aws_hpack_entry_type *out_entry_type) {
+
+ switch (compression) {
+ case AWS_HTTP_HEADER_COMPRESSION_USE_CACHE:
+ *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING;
+ return AWS_OP_SUCCESS;
+
+ case AWS_HTTP_HEADER_COMPRESSION_NO_CACHE:
+ *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITHOUT_INDEXING;
+ return AWS_OP_SUCCESS;
+
+ case AWS_HTTP_HEADER_COMPRESSION_NO_FORWARD_CACHE:
+ *out_entry_type = AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_NEVER_INDEXED;
+ return AWS_OP_SUCCESS;
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+}
+
+static int s_encode_header_field(
+ struct aws_hpack_encoder *encoder,
+ const struct aws_http_header *header,
+ struct aws_byte_buf *output) {
+
+ AWS_PRECONDITION(encoder);
+ AWS_PRECONDITION(header);
+ AWS_PRECONDITION(output);
+
+ size_t original_len = output->len;
+
+ /* Search for header-field in tables */
+ bool found_indexed_value;
+ size_t header_index = aws_hpack_find_index(&encoder->context, header, true, &found_indexed_value);
+
+ if (header->compression != AWS_HTTP_HEADER_COMPRESSION_USE_CACHE) {
+ /* If user doesn't want to use indexed value, then don't use it */
+ found_indexed_value = false;
+ }
+
+ if (header_index && found_indexed_value) {
+ /* Indexed header field */
+ const enum aws_hpack_entry_type entry_type = AWS_HPACK_ENTRY_INDEXED_HEADER_FIELD;
+
+ /* encode the one index (along with the entry type), and we're done! */
+ uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[entry_type];
+ uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[entry_type];
+ if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Else, Literal header field... */
+
+ /* determine exactly which type of literal header-field to encode. */
+ enum aws_hpack_entry_type literal_entry_type = AWS_HPACK_ENTRY_TYPE_COUNT;
+ if (s_convert_http_compression_to_literal_entry_type(header->compression, &literal_entry_type)) {
+ goto error;
+ }
+
+ /* the entry type makes up the first few bits of the next integer we encode */
+ uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[literal_entry_type];
+ uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[literal_entry_type];
+
+ if (header_index) {
+ /* Literal header field, indexed name */
+
+ /* first encode the index of name */
+ if (aws_hpack_encode_integer(header_index, starting_bit_pattern, num_prefix_bits, output)) {
+ goto error;
+ }
+ } else {
+ /* Literal header field, new name */
+
+ /* first encode index of 0 to indicate that header-name is not indexed */
+ if (aws_hpack_encode_integer(0, starting_bit_pattern, num_prefix_bits, output)) {
+ goto error;
+ }
+
+ /* next encode header-name string */
+ if (aws_hpack_encode_string(encoder, header->name, output)) {
+ goto error;
+ }
+ }
+
+ /* then encode header-value string, and we're done encoding! */
+ if (aws_hpack_encode_string(encoder, header->value, output)) {
+ goto error;
+ }
+
+ /* if "incremental indexing" type, insert header into the dynamic table. */
+ if (AWS_HPACK_ENTRY_LITERAL_HEADER_FIELD_WITH_INCREMENTAL_INDEXING == literal_entry_type) {
+ if (aws_hpack_insert_header(&encoder->context, header)) {
+ goto error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+error:
+ output->len = original_len;
+ return AWS_OP_ERR;
+}
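+
+/* For reference, RFC-7541 C.2.1 illustrates the literal-with-incremental-indexing path in
+ * s_encode_header_field() above for a new name: "custom-key: custom-header" becomes 0x40
+ * (entry type bits, name index 0), 0x0A + "custom-key" (10-octet string literal),
+ * 0x0D + "custom-header" (13-octet string literal), after which the header is inserted
+ * into the dynamic table. */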
+
+int aws_hpack_encode_header_block(
+ struct aws_hpack_encoder *encoder,
+ const struct aws_http_headers *headers,
+ struct aws_byte_buf *output) {
+
+    /* Encode a dynamic table size update at the beginning of the first header-block
+     * following a change to the dynamic table size (RFC-7541 4.2). If the size changed
+     * more than once since the last header-block, the smallest intermediate size is
+     * signaled first, then the final size. */
+ if (encoder->dynamic_table_size_update.pending) {
+ if (encoder->dynamic_table_size_update.smallest_value != encoder->dynamic_table_size_update.latest_value) {
+ size_t smallest_update_value = encoder->dynamic_table_size_update.smallest_value;
+ HPACK_LOGF(
+ TRACE, encoder, "Encoding smallest dynamic table size update entry size: %zu", smallest_update_value);
+ if (aws_hpack_resize_dynamic_table(&encoder->context, smallest_update_value)) {
+ HPACK_LOGF(ERROR, encoder, "Dynamic table resize failed, size: %zu", smallest_update_value);
+ return AWS_OP_ERR;
+ }
+ uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+ uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+ if (aws_hpack_encode_integer(smallest_update_value, starting_bit_pattern, num_prefix_bits, output)) {
+ HPACK_LOGF(
+ ERROR,
+ encoder,
+ "Integer encoding failed for table size update entry, integer: %zu",
+ smallest_update_value);
+ return AWS_OP_ERR;
+ }
+ }
+ size_t last_update_value = encoder->dynamic_table_size_update.latest_value;
+ HPACK_LOGF(TRACE, encoder, "Encoding last dynamic table size update entry size: %zu", last_update_value);
+ if (aws_hpack_resize_dynamic_table(&encoder->context, last_update_value)) {
+ HPACK_LOGF(ERROR, encoder, "Dynamic table resize failed, size: %zu", last_update_value);
+ return AWS_OP_ERR;
+ }
+ uint8_t starting_bit_pattern = s_hpack_entry_starting_bit_pattern[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+ uint8_t num_prefix_bits = s_hpack_entry_num_prefix_bits[AWS_HPACK_ENTRY_DYNAMIC_TABLE_RESIZE];
+ if (aws_hpack_encode_integer(last_update_value, starting_bit_pattern, num_prefix_bits, output)) {
+ HPACK_LOGF(
+ ERROR, encoder, "Integer encoding failed for table size update entry, integer: %zu", last_update_value);
+ return AWS_OP_ERR;
+ }
+
+ encoder->dynamic_table_size_update.pending = false;
+ encoder->dynamic_table_size_update.latest_value = SIZE_MAX;
+ encoder->dynamic_table_size_update.smallest_value = SIZE_MAX;
+ }
+
+ const size_t num_headers = aws_http_headers_count(headers);
+ for (size_t i = 0; i < num_headers; ++i) {
+ struct aws_http_header header;
+ aws_http_headers_get_index(headers, i, &header);
+ if (s_encode_header_field(encoder, &header, output)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c b/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
new file mode 100644
index 0000000000..4c832f6a7c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/hpack_huffman_static.c
@@ -0,0 +1,2337 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/* WARNING: THIS FILE WAS AUTOMATICALLY GENERATED. DO NOT EDIT. */
+/* clang-format off */
+
+#include <aws/compression/huffman.h>
+
+static struct aws_huffman_code code_points[] = {
+ { .pattern = 0x1ff8, .num_bits = 13 }, /* ' ' 0 */
+ { .pattern = 0x7fffd8, .num_bits = 23 }, /* ' ' 1 */
+ { .pattern = 0xfffffe2, .num_bits = 28 }, /* ' ' 2 */
+ { .pattern = 0xfffffe3, .num_bits = 28 }, /* ' ' 3 */
+ { .pattern = 0xfffffe4, .num_bits = 28 }, /* ' ' 4 */
+ { .pattern = 0xfffffe5, .num_bits = 28 }, /* ' ' 5 */
+ { .pattern = 0xfffffe6, .num_bits = 28 }, /* ' ' 6 */
+ { .pattern = 0xfffffe7, .num_bits = 28 }, /* ' ' 7 */
+ { .pattern = 0xfffffe8, .num_bits = 28 }, /* ' ' 8 */
+ { .pattern = 0xffffea, .num_bits = 24 }, /* ' ' 9 */
+ { .pattern = 0x3ffffffc, .num_bits = 30 }, /* ' ' 10 */
+ { .pattern = 0xfffffe9, .num_bits = 28 }, /* ' ' 11 */
+ { .pattern = 0xfffffea, .num_bits = 28 }, /* ' ' 12 */
+ { .pattern = 0x3ffffffd, .num_bits = 30 }, /* ' ' 13 */
+ { .pattern = 0xfffffeb, .num_bits = 28 }, /* ' ' 14 */
+ { .pattern = 0xfffffec, .num_bits = 28 }, /* ' ' 15 */
+ { .pattern = 0xfffffed, .num_bits = 28 }, /* ' ' 16 */
+ { .pattern = 0xfffffee, .num_bits = 28 }, /* ' ' 17 */
+ { .pattern = 0xfffffef, .num_bits = 28 }, /* ' ' 18 */
+ { .pattern = 0xffffff0, .num_bits = 28 }, /* ' ' 19 */
+ { .pattern = 0xffffff1, .num_bits = 28 }, /* ' ' 20 */
+ { .pattern = 0xffffff2, .num_bits = 28 }, /* ' ' 21 */
+ { .pattern = 0x3ffffffe, .num_bits = 30 }, /* ' ' 22 */
+ { .pattern = 0xffffff3, .num_bits = 28 }, /* ' ' 23 */
+ { .pattern = 0xffffff4, .num_bits = 28 }, /* ' ' 24 */
+ { .pattern = 0xffffff5, .num_bits = 28 }, /* ' ' 25 */
+ { .pattern = 0xffffff6, .num_bits = 28 }, /* ' ' 26 */
+ { .pattern = 0xffffff7, .num_bits = 28 }, /* ' ' 27 */
+ { .pattern = 0xffffff8, .num_bits = 28 }, /* ' ' 28 */
+ { .pattern = 0xffffff9, .num_bits = 28 }, /* ' ' 29 */
+ { .pattern = 0xffffffa, .num_bits = 28 }, /* ' ' 30 */
+ { .pattern = 0xffffffb, .num_bits = 28 }, /* ' ' 31 */
+ { .pattern = 0x14, .num_bits = 6 }, /* ' ' 32 */
+ { .pattern = 0x3f8, .num_bits = 10 }, /* '!' 33 */
+ { .pattern = 0x3f9, .num_bits = 10 }, /* '"' 34 */
+ { .pattern = 0xffa, .num_bits = 12 }, /* '#' 35 */
+ { .pattern = 0x1ff9, .num_bits = 13 }, /* '$' 36 */
+ { .pattern = 0x15, .num_bits = 6 }, /* '%' 37 */
+ { .pattern = 0xf8, .num_bits = 8 }, /* '&' 38 */
+ { .pattern = 0x7fa, .num_bits = 11 }, /* ''' 39 */
+ { .pattern = 0x3fa, .num_bits = 10 }, /* '(' 40 */
+ { .pattern = 0x3fb, .num_bits = 10 }, /* ')' 41 */
+ { .pattern = 0xf9, .num_bits = 8 }, /* '*' 42 */
+ { .pattern = 0x7fb, .num_bits = 11 }, /* '+' 43 */
+ { .pattern = 0xfa, .num_bits = 8 }, /* ',' 44 */
+ { .pattern = 0x16, .num_bits = 6 }, /* '-' 45 */
+ { .pattern = 0x17, .num_bits = 6 }, /* '.' 46 */
+ { .pattern = 0x18, .num_bits = 6 }, /* '/' 47 */
+ { .pattern = 0x0, .num_bits = 5 }, /* '0' 48 */
+ { .pattern = 0x1, .num_bits = 5 }, /* '1' 49 */
+ { .pattern = 0x2, .num_bits = 5 }, /* '2' 50 */
+ { .pattern = 0x19, .num_bits = 6 }, /* '3' 51 */
+ { .pattern = 0x1a, .num_bits = 6 }, /* '4' 52 */
+ { .pattern = 0x1b, .num_bits = 6 }, /* '5' 53 */
+ { .pattern = 0x1c, .num_bits = 6 }, /* '6' 54 */
+ { .pattern = 0x1d, .num_bits = 6 }, /* '7' 55 */
+ { .pattern = 0x1e, .num_bits = 6 }, /* '8' 56 */
+ { .pattern = 0x1f, .num_bits = 6 }, /* '9' 57 */
+ { .pattern = 0x5c, .num_bits = 7 }, /* ':' 58 */
+ { .pattern = 0xfb, .num_bits = 8 }, /* ';' 59 */
+ { .pattern = 0x7ffc, .num_bits = 15 }, /* '<' 60 */
+ { .pattern = 0x20, .num_bits = 6 }, /* '=' 61 */
+ { .pattern = 0xffb, .num_bits = 12 }, /* '>' 62 */
+ { .pattern = 0x3fc, .num_bits = 10 }, /* '?' 63 */
+ { .pattern = 0x1ffa, .num_bits = 13 }, /* '@' 64 */
+ { .pattern = 0x21, .num_bits = 6 }, /* 'A' 65 */
+ { .pattern = 0x5d, .num_bits = 7 }, /* 'B' 66 */
+ { .pattern = 0x5e, .num_bits = 7 }, /* 'C' 67 */
+ { .pattern = 0x5f, .num_bits = 7 }, /* 'D' 68 */
+ { .pattern = 0x60, .num_bits = 7 }, /* 'E' 69 */
+ { .pattern = 0x61, .num_bits = 7 }, /* 'F' 70 */
+ { .pattern = 0x62, .num_bits = 7 }, /* 'G' 71 */
+ { .pattern = 0x63, .num_bits = 7 }, /* 'H' 72 */
+ { .pattern = 0x64, .num_bits = 7 }, /* 'I' 73 */
+ { .pattern = 0x65, .num_bits = 7 }, /* 'J' 74 */
+ { .pattern = 0x66, .num_bits = 7 }, /* 'K' 75 */
+ { .pattern = 0x67, .num_bits = 7 }, /* 'L' 76 */
+ { .pattern = 0x68, .num_bits = 7 }, /* 'M' 77 */
+ { .pattern = 0x69, .num_bits = 7 }, /* 'N' 78 */
+ { .pattern = 0x6a, .num_bits = 7 }, /* 'O' 79 */
+ { .pattern = 0x6b, .num_bits = 7 }, /* 'P' 80 */
+ { .pattern = 0x6c, .num_bits = 7 }, /* 'Q' 81 */
+ { .pattern = 0x6d, .num_bits = 7 }, /* 'R' 82 */
+ { .pattern = 0x6e, .num_bits = 7 }, /* 'S' 83 */
+ { .pattern = 0x6f, .num_bits = 7 }, /* 'T' 84 */
+ { .pattern = 0x70, .num_bits = 7 }, /* 'U' 85 */
+ { .pattern = 0x71, .num_bits = 7 }, /* 'V' 86 */
+ { .pattern = 0x72, .num_bits = 7 }, /* 'W' 87 */
+ { .pattern = 0xfc, .num_bits = 8 }, /* 'X' 88 */
+ { .pattern = 0x73, .num_bits = 7 }, /* 'Y' 89 */
+ { .pattern = 0xfd, .num_bits = 8 }, /* 'Z' 90 */
+ { .pattern = 0x1ffb, .num_bits = 13 }, /* '[' 91 */
+ { .pattern = 0x7fff0, .num_bits = 19 }, /* '\' 92 */
+ { .pattern = 0x1ffc, .num_bits = 13 }, /* ']' 93 */
+ { .pattern = 0x3ffc, .num_bits = 14 }, /* '^' 94 */
+ { .pattern = 0x22, .num_bits = 6 }, /* '_' 95 */
+ { .pattern = 0x7ffd, .num_bits = 15 }, /* '`' 96 */
+ { .pattern = 0x3, .num_bits = 5 }, /* 'a' 97 */
+ { .pattern = 0x23, .num_bits = 6 }, /* 'b' 98 */
+ { .pattern = 0x4, .num_bits = 5 }, /* 'c' 99 */
+ { .pattern = 0x24, .num_bits = 6 }, /* 'd' 100 */
+ { .pattern = 0x5, .num_bits = 5 }, /* 'e' 101 */
+ { .pattern = 0x25, .num_bits = 6 }, /* 'f' 102 */
+ { .pattern = 0x26, .num_bits = 6 }, /* 'g' 103 */
+ { .pattern = 0x27, .num_bits = 6 }, /* 'h' 104 */
+ { .pattern = 0x6, .num_bits = 5 }, /* 'i' 105 */
+ { .pattern = 0x74, .num_bits = 7 }, /* 'j' 106 */
+ { .pattern = 0x75, .num_bits = 7 }, /* 'k' 107 */
+ { .pattern = 0x28, .num_bits = 6 }, /* 'l' 108 */
+ { .pattern = 0x29, .num_bits = 6 }, /* 'm' 109 */
+ { .pattern = 0x2a, .num_bits = 6 }, /* 'n' 110 */
+ { .pattern = 0x7, .num_bits = 5 }, /* 'o' 111 */
+ { .pattern = 0x2b, .num_bits = 6 }, /* 'p' 112 */
+ { .pattern = 0x76, .num_bits = 7 }, /* 'q' 113 */
+ { .pattern = 0x2c, .num_bits = 6 }, /* 'r' 114 */
+ { .pattern = 0x8, .num_bits = 5 }, /* 's' 115 */
+ { .pattern = 0x9, .num_bits = 5 }, /* 't' 116 */
+ { .pattern = 0x2d, .num_bits = 6 }, /* 'u' 117 */
+ { .pattern = 0x77, .num_bits = 7 }, /* 'v' 118 */
+ { .pattern = 0x78, .num_bits = 7 }, /* 'w' 119 */
+ { .pattern = 0x79, .num_bits = 7 }, /* 'x' 120 */
+ { .pattern = 0x7a, .num_bits = 7 }, /* 'y' 121 */
+ { .pattern = 0x7b, .num_bits = 7 }, /* 'z' 122 */
+ { .pattern = 0x7ffe, .num_bits = 15 }, /* '{' 123 */
+ { .pattern = 0x7fc, .num_bits = 11 }, /* '|' 124 */
+ { .pattern = 0x3ffd, .num_bits = 14 }, /* '}' 125 */
+ { .pattern = 0x1ffd, .num_bits = 13 }, /* '~' 126 */
+ { .pattern = 0xffffffc, .num_bits = 28 }, /* ' ' 127 */
+ { .pattern = 0xfffe6, .num_bits = 20 }, /* ' ' 128 */
+ { .pattern = 0x3fffd2, .num_bits = 22 }, /* ' ' 129 */
+ { .pattern = 0xfffe7, .num_bits = 20 }, /* ' ' 130 */
+ { .pattern = 0xfffe8, .num_bits = 20 }, /* ' ' 131 */
+ { .pattern = 0x3fffd3, .num_bits = 22 }, /* ' ' 132 */
+ { .pattern = 0x3fffd4, .num_bits = 22 }, /* ' ' 133 */
+ { .pattern = 0x3fffd5, .num_bits = 22 }, /* ' ' 134 */
+ { .pattern = 0x7fffd9, .num_bits = 23 }, /* ' ' 135 */
+ { .pattern = 0x3fffd6, .num_bits = 22 }, /* ' ' 136 */
+ { .pattern = 0x7fffda, .num_bits = 23 }, /* ' ' 137 */
+ { .pattern = 0x7fffdb, .num_bits = 23 }, /* ' ' 138 */
+ { .pattern = 0x7fffdc, .num_bits = 23 }, /* ' ' 139 */
+ { .pattern = 0x7fffdd, .num_bits = 23 }, /* ' ' 140 */
+ { .pattern = 0x7fffde, .num_bits = 23 }, /* ' ' 141 */
+ { .pattern = 0xffffeb, .num_bits = 24 }, /* ' ' 142 */
+ { .pattern = 0x7fffdf, .num_bits = 23 }, /* ' ' 143 */
+ { .pattern = 0xffffec, .num_bits = 24 }, /* ' ' 144 */
+ { .pattern = 0xffffed, .num_bits = 24 }, /* ' ' 145 */
+ { .pattern = 0x3fffd7, .num_bits = 22 }, /* ' ' 146 */
+ { .pattern = 0x7fffe0, .num_bits = 23 }, /* ' ' 147 */
+ { .pattern = 0xffffee, .num_bits = 24 }, /* ' ' 148 */
+ { .pattern = 0x7fffe1, .num_bits = 23 }, /* ' ' 149 */
+ { .pattern = 0x7fffe2, .num_bits = 23 }, /* ' ' 150 */
+ { .pattern = 0x7fffe3, .num_bits = 23 }, /* ' ' 151 */
+ { .pattern = 0x7fffe4, .num_bits = 23 }, /* ' ' 152 */
+ { .pattern = 0x1fffdc, .num_bits = 21 }, /* ' ' 153 */
+ { .pattern = 0x3fffd8, .num_bits = 22 }, /* ' ' 154 */
+ { .pattern = 0x7fffe5, .num_bits = 23 }, /* ' ' 155 */
+ { .pattern = 0x3fffd9, .num_bits = 22 }, /* ' ' 156 */
+ { .pattern = 0x7fffe6, .num_bits = 23 }, /* ' ' 157 */
+ { .pattern = 0x7fffe7, .num_bits = 23 }, /* ' ' 158 */
+ { .pattern = 0xffffef, .num_bits = 24 }, /* ' ' 159 */
+ { .pattern = 0x3fffda, .num_bits = 22 }, /* ' ' 160 */
+ { .pattern = 0x1fffdd, .num_bits = 21 }, /* ' ' 161 */
+ { .pattern = 0xfffe9, .num_bits = 20 }, /* ' ' 162 */
+ { .pattern = 0x3fffdb, .num_bits = 22 }, /* ' ' 163 */
+ { .pattern = 0x3fffdc, .num_bits = 22 }, /* ' ' 164 */
+ { .pattern = 0x7fffe8, .num_bits = 23 }, /* ' ' 165 */
+ { .pattern = 0x7fffe9, .num_bits = 23 }, /* ' ' 166 */
+ { .pattern = 0x1fffde, .num_bits = 21 }, /* ' ' 167 */
+ { .pattern = 0x7fffea, .num_bits = 23 }, /* ' ' 168 */
+ { .pattern = 0x3fffdd, .num_bits = 22 }, /* ' ' 169 */
+ { .pattern = 0x3fffde, .num_bits = 22 }, /* ' ' 170 */
+ { .pattern = 0xfffff0, .num_bits = 24 }, /* ' ' 171 */
+ { .pattern = 0x1fffdf, .num_bits = 21 }, /* ' ' 172 */
+ { .pattern = 0x3fffdf, .num_bits = 22 }, /* ' ' 173 */
+ { .pattern = 0x7fffeb, .num_bits = 23 }, /* ' ' 174 */
+ { .pattern = 0x7fffec, .num_bits = 23 }, /* ' ' 175 */
+ { .pattern = 0x1fffe0, .num_bits = 21 }, /* ' ' 176 */
+ { .pattern = 0x1fffe1, .num_bits = 21 }, /* ' ' 177 */
+ { .pattern = 0x3fffe0, .num_bits = 22 }, /* ' ' 178 */
+ { .pattern = 0x1fffe2, .num_bits = 21 }, /* ' ' 179 */
+ { .pattern = 0x7fffed, .num_bits = 23 }, /* ' ' 180 */
+ { .pattern = 0x3fffe1, .num_bits = 22 }, /* ' ' 181 */
+ { .pattern = 0x7fffee, .num_bits = 23 }, /* ' ' 182 */
+ { .pattern = 0x7fffef, .num_bits = 23 }, /* ' ' 183 */
+ { .pattern = 0xfffea, .num_bits = 20 }, /* ' ' 184 */
+ { .pattern = 0x3fffe2, .num_bits = 22 }, /* ' ' 185 */
+ { .pattern = 0x3fffe3, .num_bits = 22 }, /* ' ' 186 */
+ { .pattern = 0x3fffe4, .num_bits = 22 }, /* ' ' 187 */
+ { .pattern = 0x7ffff0, .num_bits = 23 }, /* ' ' 188 */
+ { .pattern = 0x3fffe5, .num_bits = 22 }, /* ' ' 189 */
+ { .pattern = 0x3fffe6, .num_bits = 22 }, /* ' ' 190 */
+ { .pattern = 0x7ffff1, .num_bits = 23 }, /* ' ' 191 */
+ { .pattern = 0x3ffffe0, .num_bits = 26 }, /* ' ' 192 */
+ { .pattern = 0x3ffffe1, .num_bits = 26 }, /* ' ' 193 */
+ { .pattern = 0xfffeb, .num_bits = 20 }, /* ' ' 194 */
+ { .pattern = 0x7fff1, .num_bits = 19 }, /* ' ' 195 */
+ { .pattern = 0x3fffe7, .num_bits = 22 }, /* ' ' 196 */
+ { .pattern = 0x7ffff2, .num_bits = 23 }, /* ' ' 197 */
+ { .pattern = 0x3fffe8, .num_bits = 22 }, /* ' ' 198 */
+ { .pattern = 0x1ffffec, .num_bits = 25 }, /* ' ' 199 */
+ { .pattern = 0x3ffffe2, .num_bits = 26 }, /* ' ' 200 */
+ { .pattern = 0x3ffffe3, .num_bits = 26 }, /* ' ' 201 */
+ { .pattern = 0x3ffffe4, .num_bits = 26 }, /* ' ' 202 */
+ { .pattern = 0x7ffffde, .num_bits = 27 }, /* ' ' 203 */
+ { .pattern = 0x7ffffdf, .num_bits = 27 }, /* ' ' 204 */
+ { .pattern = 0x3ffffe5, .num_bits = 26 }, /* ' ' 205 */
+ { .pattern = 0xfffff1, .num_bits = 24 }, /* ' ' 206 */
+ { .pattern = 0x1ffffed, .num_bits = 25 }, /* ' ' 207 */
+ { .pattern = 0x7fff2, .num_bits = 19 }, /* ' ' 208 */
+ { .pattern = 0x1fffe3, .num_bits = 21 }, /* ' ' 209 */
+ { .pattern = 0x3ffffe6, .num_bits = 26 }, /* ' ' 210 */
+ { .pattern = 0x7ffffe0, .num_bits = 27 }, /* ' ' 211 */
+ { .pattern = 0x7ffffe1, .num_bits = 27 }, /* ' ' 212 */
+ { .pattern = 0x3ffffe7, .num_bits = 26 }, /* ' ' 213 */
+ { .pattern = 0x7ffffe2, .num_bits = 27 }, /* ' ' 214 */
+ { .pattern = 0xfffff2, .num_bits = 24 }, /* ' ' 215 */
+ { .pattern = 0x1fffe4, .num_bits = 21 }, /* ' ' 216 */
+ { .pattern = 0x1fffe5, .num_bits = 21 }, /* ' ' 217 */
+ { .pattern = 0x3ffffe8, .num_bits = 26 }, /* ' ' 218 */
+ { .pattern = 0x3ffffe9, .num_bits = 26 }, /* ' ' 219 */
+ { .pattern = 0xffffffd, .num_bits = 28 }, /* ' ' 220 */
+ { .pattern = 0x7ffffe3, .num_bits = 27 }, /* ' ' 221 */
+ { .pattern = 0x7ffffe4, .num_bits = 27 }, /* ' ' 222 */
+ { .pattern = 0x7ffffe5, .num_bits = 27 }, /* ' ' 223 */
+ { .pattern = 0xfffec, .num_bits = 20 }, /* ' ' 224 */
+ { .pattern = 0xfffff3, .num_bits = 24 }, /* ' ' 225 */
+ { .pattern = 0xfffed, .num_bits = 20 }, /* ' ' 226 */
+ { .pattern = 0x1fffe6, .num_bits = 21 }, /* ' ' 227 */
+ { .pattern = 0x3fffe9, .num_bits = 22 }, /* ' ' 228 */
+ { .pattern = 0x1fffe7, .num_bits = 21 }, /* ' ' 229 */
+ { .pattern = 0x1fffe8, .num_bits = 21 }, /* ' ' 230 */
+ { .pattern = 0x7ffff3, .num_bits = 23 }, /* ' ' 231 */
+ { .pattern = 0x3fffea, .num_bits = 22 }, /* ' ' 232 */
+ { .pattern = 0x3fffeb, .num_bits = 22 }, /* ' ' 233 */
+ { .pattern = 0x1ffffee, .num_bits = 25 }, /* ' ' 234 */
+ { .pattern = 0x1ffffef, .num_bits = 25 }, /* ' ' 235 */
+ { .pattern = 0xfffff4, .num_bits = 24 }, /* ' ' 236 */
+ { .pattern = 0xfffff5, .num_bits = 24 }, /* ' ' 237 */
+ { .pattern = 0x3ffffea, .num_bits = 26 }, /* ' ' 238 */
+ { .pattern = 0x7ffff4, .num_bits = 23 }, /* ' ' 239 */
+ { .pattern = 0x3ffffeb, .num_bits = 26 }, /* ' ' 240 */
+ { .pattern = 0x7ffffe6, .num_bits = 27 }, /* ' ' 241 */
+ { .pattern = 0x3ffffec, .num_bits = 26 }, /* ' ' 242 */
+ { .pattern = 0x3ffffed, .num_bits = 26 }, /* ' ' 243 */
+ { .pattern = 0x7ffffe7, .num_bits = 27 }, /* ' ' 244 */
+ { .pattern = 0x7ffffe8, .num_bits = 27 }, /* ' ' 245 */
+ { .pattern = 0x7ffffe9, .num_bits = 27 }, /* ' ' 246 */
+ { .pattern = 0x7ffffea, .num_bits = 27 }, /* ' ' 247 */
+ { .pattern = 0x7ffffeb, .num_bits = 27 }, /* ' ' 248 */
+ { .pattern = 0xffffffe, .num_bits = 28 }, /* ' ' 249 */
+ { .pattern = 0x7ffffec, .num_bits = 27 }, /* ' ' 250 */
+ { .pattern = 0x7ffffed, .num_bits = 27 }, /* ' ' 251 */
+ { .pattern = 0x7ffffee, .num_bits = 27 }, /* ' ' 252 */
+ { .pattern = 0x7ffffef, .num_bits = 27 }, /* ' ' 253 */
+ { .pattern = 0x7fffff0, .num_bits = 27 }, /* ' ' 254 */
+ { .pattern = 0x3ffffee, .num_bits = 26 }, /* ' ' 255 */
+};
+
+static struct aws_huffman_code encode_symbol(uint8_t symbol, void *userdata) {
+ (void)userdata;
+
+ return code_points[symbol];
+}
+
+/* NOLINTNEXTLINE(readability-function-size) */
+static uint8_t decode_symbol(uint32_t bits, uint8_t *symbol, void *userdata) {
+ (void)userdata;
+
+ if (bits & 0x80000000) {
+ goto node_1;
+ } else {
+ goto node_0;
+ }
+
+node_0:
+ if (bits & 0x40000000) {
+ goto node_01;
+ } else {
+ goto node_00;
+ }
+
+node_00:
+ if (bits & 0x20000000) {
+ goto node_001;
+ } else {
+ goto node_000;
+ }
+
+node_000:
+ if (bits & 0x10000000) {
+ goto node_0001;
+ } else {
+ goto node_0000;
+ }
+
+node_0000:
+ if (bits & 0x8000000) {
+ *symbol = 49;
+ return 5;
+ } else {
+ *symbol = 48;
+ return 5;
+ }
+
+node_0001:
+ if (bits & 0x8000000) {
+ *symbol = 97;
+ return 5;
+ } else {
+ *symbol = 50;
+ return 5;
+ }
+
+node_001:
+ if (bits & 0x10000000) {
+ goto node_0011;
+ } else {
+ goto node_0010;
+ }
+
+node_0010:
+ if (bits & 0x8000000) {
+ *symbol = 101;
+ return 5;
+ } else {
+ *symbol = 99;
+ return 5;
+ }
+
+node_0011:
+ if (bits & 0x8000000) {
+ *symbol = 111;
+ return 5;
+ } else {
+ *symbol = 105;
+ return 5;
+ }
+
+node_01:
+ if (bits & 0x20000000) {
+ goto node_011;
+ } else {
+ goto node_010;
+ }
+
+node_010:
+ if (bits & 0x10000000) {
+ goto node_0101;
+ } else {
+ goto node_0100;
+ }
+
+node_0100:
+ if (bits & 0x8000000) {
+ *symbol = 116;
+ return 5;
+ } else {
+ *symbol = 115;
+ return 5;
+ }
+
+node_0101:
+ if (bits & 0x8000000) {
+ goto node_01011;
+ } else {
+ goto node_01010;
+ }
+
+node_01010:
+ if (bits & 0x4000000) {
+ *symbol = 37;
+ return 6;
+ } else {
+ *symbol = 32;
+ return 6;
+ }
+
+node_01011:
+ if (bits & 0x4000000) {
+ *symbol = 46;
+ return 6;
+ } else {
+ *symbol = 45;
+ return 6;
+ }
+
+node_011:
+ if (bits & 0x10000000) {
+ goto node_0111;
+ } else {
+ goto node_0110;
+ }
+
+node_0110:
+ if (bits & 0x8000000) {
+ goto node_01101;
+ } else {
+ goto node_01100;
+ }
+
+node_01100:
+ if (bits & 0x4000000) {
+ *symbol = 51;
+ return 6;
+ } else {
+ *symbol = 47;
+ return 6;
+ }
+
+node_01101:
+ if (bits & 0x4000000) {
+ *symbol = 53;
+ return 6;
+ } else {
+ *symbol = 52;
+ return 6;
+ }
+
+node_0111:
+ if (bits & 0x8000000) {
+ goto node_01111;
+ } else {
+ goto node_01110;
+ }
+
+node_01110:
+ if (bits & 0x4000000) {
+ *symbol = 55;
+ return 6;
+ } else {
+ *symbol = 54;
+ return 6;
+ }
+
+node_01111:
+ if (bits & 0x4000000) {
+ *symbol = 57;
+ return 6;
+ } else {
+ *symbol = 56;
+ return 6;
+ }
+
+node_1:
+ if (bits & 0x40000000) {
+ goto node_11;
+ } else {
+ goto node_10;
+ }
+
+node_10:
+ if (bits & 0x20000000) {
+ goto node_101;
+ } else {
+ goto node_100;
+ }
+
+node_100:
+ if (bits & 0x10000000) {
+ goto node_1001;
+ } else {
+ goto node_1000;
+ }
+
+node_1000:
+ if (bits & 0x8000000) {
+ goto node_10001;
+ } else {
+ goto node_10000;
+ }
+
+node_10000:
+ if (bits & 0x4000000) {
+ *symbol = 65;
+ return 6;
+ } else {
+ *symbol = 61;
+ return 6;
+ }
+
+node_10001:
+ if (bits & 0x4000000) {
+ *symbol = 98;
+ return 6;
+ } else {
+ *symbol = 95;
+ return 6;
+ }
+
+node_1001:
+ if (bits & 0x8000000) {
+ goto node_10011;
+ } else {
+ goto node_10010;
+ }
+
+node_10010:
+ if (bits & 0x4000000) {
+ *symbol = 102;
+ return 6;
+ } else {
+ *symbol = 100;
+ return 6;
+ }
+
+node_10011:
+ if (bits & 0x4000000) {
+ *symbol = 104;
+ return 6;
+ } else {
+ *symbol = 103;
+ return 6;
+ }
+
+node_101:
+ if (bits & 0x10000000) {
+ goto node_1011;
+ } else {
+ goto node_1010;
+ }
+
+node_1010:
+ if (bits & 0x8000000) {
+ goto node_10101;
+ } else {
+ goto node_10100;
+ }
+
+node_10100:
+ if (bits & 0x4000000) {
+ *symbol = 109;
+ return 6;
+ } else {
+ *symbol = 108;
+ return 6;
+ }
+
+node_10101:
+ if (bits & 0x4000000) {
+ *symbol = 112;
+ return 6;
+ } else {
+ *symbol = 110;
+ return 6;
+ }
+
+node_1011:
+ if (bits & 0x8000000) {
+ goto node_10111;
+ } else {
+ goto node_10110;
+ }
+
+node_10110:
+ if (bits & 0x4000000) {
+ *symbol = 117;
+ return 6;
+ } else {
+ *symbol = 114;
+ return 6;
+ }
+
+node_10111:
+ if (bits & 0x4000000) {
+ goto node_101111;
+ } else {
+ goto node_101110;
+ }
+
+node_101110:
+ if (bits & 0x2000000) {
+ *symbol = 66;
+ return 7;
+ } else {
+ *symbol = 58;
+ return 7;
+ }
+
+node_101111:
+ if (bits & 0x2000000) {
+ *symbol = 68;
+ return 7;
+ } else {
+ *symbol = 67;
+ return 7;
+ }
+
+node_11:
+ if (bits & 0x20000000) {
+ goto node_111;
+ } else {
+ goto node_110;
+ }
+
+node_110:
+ if (bits & 0x10000000) {
+ goto node_1101;
+ } else {
+ goto node_1100;
+ }
+
+node_1100:
+ if (bits & 0x8000000) {
+ goto node_11001;
+ } else {
+ goto node_11000;
+ }
+
+node_11000:
+ if (bits & 0x4000000) {
+ goto node_110001;
+ } else {
+ goto node_110000;
+ }
+
+node_110000:
+ if (bits & 0x2000000) {
+ *symbol = 70;
+ return 7;
+ } else {
+ *symbol = 69;
+ return 7;
+ }
+
+node_110001:
+ if (bits & 0x2000000) {
+ *symbol = 72;
+ return 7;
+ } else {
+ *symbol = 71;
+ return 7;
+ }
+
+node_11001:
+ if (bits & 0x4000000) {
+ goto node_110011;
+ } else {
+ goto node_110010;
+ }
+
+node_110010:
+ if (bits & 0x2000000) {
+ *symbol = 74;
+ return 7;
+ } else {
+ *symbol = 73;
+ return 7;
+ }
+
+node_110011:
+ if (bits & 0x2000000) {
+ *symbol = 76;
+ return 7;
+ } else {
+ *symbol = 75;
+ return 7;
+ }
+
+node_1101:
+ if (bits & 0x8000000) {
+ goto node_11011;
+ } else {
+ goto node_11010;
+ }
+
+node_11010:
+ if (bits & 0x4000000) {
+ goto node_110101;
+ } else {
+ goto node_110100;
+ }
+
+node_110100:
+ if (bits & 0x2000000) {
+ *symbol = 78;
+ return 7;
+ } else {
+ *symbol = 77;
+ return 7;
+ }
+
+node_110101:
+ if (bits & 0x2000000) {
+ *symbol = 80;
+ return 7;
+ } else {
+ *symbol = 79;
+ return 7;
+ }
+
+node_11011:
+ if (bits & 0x4000000) {
+ goto node_110111;
+ } else {
+ goto node_110110;
+ }
+
+node_110110:
+ if (bits & 0x2000000) {
+ *symbol = 82;
+ return 7;
+ } else {
+ *symbol = 81;
+ return 7;
+ }
+
+node_110111:
+ if (bits & 0x2000000) {
+ *symbol = 84;
+ return 7;
+ } else {
+ *symbol = 83;
+ return 7;
+ }
+
+node_111:
+ if (bits & 0x10000000) {
+ goto node_1111;
+ } else {
+ goto node_1110;
+ }
+
+node_1110:
+ if (bits & 0x8000000) {
+ goto node_11101;
+ } else {
+ goto node_11100;
+ }
+
+node_11100:
+ if (bits & 0x4000000) {
+ goto node_111001;
+ } else {
+ goto node_111000;
+ }
+
+node_111000:
+ if (bits & 0x2000000) {
+ *symbol = 86;
+ return 7;
+ } else {
+ *symbol = 85;
+ return 7;
+ }
+
+node_111001:
+ if (bits & 0x2000000) {
+ *symbol = 89;
+ return 7;
+ } else {
+ *symbol = 87;
+ return 7;
+ }
+
+node_11101:
+ if (bits & 0x4000000) {
+ goto node_111011;
+ } else {
+ goto node_111010;
+ }
+
+node_111010:
+ if (bits & 0x2000000) {
+ *symbol = 107;
+ return 7;
+ } else {
+ *symbol = 106;
+ return 7;
+ }
+
+node_111011:
+ if (bits & 0x2000000) {
+ *symbol = 118;
+ return 7;
+ } else {
+ *symbol = 113;
+ return 7;
+ }
+
+node_1111:
+ if (bits & 0x8000000) {
+ goto node_11111;
+ } else {
+ goto node_11110;
+ }
+
+node_11110:
+ if (bits & 0x4000000) {
+ goto node_111101;
+ } else {
+ goto node_111100;
+ }
+
+node_111100:
+ if (bits & 0x2000000) {
+ *symbol = 120;
+ return 7;
+ } else {
+ *symbol = 119;
+ return 7;
+ }
+
+node_111101:
+ if (bits & 0x2000000) {
+ *symbol = 122;
+ return 7;
+ } else {
+ *symbol = 121;
+ return 7;
+ }
+
+node_11111:
+ if (bits & 0x4000000) {
+ goto node_111111;
+ } else {
+ goto node_111110;
+ }
+
+node_111110:
+ if (bits & 0x2000000) {
+ goto node_1111101;
+ } else {
+ goto node_1111100;
+ }
+
+node_1111100:
+ if (bits & 0x1000000) {
+ *symbol = 42;
+ return 8;
+ } else {
+ *symbol = 38;
+ return 8;
+ }
+
+node_1111101:
+ if (bits & 0x1000000) {
+ *symbol = 59;
+ return 8;
+ } else {
+ *symbol = 44;
+ return 8;
+ }
+
+node_111111:
+ if (bits & 0x2000000) {
+ goto node_1111111;
+ } else {
+ goto node_1111110;
+ }
+
+node_1111110:
+ if (bits & 0x1000000) {
+ *symbol = 90;
+ return 8;
+ } else {
+ *symbol = 88;
+ return 8;
+ }
+
+node_1111111:
+ if (bits & 0x1000000) {
+ goto node_11111111;
+ } else {
+ goto node_11111110;
+ }
+
+node_11111110:
+ if (bits & 0x800000) {
+ goto node_111111101;
+ } else {
+ goto node_111111100;
+ }
+
+node_111111100:
+ if (bits & 0x400000) {
+ *symbol = 34;
+ return 10;
+ } else {
+ *symbol = 33;
+ return 10;
+ }
+
+node_111111101:
+ if (bits & 0x400000) {
+ *symbol = 41;
+ return 10;
+ } else {
+ *symbol = 40;
+ return 10;
+ }
+
+node_11111111:
+ if (bits & 0x800000) {
+ goto node_111111111;
+ } else {
+ goto node_111111110;
+ }
+
+node_111111110:
+ if (bits & 0x400000) {
+ goto node_1111111101;
+ } else {
+ *symbol = 63;
+ return 10;
+ }
+
+node_1111111101:
+ if (bits & 0x200000) {
+ *symbol = 43;
+ return 11;
+ } else {
+ *symbol = 39;
+ return 11;
+ }
+
+node_111111111:
+ if (bits & 0x400000) {
+ goto node_1111111111;
+ } else {
+ goto node_1111111110;
+ }
+
+node_1111111110:
+ if (bits & 0x200000) {
+ goto node_11111111101;
+ } else {
+ *symbol = 124;
+ return 11;
+ }
+
+node_11111111101:
+ if (bits & 0x100000) {
+ *symbol = 62;
+ return 12;
+ } else {
+ *symbol = 35;
+ return 12;
+ }
+
+node_1111111111:
+ if (bits & 0x200000) {
+ goto node_11111111111;
+ } else {
+ goto node_11111111110;
+ }
+
+node_11111111110:
+ if (bits & 0x100000) {
+ goto node_111111111101;
+ } else {
+ goto node_111111111100;
+ }
+
+node_111111111100:
+ if (bits & 0x80000) {
+ *symbol = 36;
+ return 13;
+ } else {
+ *symbol = 0;
+ return 13;
+ }
+
+node_111111111101:
+ if (bits & 0x80000) {
+ *symbol = 91;
+ return 13;
+ } else {
+ *symbol = 64;
+ return 13;
+ }
+
+node_11111111111:
+ if (bits & 0x100000) {
+ goto node_111111111111;
+ } else {
+ goto node_111111111110;
+ }
+
+node_111111111110:
+ if (bits & 0x80000) {
+ *symbol = 126;
+ return 13;
+ } else {
+ *symbol = 93;
+ return 13;
+ }
+
+node_111111111111:
+ if (bits & 0x80000) {
+ goto node_1111111111111;
+ } else {
+ goto node_1111111111110;
+ }
+
+node_1111111111110:
+ if (bits & 0x40000) {
+ *symbol = 125;
+ return 14;
+ } else {
+ *symbol = 94;
+ return 14;
+ }
+
+node_1111111111111:
+ if (bits & 0x40000) {
+ goto node_11111111111111;
+ } else {
+ goto node_11111111111110;
+ }
+
+node_11111111111110:
+ if (bits & 0x20000) {
+ *symbol = 96;
+ return 15;
+ } else {
+ *symbol = 60;
+ return 15;
+ }
+
+node_11111111111111:
+ if (bits & 0x20000) {
+ goto node_111111111111111;
+ } else {
+ *symbol = 123;
+ return 15;
+ }
+
+node_111111111111111:
+ if (bits & 0x10000) {
+ goto node_1111111111111111;
+ } else {
+ goto node_1111111111111110;
+ }
+
+node_1111111111111110:
+ if (bits & 0x8000) {
+ goto node_11111111111111101;
+ } else {
+ goto node_11111111111111100;
+ }
+
+node_11111111111111100:
+ if (bits & 0x4000) {
+ goto node_111111111111111001;
+ } else {
+ goto node_111111111111111000;
+ }
+
+node_111111111111111000:
+ if (bits & 0x2000) {
+ *symbol = 195;
+ return 19;
+ } else {
+ *symbol = 92;
+ return 19;
+ }
+
+node_111111111111111001:
+ if (bits & 0x2000) {
+ goto node_1111111111111110011;
+ } else {
+ *symbol = 208;
+ return 19;
+ }
+
+node_1111111111111110011:
+ if (bits & 0x1000) {
+ *symbol = 130;
+ return 20;
+ } else {
+ *symbol = 128;
+ return 20;
+ }
+
+node_11111111111111101:
+ if (bits & 0x4000) {
+ goto node_111111111111111011;
+ } else {
+ goto node_111111111111111010;
+ }
+
+node_111111111111111010:
+ if (bits & 0x2000) {
+ goto node_1111111111111110101;
+ } else {
+ goto node_1111111111111110100;
+ }
+
+node_1111111111111110100:
+ if (bits & 0x1000) {
+ *symbol = 162;
+ return 20;
+ } else {
+ *symbol = 131;
+ return 20;
+ }
+
+node_1111111111111110101:
+ if (bits & 0x1000) {
+ *symbol = 194;
+ return 20;
+ } else {
+ *symbol = 184;
+ return 20;
+ }
+
+node_111111111111111011:
+ if (bits & 0x2000) {
+ goto node_1111111111111110111;
+ } else {
+ goto node_1111111111111110110;
+ }
+
+node_1111111111111110110:
+ if (bits & 0x1000) {
+ *symbol = 226;
+ return 20;
+ } else {
+ *symbol = 224;
+ return 20;
+ }
+
+node_1111111111111110111:
+ if (bits & 0x1000) {
+ goto node_11111111111111101111;
+ } else {
+ goto node_11111111111111101110;
+ }
+
+node_11111111111111101110:
+ if (bits & 0x800) {
+ *symbol = 161;
+ return 21;
+ } else {
+ *symbol = 153;
+ return 21;
+ }
+
+node_11111111111111101111:
+ if (bits & 0x800) {
+ *symbol = 172;
+ return 21;
+ } else {
+ *symbol = 167;
+ return 21;
+ }
+
+node_1111111111111111:
+ if (bits & 0x8000) {
+ goto node_11111111111111111;
+ } else {
+ goto node_11111111111111110;
+ }
+
+node_11111111111111110:
+ if (bits & 0x4000) {
+ goto node_111111111111111101;
+ } else {
+ goto node_111111111111111100;
+ }
+
+node_111111111111111100:
+ if (bits & 0x2000) {
+ goto node_1111111111111111001;
+ } else {
+ goto node_1111111111111111000;
+ }
+
+node_1111111111111111000:
+ if (bits & 0x1000) {
+ goto node_11111111111111110001;
+ } else {
+ goto node_11111111111111110000;
+ }
+
+node_11111111111111110000:
+ if (bits & 0x800) {
+ *symbol = 177;
+ return 21;
+ } else {
+ *symbol = 176;
+ return 21;
+ }
+
+node_11111111111111110001:
+ if (bits & 0x800) {
+ *symbol = 209;
+ return 21;
+ } else {
+ *symbol = 179;
+ return 21;
+ }
+
+node_1111111111111111001:
+ if (bits & 0x1000) {
+ goto node_11111111111111110011;
+ } else {
+ goto node_11111111111111110010;
+ }
+
+node_11111111111111110010:
+ if (bits & 0x800) {
+ *symbol = 217;
+ return 21;
+ } else {
+ *symbol = 216;
+ return 21;
+ }
+
+node_11111111111111110011:
+ if (bits & 0x800) {
+ *symbol = 229;
+ return 21;
+ } else {
+ *symbol = 227;
+ return 21;
+ }
+
+node_111111111111111101:
+ if (bits & 0x2000) {
+ goto node_1111111111111111011;
+ } else {
+ goto node_1111111111111111010;
+ }
+
+node_1111111111111111010:
+ if (bits & 0x1000) {
+ goto node_11111111111111110101;
+ } else {
+ goto node_11111111111111110100;
+ }
+
+node_11111111111111110100:
+ if (bits & 0x800) {
+ goto node_111111111111111101001;
+ } else {
+ *symbol = 230;
+ return 21;
+ }
+
+node_111111111111111101001:
+ if (bits & 0x400) {
+ *symbol = 132;
+ return 22;
+ } else {
+ *symbol = 129;
+ return 22;
+ }
+
+node_11111111111111110101:
+ if (bits & 0x800) {
+ goto node_111111111111111101011;
+ } else {
+ goto node_111111111111111101010;
+ }
+
+node_111111111111111101010:
+ if (bits & 0x400) {
+ *symbol = 134;
+ return 22;
+ } else {
+ *symbol = 133;
+ return 22;
+ }
+
+node_111111111111111101011:
+ if (bits & 0x400) {
+ *symbol = 146;
+ return 22;
+ } else {
+ *symbol = 136;
+ return 22;
+ }
+
+node_1111111111111111011:
+ if (bits & 0x1000) {
+ goto node_11111111111111110111;
+ } else {
+ goto node_11111111111111110110;
+ }
+
+node_11111111111111110110:
+ if (bits & 0x800) {
+ goto node_111111111111111101101;
+ } else {
+ goto node_111111111111111101100;
+ }
+
+node_111111111111111101100:
+ if (bits & 0x400) {
+ *symbol = 156;
+ return 22;
+ } else {
+ *symbol = 154;
+ return 22;
+ }
+
+node_111111111111111101101:
+ if (bits & 0x400) {
+ *symbol = 163;
+ return 22;
+ } else {
+ *symbol = 160;
+ return 22;
+ }
+
+node_11111111111111110111:
+ if (bits & 0x800) {
+ goto node_111111111111111101111;
+ } else {
+ goto node_111111111111111101110;
+ }
+
+node_111111111111111101110:
+ if (bits & 0x400) {
+ *symbol = 169;
+ return 22;
+ } else {
+ *symbol = 164;
+ return 22;
+ }
+
+node_111111111111111101111:
+ if (bits & 0x400) {
+ *symbol = 173;
+ return 22;
+ } else {
+ *symbol = 170;
+ return 22;
+ }
+
+node_11111111111111111:
+ if (bits & 0x4000) {
+ goto node_111111111111111111;
+ } else {
+ goto node_111111111111111110;
+ }
+
+node_111111111111111110:
+ if (bits & 0x2000) {
+ goto node_1111111111111111101;
+ } else {
+ goto node_1111111111111111100;
+ }
+
+node_1111111111111111100:
+ if (bits & 0x1000) {
+ goto node_11111111111111111001;
+ } else {
+ goto node_11111111111111111000;
+ }
+
+node_11111111111111111000:
+ if (bits & 0x800) {
+ goto node_111111111111111110001;
+ } else {
+ goto node_111111111111111110000;
+ }
+
+node_111111111111111110000:
+ if (bits & 0x400) {
+ *symbol = 181;
+ return 22;
+ } else {
+ *symbol = 178;
+ return 22;
+ }
+
+node_111111111111111110001:
+ if (bits & 0x400) {
+ *symbol = 186;
+ return 22;
+ } else {
+ *symbol = 185;
+ return 22;
+ }
+
+node_11111111111111111001:
+ if (bits & 0x800) {
+ goto node_111111111111111110011;
+ } else {
+ goto node_111111111111111110010;
+ }
+
+node_111111111111111110010:
+ if (bits & 0x400) {
+ *symbol = 189;
+ return 22;
+ } else {
+ *symbol = 187;
+ return 22;
+ }
+
+node_111111111111111110011:
+ if (bits & 0x400) {
+ *symbol = 196;
+ return 22;
+ } else {
+ *symbol = 190;
+ return 22;
+ }
+
+node_1111111111111111101:
+ if (bits & 0x1000) {
+ goto node_11111111111111111011;
+ } else {
+ goto node_11111111111111111010;
+ }
+
+node_11111111111111111010:
+ if (bits & 0x800) {
+ goto node_111111111111111110101;
+ } else {
+ goto node_111111111111111110100;
+ }
+
+node_111111111111111110100:
+ if (bits & 0x400) {
+ *symbol = 228;
+ return 22;
+ } else {
+ *symbol = 198;
+ return 22;
+ }
+
+node_111111111111111110101:
+ if (bits & 0x400) {
+ *symbol = 233;
+ return 22;
+ } else {
+ *symbol = 232;
+ return 22;
+ }
+
+node_11111111111111111011:
+ if (bits & 0x800) {
+ goto node_111111111111111110111;
+ } else {
+ goto node_111111111111111110110;
+ }
+
+node_111111111111111110110:
+ if (bits & 0x400) {
+ goto node_1111111111111111101101;
+ } else {
+ goto node_1111111111111111101100;
+ }
+
+node_1111111111111111101100:
+ if (bits & 0x200) {
+ *symbol = 135;
+ return 23;
+ } else {
+ *symbol = 1;
+ return 23;
+ }
+
+node_1111111111111111101101:
+ if (bits & 0x200) {
+ *symbol = 138;
+ return 23;
+ } else {
+ *symbol = 137;
+ return 23;
+ }
+
+node_111111111111111110111:
+ if (bits & 0x400) {
+ goto node_1111111111111111101111;
+ } else {
+ goto node_1111111111111111101110;
+ }
+
+node_1111111111111111101110:
+ if (bits & 0x200) {
+ *symbol = 140;
+ return 23;
+ } else {
+ *symbol = 139;
+ return 23;
+ }
+
+node_1111111111111111101111:
+ if (bits & 0x200) {
+ *symbol = 143;
+ return 23;
+ } else {
+ *symbol = 141;
+ return 23;
+ }
+
+node_111111111111111111:
+ if (bits & 0x2000) {
+ goto node_1111111111111111111;
+ } else {
+ goto node_1111111111111111110;
+ }
+
+node_1111111111111111110:
+ if (bits & 0x1000) {
+ goto node_11111111111111111101;
+ } else {
+ goto node_11111111111111111100;
+ }
+
+node_11111111111111111100:
+ if (bits & 0x800) {
+ goto node_111111111111111111001;
+ } else {
+ goto node_111111111111111111000;
+ }
+
+node_111111111111111111000:
+ if (bits & 0x400) {
+ goto node_1111111111111111110001;
+ } else {
+ goto node_1111111111111111110000;
+ }
+
+node_1111111111111111110000:
+ if (bits & 0x200) {
+ *symbol = 149;
+ return 23;
+ } else {
+ *symbol = 147;
+ return 23;
+ }
+
+node_1111111111111111110001:
+ if (bits & 0x200) {
+ *symbol = 151;
+ return 23;
+ } else {
+ *symbol = 150;
+ return 23;
+ }
+
+node_111111111111111111001:
+ if (bits & 0x400) {
+ goto node_1111111111111111110011;
+ } else {
+ goto node_1111111111111111110010;
+ }
+
+node_1111111111111111110010:
+ if (bits & 0x200) {
+ *symbol = 155;
+ return 23;
+ } else {
+ *symbol = 152;
+ return 23;
+ }
+
+node_1111111111111111110011:
+ if (bits & 0x200) {
+ *symbol = 158;
+ return 23;
+ } else {
+ *symbol = 157;
+ return 23;
+ }
+
+node_11111111111111111101:
+ if (bits & 0x800) {
+ goto node_111111111111111111011;
+ } else {
+ goto node_111111111111111111010;
+ }
+
+node_111111111111111111010:
+ if (bits & 0x400) {
+ goto node_1111111111111111110101;
+ } else {
+ goto node_1111111111111111110100;
+ }
+
+node_1111111111111111110100:
+ if (bits & 0x200) {
+ *symbol = 166;
+ return 23;
+ } else {
+ *symbol = 165;
+ return 23;
+ }
+
+node_1111111111111111110101:
+ if (bits & 0x200) {
+ *symbol = 174;
+ return 23;
+ } else {
+ *symbol = 168;
+ return 23;
+ }
+
+node_111111111111111111011:
+ if (bits & 0x400) {
+ goto node_1111111111111111110111;
+ } else {
+ goto node_1111111111111111110110;
+ }
+
+node_1111111111111111110110:
+ if (bits & 0x200) {
+ *symbol = 180;
+ return 23;
+ } else {
+ *symbol = 175;
+ return 23;
+ }
+
+node_1111111111111111110111:
+ if (bits & 0x200) {
+ *symbol = 183;
+ return 23;
+ } else {
+ *symbol = 182;
+ return 23;
+ }
+
+node_1111111111111111111:
+ if (bits & 0x1000) {
+ goto node_11111111111111111111;
+ } else {
+ goto node_11111111111111111110;
+ }
+
+node_11111111111111111110:
+ if (bits & 0x800) {
+ goto node_111111111111111111101;
+ } else {
+ goto node_111111111111111111100;
+ }
+
+node_111111111111111111100:
+ if (bits & 0x400) {
+ goto node_1111111111111111111001;
+ } else {
+ goto node_1111111111111111111000;
+ }
+
+node_1111111111111111111000:
+ if (bits & 0x200) {
+ *symbol = 191;
+ return 23;
+ } else {
+ *symbol = 188;
+ return 23;
+ }
+
+node_1111111111111111111001:
+ if (bits & 0x200) {
+ *symbol = 231;
+ return 23;
+ } else {
+ *symbol = 197;
+ return 23;
+ }
+
+node_111111111111111111101:
+ if (bits & 0x400) {
+ goto node_1111111111111111111011;
+ } else {
+ goto node_1111111111111111111010;
+ }
+
+node_1111111111111111111010:
+ if (bits & 0x200) {
+ goto node_11111111111111111110101;
+ } else {
+ *symbol = 239;
+ return 23;
+ }
+
+node_11111111111111111110101:
+ if (bits & 0x100) {
+ *symbol = 142;
+ return 24;
+ } else {
+ *symbol = 9;
+ return 24;
+ }
+
+node_1111111111111111111011:
+ if (bits & 0x200) {
+ goto node_11111111111111111110111;
+ } else {
+ goto node_11111111111111111110110;
+ }
+
+node_11111111111111111110110:
+ if (bits & 0x100) {
+ *symbol = 145;
+ return 24;
+ } else {
+ *symbol = 144;
+ return 24;
+ }
+
+node_11111111111111111110111:
+ if (bits & 0x100) {
+ *symbol = 159;
+ return 24;
+ } else {
+ *symbol = 148;
+ return 24;
+ }
+
+node_11111111111111111111:
+ if (bits & 0x800) {
+ goto node_111111111111111111111;
+ } else {
+ goto node_111111111111111111110;
+ }
+
+node_111111111111111111110:
+ if (bits & 0x400) {
+ goto node_1111111111111111111101;
+ } else {
+ goto node_1111111111111111111100;
+ }
+
+node_1111111111111111111100:
+ if (bits & 0x200) {
+ goto node_11111111111111111111001;
+ } else {
+ goto node_11111111111111111111000;
+ }
+
+node_11111111111111111111000:
+ if (bits & 0x100) {
+ *symbol = 206;
+ return 24;
+ } else {
+ *symbol = 171;
+ return 24;
+ }
+
+node_11111111111111111111001:
+ if (bits & 0x100) {
+ *symbol = 225;
+ return 24;
+ } else {
+ *symbol = 215;
+ return 24;
+ }
+
+node_1111111111111111111101:
+ if (bits & 0x200) {
+ goto node_11111111111111111111011;
+ } else {
+ goto node_11111111111111111111010;
+ }
+
+node_11111111111111111111010:
+ if (bits & 0x100) {
+ *symbol = 237;
+ return 24;
+ } else {
+ *symbol = 236;
+ return 24;
+ }
+
+node_11111111111111111111011:
+ if (bits & 0x100) {
+ goto node_111111111111111111110111;
+ } else {
+ goto node_111111111111111111110110;
+ }
+
+node_111111111111111111110110:
+ if (bits & 0x80) {
+ *symbol = 207;
+ return 25;
+ } else {
+ *symbol = 199;
+ return 25;
+ }
+
+node_111111111111111111110111:
+ if (bits & 0x80) {
+ *symbol = 235;
+ return 25;
+ } else {
+ *symbol = 234;
+ return 25;
+ }
+
+node_111111111111111111111:
+ if (bits & 0x400) {
+ goto node_1111111111111111111111;
+ } else {
+ goto node_1111111111111111111110;
+ }
+
+node_1111111111111111111110:
+ if (bits & 0x200) {
+ goto node_11111111111111111111101;
+ } else {
+ goto node_11111111111111111111100;
+ }
+
+node_11111111111111111111100:
+ if (bits & 0x100) {
+ goto node_111111111111111111111001;
+ } else {
+ goto node_111111111111111111111000;
+ }
+
+node_111111111111111111111000:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110001;
+ } else {
+ goto node_1111111111111111111110000;
+ }
+
+node_1111111111111111111110000:
+ if (bits & 0x40) {
+ *symbol = 193;
+ return 26;
+ } else {
+ *symbol = 192;
+ return 26;
+ }
+
+node_1111111111111111111110001:
+ if (bits & 0x40) {
+ *symbol = 201;
+ return 26;
+ } else {
+ *symbol = 200;
+ return 26;
+ }
+
+node_111111111111111111111001:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110011;
+ } else {
+ goto node_1111111111111111111110010;
+ }
+
+node_1111111111111111111110010:
+ if (bits & 0x40) {
+ *symbol = 205;
+ return 26;
+ } else {
+ *symbol = 202;
+ return 26;
+ }
+
+node_1111111111111111111110011:
+ if (bits & 0x40) {
+ *symbol = 213;
+ return 26;
+ } else {
+ *symbol = 210;
+ return 26;
+ }
+
+node_11111111111111111111101:
+ if (bits & 0x100) {
+ goto node_111111111111111111111011;
+ } else {
+ goto node_111111111111111111111010;
+ }
+
+node_111111111111111111111010:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110101;
+ } else {
+ goto node_1111111111111111111110100;
+ }
+
+node_1111111111111111111110100:
+ if (bits & 0x40) {
+ *symbol = 219;
+ return 26;
+ } else {
+ *symbol = 218;
+ return 26;
+ }
+
+node_1111111111111111111110101:
+ if (bits & 0x40) {
+ *symbol = 240;
+ return 26;
+ } else {
+ *symbol = 238;
+ return 26;
+ }
+
+node_111111111111111111111011:
+ if (bits & 0x80) {
+ goto node_1111111111111111111110111;
+ } else {
+ goto node_1111111111111111111110110;
+ }
+
+node_1111111111111111111110110:
+ if (bits & 0x40) {
+ *symbol = 243;
+ return 26;
+ } else {
+ *symbol = 242;
+ return 26;
+ }
+
+node_1111111111111111111110111:
+ if (bits & 0x40) {
+ goto node_11111111111111111111101111;
+ } else {
+ *symbol = 255;
+ return 26;
+ }
+
+node_11111111111111111111101111:
+ if (bits & 0x20) {
+ *symbol = 204;
+ return 27;
+ } else {
+ *symbol = 203;
+ return 27;
+ }
+
+node_1111111111111111111111:
+ if (bits & 0x200) {
+ goto node_11111111111111111111111;
+ } else {
+ goto node_11111111111111111111110;
+ }
+
+node_11111111111111111111110:
+ if (bits & 0x100) {
+ goto node_111111111111111111111101;
+ } else {
+ goto node_111111111111111111111100;
+ }
+
+node_111111111111111111111100:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111001;
+ } else {
+ goto node_1111111111111111111111000;
+ }
+
+node_1111111111111111111111000:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110001;
+ } else {
+ goto node_11111111111111111111110000;
+ }
+
+node_11111111111111111111110000:
+ if (bits & 0x20) {
+ *symbol = 212;
+ return 27;
+ } else {
+ *symbol = 211;
+ return 27;
+ }
+
+node_11111111111111111111110001:
+ if (bits & 0x20) {
+ *symbol = 221;
+ return 27;
+ } else {
+ *symbol = 214;
+ return 27;
+ }
+
+node_1111111111111111111111001:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110011;
+ } else {
+ goto node_11111111111111111111110010;
+ }
+
+node_11111111111111111111110010:
+ if (bits & 0x20) {
+ *symbol = 223;
+ return 27;
+ } else {
+ *symbol = 222;
+ return 27;
+ }
+
+node_11111111111111111111110011:
+ if (bits & 0x20) {
+ *symbol = 244;
+ return 27;
+ } else {
+ *symbol = 241;
+ return 27;
+ }
+
+node_111111111111111111111101:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111011;
+ } else {
+ goto node_1111111111111111111111010;
+ }
+
+node_1111111111111111111111010:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110101;
+ } else {
+ goto node_11111111111111111111110100;
+ }
+
+node_11111111111111111111110100:
+ if (bits & 0x20) {
+ *symbol = 246;
+ return 27;
+ } else {
+ *symbol = 245;
+ return 27;
+ }
+
+node_11111111111111111111110101:
+ if (bits & 0x20) {
+ *symbol = 248;
+ return 27;
+ } else {
+ *symbol = 247;
+ return 27;
+ }
+
+node_1111111111111111111111011:
+ if (bits & 0x40) {
+ goto node_11111111111111111111110111;
+ } else {
+ goto node_11111111111111111111110110;
+ }
+
+node_11111111111111111111110110:
+ if (bits & 0x20) {
+ *symbol = 251;
+ return 27;
+ } else {
+ *symbol = 250;
+ return 27;
+ }
+
+node_11111111111111111111110111:
+ if (bits & 0x20) {
+ *symbol = 253;
+ return 27;
+ } else {
+ *symbol = 252;
+ return 27;
+ }
+
+node_11111111111111111111111:
+ if (bits & 0x100) {
+ goto node_111111111111111111111111;
+ } else {
+ goto node_111111111111111111111110;
+ }
+
+node_111111111111111111111110:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111101;
+ } else {
+ goto node_1111111111111111111111100;
+ }
+
+node_1111111111111111111111100:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111001;
+ } else {
+ goto node_11111111111111111111111000;
+ }
+
+node_11111111111111111111111000:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110001;
+ } else {
+ *symbol = 254;
+ return 27;
+ }
+
+node_111111111111111111111110001:
+ if (bits & 0x10) {
+ *symbol = 3;
+ return 28;
+ } else {
+ *symbol = 2;
+ return 28;
+ }
+
+node_11111111111111111111111001:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110011;
+ } else {
+ goto node_111111111111111111111110010;
+ }
+
+node_111111111111111111111110010:
+ if (bits & 0x10) {
+ *symbol = 5;
+ return 28;
+ } else {
+ *symbol = 4;
+ return 28;
+ }
+
+node_111111111111111111111110011:
+ if (bits & 0x10) {
+ *symbol = 7;
+ return 28;
+ } else {
+ *symbol = 6;
+ return 28;
+ }
+
+node_1111111111111111111111101:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111011;
+ } else {
+ goto node_11111111111111111111111010;
+ }
+
+node_11111111111111111111111010:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110101;
+ } else {
+ goto node_111111111111111111111110100;
+ }
+
+node_111111111111111111111110100:
+ if (bits & 0x10) {
+ *symbol = 11;
+ return 28;
+ } else {
+ *symbol = 8;
+ return 28;
+ }
+
+node_111111111111111111111110101:
+ if (bits & 0x10) {
+ *symbol = 14;
+ return 28;
+ } else {
+ *symbol = 12;
+ return 28;
+ }
+
+node_11111111111111111111111011:
+ if (bits & 0x20) {
+ goto node_111111111111111111111110111;
+ } else {
+ goto node_111111111111111111111110110;
+ }
+
+node_111111111111111111111110110:
+ if (bits & 0x10) {
+ *symbol = 16;
+ return 28;
+ } else {
+ *symbol = 15;
+ return 28;
+ }
+
+node_111111111111111111111110111:
+ if (bits & 0x10) {
+ *symbol = 18;
+ return 28;
+ } else {
+ *symbol = 17;
+ return 28;
+ }
+
+node_111111111111111111111111:
+ if (bits & 0x80) {
+ goto node_1111111111111111111111111;
+ } else {
+ goto node_1111111111111111111111110;
+ }
+
+node_1111111111111111111111110:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111101;
+ } else {
+ goto node_11111111111111111111111100;
+ }
+
+node_11111111111111111111111100:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111001;
+ } else {
+ goto node_111111111111111111111111000;
+ }
+
+node_111111111111111111111111000:
+ if (bits & 0x10) {
+ *symbol = 20;
+ return 28;
+ } else {
+ *symbol = 19;
+ return 28;
+ }
+
+node_111111111111111111111111001:
+ if (bits & 0x10) {
+ *symbol = 23;
+ return 28;
+ } else {
+ *symbol = 21;
+ return 28;
+ }
+
+node_11111111111111111111111101:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111011;
+ } else {
+ goto node_111111111111111111111111010;
+ }
+
+node_111111111111111111111111010:
+ if (bits & 0x10) {
+ *symbol = 25;
+ return 28;
+ } else {
+ *symbol = 24;
+ return 28;
+ }
+
+node_111111111111111111111111011:
+ if (bits & 0x10) {
+ *symbol = 27;
+ return 28;
+ } else {
+ *symbol = 26;
+ return 28;
+ }
+
+node_1111111111111111111111111:
+ if (bits & 0x40) {
+ goto node_11111111111111111111111111;
+ } else {
+ goto node_11111111111111111111111110;
+ }
+
+node_11111111111111111111111110:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111101;
+ } else {
+ goto node_111111111111111111111111100;
+ }
+
+node_111111111111111111111111100:
+ if (bits & 0x10) {
+ *symbol = 29;
+ return 28;
+ } else {
+ *symbol = 28;
+ return 28;
+ }
+
+node_111111111111111111111111101:
+ if (bits & 0x10) {
+ *symbol = 31;
+ return 28;
+ } else {
+ *symbol = 30;
+ return 28;
+ }
+
+node_11111111111111111111111111:
+ if (bits & 0x20) {
+ goto node_111111111111111111111111111;
+ } else {
+ goto node_111111111111111111111111110;
+ }
+
+node_111111111111111111111111110:
+ if (bits & 0x10) {
+ *symbol = 220;
+ return 28;
+ } else {
+ *symbol = 127;
+ return 28;
+ }
+
+node_111111111111111111111111111:
+ if (bits & 0x10) {
+ goto node_1111111111111111111111111111;
+ } else {
+ *symbol = 249;
+ return 28;
+ }
+
+node_1111111111111111111111111111:
+ if (bits & 0x8) {
+ goto node_11111111111111111111111111111;
+ } else {
+ goto node_11111111111111111111111111110;
+ }
+
+node_11111111111111111111111111110:
+ if (bits & 0x4) {
+ *symbol = 13;
+ return 30;
+ } else {
+ *symbol = 10;
+ return 30;
+ }
+
+node_11111111111111111111111111111:
+ if (bits & 0x4) {
+ return 0; /* invalid node */
+ } else {
+ *symbol = 22;
+ return 30;
+ }
+
+}
+
+struct aws_huffman_symbol_coder *hpack_get_coder(void) {
+
+ static struct aws_huffman_symbol_coder coder = {
+ .encode = encode_symbol,
+ .decode = decode_symbol,
+ .userdata = NULL,
+ };
+ return &coder;
+}
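+
+/*
+ * Minimal illustrative sketch (hypothetical code, not used by the library):
+ * the generated decoder above walks a canonical Huffman tree by testing one
+ * bit of an MSB-aligned 32-bit window per node and returning the number of
+ * bits consumed (0 for an invalid/EOS prefix). The toy decoder below does the
+ * same for an invented 3-symbol code {A="0", B="10", C="11"}; it assumes the
+ * <stdint.h> types already available in this file. E.g. a window starting
+ * with bits 1 0 1 1 ... (0xB0000000u) decodes to 'B' and consumes 2 bits.
+ */
+static uint8_t s_toy_decode(uint32_t bits, uint8_t *symbol) {
+    if (bits & 0x80000000u) {     /* first (most significant) bit is 1 */
+        if (bits & 0x40000000u) { /* prefix "11" */
+            *symbol = 'C';
+            return 2;
+        }
+        *symbol = 'B'; /* prefix "10" */
+        return 2;
+    }
+    *symbol = 'A'; /* prefix "0" */
+    return 1;
+}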
diff --git a/contrib/restricted/aws/aws-c-http/source/http.c b/contrib/restricted/aws/aws-c-http/source/http.c
new file mode 100644
index 0000000000..8a8fe92bd1
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/http.c
@@ -0,0 +1,565 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/hash_table.h>
+#include <aws/compression/compression.h>
+#include <aws/http/private/hpack.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+
+#include <ctype.h>
+
+#define AWS_DEFINE_ERROR_INFO_HTTP(CODE, STR) [(CODE)-0x0800] = AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-http")
+
+/* clang-format off */
+static struct aws_error_info s_errors[] = {
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_UNKNOWN,
+ "Encountered an unknown error."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_HEADER_NOT_FOUND,
+ "The specified header was not found"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_HEADER_FIELD,
+ "Invalid header field, including a forbidden header field."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_HEADER_NAME,
+ "Invalid header name."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_HEADER_VALUE,
+ "Invalid header value."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_METHOD,
+ "Method is invalid."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_PATH,
+ "Path is invalid."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_STATUS_CODE,
+ "Status code is invalid."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_MISSING_BODY_STREAM,
+ "Given the provided headers (ex: Content-Length), a body is expected."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_INVALID_BODY_STREAM,
+ "A body stream provided, but the message does not allow body (ex: response for HEAD Request and 304 response)"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_CONNECTION_CLOSED,
+ "The connection has closed or is closing."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_SWITCHED_PROTOCOLS,
+ "The connection has switched protocols."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL,
+ "An unsupported protocol was encountered."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_REACTION_REQUIRED,
+ "A necessary function was not invoked from a user callback."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_DATA_NOT_AVAILABLE,
+ "This data is not yet available."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT,
+ "Amount of data streamed out does not match the previously declared length."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_CALLBACK_FAILURE,
+ "A callback has reported failure."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE,
+ "Failed to upgrade HTTP connection to Websocket."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT,
+ "Websocket has sent CLOSE frame, no more data will be sent."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER,
+ "Operation cannot be performed because websocket has been converted to a midchannel handler."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_INVALID_STATE_FOR_ACQUIRE,
+ "Acquire called after the connection manager's ref count has reached zero"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_VENDED_CONNECTION_UNDERFLOW,
+ "Release called when the connection manager's vended connection count was zero"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_SERVER_CLOSED,
+ "The http server is closed, no more connections will be accepted"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_PROXY_CONNECT_FAILED,
+ "Proxy-based connection establishment failed because the CONNECT call failed"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_CONNECTION_MANAGER_SHUTTING_DOWN,
+ "Connection acquisition failed because connection manager is shutting down"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_CHANNEL_THROUGHPUT_FAILURE,
+ "Http connection channel shut down due to failure to meet throughput minimum"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_PROTOCOL_ERROR,
+ "Protocol rules violated by peer"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_STREAM_IDS_EXHAUSTED,
+ "Connection exhausted all possible HTTP-stream IDs. Establish a new connection for new streams."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_GOAWAY_RECEIVED,
+ "Peer sent GOAWAY to initiate connection shutdown. Establish a new connection to retry the HTTP-streams."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_RST_STREAM_RECEIVED,
+ "Peer sent RST_STREAM to terminate HTTP-stream."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_RST_STREAM_SENT,
+ "RST_STREAM has sent from local implementation and HTTP-stream has been terminated."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_STREAM_NOT_ACTIVATED,
+ "HTTP-stream must be activated before use."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_STREAM_HAS_COMPLETED,
+ "HTTP-stream has completed, action cannot be performed."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING,
+ "NTLM Proxy strategy was initiated without a challenge token"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE,
+ "Failure in user code while retrieving proxy auth token"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE,
+ "Proxy connection attempt failed but the negotiation could be continued on a new connection"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_PROTOCOL_SWITCH_FAILURE,
+ "Internal state failure prevent connection from switching protocols"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_MAX_CONCURRENT_STREAMS_EXCEEDED,
+ "Max concurrent stream reached"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN,
+ "Stream acquisition failed because stream manager is shutting down"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE,
+ "Stream acquisition failed because stream manager failed to acquire a connection"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION,
+ "Stream acquisition failed because stream manager got an unexpected version of HTTP connection"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR,
+ "Websocket protocol rules violated by peer"),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_MANUAL_WRITE_NOT_ENABLED,
+ "Manual write failed because manual writes are not enabled."),
+ AWS_DEFINE_ERROR_INFO_HTTP(
+ AWS_ERROR_HTTP_MANUAL_WRITE_HAS_COMPLETED,
+ "Manual write failed because manual writes are already completed."),
+};
+/* clang-format on */
+
+static struct aws_error_info_list s_error_list = {
+ .error_list = s_errors,
+ .count = AWS_ARRAY_SIZE(s_errors),
+};
+
+static struct aws_log_subject_info s_log_subject_infos[] = {
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_GENERAL, "http", "Misc HTTP logging"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION, "http-connection", "HTTP client or server connection"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_ENCODER, "http-encoder", "HTTP data encoder"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_DECODER, "http-decoder", "HTTP data decoder"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_SERVER, "http-server", "HTTP server socket listening for incoming connections"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_STREAM, "http-stream", "HTTP request-response exchange"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_CONNECTION_MANAGER, "connection-manager", "HTTP connection manager"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_STREAM_MANAGER, "http2-stream-manager", "HTTP/2 stream manager"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET, "websocket", "Websocket"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_HTTP_WEBSOCKET_SETUP, "websocket-setup", "Websocket setup"),
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_HTTP_PROXY_NEGOTIATION,
+ "proxy-negotiation",
+ "Negotiating an http connection with a proxy server"),
+};
+
+static struct aws_log_subject_info_list s_log_subject_list = {
+ .subject_list = s_log_subject_infos,
+ .count = AWS_ARRAY_SIZE(s_log_subject_infos),
+};
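+
+/*
+ * Minimal illustrative sketch (hypothetical helper, not called anywhere):
+ * once aws_http_library_init() below registers s_error_list and
+ * s_log_subject_list, an HTTP error code raised through aws-c-common's error
+ * machinery resolves to the strings above, and the log subjects can be used
+ * with the AWS_LOGF_* macros. Assumes the usual aws-c-common APIs
+ * (aws_raise_error, aws_last_error, aws_error_str, AWS_LOGF_ERROR).
+ */
+static int s_example_report_http_error(void) {
+    /* Raise an error from this library's range... */
+    aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+    /* ...and report it under the general HTTP log subject. */
+    AWS_LOGF_ERROR(
+        AWS_LS_HTTP_GENERAL,
+        "example failure: %d (%s)",
+        aws_last_error(),
+        aws_error_str(aws_last_error()));
+    return AWS_OP_ERR;
+}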
+
+struct aws_enum_value {
+ struct aws_allocator *allocator;
+ int value;
+};
+
+static void s_destroy_enum_value(void *value) {
+ struct aws_enum_value *enum_value = value;
+ aws_mem_release(enum_value->allocator, enum_value);
+}
+
+/**
+ * Given an array of aws_byte_cursors, init a hashtable where...
+ * Key is aws_byte_cursor* (pointing at a cursor in the array); comparisons are case-insensitive when ignore_case is true.
+ * Value is an allocated aws_enum_value holding the array index.
+ */
+static void s_init_str_to_enum_hash_table(
+ struct aws_hash_table *table,
+ struct aws_allocator *alloc,
+ struct aws_byte_cursor *str_array,
+ int start_index,
+ int end_index,
+ bool ignore_case) {
+
+ int err = aws_hash_table_init(
+ table,
+ alloc,
+ end_index - start_index,
+ ignore_case ? aws_hash_byte_cursor_ptr_ignore_case : aws_hash_byte_cursor_ptr,
+ (aws_hash_callback_eq_fn *)(ignore_case ? aws_byte_cursor_eq_ignore_case : aws_byte_cursor_eq),
+ NULL,
+ s_destroy_enum_value);
+ AWS_FATAL_ASSERT(!err);
+
+ for (int i = start_index; i < end_index; ++i) {
+ int was_created = 0;
+ struct aws_enum_value *enum_value = aws_mem_calloc(alloc, 1, sizeof(struct aws_enum_value));
+ AWS_FATAL_ASSERT(enum_value);
+ enum_value->allocator = alloc;
+ enum_value->value = i;
+
+ AWS_FATAL_ASSERT(str_array[i].ptr && "Missing enum string");
+ err = aws_hash_table_put(table, &str_array[i], (void *)enum_value, &was_created);
+ AWS_FATAL_ASSERT(!err && was_created);
+ }
+}
+
+/**
+ * Given key, get value from table initialized by s_init_str_to_enum_hash_table().
+ * Returns -1 if key not found.
+ */
+static int s_find_in_str_to_enum_hash_table(const struct aws_hash_table *table, struct aws_byte_cursor *key) {
+ struct aws_hash_element *elem;
+ aws_hash_table_find(table, key, &elem);
+ if (elem) {
+ struct aws_enum_value *enum_value = elem->value;
+ return enum_value->value;
+ }
+ return -1;
+}
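+
+/*
+ * Minimal illustrative sketch (hypothetical helper, not called anywhere):
+ * how the two helpers above pair up. A two-entry table is built from a
+ * made-up string array, then looked up case-insensitively; the value stored
+ * for each key is its array index.
+ */
+static void s_example_str_to_enum_usage(struct aws_allocator *alloc) {
+    static struct aws_byte_cursor s_example_strs[2];
+    s_example_strs[0] = aws_byte_cursor_from_c_str("foo");
+    s_example_strs[1] = aws_byte_cursor_from_c_str("bar");
+
+    struct aws_hash_table table;
+    s_init_str_to_enum_hash_table(&table, alloc, s_example_strs, 0, 2, true /* ignore case */);
+
+    struct aws_byte_cursor key = aws_byte_cursor_from_c_str("BAR");
+    int value = s_find_in_str_to_enum_hash_table(&table, &key); /* -> 1 */
+    (void)value;
+
+    aws_hash_table_clean_up(&table);
+}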
+
+/* METHODS */
+static struct aws_hash_table s_method_str_to_enum; /* for string -> enum lookup */
+static struct aws_byte_cursor s_method_enum_to_str[AWS_HTTP_METHOD_COUNT]; /* for enum -> string lookup */
+
+static void s_methods_init(struct aws_allocator *alloc) {
+ s_method_enum_to_str[AWS_HTTP_METHOD_GET] = aws_http_method_get;
+ s_method_enum_to_str[AWS_HTTP_METHOD_HEAD] = aws_http_method_head;
+ s_method_enum_to_str[AWS_HTTP_METHOD_CONNECT] = aws_http_method_connect;
+
+ s_init_str_to_enum_hash_table(
+ &s_method_str_to_enum,
+ alloc,
+ s_method_enum_to_str,
+ AWS_HTTP_METHOD_UNKNOWN + 1,
+ AWS_HTTP_METHOD_COUNT,
+ false /* DO NOT ignore case of method */);
+}
+
+static void s_methods_clean_up(void) {
+ aws_hash_table_clean_up(&s_method_str_to_enum);
+}
+
+enum aws_http_method aws_http_str_to_method(struct aws_byte_cursor cursor) {
+ int method = s_find_in_str_to_enum_hash_table(&s_method_str_to_enum, &cursor);
+ if (method >= 0) {
+ return (enum aws_http_method)method;
+ }
+ return AWS_HTTP_METHOD_UNKNOWN;
+}
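+
+/* Minimal illustrative sketch (hypothetical, not called anywhere): method
+ * lookup is case-sensitive, so only the canonical spelling matches. Assumes
+ * aws_http_library_init() has populated s_method_str_to_enum. */
+static void s_example_method_lookup(void) {
+    enum aws_http_method get = aws_http_str_to_method(aws_byte_cursor_from_c_str("GET"));     /* AWS_HTTP_METHOD_GET */
+    enum aws_http_method unknown = aws_http_str_to_method(aws_byte_cursor_from_c_str("get")); /* AWS_HTTP_METHOD_UNKNOWN */
+    (void)get;
+    (void)unknown;
+}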
+
+/* VERSIONS */
+static struct aws_byte_cursor s_version_enum_to_str[AWS_HTTP_VERSION_COUNT]; /* for enum -> string lookup */
+
+static void s_versions_init(struct aws_allocator *alloc) {
+ (void)alloc;
+ s_version_enum_to_str[AWS_HTTP_VERSION_UNKNOWN] = aws_byte_cursor_from_c_str("Unknown");
+ s_version_enum_to_str[AWS_HTTP_VERSION_1_0] = aws_byte_cursor_from_c_str("HTTP/1.0");
+ s_version_enum_to_str[AWS_HTTP_VERSION_1_1] = aws_byte_cursor_from_c_str("HTTP/1.1");
+ s_version_enum_to_str[AWS_HTTP_VERSION_2] = aws_byte_cursor_from_c_str("HTTP/2");
+}
+
+static void s_versions_clean_up(void) {}
+
+struct aws_byte_cursor aws_http_version_to_str(enum aws_http_version version) {
+ if ((int)version < AWS_HTTP_VERSION_UNKNOWN || (int)version >= AWS_HTTP_VERSION_COUNT) {
+ version = AWS_HTTP_VERSION_UNKNOWN;
+ }
+
+ return s_version_enum_to_str[version];
+}
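+
+/* Minimal illustrative sketch (hypothetical, not called anywhere): the
+ * returned cursor is not NUL-terminated, so print it with a length-limited
+ * format. Out-of-range values fall back to the "Unknown" entry above. */
+static void s_example_log_version(enum aws_http_version version) {
+    struct aws_byte_cursor name = aws_http_version_to_str(version);
+    AWS_LOGF_DEBUG(AWS_LS_HTTP_GENERAL, "protocol in use: %.*s", (int)name.len, name.ptr);
+}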
+
+/* HEADERS */
+static struct aws_hash_table s_header_str_to_enum; /* for case-insensitive string -> enum lookup */
+static struct aws_hash_table s_lowercase_header_str_to_enum; /* for case-sensitive string -> enum lookup */
+static struct aws_byte_cursor s_header_enum_to_str[AWS_HTTP_HEADER_COUNT]; /* for enum -> string lookup */
+
+static void s_headers_init(struct aws_allocator *alloc) {
+ s_header_enum_to_str[AWS_HTTP_HEADER_METHOD] = aws_byte_cursor_from_c_str(":method");
+ s_header_enum_to_str[AWS_HTTP_HEADER_SCHEME] = aws_byte_cursor_from_c_str(":scheme");
+ s_header_enum_to_str[AWS_HTTP_HEADER_AUTHORITY] = aws_byte_cursor_from_c_str(":authority");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PATH] = aws_byte_cursor_from_c_str(":path");
+ s_header_enum_to_str[AWS_HTTP_HEADER_STATUS] = aws_byte_cursor_from_c_str(":status");
+ s_header_enum_to_str[AWS_HTTP_HEADER_COOKIE] = aws_byte_cursor_from_c_str("cookie");
+ s_header_enum_to_str[AWS_HTTP_HEADER_SET_COOKIE] = aws_byte_cursor_from_c_str("set-cookie");
+ s_header_enum_to_str[AWS_HTTP_HEADER_HOST] = aws_byte_cursor_from_c_str("host");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONNECTION] = aws_byte_cursor_from_c_str("connection");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_LENGTH] = aws_byte_cursor_from_c_str("content-length");
+ s_header_enum_to_str[AWS_HTTP_HEADER_EXPECT] = aws_byte_cursor_from_c_str("expect");
+ s_header_enum_to_str[AWS_HTTP_HEADER_TRANSFER_ENCODING] = aws_byte_cursor_from_c_str("transfer-encoding");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CACHE_CONTROL] = aws_byte_cursor_from_c_str("cache-control");
+ s_header_enum_to_str[AWS_HTTP_HEADER_MAX_FORWARDS] = aws_byte_cursor_from_c_str("max-forwards");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PRAGMA] = aws_byte_cursor_from_c_str("pragma");
+ s_header_enum_to_str[AWS_HTTP_HEADER_RANGE] = aws_byte_cursor_from_c_str("range");
+ s_header_enum_to_str[AWS_HTTP_HEADER_TE] = aws_byte_cursor_from_c_str("te");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_ENCODING] = aws_byte_cursor_from_c_str("content-encoding");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_TYPE] = aws_byte_cursor_from_c_str("content-type");
+ s_header_enum_to_str[AWS_HTTP_HEADER_CONTENT_RANGE] = aws_byte_cursor_from_c_str("content-range");
+ s_header_enum_to_str[AWS_HTTP_HEADER_TRAILER] = aws_byte_cursor_from_c_str("trailer");
+ s_header_enum_to_str[AWS_HTTP_HEADER_WWW_AUTHENTICATE] = aws_byte_cursor_from_c_str("www-authenticate");
+ s_header_enum_to_str[AWS_HTTP_HEADER_AUTHORIZATION] = aws_byte_cursor_from_c_str("authorization");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_AUTHENTICATE] = aws_byte_cursor_from_c_str("proxy-authenticate");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_AUTHORIZATION] = aws_byte_cursor_from_c_str("proxy-authorization");
+ s_header_enum_to_str[AWS_HTTP_HEADER_AGE] = aws_byte_cursor_from_c_str("age");
+ s_header_enum_to_str[AWS_HTTP_HEADER_EXPIRES] = aws_byte_cursor_from_c_str("expires");
+ s_header_enum_to_str[AWS_HTTP_HEADER_DATE] = aws_byte_cursor_from_c_str("date");
+ s_header_enum_to_str[AWS_HTTP_HEADER_LOCATION] = aws_byte_cursor_from_c_str("location");
+ s_header_enum_to_str[AWS_HTTP_HEADER_RETRY_AFTER] = aws_byte_cursor_from_c_str("retry-after");
+ s_header_enum_to_str[AWS_HTTP_HEADER_VARY] = aws_byte_cursor_from_c_str("vary");
+ s_header_enum_to_str[AWS_HTTP_HEADER_WARNING] = aws_byte_cursor_from_c_str("warning");
+ s_header_enum_to_str[AWS_HTTP_HEADER_UPGRADE] = aws_byte_cursor_from_c_str("upgrade");
+ s_header_enum_to_str[AWS_HTTP_HEADER_KEEP_ALIVE] = aws_byte_cursor_from_c_str("keep-alive");
+ s_header_enum_to_str[AWS_HTTP_HEADER_PROXY_CONNECTION] = aws_byte_cursor_from_c_str("proxy-connection");
+
+ s_init_str_to_enum_hash_table(
+ &s_header_str_to_enum,
+ alloc,
+ s_header_enum_to_str,
+ AWS_HTTP_HEADER_UNKNOWN + 1,
+ AWS_HTTP_HEADER_COUNT,
+ true /* ignore case */);
+
+ s_init_str_to_enum_hash_table(
+ &s_lowercase_header_str_to_enum,
+ alloc,
+ s_header_enum_to_str,
+ AWS_HTTP_HEADER_UNKNOWN + 1,
+ AWS_HTTP_HEADER_COUNT,
+ false /* ignore case */);
+}
+
+static void s_headers_clean_up(void) {
+ aws_hash_table_clean_up(&s_header_str_to_enum);
+ aws_hash_table_clean_up(&s_lowercase_header_str_to_enum);
+}
+
+enum aws_http_header_name aws_http_str_to_header_name(struct aws_byte_cursor cursor) {
+ int header = s_find_in_str_to_enum_hash_table(&s_header_str_to_enum, &cursor);
+ if (header >= 0) {
+ return (enum aws_http_header_name)header;
+ }
+ return AWS_HTTP_HEADER_UNKNOWN;
+}
+
+enum aws_http_header_name aws_http_lowercase_str_to_header_name(struct aws_byte_cursor cursor) {
+ int header = s_find_in_str_to_enum_hash_table(&s_lowercase_header_str_to_enum, &cursor);
+ if (header >= 0) {
+ return (enum aws_http_header_name)header;
+ }
+ return AWS_HTTP_HEADER_UNKNOWN;
+}
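+
+/* Minimal illustrative sketch (hypothetical, not called anywhere): the first
+ * lookup above is case-insensitive, the second does an exact match and
+ * expects an already-lowercased name. */
+static void s_example_header_name_lookup(void) {
+    struct aws_byte_cursor mixed_case = aws_byte_cursor_from_c_str("Content-Length");
+    enum aws_http_header_name a = aws_http_str_to_header_name(mixed_case);           /* AWS_HTTP_HEADER_CONTENT_LENGTH */
+    enum aws_http_header_name b = aws_http_lowercase_str_to_header_name(mixed_case); /* AWS_HTTP_HEADER_UNKNOWN */
+    (void)a;
+    (void)b;
+}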
+
+/* STATUS */
+const char *aws_http_status_text(int status_code) {
+ /**
+ * Data from Internet Assigned Numbers Authority (IANA):
+ * https://www.iana.org/assignments/http-status-codes/http-status-codes.txt
+ */
+ switch (status_code) {
+ case AWS_HTTP_STATUS_CODE_100_CONTINUE:
+ return "Continue";
+ case AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS:
+ return "Switching Protocols";
+ case AWS_HTTP_STATUS_CODE_102_PROCESSING:
+ return "Processing";
+ case AWS_HTTP_STATUS_CODE_103_EARLY_HINTS:
+ return "Early Hints";
+ case AWS_HTTP_STATUS_CODE_200_OK:
+ return "OK";
+ case AWS_HTTP_STATUS_CODE_201_CREATED:
+ return "Created";
+ case AWS_HTTP_STATUS_CODE_202_ACCEPTED:
+ return "Accepted";
+ case AWS_HTTP_STATUS_CODE_203_NON_AUTHORITATIVE_INFORMATION:
+ return "Non-Authoritative Information";
+ case AWS_HTTP_STATUS_CODE_204_NO_CONTENT:
+ return "No Content";
+ case AWS_HTTP_STATUS_CODE_205_RESET_CONTENT:
+ return "Reset Content";
+ case AWS_HTTP_STATUS_CODE_206_PARTIAL_CONTENT:
+ return "Partial Content";
+ case AWS_HTTP_STATUS_CODE_207_MULTI_STATUS:
+ return "Multi-Status";
+ case AWS_HTTP_STATUS_CODE_208_ALREADY_REPORTED:
+ return "Already Reported";
+ case AWS_HTTP_STATUS_CODE_226_IM_USED:
+ return "IM Used";
+ case AWS_HTTP_STATUS_CODE_300_MULTIPLE_CHOICES:
+ return "Multiple Choices";
+ case AWS_HTTP_STATUS_CODE_301_MOVED_PERMANENTLY:
+ return "Moved Permanently";
+ case AWS_HTTP_STATUS_CODE_302_FOUND:
+ return "Found";
+ case AWS_HTTP_STATUS_CODE_303_SEE_OTHER:
+ return "See Other";
+ case AWS_HTTP_STATUS_CODE_304_NOT_MODIFIED:
+ return "Not Modified";
+ case AWS_HTTP_STATUS_CODE_305_USE_PROXY:
+ return "Use Proxy";
+ case AWS_HTTP_STATUS_CODE_307_TEMPORARY_REDIRECT:
+ return "Temporary Redirect";
+ case AWS_HTTP_STATUS_CODE_308_PERMANENT_REDIRECT:
+ return "Permanent Redirect";
+ case AWS_HTTP_STATUS_CODE_400_BAD_REQUEST:
+ return "Bad Request";
+ case AWS_HTTP_STATUS_CODE_401_UNAUTHORIZED:
+ return "Unauthorized";
+ case AWS_HTTP_STATUS_CODE_402_PAYMENT_REQUIRED:
+ return "Payment Required";
+ case AWS_HTTP_STATUS_CODE_403_FORBIDDEN:
+ return "Forbidden";
+ case AWS_HTTP_STATUS_CODE_404_NOT_FOUND:
+ return "Not Found";
+ case AWS_HTTP_STATUS_CODE_405_METHOD_NOT_ALLOWED:
+ return "Method Not Allowed";
+ case AWS_HTTP_STATUS_CODE_406_NOT_ACCEPTABLE:
+ return "Not Acceptable";
+ case AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED:
+ return "Proxy Authentication Required";
+ case AWS_HTTP_STATUS_CODE_408_REQUEST_TIMEOUT:
+ return "Request Timeout";
+ case AWS_HTTP_STATUS_CODE_409_CONFLICT:
+ return "Conflict";
+ case AWS_HTTP_STATUS_CODE_410_GONE:
+ return "Gone";
+ case AWS_HTTP_STATUS_CODE_411_LENGTH_REQUIRED:
+ return "Length Required";
+ case AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED:
+ return "Precondition Failed";
+ case AWS_HTTP_STATUS_CODE_413_REQUEST_ENTITY_TOO_LARGE:
+ return "Payload Too Large";
+ case AWS_HTTP_STATUS_CODE_414_REQUEST_URI_TOO_LONG:
+ return "URI Too Long";
+ case AWS_HTTP_STATUS_CODE_415_UNSUPPORTED_MEDIA_TYPE:
+ return "Unsupported Media Type";
+ case AWS_HTTP_STATUS_CODE_416_REQUESTED_RANGE_NOT_SATISFIABLE:
+ return "Range Not Satisfiable";
+ case AWS_HTTP_STATUS_CODE_417_EXPECTATION_FAILED:
+ return "Expectation Failed";
+ case AWS_HTTP_STATUS_CODE_421_MISDIRECTED_REQUEST:
+ return "Misdirected Request";
+ case AWS_HTTP_STATUS_CODE_422_UNPROCESSABLE_ENTITY:
+ return "Unprocessable Entity";
+ case AWS_HTTP_STATUS_CODE_423_LOCKED:
+ return "Locked";
+ case AWS_HTTP_STATUS_CODE_424_FAILED_DEPENDENCY:
+ return "Failed Dependency";
+ case AWS_HTTP_STATUS_CODE_425_TOO_EARLY:
+ return "Too Early";
+ case AWS_HTTP_STATUS_CODE_426_UPGRADE_REQUIRED:
+ return "Upgrade Required";
+ case AWS_HTTP_STATUS_CODE_428_PRECONDITION_REQUIRED:
+ return "Precondition Required";
+ case AWS_HTTP_STATUS_CODE_429_TOO_MANY_REQUESTS:
+ return "Too Many Requests";
+ case AWS_HTTP_STATUS_CODE_431_REQUEST_HEADER_FIELDS_TOO_LARGE:
+ return "Request Header Fields Too Large";
+ case AWS_HTTP_STATUS_CODE_451_UNAVAILABLE_FOR_LEGAL_REASON:
+ return "Unavailable For Legal Reasons";
+ case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR:
+ return "Internal Server Error";
+ case AWS_HTTP_STATUS_CODE_501_NOT_IMPLEMENTED:
+ return "Not Implemented";
+ case AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY:
+ return "Bad Gateway";
+ case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE:
+ return "Service Unavailable";
+ case AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT:
+ return "Gateway Timeout";
+ case AWS_HTTP_STATUS_CODE_505_HTTP_VERSION_NOT_SUPPORTED:
+ return "HTTP Version Not Supported";
+ case AWS_HTTP_STATUS_CODE_506_VARIANT_ALSO_NEGOTIATES:
+ return "Variant Also Negotiates";
+ case AWS_HTTP_STATUS_CODE_507_INSUFFICIENT_STORAGE:
+ return "Insufficient Storage";
+ case AWS_HTTP_STATUS_CODE_508_LOOP_DETECTED:
+ return "Loop Detected";
+ case AWS_HTTP_STATUS_CODE_510_NOT_EXTENDED:
+ return "Not Extended";
+ case AWS_HTTP_STATUS_CODE_511_NETWORK_AUTHENTICATION_REQUIRED:
+ return "Network Authentication Required";
+ default:
+ return "";
+ }
+}
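+
+/* Minimal illustrative sketch (hypothetical, not called anywhere): known
+ * codes resolve to their IANA reason phrase, anything else to "". The
+ * AWS_HTTP_STATUS_CODE_* values correspond to the numeric codes. */
+static void s_example_status_text(void) {
+    const char *ok_text = aws_http_status_text(AWS_HTTP_STATUS_CODE_200_OK); /* "OK" */
+    const char *unknown_text = aws_http_status_text(799);                    /* "" */
+    (void)ok_text;
+    (void)unknown_text;
+}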
+
+static bool s_library_initialized = false;
+void aws_http_library_init(struct aws_allocator *alloc) {
+ if (s_library_initialized) {
+ return;
+ }
+ s_library_initialized = true;
+
+ aws_io_library_init(alloc);
+ aws_compression_library_init(alloc);
+ aws_register_error_info(&s_error_list);
+ aws_register_log_subject_info_list(&s_log_subject_list);
+ s_methods_init(alloc);
+ s_headers_init(alloc);
+ s_versions_init(alloc);
+ aws_hpack_static_table_init(alloc);
+}
+
+void aws_http_library_clean_up(void) {
+ if (!s_library_initialized) {
+ return;
+ }
+ s_library_initialized = false;
+
+ aws_thread_join_all_managed();
+ aws_unregister_error_info(&s_error_list);
+ aws_unregister_log_subject_info_list(&s_log_subject_list);
+ s_methods_clean_up();
+ s_headers_clean_up();
+ s_versions_clean_up();
+ aws_hpack_static_table_clean_up();
+ aws_compression_library_clean_up();
+ aws_io_library_clean_up();
+}
+
+void aws_http_fatal_assert_library_initialized(void) {
+ if (!s_library_initialized) {
+ AWS_LOGF_FATAL(
+ AWS_LS_HTTP_GENERAL,
+ "aws_http_library_init() must be called before using any functionality in aws-c-http.");
+
+ AWS_FATAL_ASSERT(s_library_initialized);
+ }
+}
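+
+/*
+ * Minimal illustrative sketch (hypothetical, not called anywhere): typical
+ * library lifecycle from an application's point of view. Assumes
+ * aws_default_allocator() from aws-c-common. Init is guarded by
+ * s_library_initialized, so repeated calls are no-ops.
+ */
+static void s_example_library_lifecycle(void) {
+    struct aws_allocator *alloc = aws_default_allocator();
+
+    /* Also initializes aws-c-io and aws-c-compression, and registers the
+     * error/log-subject tables defined above. */
+    aws_http_library_init(alloc);
+
+    /* ... create connections / streams here ... */
+
+    /* Joins managed threads and unregisters the error/log-subject tables. */
+    aws_http_library_clean_up();
+}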
+
+const struct aws_byte_cursor aws_http_method_get = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("GET");
+const struct aws_byte_cursor aws_http_method_head = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD");
+const struct aws_byte_cursor aws_http_method_post = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST");
+const struct aws_byte_cursor aws_http_method_put = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("PUT");
+const struct aws_byte_cursor aws_http_method_delete = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE");
+const struct aws_byte_cursor aws_http_method_connect = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CONNECT");
+const struct aws_byte_cursor aws_http_method_options = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("OPTIONS");
+
+const struct aws_byte_cursor aws_http_header_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":method");
+const struct aws_byte_cursor aws_http_header_scheme = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":scheme");
+const struct aws_byte_cursor aws_http_header_authority = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":authority");
+const struct aws_byte_cursor aws_http_header_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":path");
+const struct aws_byte_cursor aws_http_header_status = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":status");
+
+const struct aws_byte_cursor aws_http_scheme_http = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http");
+const struct aws_byte_cursor aws_http_scheme_https = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https");
diff --git a/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c b/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
new file mode 100644
index 0000000000..fb23199376
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/http2_stream_manager.c
@@ -0,0 +1,1238 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/clock.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/logging.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+
+#include <aws/http/http2_stream_manager.h>
+#include <aws/http/private/http2_stream_manager_impl.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/http/status_code.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* Apple toolchains such as Xcode and SwiftPM define the DEBUG symbol. Undef it here so we can actually use the token. */
+#undef DEBUG
+
+#define STREAM_MANAGER_LOGF(level, stream_manager, text, ...) \
+ AWS_LOGF_##level(AWS_LS_HTTP_STREAM_MANAGER, "id=%p: " text, (void *)(stream_manager), __VA_ARGS__)
+#define STREAM_MANAGER_LOG(level, stream_manager, text) STREAM_MANAGER_LOGF(level, stream_manager, "%s", text)
+
+/* 3 seconds */
+static const size_t s_default_ping_timeout_ms = 3000;
+
+static void s_stream_manager_start_destroy(struct aws_http2_stream_manager *stream_manager);
+static void s_aws_http2_stream_manager_build_transaction_synced(struct aws_http2_stream_management_transaction *work);
+static void s_aws_http2_stream_manager_execute_transaction(struct aws_http2_stream_management_transaction *work);
+
+static struct aws_h2_sm_pending_stream_acquisition *s_new_pending_stream_acquisition(
+ struct aws_allocator *allocator,
+ const struct aws_http_make_request_options *options,
+ aws_http2_stream_manager_on_stream_acquired_fn *callback,
+ void *user_data) {
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_h2_sm_pending_stream_acquisition));
+
+ /* Copy the options and keep the underlying message alive */
+ pending_stream_acquisition->options = *options;
+ pending_stream_acquisition->request = options->request;
+ aws_http_message_acquire(pending_stream_acquisition->request);
+ pending_stream_acquisition->callback = callback;
+ pending_stream_acquisition->user_data = user_data;
+ pending_stream_acquisition->allocator = allocator;
+ return pending_stream_acquisition;
+}
+
+static void s_pending_stream_acquisition_destroy(
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition) {
+ if (pending_stream_acquisition == NULL) {
+ return;
+ }
+ if (pending_stream_acquisition->request) {
+ aws_http_message_release(pending_stream_acquisition->request);
+ }
+ aws_mem_release(pending_stream_acquisition->allocator, pending_stream_acquisition);
+}
+
+static void s_lock_synced_data(struct aws_http2_stream_manager *stream_manager) {
+ int err = aws_mutex_lock(&stream_manager->synced_data.lock);
+ AWS_ASSERT(!err && "lock failed");
+ (void)err;
+}
+
+static void s_unlock_synced_data(struct aws_http2_stream_manager *stream_manager) {
+ int err = aws_mutex_unlock(&stream_manager->synced_data.lock);
+ AWS_ASSERT(!err && "unlock failed");
+ (void)err;
+}
+
+static void s_sm_log_stats_synced(struct aws_http2_stream_manager *stream_manager) {
+ STREAM_MANAGER_LOGF(
+ TRACE,
+ stream_manager,
+ "Stream manager internal counts status: "
+ "connection acquiring=%zu, streams opening=%zu, pending make request count=%zu, pending acquisition count=%zu, "
+ "holding connections count=%zu",
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING],
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM],
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_MAKE_REQUESTS],
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION],
+ stream_manager->synced_data.holding_connections_count);
+}
+
+/* All count acquires and releases must be invoked while holding the lock */
+static void s_sm_count_increase_synced(
+ struct aws_http2_stream_manager *stream_manager,
+ enum aws_sm_count_type count_type,
+ size_t num) {
+ stream_manager->synced_data.internal_refcount_stats[count_type] += num;
+ for (size_t i = 0; i < num; i++) {
+ aws_ref_count_acquire(&stream_manager->internal_ref_count);
+ }
+}
+
+static void s_sm_count_decrease_synced(
+ struct aws_http2_stream_manager *stream_manager,
+ enum aws_sm_count_type count_type,
+ size_t num) {
+ stream_manager->synced_data.internal_refcount_stats[count_type] -= num;
+ for (size_t i = 0; i < num; i++) {
+ aws_ref_count_release(&stream_manager->internal_ref_count);
+ }
+}
+
+static void s_aws_stream_management_transaction_init(
+ struct aws_http2_stream_management_transaction *work,
+ struct aws_http2_stream_manager *stream_manager) {
+ AWS_ZERO_STRUCT(*work);
+ aws_linked_list_init(&work->pending_make_requests);
+ work->stream_manager = stream_manager;
+ work->allocator = stream_manager->allocator;
+ aws_ref_count_acquire(&stream_manager->internal_ref_count);
+}
+
+static void s_aws_stream_management_transaction_clean_up(struct aws_http2_stream_management_transaction *work) {
+ (void)work;
+ AWS_ASSERT(aws_linked_list_empty(&work->pending_make_requests));
+ aws_ref_count_release(&work->stream_manager->internal_ref_count);
+}
+
+static struct aws_h2_sm_connection *s_get_best_sm_connection_from_set(struct aws_random_access_set *set) {
+    /* Use the "best of two" (power of two choices) algorithm */
+ int errored = AWS_ERROR_SUCCESS;
+ struct aws_h2_sm_connection *sm_connection_a = NULL;
+ errored = aws_random_access_set_random_get_ptr(set, (void **)&sm_connection_a);
+ struct aws_h2_sm_connection *sm_connection_b = NULL;
+ errored |= aws_random_access_set_random_get_ptr(set, (void **)&sm_connection_b);
+ struct aws_h2_sm_connection *chosen_connection =
+ sm_connection_a->num_streams_assigned > sm_connection_b->num_streams_assigned ? sm_connection_b
+ : sm_connection_a;
+    return errored == AWS_ERROR_SUCCESS ? chosen_connection : NULL;
+}
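+
+/*
+ * Minimal illustrative sketch (hypothetical, not called anywhere): the helper
+ * above uses the classic "power of two choices" trick -- sample two members
+ * of the set at random and keep the one with fewer streams assigned, which
+ * keeps connections evenly loaded without scanning the whole set. The toy
+ * below shows just the comparison over two pre-sampled candidates.
+ */
+struct s_example_candidate {
+    uint32_t num_streams_assigned;
+};
+
+static struct s_example_candidate *s_example_best_of_two(
+    struct s_example_candidate *a,
+    struct s_example_candidate *b) {
+    /* Ties go to the first sample, mirroring the comparison above. */
+    return a->num_streams_assigned > b->num_streams_assigned ? b : a;
+}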
+
+/* helper function for building the transaction: Try to assign connection for a pending stream acquisition */
+/* *_synced should only be called with LOCK HELD or from another synced function */
+static void s_sm_try_assign_connection_to_pending_stream_acquisition_synced(
+ struct aws_http2_stream_manager *stream_manager,
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition) {
+
+ AWS_ASSERT(pending_stream_acquisition->sm_connection == NULL);
+ int errored = 0;
+ if (aws_random_access_set_get_size(&stream_manager->synced_data.ideal_available_set)) {
+ /**
+ * Try assigning to connection from ideal set
+ */
+ struct aws_h2_sm_connection *chosen_connection =
+ s_get_best_sm_connection_from_set(&stream_manager->synced_data.ideal_available_set);
+ AWS_ASSERT(chosen_connection);
+ pending_stream_acquisition->sm_connection = chosen_connection;
+ chosen_connection->num_streams_assigned++;
+
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "Picking connection:%p for acquisition:%p. Streams assigned to the connection=%" PRIu32 "",
+ (void *)chosen_connection->connection,
+ (void *)pending_stream_acquisition,
+ chosen_connection->num_streams_assigned);
+ /* Check if connection is still available or ideal, and move it if it's not */
+ if (chosen_connection->num_streams_assigned >= chosen_connection->max_concurrent_streams) {
+            /* It is no longer available for new streams; remove it from the set, but it stays alive (streams
+             * already created keep it alive) */
+ chosen_connection->state = AWS_H2SMCST_FULL;
+ errored |=
+ aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, chosen_connection);
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "connection:%p reaches max concurrent streams limits. "
+ "Connection max limits=%" PRIu32 ". Moving it out of available connections.",
+ (void *)chosen_connection->connection,
+ chosen_connection->max_concurrent_streams);
+ } else if (chosen_connection->num_streams_assigned >= stream_manager->ideal_concurrent_streams_per_connection) {
+            /* It meets the ideal limit, but is still available for new streams; move it to the nonideal available set */
+ errored |=
+ aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, chosen_connection);
+ bool added = false;
+ errored |= aws_random_access_set_add(
+ &stream_manager->synced_data.nonideal_available_set, chosen_connection, &added);
+ errored |= !added;
+ chosen_connection->state = AWS_H2SMCST_NEARLY_FULL;
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "connection:%p reaches ideal concurrent streams limits. Ideal limits=%zu. Moving it to nonlimited set.",
+ (void *)chosen_connection->connection,
+ stream_manager->ideal_concurrent_streams_per_connection);
+ }
+ } else if (stream_manager->synced_data.holding_connections_count == stream_manager->max_connections) {
+ /**
+ * Try assigning to connection from nonideal available set.
+ *
+ * Note that we do not assign to nonideal connections until we're holding all the connections we can ever
+ * possibly get. This way, we don't overfill the first connections we get our hands on.
+ */
+
+ if (aws_random_access_set_get_size(&stream_manager->synced_data.nonideal_available_set)) {
+ struct aws_h2_sm_connection *chosen_connection =
+ s_get_best_sm_connection_from_set(&stream_manager->synced_data.nonideal_available_set);
+ AWS_ASSERT(chosen_connection);
+ pending_stream_acquisition->sm_connection = chosen_connection;
+ chosen_connection->num_streams_assigned++;
+
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "Picking connection:%p for acquisition:%p. Streams assigned to the connection=%" PRIu32 "",
+ (void *)chosen_connection->connection,
+ (void *)pending_stream_acquisition,
+ chosen_connection->num_streams_assigned);
+
+ if (chosen_connection->num_streams_assigned >= chosen_connection->max_concurrent_streams) {
+                /* It is no longer available for new streams; remove it from the set, but it stays alive (streams
+                 * already created keep it alive) */
+ chosen_connection->state = AWS_H2SMCST_FULL;
+ errored |= aws_random_access_set_remove(
+ &stream_manager->synced_data.nonideal_available_set, chosen_connection);
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "connection %p reaches max concurrent streams limits. "
+ "Connection max limits=%" PRIu32 ". Moving it out of available connections.",
+ (void *)chosen_connection->connection,
+ chosen_connection->max_concurrent_streams);
+ }
+ }
+ }
+ AWS_ASSERT(errored == 0 && "random access set went wrong");
+ (void)errored;
+}
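+
+/*
+ * Descriptive recap of the bucketing logic above: a connection starts in
+ * ideal_available_set (AWS_H2SMCST_IDEAL). Once its assigned streams reach
+ * ideal_concurrent_streams_per_connection it moves to nonideal_available_set
+ * (AWS_H2SMCST_NEARLY_FULL); once they reach its max_concurrent_streams it
+ * leaves both sets (AWS_H2SMCST_FULL) but stays alive until its streams
+ * complete. Connections in the nonideal set are only assigned new streams
+ * after all max_connections are already held, and
+ * s_update_sm_connection_set_on_stream_finishes_synced() moves connections
+ * back down as their streams finish.
+ */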
+
+/* NOTE: never invoke with lock held */
+static void s_finish_pending_stream_acquisitions_list_helper(
+ struct aws_http2_stream_manager *stream_manager,
+ struct aws_linked_list *pending_stream_acquisitions,
+ int error_code) {
+ while (!aws_linked_list_empty(pending_stream_acquisitions)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(pending_stream_acquisitions);
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+ AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node);
+ /* Make sure no connection assigned. */
+ AWS_ASSERT(pending_stream_acquisition->sm_connection == NULL);
+ if (pending_stream_acquisition->callback) {
+ pending_stream_acquisition->callback(NULL, error_code, pending_stream_acquisition->user_data);
+ }
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "acquisition:%p failed with error: %d(%s)",
+ (void *)pending_stream_acquisition,
+ error_code,
+ aws_error_str(error_code));
+ s_pending_stream_acquisition_destroy(pending_stream_acquisition);
+ }
+}
+
+/* This is scheduled to run on a separate event loop to finish pending acquisitions asynchronously */
+static void s_finish_pending_stream_acquisitions_task(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)status;
+ struct aws_http2_stream_manager *stream_manager = arg;
+ STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager final task runs");
+ struct aws_http2_stream_management_transaction work;
+ struct aws_linked_list pending_stream_acquisitions;
+ aws_linked_list_init(&pending_stream_acquisitions);
+ s_aws_stream_management_transaction_init(&work, stream_manager);
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream_manager);
+ AWS_ASSERT(stream_manager->synced_data.state == AWS_H2SMST_DESTROYING);
+        /* Swap the list out so callbacks are not invoked with the lock held. */
+ aws_linked_list_swap_contents(
+ &pending_stream_acquisitions, &stream_manager->synced_data.pending_stream_acquisitions);
+        /* The acquisitions have been swapped out and will complete outside the lock, so drop their pending count now */
+ s_sm_count_decrease_synced(
+ stream_manager,
+ AWS_SMCT_PENDING_ACQUISITION,
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]);
+ s_aws_http2_stream_manager_build_transaction_synced(&work);
+ s_unlock_synced_data(stream_manager);
+ } /* END CRITICAL SECTION */
+ s_finish_pending_stream_acquisitions_list_helper(
+ stream_manager, &pending_stream_acquisitions, AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN);
+ aws_mem_release(stream_manager->allocator, task);
+ s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+/* helper function for building the transaction: how many new connections we should request */
+static void s_check_new_connections_needed_synced(struct aws_http2_stream_management_transaction *work) {
+ struct aws_http2_stream_manager *stream_manager = work->stream_manager;
+    /* The ideal number of new connections needed to fit all the pending stream acquisitions */
+ size_t ideal_new_connection_count =
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] /
+ stream_manager->ideal_concurrent_streams_per_connection;
+ /* Rounding up */
+ if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] %
+ stream_manager->ideal_concurrent_streams_per_connection) {
+ ++ideal_new_connection_count;
+ }
+    /* Subtract the connections already being acquired from the ideal count, so in-flight acquisitions are not double-counted */
+ work->new_connections = aws_sub_size_saturating(
+ ideal_new_connection_count,
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]);
+ /* The real number we can have is the min of how many more we can still have and how many we need */
+ size_t new_connections_available =
+ stream_manager->max_connections - stream_manager->synced_data.holding_connections_count -
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING];
+ work->new_connections = aws_min_size(new_connections_available, work->new_connections);
+    /* Update the number of connections we are acquiring */
+ s_sm_count_increase_synced(stream_manager, AWS_SMCT_CONNECTIONS_ACQUIRING, work->new_connections);
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "number of acquisition that waiting for connections to use=%zu. connection acquiring=%zu, connection held=%zu, "
+ "max connection=%zu",
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION],
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING],
+ stream_manager->synced_data.holding_connections_count,
+ stream_manager->max_connections);
+}
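+
+/*
+ * Minimal illustrative sketch (hypothetical helper, not called anywhere):
+ * the arithmetic above, isolated. For example, with 25 pending acquisitions,
+ * an ideal of 10 streams per connection, 1 connection already being acquired,
+ * 2 connections held and a max of 5: ceil(25/10) = 3, minus 1 in flight = 2,
+ * capped by 5 - 2 - 1 = 2 remaining slots, so 2 new connections are requested.
+ */
+static size_t s_example_new_connections_needed(
+    size_t pending_acquisitions,
+    size_t ideal_streams_per_connection,
+    size_t connections_acquiring,
+    size_t connections_held,
+    size_t max_connections) {
+
+    size_t ideal = pending_acquisitions / ideal_streams_per_connection;
+    if (pending_acquisitions % ideal_streams_per_connection) {
+        ++ideal; /* round up */
+    }
+    size_t needed = aws_sub_size_saturating(ideal, connections_acquiring);
+    size_t available = max_connections - connections_held - connections_acquiring;
+    return aws_min_size(available, needed);
+}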
+
+/**
+ * It can be invoked from:
+ * - The user releases the last refcount of the stream manager
+ * - The user acquires a stream from the stream manager
+ * - The connection-acquired callback from the connection manager
+ * - The stream-completed callback from HTTP
+ */
+/* *_synced should only be called with LOCK HELD or from another synced function */
+static void s_aws_http2_stream_manager_build_transaction_synced(struct aws_http2_stream_management_transaction *work) {
+ struct aws_http2_stream_manager *stream_manager = work->stream_manager;
+ if (stream_manager->synced_data.state == AWS_H2SMST_READY) {
+
+        /* Step 1: Assign connections to pending stream acquisitions */
+ while (!aws_linked_list_empty(&stream_manager->synced_data.pending_stream_acquisitions)) {
+ struct aws_linked_list_node *node =
+ aws_linked_list_pop_front(&stream_manager->synced_data.pending_stream_acquisitions);
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+ AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node);
+ s_sm_try_assign_connection_to_pending_stream_acquisition_synced(stream_manager, pending_stream_acquisition);
+ if (pending_stream_acquisition->sm_connection == NULL) {
+ /* Cannot find any connection, push it back to the front and break the loop */
+ aws_linked_list_push_front(&stream_manager->synced_data.pending_stream_acquisitions, node);
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "acquisition:%p cannot find any connection to use.",
+ (void *)pending_stream_acquisition);
+ break;
+ } else {
+ /* found connection for the request. Move it to pending make requests and update the count */
+ aws_linked_list_push_back(&work->pending_make_requests, node);
+ s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, 1);
+ s_sm_count_increase_synced(stream_manager, AWS_SMCT_PENDING_MAKE_REQUESTS, 1);
+ }
+ }
+
+ /* Step 2: Check for new connections needed */
+ if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]) {
+ s_check_new_connections_needed_synced(work);
+ }
+
+ } else {
+ /* Stream manager is shutting down */
+ if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] &&
+ !stream_manager->synced_data.finish_pending_stream_acquisitions_task_scheduled) {
+            /* Schedule a task to finish the pending acquisitions if one is needed and not already scheduled */
+ stream_manager->finish_pending_stream_acquisitions_task_event_loop =
+ aws_event_loop_group_get_next_loop(stream_manager->bootstrap->event_loop_group);
+ struct aws_task *finish_pending_stream_acquisitions_task =
+ aws_mem_calloc(stream_manager->allocator, 1, sizeof(struct aws_task));
+ aws_task_init(
+ finish_pending_stream_acquisitions_task,
+ s_finish_pending_stream_acquisitions_task,
+ stream_manager,
+ "sm_finish_pending_stream_acquisitions");
+ aws_event_loop_schedule_task_now(
+ stream_manager->finish_pending_stream_acquisitions_task_event_loop,
+ finish_pending_stream_acquisitions_task);
+ stream_manager->synced_data.finish_pending_stream_acquisitions_task_scheduled = true;
+ }
+ }
+ s_sm_log_stats_synced(stream_manager);
+}
+
+static void s_on_ping_complete(
+ struct aws_http_connection *http2_connection,
+ uint64_t round_trip_time_ns,
+ int error_code,
+ void *user_data) {
+
+ (void)http2_connection;
+ struct aws_h2_sm_connection *sm_connection = user_data;
+ if (error_code) {
+ goto done;
+ }
+ if (!sm_connection->connection) {
+ goto done;
+ }
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+ STREAM_MANAGER_LOGF(
+ TRACE,
+ sm_connection->stream_manager,
+ "PING ACK received for connection: %p. Round trip time in ns is: %" PRIu64 ".",
+ (void *)sm_connection->connection,
+ round_trip_time_ns);
+ sm_connection->thread_data.ping_received = true;
+
+done:
+ /* Release refcount held for ping complete */
+ aws_ref_count_release(&sm_connection->ref_count);
+}
+
+static void s_connection_ping_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ (void)status;
+ struct aws_h2_sm_connection *sm_connection = arg;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto done;
+ }
+ if (!sm_connection->connection) {
+ /* The connection has been released before timeout happens, just release the refcount */
+ goto done;
+ }
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+ if (!sm_connection->thread_data.ping_received) {
+ /* Timeout happened */
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ sm_connection->stream_manager,
+ "ping timeout detected for connection: %p, closing connection.",
+ (void *)sm_connection->connection);
+
+ aws_http_connection_close(sm_connection->connection);
+ } else {
+ struct aws_channel *channel = aws_http_connection_get_channel(sm_connection->connection);
+ /* acquire a refcount for next set of tasks to run */
+ aws_ref_count_acquire(&sm_connection->ref_count);
+ aws_channel_schedule_task_future(
+ channel, &sm_connection->ping_task, sm_connection->thread_data.next_ping_task_time);
+ }
+done:
+ /* Release refcount for current set of tasks */
+ aws_ref_count_release(&sm_connection->ref_count);
+}
+
+static void s_connection_ping_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ (void)status;
+ struct aws_h2_sm_connection *sm_connection = arg;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ aws_ref_count_release(&sm_connection->ref_count);
+ return;
+ }
+ if (!sm_connection->connection) {
+ /* The connection has been released before ping task, just release the refcount */
+ aws_ref_count_release(&sm_connection->ref_count);
+ return;
+ }
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+
+ STREAM_MANAGER_LOGF(
+ TRACE, sm_connection->stream_manager, "Sending PING for connection: %p.", (void *)sm_connection->connection);
+ aws_http2_connection_ping(sm_connection->connection, NULL, s_on_ping_complete, sm_connection);
+ /* Acquire refcount for PING complete to be invoked. */
+ aws_ref_count_acquire(&sm_connection->ref_count);
+ sm_connection->thread_data.ping_received = false;
+
+ /* schedule timeout task */
+ struct aws_channel *channel = aws_http_connection_get_channel(sm_connection->connection);
+ uint64_t current_time = 0;
+ aws_channel_current_clock_time(channel, &current_time);
+ sm_connection->thread_data.next_ping_task_time =
+ current_time + sm_connection->stream_manager->connection_ping_period_ns;
+ uint64_t timeout_time = current_time + sm_connection->stream_manager->connection_ping_timeout_ns;
+ aws_channel_task_init(
+ &sm_connection->ping_timeout_task,
+ s_connection_ping_timeout_task,
+ sm_connection,
+ "Stream manager connection ping timeout task");
+ /* keep the refcount for timeout task to run */
+ aws_channel_schedule_task_future(channel, &sm_connection->ping_timeout_task, timeout_time);
+}
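+
+/*
+ * Descriptive recap of the ping machinery above: s_sm_connection_new()
+ * schedules s_connection_ping_task at now + connection_ping_period_ns and
+ * takes one refcount for it. Each run of the ping task sends a PING (taking a
+ * refcount for s_on_ping_complete) and schedules
+ * s_connection_ping_timeout_task at now + connection_ping_timeout_ns, handing
+ * its own refcount to that task. If no PING ACK has arrived by the timeout,
+ * the connection is closed; otherwise the timeout task re-schedules the ping
+ * task at next_ping_task_time with a fresh refcount. Every path releases its
+ * refcount when done, so the sm_connection is destroyed once all tasks and
+ * callbacks have drained.
+ */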
+
+static void s_sm_connection_destroy(void *user_data) {
+ struct aws_h2_sm_connection *sm_connection = user_data;
+ aws_mem_release(sm_connection->allocator, sm_connection);
+}
+
+static struct aws_h2_sm_connection *s_sm_connection_new(
+ struct aws_http2_stream_manager *stream_manager,
+ struct aws_http_connection *connection) {
+ struct aws_h2_sm_connection *sm_connection =
+ aws_mem_calloc(stream_manager->allocator, 1, sizeof(struct aws_h2_sm_connection));
+ sm_connection->allocator = stream_manager->allocator;
+    /* Cap this connection's max concurrent streams by the peer's MAX_CONCURRENT_STREAMS setting */
+    struct aws_http2_setting out_settings[AWS_HTTP2_SETTINGS_COUNT];
+    /* The setting id equals the index plus one. */
+ aws_http2_connection_get_remote_settings(connection, out_settings);
+ uint32_t remote_max_con_streams = out_settings[AWS_HTTP2_SETTINGS_MAX_CONCURRENT_STREAMS - 1].value;
+ sm_connection->max_concurrent_streams =
+ aws_min_u32((uint32_t)stream_manager->max_concurrent_streams_per_connection, remote_max_con_streams);
+ sm_connection->connection = connection;
+ sm_connection->stream_manager = stream_manager;
+ sm_connection->state = AWS_H2SMCST_IDEAL;
+ aws_ref_count_init(&sm_connection->ref_count, sm_connection, s_sm_connection_destroy);
+ if (stream_manager->connection_ping_period_ns) {
+ struct aws_channel *channel = aws_http_connection_get_channel(connection);
+ uint64_t schedule_time = 0;
+ aws_channel_current_clock_time(channel, &schedule_time);
+ schedule_time += stream_manager->connection_ping_period_ns;
+ aws_channel_task_init(
+ &sm_connection->ping_task, s_connection_ping_task, sm_connection, "Stream manager connection ping task");
+ /* Keep a refcount on sm_connection for the task to run. */
+ aws_ref_count_acquire(&sm_connection->ref_count);
+ aws_channel_schedule_task_future(channel, &sm_connection->ping_task, schedule_time);
+ }
+ return sm_connection;
+}
+
+static void s_sm_connection_release_connection(struct aws_h2_sm_connection *sm_connection) {
+ AWS_ASSERT(sm_connection->num_streams_assigned == 0);
+ if (sm_connection->connection) {
+ /* Should only be invoked from the connection thread. */
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(aws_http_connection_get_channel(sm_connection->connection)));
+ int error = aws_http_connection_manager_release_connection(
+ sm_connection->stream_manager->connection_manager, sm_connection->connection);
+ AWS_ASSERT(!error);
+ (void)error;
+ sm_connection->connection = NULL;
+ }
+ aws_ref_count_release(&sm_connection->ref_count);
+}
+
+static void s_sm_on_connection_acquired_failed_synced(
+ struct aws_http2_stream_manager *stream_manager,
+ struct aws_linked_list *stream_acquisitions_to_fail) {
+
+ /* Once we failed to acquire a connection, we fail the stream acquisitions that cannot fit into the remaining
+ * acquiring connections. */
+ size_t num_can_fit = aws_mul_size_saturating(
+ stream_manager->ideal_concurrent_streams_per_connection,
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING]);
+ size_t num_to_fail = aws_sub_size_saturating(
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION], num_can_fit);
+    /* Collect the acquisitions to fail in a list, instead of failing them while holding the lock. */
+ for (size_t i = 0; i < num_to_fail; i++) {
+ struct aws_linked_list_node *node =
+ aws_linked_list_pop_front(&stream_manager->synced_data.pending_stream_acquisitions);
+ aws_linked_list_push_back(stream_acquisitions_to_fail, node);
+ }
+ s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, num_to_fail);
+}
+
+static void s_sm_on_connection_acquired(struct aws_http_connection *connection, int error_code, void *user_data) {
+ struct aws_http2_stream_manager *stream_manager = user_data;
+ struct aws_http2_stream_management_transaction work;
+ STREAM_MANAGER_LOGF(TRACE, stream_manager, "connection=%p acquired from connection manager", (void *)connection);
+ int re_error = 0;
+ int stream_fail_error_code = AWS_ERROR_SUCCESS;
+ bool should_release_connection = false;
+ struct aws_linked_list stream_acquisitions_to_fail;
+ aws_linked_list_init(&stream_acquisitions_to_fail);
+ s_aws_stream_management_transaction_init(&work, stream_manager);
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream_manager);
+ s_sm_count_decrease_synced(stream_manager, AWS_SMCT_CONNECTIONS_ACQUIRING, 1);
+ if (error_code || !connection) {
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ stream_manager,
+ "connection acquired from connection manager failed, with error: %d(%s)",
+ error_code,
+ aws_error_str(error_code));
+ s_sm_on_connection_acquired_failed_synced(stream_manager, &stream_acquisitions_to_fail);
+ stream_fail_error_code = AWS_ERROR_HTTP_STREAM_MANAGER_CONNECTION_ACQUIRE_FAILURE;
+ } else if (aws_http_connection_get_version(connection) != AWS_HTTP_VERSION_2) {
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ stream_manager,
+ "Unexpected HTTP version acquired, release the connection=%p acquired immediately",
+ (void *)connection);
+ should_release_connection = true;
+ s_sm_on_connection_acquired_failed_synced(stream_manager, &stream_acquisitions_to_fail);
+ stream_fail_error_code = AWS_ERROR_HTTP_STREAM_MANAGER_UNEXPECTED_HTTP_VERSION;
+ } else if (stream_manager->synced_data.state != AWS_H2SMST_READY) {
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "shutting down, release the connection=%p acquired immediately",
+ (void *)connection);
+ /* Release the acquired connection */
+ should_release_connection = true;
+ } else if (stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] == 0) {
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "No pending acquisition, release the connection=%p acquired immediately",
+ (void *)connection);
+ /* Release the acquired connection */
+ should_release_connection = true;
+ } else {
+ struct aws_h2_sm_connection *sm_connection = s_sm_connection_new(stream_manager, connection);
+ bool added = false;
+ re_error |=
+ aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added);
+ re_error |= !added;
+ ++stream_manager->synced_data.holding_connections_count;
+ }
+ s_aws_http2_stream_manager_build_transaction_synced(&work);
+ s_unlock_synced_data(stream_manager);
+ } /* END CRITICAL SECTION */
+
+ if (should_release_connection) {
+ STREAM_MANAGER_LOGF(DEBUG, stream_manager, "Releasing connection: %p", (void *)connection);
+ re_error |= aws_http_connection_manager_release_connection(stream_manager->connection_manager, connection);
+ }
+
+ AWS_ASSERT(!re_error && "connection acquired callback fails with programming errors");
+ (void)re_error;
+
+ /* Fail acquisitions if any */
+ s_finish_pending_stream_acquisitions_list_helper(
+ stream_manager, &stream_acquisitions_to_fail, stream_fail_error_code);
+ s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+static int s_on_incoming_headers(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data) {
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data;
+ struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection;
+ struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager;
+
+ if (pending_stream_acquisition->options.on_response_headers) {
+ return pending_stream_acquisition->options.on_response_headers(
+ stream, header_block, header_array, num_headers, pending_stream_acquisition->options.user_data);
+ }
+ if (stream_manager->close_connection_on_server_error) {
+    /* Check the response status code; on a server error, stop sending new requests on this connection. */
+ int status_code = 0;
+ aws_http_stream_get_incoming_response_status(stream, &status_code);
+ AWS_ASSERT(status_code != 0); /* The get status should not fail */
+ switch (status_code) {
+ case AWS_HTTP_STATUS_CODE_500_INTERNAL_SERVER_ERROR:
+ case AWS_HTTP_STATUS_CODE_502_BAD_GATEWAY:
+ case AWS_HTTP_STATUS_CODE_503_SERVICE_UNAVAILABLE:
+ case AWS_HTTP_STATUS_CODE_504_GATEWAY_TIMEOUT:
+                /* For those error codes, if a retry happens, it should not use the same connection. */
+ if (!sm_connection->thread_data.stopped_new_requests) {
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "no longer using connection: %p due to receiving %d server error status code for stream: %p",
+ (void *)sm_connection->connection,
+ status_code,
+ (void *)stream);
+ aws_http_connection_stop_new_requests(sm_connection->connection);
+ sm_connection->thread_data.stopped_new_requests = true;
+ }
+ break;
+ default:
+ break;
+ }
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_incoming_header_block_done(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data) {
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data;
+ if (pending_stream_acquisition->options.on_response_header_block_done) {
+ return pending_stream_acquisition->options.on_response_header_block_done(
+ stream, header_block, pending_stream_acquisition->options.user_data);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_incoming_body(struct aws_http_stream *stream, const struct aws_byte_cursor *data, void *user_data) {
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data;
+ if (pending_stream_acquisition->options.on_response_body) {
+ return pending_stream_acquisition->options.on_response_body(
+ stream, data, pending_stream_acquisition->options.user_data);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+/* Helper invoked when the underlying connection is still available and the number of streams assigned has been updated */
+static void s_update_sm_connection_set_on_stream_finishes_synced(
+ struct aws_h2_sm_connection *sm_connection,
+ struct aws_http2_stream_manager *stream_manager) {
+
+ int re_error = 0;
+ size_t cur_num = sm_connection->num_streams_assigned;
+ size_t ideal_num = stream_manager->ideal_concurrent_streams_per_connection;
+ size_t max_num = sm_connection->max_concurrent_streams;
+ /**
+     * TODO: When the MAX_CONCURRENT_STREAMS from the other side changes after the initial settings, we need to:
+     * - figure out which set the connection is currently in
+     * - figure out which set it should be in
+     * - if they differ, remove it from the current set and add it to the correct one
+ */
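+    /*
+     * Illustrative note (numbers are hypothetical): with ideal_num = 10 and max_num = 100, a
+     * connection with fewer than 10 assigned streams belongs in the ideal set, one with 10..99
+     * belongs in the soft-limited (nonideal) set, and one at 100 sits in neither set until a
+     * stream finishes.
+     */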
+ if (sm_connection->state == AWS_H2SMCST_NEARLY_FULL && cur_num < ideal_num) {
+ /* this connection is back from soft limited to ideal */
+ bool exist = false;
+ (void)exist;
+ AWS_ASSERT(
+ aws_random_access_set_exist(&stream_manager->synced_data.nonideal_available_set, sm_connection, &exist) ==
+ AWS_OP_SUCCESS &&
+ exist);
+ re_error |= aws_random_access_set_remove(&stream_manager->synced_data.nonideal_available_set, sm_connection);
+ bool added = false;
+ re_error |= aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added);
+ re_error |= !added;
+ sm_connection->state = AWS_H2SMCST_IDEAL;
+ } else if (sm_connection->state == AWS_H2SMCST_FULL && cur_num < max_num) {
+ /* this connection is back from full */
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "connection:%p back to available, assigned stream=%zu, max concurrent streams=%" PRIu32 "",
+ (void *)sm_connection->connection,
+ cur_num,
+ sm_connection->max_concurrent_streams);
+ bool added = false;
+ if (cur_num >= ideal_num) {
+ sm_connection->state = AWS_H2SMCST_NEARLY_FULL;
+ STREAM_MANAGER_LOGF(
+ TRACE, stream_manager, "connection:%p added to soft limited set", (void *)sm_connection->connection);
+ re_error |=
+ aws_random_access_set_add(&stream_manager->synced_data.nonideal_available_set, sm_connection, &added);
+ } else {
+ sm_connection->state = AWS_H2SMCST_IDEAL;
+ STREAM_MANAGER_LOGF(
+ TRACE, stream_manager, "connection:%p added to ideal set", (void *)sm_connection->connection);
+ re_error |=
+ aws_random_access_set_add(&stream_manager->synced_data.ideal_available_set, sm_connection, &added);
+ }
+ re_error |= !added;
+ }
+ AWS_ASSERT(re_error == AWS_OP_SUCCESS);
+ (void)re_error;
+}
+
+static void s_sm_connection_on_scheduled_stream_finishes(
+ struct aws_h2_sm_connection *sm_connection,
+ struct aws_http2_stream_manager *stream_manager) {
+    /* Reaching the max concurrent streams still allows new requests, but the new stream will complete with an error */
+ bool connection_available = aws_http_connection_new_requests_allowed(sm_connection->connection);
+ struct aws_http2_stream_management_transaction work;
+ s_aws_stream_management_transaction_init(&work, stream_manager);
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream_manager);
+ s_sm_count_decrease_synced(stream_manager, AWS_SMCT_OPEN_STREAM, 1);
+ --sm_connection->num_streams_assigned;
+ if (!connection_available) {
+            /* It might have been removed already, but that's fine */
+ aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, sm_connection);
+ aws_random_access_set_remove(&stream_manager->synced_data.nonideal_available_set, sm_connection);
+ } else {
+ s_update_sm_connection_set_on_stream_finishes_synced(sm_connection, stream_manager);
+ }
+ s_aws_http2_stream_manager_build_transaction_synced(&work);
+        /* After we build the transaction, if the sm_connection still has zero assigned streams, we can kill the
+         * sm_connection */
+ if (sm_connection->num_streams_assigned == 0) {
+            /* It might have been removed already, but that's fine */
+ aws_random_access_set_remove(&stream_manager->synced_data.ideal_available_set, sm_connection);
+ work.sm_connection_to_release = sm_connection;
+ --stream_manager->synced_data.holding_connections_count;
+ /* After we release one connection back, we should check if we need more connections */
+ if (stream_manager->synced_data.state == AWS_H2SMST_READY &&
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION]) {
+ s_check_new_connections_needed_synced(&work);
+ }
+ }
+ s_unlock_synced_data(stream_manager);
+ } /* END CRITICAL SECTION */
+ s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+static void s_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data;
+ struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection;
+ struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager;
+ if (pending_stream_acquisition->options.on_complete) {
+ pending_stream_acquisition->options.on_complete(
+ stream, error_code, pending_stream_acquisition->options.user_data);
+ }
+ s_sm_connection_on_scheduled_stream_finishes(sm_connection, stream_manager);
+}
+
+static void s_on_stream_destroy(void *user_data) {
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = user_data;
+ if (pending_stream_acquisition->options.on_destroy) {
+ pending_stream_acquisition->options.on_destroy(pending_stream_acquisition->options.user_data);
+ }
+ s_pending_stream_acquisition_destroy(pending_stream_acquisition);
+}
+
+/* Scheduled to happen from connection's thread */
+static void s_make_request_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = arg;
+ struct aws_h2_sm_connection *sm_connection = pending_stream_acquisition->sm_connection;
+ struct aws_http2_stream_manager *stream_manager = sm_connection->stream_manager;
+ int error_code = AWS_ERROR_SUCCESS;
+
+ STREAM_MANAGER_LOGF(
+ TRACE,
+ stream_manager,
+ "Make request task running for acquisition:%p from connection:%p thread",
+ (void *)pending_stream_acquisition,
+ (void *)sm_connection->connection);
+ bool is_shutting_down = false;
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream_manager);
+ is_shutting_down = stream_manager->synced_data.state != AWS_H2SMST_READY;
+ s_sm_count_decrease_synced(stream_manager, AWS_SMCT_PENDING_MAKE_REQUESTS, 1);
+        /* The stream has not opened yet, but we increase the count here; if anything fails, the count will be
+         * decreased. */
+ s_sm_count_increase_synced(stream_manager, AWS_SMCT_OPEN_STREAM, 1);
+ AWS_ASSERT(
+ sm_connection->max_concurrent_streams >= sm_connection->num_streams_assigned &&
+ "The max concurrent streams exceed");
+ s_unlock_synced_data(stream_manager);
+ } /* END CRITICAL SECTION */
+    /* This is a channel task. If it is canceled, that means the channel shut down. In that case, that's equivalent
+     * to a closed connection. */
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ stream_manager,
+ "acquisition:%p failed as the task is cancelled.",
+ (void *)pending_stream_acquisition);
+ error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ goto error;
+ }
+ if (is_shutting_down) {
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ stream_manager,
+ "acquisition:%p failed as stream manager is shutting down before task runs.",
+ (void *)pending_stream_acquisition);
+ error_code = AWS_ERROR_HTTP_STREAM_MANAGER_SHUTTING_DOWN;
+ goto error;
+ }
+ struct aws_http_make_request_options request_options = {
+ .self_size = sizeof(request_options),
+ .request = pending_stream_acquisition->request,
+ .on_response_headers = s_on_incoming_headers,
+ .on_response_header_block_done = s_on_incoming_header_block_done,
+ .on_response_body = s_on_incoming_body,
+ .on_complete = s_on_stream_complete,
+ .on_destroy = s_on_stream_destroy,
+ .user_data = pending_stream_acquisition,
+ .http2_use_manual_data_writes = pending_stream_acquisition->options.http2_use_manual_data_writes,
+ };
+    /* TODO: we could put the pending acquisition back to the list if the connection is not available for new
+     * requests. */
+
+ struct aws_http_stream *stream = aws_http_connection_make_request(sm_connection->connection, &request_options);
+ if (!stream) {
+ error_code = aws_last_error();
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ stream_manager,
+ "acquisition:%p failed as HTTP level make request failed with error: %d(%s).",
+ (void *)pending_stream_acquisition,
+ error_code,
+ aws_error_str(error_code));
+ goto error;
+ }
+    /* Since we're in the connection's thread, this should be safe; there won't be any other callbacks to the user */
+ if (aws_http_stream_activate(stream)) {
+        /* Activate failed; the on_complete callback will NOT be invoked from HTTP, but we already told the user
+         * about the stream. Invoke the user completion callback here */
+ error_code = aws_last_error();
+ STREAM_MANAGER_LOGF(
+ ERROR,
+ stream_manager,
+ "acquisition:%p failed as stream activate failed with error: %d(%s).",
+ (void *)pending_stream_acquisition,
+ error_code,
+ aws_error_str(error_code));
+ goto error;
+ }
+ if (pending_stream_acquisition->callback) {
+ pending_stream_acquisition->callback(stream, 0, pending_stream_acquisition->user_data);
+ }
+
+    /* Happy case: the complete callback will be invoked and we clean things up there, but we can release the
+     * request now */
+ aws_http_message_release(pending_stream_acquisition->request);
+ pending_stream_acquisition->request = NULL;
+ return;
+error:
+ if (pending_stream_acquisition->callback) {
+ pending_stream_acquisition->callback(NULL, error_code, pending_stream_acquisition->user_data);
+ }
+ s_pending_stream_acquisition_destroy(pending_stream_acquisition);
+    /* This call should happen after the destroy above, as it can trigger the whole stream manager to be destroyed */
+ s_sm_connection_on_scheduled_stream_finishes(sm_connection, stream_manager);
+}
+
+/* NEVER invoke with lock held */
+static void s_aws_http2_stream_manager_execute_transaction(struct aws_http2_stream_management_transaction *work) {
+
+ struct aws_http2_stream_manager *stream_manager = work->stream_manager;
+
+    /* Step 1: Release connection */
+ if (work->sm_connection_to_release) {
+ AWS_ASSERT(work->sm_connection_to_release->num_streams_assigned == 0);
+ STREAM_MANAGER_LOGF(
+ DEBUG,
+ stream_manager,
+ "Release connection:%p back to connection manager as no outstanding streams",
+ (void *)work->sm_connection_to_release->connection);
+ s_sm_connection_release_connection(work->sm_connection_to_release);
+ }
+
+    /* Step 2: Make request. The work should already know which connection the request will be made on. */
+ while (!aws_linked_list_empty(&work->pending_make_requests)) {
+        /* The completions can also fail, as the connection can become unavailable after the decision was made. We
+         * just fail the acquisition */
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&work->pending_make_requests);
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition =
+ AWS_CONTAINER_OF(node, struct aws_h2_sm_pending_stream_acquisition, node);
+
+ AWS_ASSERT(
+ pending_stream_acquisition->sm_connection &&
+ "Stream manager internal bug: connection is not decided before execute transaction");
+
+ STREAM_MANAGER_LOGF(
+ TRACE,
+ stream_manager,
+ "acquisition:%p is scheduled to be made request from connection:%p thread",
+ (void *)pending_stream_acquisition,
+ (void *)pending_stream_acquisition->sm_connection->connection);
+ /**
+         * Schedule a task on the connection's event loop to make the request, so that:
+         * - We can activate the stream for the user and then invoke the callback
+         * - The callback happens asynchronously even if the stream failed to be created
+         * - We can make sure we will not break the settings
+ */
+ struct aws_channel *channel =
+ aws_http_connection_get_channel(pending_stream_acquisition->sm_connection->connection);
+ aws_channel_task_init(
+ &pending_stream_acquisition->make_request_task,
+ s_make_request_task,
+ pending_stream_acquisition,
+ "Stream manager make request task");
+ aws_channel_schedule_task_now(channel, &pending_stream_acquisition->make_request_task);
+ }
+
+ /* Step 3: Acquire connections if needed */
+ if (work->new_connections) {
+ STREAM_MANAGER_LOGF(DEBUG, stream_manager, "acquiring %zu new connections", work->new_connections);
+ }
+ for (size_t i = 0; i < work->new_connections; ++i) {
+ aws_http_connection_manager_acquire_connection(
+ stream_manager->connection_manager, s_sm_on_connection_acquired, stream_manager);
+ }
+
+ /*
+ * Step 4: Clean up work. Do this here rather than at the end of every caller. Destroy the manager if necessary
+ */
+ s_aws_stream_management_transaction_clean_up(work);
+}
+
+void s_stream_manager_destroy_final(struct aws_http2_stream_manager *stream_manager) {
+ if (!stream_manager) {
+ return;
+ }
+
+ STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager finishes destroying self");
+ /* Connection manager has already been cleaned up */
+ AWS_FATAL_ASSERT(stream_manager->connection_manager == NULL);
+ AWS_FATAL_ASSERT(aws_linked_list_empty(&stream_manager->synced_data.pending_stream_acquisitions));
+ aws_mutex_clean_up(&stream_manager->synced_data.lock);
+ aws_random_access_set_clean_up(&stream_manager->synced_data.ideal_available_set);
+ aws_random_access_set_clean_up(&stream_manager->synced_data.nonideal_available_set);
+ aws_client_bootstrap_release(stream_manager->bootstrap);
+
+ if (stream_manager->shutdown_complete_callback) {
+ stream_manager->shutdown_complete_callback(stream_manager->shutdown_complete_user_data);
+ }
+ aws_mem_release(stream_manager->allocator, stream_manager);
+}
+
+void s_stream_manager_on_cm_shutdown_complete(void *user_data) {
+ struct aws_http2_stream_manager *stream_manager = (struct aws_http2_stream_manager *)user_data;
+ STREAM_MANAGER_LOGF(
+ TRACE,
+ stream_manager,
+ "Underlying connection manager (ip=%p) finished shutdown, stream manager can finish destroying now",
+ (void *)stream_manager->connection_manager);
+ stream_manager->connection_manager = NULL;
+ s_stream_manager_destroy_final(stream_manager);
+}
+
+static void s_stream_manager_start_destroy(struct aws_http2_stream_manager *stream_manager) {
+    STREAM_MANAGER_LOG(TRACE, stream_manager, "Stream Manager has reached the condition to destroy, starting to destroy");
+    /* If there are no outstanding streams, the connection sets should be empty. */
+ AWS_ASSERT(aws_random_access_set_get_size(&stream_manager->synced_data.ideal_available_set) == 0);
+ AWS_ASSERT(aws_random_access_set_get_size(&stream_manager->synced_data.nonideal_available_set) == 0);
+ AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_CONNECTIONS_ACQUIRING] == 0);
+ AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM] == 0);
+ AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_MAKE_REQUESTS] == 0);
+ AWS_ASSERT(stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION] == 0);
+ AWS_ASSERT(stream_manager->connection_manager);
+ struct aws_http_connection_manager *cm = stream_manager->connection_manager;
+ stream_manager->connection_manager = NULL;
+ aws_http_connection_manager_release(cm);
+}
+
+void s_stream_manager_on_zero_external_ref(struct aws_http2_stream_manager *stream_manager) {
+ STREAM_MANAGER_LOG(
+ TRACE,
+ stream_manager,
+ "Last refcount released, manager stop accepting new stream request and will start to clean up when not "
+ "outstanding tasks remaining.");
+ struct aws_http2_stream_management_transaction work;
+ s_aws_stream_management_transaction_init(&work, stream_manager);
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream_manager);
+ stream_manager->synced_data.state = AWS_H2SMST_DESTROYING;
+ s_aws_http2_stream_manager_build_transaction_synced(&work);
+        /* Release the internal ref count as there is no external usage anymore */
+ aws_ref_count_release(&stream_manager->internal_ref_count);
+ s_unlock_synced_data(stream_manager);
+ } /* END CRITICAL SECTION */
+ s_aws_http2_stream_manager_execute_transaction(&work);
+}
+
+struct aws_http2_stream_manager *aws_http2_stream_manager_new(
+ struct aws_allocator *allocator,
+ const struct aws_http2_stream_manager_options *options) {
+
+ AWS_PRECONDITION(allocator);
+    /* The other options are validated by aws_http_connection_manager_new */
+ if (!options->http2_prior_knowledge && !options->tls_connection_options) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION_MANAGER,
+ "Invalid options - Prior knowledge must be used for cleartext HTTP/2 connections."
+ " Upgrade from HTTP/1.1 is not supported.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+ struct aws_http2_stream_manager *stream_manager =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http2_stream_manager));
+ stream_manager->allocator = allocator;
+ aws_linked_list_init(&stream_manager->synced_data.pending_stream_acquisitions);
+
+ if (aws_mutex_init(&stream_manager->synced_data.lock)) {
+ goto on_error;
+ }
+ if (aws_random_access_set_init(
+ &stream_manager->synced_data.ideal_available_set,
+ allocator,
+ aws_hash_ptr,
+ aws_ptr_eq,
+ NULL /* destroy function */,
+ 2)) {
+ goto on_error;
+ }
+ if (aws_random_access_set_init(
+ &stream_manager->synced_data.nonideal_available_set,
+ allocator,
+ aws_hash_ptr,
+ aws_ptr_eq,
+ NULL /* destroy function */,
+ 2)) {
+ goto on_error;
+ }
+ aws_ref_count_init(
+ &stream_manager->external_ref_count,
+ stream_manager,
+ (aws_simple_completion_callback *)s_stream_manager_on_zero_external_ref);
+ aws_ref_count_init(
+ &stream_manager->internal_ref_count,
+ stream_manager,
+ (aws_simple_completion_callback *)s_stream_manager_start_destroy);
+
+ if (options->connection_ping_period_ms) {
+ stream_manager->connection_ping_period_ns =
+ aws_timestamp_convert(options->connection_ping_period_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+ size_t connection_ping_timeout_ms =
+ options->connection_ping_timeout_ms ? options->connection_ping_timeout_ms : s_default_ping_timeout_ms;
+ stream_manager->connection_ping_timeout_ns =
+ aws_timestamp_convert(connection_ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+ if (stream_manager->connection_ping_period_ns < stream_manager->connection_ping_timeout_ns) {
+ STREAM_MANAGER_LOGF(
+ WARN,
+ stream_manager,
+ "connection_ping_period_ms: %zu is shorter than connection_ping_timeout_ms: %zu. Clapping "
+ "connection_ping_timeout_ms to %zu",
+ options->connection_ping_period_ms,
+ connection_ping_timeout_ms,
+ options->connection_ping_period_ms);
+ stream_manager->connection_ping_timeout_ns = stream_manager->connection_ping_period_ns;
+ }
+ }
+
+ stream_manager->bootstrap = aws_client_bootstrap_acquire(options->bootstrap);
+ struct aws_http_connection_manager_options cm_options = {
+ .bootstrap = options->bootstrap,
+ .socket_options = options->socket_options,
+ .tls_connection_options = options->tls_connection_options,
+ .http2_prior_knowledge = options->http2_prior_knowledge,
+ .host = options->host,
+ .port = options->port,
+ .enable_read_back_pressure = options->enable_read_back_pressure,
+ .monitoring_options = options->monitoring_options,
+ .proxy_options = options->proxy_options,
+ .proxy_ev_settings = options->proxy_ev_settings,
+ .max_connections = options->max_connections,
+ .shutdown_complete_user_data = stream_manager,
+ .shutdown_complete_callback = s_stream_manager_on_cm_shutdown_complete,
+ .initial_settings_array = options->initial_settings_array,
+ .num_initial_settings = options->num_initial_settings,
+ .max_closed_streams = options->max_closed_streams,
+ .http2_conn_manual_window_management = options->conn_manual_window_management,
+ };
+ /* aws_http_connection_manager_new needs to be the last thing that can fail */
+ stream_manager->connection_manager = aws_http_connection_manager_new(allocator, &cm_options);
+ if (!stream_manager->connection_manager) {
+ goto on_error;
+ }
+ /* Nothing can fail after here */
+ stream_manager->synced_data.state = AWS_H2SMST_READY;
+ stream_manager->shutdown_complete_callback = options->shutdown_complete_callback;
+ stream_manager->shutdown_complete_user_data = options->shutdown_complete_user_data;
+ stream_manager->ideal_concurrent_streams_per_connection = options->ideal_concurrent_streams_per_connection
+ ? options->ideal_concurrent_streams_per_connection
+ : UINT32_MAX;
+ stream_manager->max_concurrent_streams_per_connection =
+ options->max_concurrent_streams_per_connection ? options->max_concurrent_streams_per_connection : UINT32_MAX;
+ stream_manager->max_connections = options->max_connections;
+ stream_manager->close_connection_on_server_error = options->close_connection_on_server_error;
+
+ return stream_manager;
+on_error:
+ s_stream_manager_destroy_final(stream_manager);
+ return NULL;
+}
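+
+/*
+ * Minimal creation sketch (not part of the upstream source; host, port, bootstrap and the option
+ * values are placeholders, and only fields referenced above are shown). Cleartext HTTP/2 requires
+ * http2_prior_knowledge; otherwise tls_connection_options must be set:
+ *
+ *     struct aws_http2_stream_manager_options sm_options = {
+ *         .bootstrap = bootstrap,
+ *         .socket_options = &socket_options,
+ *         .tls_connection_options = &tls_connection_options,
+ *         .host = aws_byte_cursor_from_c_str("origin.example.com"),
+ *         .port = 443,
+ *         .max_connections = 4,
+ *     };
+ *     struct aws_http2_stream_manager *manager = aws_http2_stream_manager_new(allocator, &sm_options);
+ */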
+
+struct aws_http2_stream_manager *aws_http2_stream_manager_acquire(struct aws_http2_stream_manager *stream_manager) {
+ if (stream_manager) {
+ aws_ref_count_acquire(&stream_manager->external_ref_count);
+ }
+ return stream_manager;
+}
+
+struct aws_http2_stream_manager *aws_http2_stream_manager_release(struct aws_http2_stream_manager *stream_manager) {
+ if (stream_manager) {
+ aws_ref_count_release(&stream_manager->external_ref_count);
+ }
+ return NULL;
+}
+
+void aws_http2_stream_manager_acquire_stream(
+ struct aws_http2_stream_manager *stream_manager,
+ const struct aws_http2_stream_manager_acquire_stream_options *acquire_stream_option) {
+ AWS_PRECONDITION(stream_manager);
+ AWS_PRECONDITION(acquire_stream_option);
+ AWS_PRECONDITION(acquire_stream_option->callback);
+ AWS_PRECONDITION(acquire_stream_option->options);
+ struct aws_http2_stream_management_transaction work;
+ struct aws_h2_sm_pending_stream_acquisition *pending_stream_acquisition = s_new_pending_stream_acquisition(
+ stream_manager->allocator,
+ acquire_stream_option->options,
+ acquire_stream_option->callback,
+ acquire_stream_option->user_data);
+ STREAM_MANAGER_LOGF(
+ TRACE, stream_manager, "Stream Manager creates acquisition:%p for user", (void *)pending_stream_acquisition);
+ s_aws_stream_management_transaction_init(&work, stream_manager);
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(stream_manager);
+        /* Acquiring a stream after releasing the manager is a use-after-free crime */
+ AWS_FATAL_ASSERT(stream_manager->synced_data.state != AWS_H2SMST_DESTROYING);
+ aws_linked_list_push_back(
+ &stream_manager->synced_data.pending_stream_acquisitions, &pending_stream_acquisition->node);
+ s_sm_count_increase_synced(stream_manager, AWS_SMCT_PENDING_ACQUISITION, 1);
+ s_aws_http2_stream_manager_build_transaction_synced(&work);
+ s_unlock_synced_data(stream_manager);
+ } /* END CRITICAL SECTION */
+ s_aws_http2_stream_manager_execute_transaction(&work);
+}
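+
+/*
+ * Minimal usage sketch (not part of the upstream source; request_options, app_context and
+ * s_on_stream_acquired are caller-defined placeholders, and only the option fields referenced
+ * above are shown). The caller fills a regular aws_http_make_request_options for the request,
+ * wraps it with a completion callback, and receives an already-activated stream (or an error
+ * code) in that callback:
+ *
+ *     static void s_on_stream_acquired(struct aws_http_stream *stream, int error_code, void *user_data) {
+ *         if (error_code) {
+ *             // acquisition failed; stream is NULL
+ *             return;
+ *         }
+ *         // stream is live; the response callbacks from the make-request options will fire
+ *     }
+ *
+ *     struct aws_http2_stream_manager_acquire_stream_options acquire_options = {
+ *         .options = &request_options,
+ *         .callback = s_on_stream_acquired,
+ *         .user_data = app_context,
+ *     };
+ *     aws_http2_stream_manager_acquire_stream(stream_manager, &acquire_options);
+ */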
+
+static size_t s_get_available_streams_num_from_connection_set(const struct aws_random_access_set *set) {
+ size_t all_available_streams_num = 0;
+ size_t ideal_connection_num = aws_random_access_set_get_size(set);
+ for (size_t i = 0; i < ideal_connection_num; i++) {
+ struct aws_h2_sm_connection *sm_connection = NULL;
+ AWS_FATAL_ASSERT(aws_random_access_set_random_get_ptr_index(set, (void **)&sm_connection, i) == AWS_OP_SUCCESS);
+ uint32_t available_streams = sm_connection->max_concurrent_streams - sm_connection->num_streams_assigned;
+ all_available_streams_num += (size_t)available_streams;
+ }
+ return all_available_streams_num;
+}
+
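+/*
+ * Note on available_concurrency (example numbers are hypothetical): it is the number of extra
+ * streams that could be opened on the currently available connections, i.e. the sum of
+ * (max_concurrent_streams - num_streams_assigned) over both available sets. Two available
+ * connections with max_concurrent_streams = 100 and 30/70 streams assigned would report
+ * available_concurrency = 70 + 30 = 100.
+ */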
+void aws_http2_stream_manager_fetch_metrics(
+ const struct aws_http2_stream_manager *stream_manager,
+ struct aws_http_manager_metrics *out_metrics) {
+ AWS_PRECONDITION(stream_manager);
+ AWS_PRECONDITION(out_metrics);
+ { /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data((struct aws_http2_stream_manager *)(void *)stream_manager);
+ size_t all_available_streams_num = 0;
+ all_available_streams_num +=
+ s_get_available_streams_num_from_connection_set(&stream_manager->synced_data.ideal_available_set);
+ all_available_streams_num +=
+ s_get_available_streams_num_from_connection_set(&stream_manager->synced_data.nonideal_available_set);
+ out_metrics->pending_concurrency_acquires =
+ stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_PENDING_ACQUISITION];
+ out_metrics->available_concurrency = all_available_streams_num;
+ out_metrics->leased_concurrency = stream_manager->synced_data.internal_refcount_stats[AWS_SMCT_OPEN_STREAM];
+ s_unlock_synced_data((struct aws_http2_stream_manager *)(void *)stream_manager);
+ } /* END CRITICAL SECTION */
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/proxy_connection.c b/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
new file mode 100644
index 0000000000..e6cdb8a246
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/proxy_connection.c
@@ -0,0 +1,1658 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/proxy_impl.h>
+
+#include <aws/common/encoding.h>
+#include <aws/common/environment.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/string.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/proxy.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+AWS_STATIC_STRING_FROM_LITERAL(s_host_header_name, "Host");
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_connection_header_name, "Proxy-Connection");
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_connection_header_value, "Keep-Alive");
+AWS_STATIC_STRING_FROM_LITERAL(s_options_method, "OPTIONS");
+AWS_STATIC_STRING_FROM_LITERAL(s_star_path, "*");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var, "HTTP_PROXY");
+AWS_STATIC_STRING_FROM_LITERAL(s_http_proxy_env_var_low, "http_proxy");
+AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var, "HTTPS_PROXY");
+AWS_STATIC_STRING_FROM_LITERAL(s_https_proxy_env_var_low, "https_proxy");
+
+#ifndef BYO_CRYPTO
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_no_verify_peer_env_var, "AWS_PROXY_NO_VERIFY_PEER");
+#endif
+
+static struct aws_http_proxy_system_vtable s_default_vtable = {
+ .setup_client_tls = &aws_channel_setup_client_tls,
+};
+
+static struct aws_http_proxy_system_vtable *s_vtable = &s_default_vtable;
+
+void aws_http_proxy_system_set_vtable(struct aws_http_proxy_system_vtable *vtable) {
+ s_vtable = vtable;
+}
+
+void aws_http_proxy_user_data_destroy(struct aws_http_proxy_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+ aws_hash_table_clean_up(&user_data->alpn_string_map);
+
+ /*
+ * For tunneling connections, this is now internal and never surfaced to the user, so it's our responsibility
+ * to clean up the last reference.
+ */
+ if (user_data->proxy_connection != NULL && user_data->proxy_config->connection_type == AWS_HPCT_HTTP_TUNNEL) {
+ aws_http_connection_release(user_data->proxy_connection);
+ user_data->proxy_connection = NULL;
+ }
+
+ aws_string_destroy(user_data->original_host);
+ if (user_data->proxy_config) {
+ aws_http_proxy_config_destroy(user_data->proxy_config);
+ }
+
+ if (user_data->original_tls_options) {
+ aws_tls_connection_options_clean_up(user_data->original_tls_options);
+ aws_mem_release(user_data->allocator, user_data->original_tls_options);
+ }
+
+ aws_http_proxy_negotiator_release(user_data->proxy_negotiator);
+
+ aws_client_bootstrap_release(user_data->original_bootstrap);
+
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+struct aws_http_proxy_user_data *aws_http_proxy_user_data_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *orig_options,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_setup,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown) {
+
+ AWS_FATAL_ASSERT(orig_options->proxy_options != NULL);
+    /* make a copy of the options, and add defaults for missing optional structs */
+ struct aws_http_client_connection_options options = *orig_options;
+
+ struct aws_http1_connection_options default_http1_options;
+ AWS_ZERO_STRUCT(default_http1_options);
+ if (options.http1_options == NULL) {
+ options.http1_options = &default_http1_options;
+ }
+
+ struct aws_http2_connection_options default_http2_options;
+ AWS_ZERO_STRUCT(default_http2_options);
+ if (options.http2_options == NULL) {
+ options.http2_options = &default_http2_options;
+ }
+
+ struct aws_http2_setting *setting_array = NULL;
+ struct aws_http_proxy_user_data *user_data = NULL;
+ aws_mem_acquire_many(
+ options.allocator,
+ 2,
+ &user_data,
+ sizeof(struct aws_http_proxy_user_data),
+ &setting_array,
+ options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting));
+ AWS_ZERO_STRUCT(*user_data);
+
+ user_data->allocator = allocator;
+ user_data->state = AWS_PBS_SOCKET_CONNECT;
+ user_data->error_code = AWS_ERROR_SUCCESS;
+ user_data->connect_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ user_data->original_bootstrap = aws_client_bootstrap_acquire(options.bootstrap);
+ if (options.socket_options != NULL) {
+ user_data->original_socket_options = *options.socket_options;
+ }
+ user_data->original_manual_window_management = options.manual_window_management;
+ user_data->original_initial_window_size = options.initial_window_size;
+
+ user_data->original_host = aws_string_new_from_cursor(allocator, &options.host_name);
+ if (user_data->original_host == NULL) {
+ goto on_error;
+ }
+
+ user_data->original_port = options.port;
+
+ user_data->proxy_config = aws_http_proxy_config_new_from_connection_options(allocator, &options);
+ if (user_data->proxy_config == NULL) {
+ goto on_error;
+ }
+
+ user_data->proxy_negotiator =
+ aws_http_proxy_strategy_create_negotiator(user_data->proxy_config->proxy_strategy, allocator);
+ if (user_data->proxy_negotiator == NULL) {
+ goto on_error;
+ }
+
+ if (options.tls_options) {
+ /* clone tls options, but redirect user data to what we're creating */
+ user_data->original_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (user_data->original_tls_options == NULL ||
+ aws_tls_connection_options_copy(user_data->original_tls_options, options.tls_options)) {
+ goto on_error;
+ }
+
+ user_data->original_tls_options->user_data = user_data;
+ }
+
+ if (aws_http_alpn_map_init_copy(options.allocator, &user_data->alpn_string_map, options.alpn_string_map)) {
+ goto on_error;
+ }
+
+ user_data->original_http_on_setup = options.on_setup;
+ user_data->original_http_on_shutdown = options.on_shutdown;
+ user_data->original_channel_on_setup = on_channel_setup;
+ user_data->original_channel_on_shutdown = on_channel_shutdown;
+ user_data->requested_event_loop = options.requested_event_loop;
+ user_data->prior_knowledge_http2 = options.prior_knowledge_http2;
+
+ /* one and only one setup callback must be valid */
+ AWS_FATAL_ASSERT((user_data->original_http_on_setup == NULL) != (user_data->original_channel_on_setup == NULL));
+
+ /* one and only one shutdown callback must be valid */
+ AWS_FATAL_ASSERT(
+ (user_data->original_http_on_shutdown == NULL) != (user_data->original_channel_on_shutdown == NULL));
+
+ /* callback set must be self-consistent. Technically the second check is redundant given the previous checks */
+ AWS_FATAL_ASSERT((user_data->original_http_on_setup == NULL) == (user_data->original_http_on_shutdown == NULL));
+ AWS_FATAL_ASSERT(
+ (user_data->original_channel_on_setup == NULL) == (user_data->original_channel_on_shutdown == NULL));
+
+ user_data->original_user_data = options.user_data;
+ user_data->original_http1_options = *options.http1_options;
+ user_data->original_http2_options = *options.http2_options;
+
+ /* keep a copy of the settings array if it's not NULL */
+ if (options.http2_options->num_initial_settings > 0) {
+ memcpy(
+ setting_array,
+ options.http2_options->initial_settings_array,
+ options.http2_options->num_initial_settings * sizeof(struct aws_http2_setting));
+ user_data->original_http2_options.initial_settings_array = setting_array;
+ }
+
+ return user_data;
+
+on_error:
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(STATIC) Proxy connection failed to create user data with error %d(%s)",
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+
+ aws_http_proxy_user_data_destroy(user_data);
+
+ return NULL;
+}
+
+struct aws_http_proxy_user_data *aws_http_proxy_user_data_new_reset_clone(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_user_data *old_user_data) {
+
+ AWS_FATAL_ASSERT(old_user_data != NULL);
+
+ struct aws_http2_setting *setting_array = NULL;
+ struct aws_http_proxy_user_data *user_data = NULL;
+ aws_mem_acquire_many(
+ allocator,
+ 2,
+ &user_data,
+ sizeof(struct aws_http_proxy_user_data),
+ &setting_array,
+ old_user_data->original_http2_options.num_initial_settings * sizeof(struct aws_http2_setting));
+
+ AWS_ZERO_STRUCT(*user_data);
+ user_data->allocator = allocator;
+ user_data->state = AWS_PBS_SOCKET_CONNECT;
+ user_data->error_code = AWS_ERROR_SUCCESS;
+ user_data->connect_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ user_data->original_bootstrap = aws_client_bootstrap_acquire(old_user_data->original_bootstrap);
+ user_data->original_socket_options = old_user_data->original_socket_options;
+ user_data->original_manual_window_management = old_user_data->original_manual_window_management;
+ user_data->original_initial_window_size = old_user_data->original_initial_window_size;
+ user_data->prior_knowledge_http2 = old_user_data->prior_knowledge_http2;
+
+ user_data->original_host = aws_string_new_from_string(allocator, old_user_data->original_host);
+ if (user_data->original_host == NULL) {
+ goto on_error;
+ }
+
+ user_data->original_port = old_user_data->original_port;
+
+ user_data->proxy_config = aws_http_proxy_config_new_clone(allocator, old_user_data->proxy_config);
+ if (user_data->proxy_config == NULL) {
+ goto on_error;
+ }
+
+ user_data->proxy_negotiator = aws_http_proxy_negotiator_acquire(old_user_data->proxy_negotiator);
+ if (user_data->proxy_negotiator == NULL) {
+ goto on_error;
+ }
+
+ if (old_user_data->original_tls_options) {
+ /* clone tls options, but redirect user data to what we're creating */
+ user_data->original_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (user_data->original_tls_options == NULL ||
+ aws_tls_connection_options_copy(user_data->original_tls_options, old_user_data->original_tls_options)) {
+ goto on_error;
+ }
+
+ user_data->original_tls_options->user_data = user_data;
+ }
+
+ if (aws_http_alpn_map_init_copy(allocator, &user_data->alpn_string_map, &old_user_data->alpn_string_map)) {
+ goto on_error;
+ }
+
+ user_data->original_http_on_setup = old_user_data->original_http_on_setup;
+ user_data->original_http_on_shutdown = old_user_data->original_http_on_shutdown;
+ user_data->original_channel_on_setup = old_user_data->original_channel_on_setup;
+ user_data->original_channel_on_shutdown = old_user_data->original_channel_on_shutdown;
+ user_data->original_user_data = old_user_data->original_user_data;
+ user_data->original_http1_options = old_user_data->original_http1_options;
+ user_data->original_http2_options = old_user_data->original_http2_options;
+
+ /* keep a copy of the settings array if it's not NULL */
+ if (old_user_data->original_http2_options.num_initial_settings > 0) {
+ memcpy(
+ setting_array,
+ old_user_data->original_http2_options.initial_settings_array,
+ old_user_data->original_http2_options.num_initial_settings * sizeof(struct aws_http2_setting));
+ user_data->original_http2_options.initial_settings_array = setting_array;
+ }
+
+ return user_data;
+
+on_error:
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(STATIC) Proxy connection failed to create user data with error %d(%s)",
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+
+ aws_http_proxy_user_data_destroy(user_data);
+
+ return NULL;
+}
+
+/*
+ * Examines the proxy user data state and determines whether to make an http-interface setup callback
+ * or a raw channel setup callback
+ */
+static void s_do_on_setup_callback(
+ struct aws_http_proxy_user_data *proxy_ud,
+ struct aws_http_connection *connection,
+ int error_code) {
+ if (proxy_ud->original_http_on_setup) {
+ proxy_ud->original_http_on_setup(connection, error_code, proxy_ud->original_user_data);
+ proxy_ud->original_http_on_setup = NULL;
+ }
+
+ if (proxy_ud->original_channel_on_setup) {
+ struct aws_channel *channel = NULL;
+ if (connection != NULL) {
+ channel = aws_http_connection_get_channel(connection);
+ }
+ proxy_ud->original_channel_on_setup(
+ proxy_ud->original_bootstrap, error_code, channel, proxy_ud->original_user_data);
+ proxy_ud->original_channel_on_setup = NULL;
+ }
+}
+
+/*
+ * Examines the proxy user data state and determines whether to make an http-interface shutdown callback
+ * or a raw channel shutdown callback
+ */
+static void s_do_on_shutdown_callback(struct aws_http_proxy_user_data *proxy_ud, int error_code) {
+ AWS_FATAL_ASSERT(proxy_ud->proxy_connection);
+
+ if (proxy_ud->original_http_on_shutdown) {
+ AWS_FATAL_ASSERT(proxy_ud->final_connection);
+ proxy_ud->original_http_on_shutdown(proxy_ud->final_connection, error_code, proxy_ud->original_user_data);
+ proxy_ud->original_http_on_shutdown = NULL;
+ }
+
+ if (proxy_ud->original_channel_on_shutdown) {
+ struct aws_channel *channel = aws_http_connection_get_channel(proxy_ud->proxy_connection);
+ proxy_ud->original_channel_on_shutdown(
+ proxy_ud->original_bootstrap, error_code, channel, proxy_ud->original_user_data);
+ proxy_ud->original_channel_on_shutdown = NULL;
+ }
+}
+
+/*
+ * Connection callback used ONLY by forwarding http proxy connections. After this,
+ * the connection is live and the user is notified
+ */
+static void s_aws_http_on_client_connection_http_forwarding_proxy_setup_fn(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) {
+ struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+ s_do_on_setup_callback(proxy_ud, connection, error_code);
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_http_proxy_user_data_destroy(user_data);
+ } else {
+ /*
+ * The proxy connection and final connection are the same in forwarding proxy connections. This lets
+ * us unconditionally use fatal asserts on these being non-null regardless of proxy configuration.
+ */
+ proxy_ud->proxy_connection = connection;
+ proxy_ud->final_connection = connection;
+ proxy_ud->state = AWS_PBS_SUCCESS;
+ }
+}
+
+/*
+ * Connection shutdown callback used by both http and https proxy connections. Only invokes
+ * user shutdown if the connection was successfully established. Otherwise, it invokes
+ * the user setup function with an error.
+ */
+static void s_aws_http_on_client_connection_http_proxy_shutdown_fn(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) {
+
+ struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+ if (proxy_ud->state == AWS_PBS_SUCCESS) {
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Proxy connection (channel %p) shutting down.",
+ (void *)connection,
+ (void *)aws_http_connection_get_channel(connection));
+ s_do_on_shutdown_callback(proxy_ud, error_code);
+ } else {
+ int ec = error_code;
+ if (ec == AWS_ERROR_SUCCESS) {
+ ec = proxy_ud->error_code;
+ }
+ if (ec == AWS_ERROR_SUCCESS) {
+ ec = AWS_ERROR_UNKNOWN;
+ }
+
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Error %d while connecting to \"%s\" via proxy.",
+ (void *)connection,
+ ec,
+ (char *)proxy_ud->original_host->bytes);
+
+ s_do_on_setup_callback(proxy_ud, NULL, ec);
+ }
+
+ aws_http_proxy_user_data_destroy(user_data);
+}
+
+/*
+ * On-any-error entry point that releases all resources involved in establishing the proxy connection.
+ * This must not be invoked any time after a successful setup callback.
+ */
+static void s_aws_http_proxy_user_data_shutdown(struct aws_http_proxy_user_data *user_data) {
+
+ user_data->state = AWS_PBS_FAILURE;
+
+ if (user_data->proxy_connection == NULL) {
+ s_do_on_setup_callback(user_data, NULL, user_data->error_code);
+ aws_http_proxy_user_data_destroy(user_data);
+ return;
+ }
+
+ if (user_data->connect_stream) {
+ aws_http_stream_release(user_data->connect_stream);
+ user_data->connect_stream = NULL;
+ }
+
+ if (user_data->connect_request) {
+ aws_http_message_destroy(user_data->connect_request);
+ user_data->connect_request = NULL;
+ }
+
+ struct aws_http_connection *http_connection = user_data->proxy_connection;
+ user_data->proxy_connection = NULL;
+
+ aws_channel_shutdown(http_connection->channel_slot->channel, user_data->error_code);
+ aws_http_connection_release(http_connection);
+}
+
+static struct aws_http_message *s_build_h1_proxy_connect_request(struct aws_http_proxy_user_data *user_data) {
+ struct aws_http_message *request = aws_http_message_new_request(user_data->allocator);
+ if (request == NULL) {
+ return NULL;
+ }
+
+ struct aws_byte_buf path_buffer;
+ AWS_ZERO_STRUCT(path_buffer);
+
+ if (aws_http_message_set_request_method(request, aws_http_method_connect)) {
+ goto on_error;
+ }
+
+ if (aws_byte_buf_init(&path_buffer, user_data->allocator, user_data->original_host->len + 10)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor host_cursor = aws_byte_cursor_from_string(user_data->original_host);
+ if (aws_byte_buf_append(&path_buffer, &host_cursor)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor colon_cursor = aws_byte_cursor_from_c_str(":");
+ if (aws_byte_buf_append(&path_buffer, &colon_cursor)) {
+ goto on_error;
+ }
+
+ char port_str[20] = "\0";
+ snprintf(port_str, sizeof(port_str), "%d", (int)user_data->original_port);
+ struct aws_byte_cursor port_cursor = aws_byte_cursor_from_c_str(port_str);
+ if (aws_byte_buf_append(&path_buffer, &port_cursor)) {
+ goto on_error;
+ }
+
+ struct aws_byte_cursor path_cursor = aws_byte_cursor_from_array(path_buffer.buffer, path_buffer.len);
+ if (aws_http_message_set_request_path(request, path_cursor)) {
+ goto on_error;
+ }
+
+ struct aws_http_header host_header = {
+ .name = aws_byte_cursor_from_string(s_host_header_name),
+ .value = aws_byte_cursor_from_array(path_buffer.buffer, path_buffer.len),
+ };
+ if (aws_http_message_add_header(request, host_header)) {
+ goto on_error;
+ }
+
+ struct aws_http_header keep_alive_header = {
+ .name = aws_byte_cursor_from_string(s_proxy_connection_header_name),
+ .value = aws_byte_cursor_from_string(s_proxy_connection_header_value),
+ };
+ if (aws_http_message_add_header(request, keep_alive_header)) {
+ goto on_error;
+ }
+
+ aws_byte_buf_clean_up(&path_buffer);
+
+ return request;
+
+on_error:
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) TLS proxy connection failed to build CONNECT request with error %d(%s)",
+ (void *)user_data->proxy_connection,
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+
+ aws_byte_buf_clean_up(&path_buffer);
+ aws_http_message_destroy(request);
+
+ return NULL;
+}
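+
+/*
+ * For reference, the request assembled above has roughly this shape on the wire (host and port
+ * here are illustrative placeholders, not values taken from the code):
+ *
+ *     CONNECT origin.example.com:443 HTTP/1.1
+ *     Host: origin.example.com:443
+ *     Proxy-Connection: Keep-Alive
+ */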
+
+/*
+ * Builds the CONNECT request issued after proxy connection establishment, during the creation of
+ * tls-enabled proxy connections.
+ */
+static struct aws_http_message *s_build_proxy_connect_request(struct aws_http_proxy_user_data *user_data) {
+ struct aws_http_connection *proxy_connection = user_data->proxy_connection;
+ switch (proxy_connection->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ return s_build_h1_proxy_connect_request(user_data);
+ default:
+ aws_raise_error(AWS_ERROR_HTTP_UNSUPPORTED_PROTOCOL);
+ return NULL;
+ }
+}
+
+static int s_aws_http_on_incoming_body_tunnel_proxy(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data) {
+ (void)stream;
+
+ struct aws_http_proxy_user_data *context = user_data;
+ aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body =
+ context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_incoming_body_callback;
+ if (on_incoming_body != NULL) {
+ (*on_incoming_body)(context->proxy_negotiator, data);
+ }
+
+ aws_http_stream_update_window(stream, data->len);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_http_on_response_headers_tunnel_proxy(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data) {
+ (void)stream;
+
+ struct aws_http_proxy_user_data *context = user_data;
+ aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers =
+ context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_incoming_headers_callback;
+ if (on_incoming_headers != NULL) {
+ (*on_incoming_headers)(context->proxy_negotiator, header_block, header_array, num_headers);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Headers done callback for the CONNECT request made during tls proxy connections
+ */
+static int s_aws_http_on_incoming_header_block_done_tunnel_proxy(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data) {
+
+ struct aws_http_proxy_user_data *context = user_data;
+
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ int status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ aws_http_stream_get_incoming_response_status(stream, &status_code);
+ context->connect_status_code = (enum aws_http_status_code)status_code;
+ if (context->connect_status_code != AWS_HTTP_STATUS_CODE_200_OK) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Proxy CONNECT request failed with status code %d",
+ (void *)context->proxy_connection,
+ context->connect_status_code);
+ context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ }
+
+ aws_http_proxy_negotiator_connect_status_fn *on_status =
+ context->proxy_negotiator->strategy_vtable.tunnelling_vtable->on_status_callback;
+ if (on_status != NULL) {
+ (*on_status)(context->proxy_negotiator, context->connect_status_code);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_http_apply_http_connection_to_proxied_channel(struct aws_http_proxy_user_data *context) {
+ AWS_FATAL_ASSERT(context->proxy_connection != NULL);
+ AWS_FATAL_ASSERT(context->original_http_on_setup != NULL);
+
+ struct aws_channel *channel = aws_http_connection_get_channel(context->proxy_connection);
+
+ struct aws_http_connection *connection = aws_http_connection_new_channel_handler(
+ context->allocator,
+ channel,
+ false,
+ context->original_tls_options != NULL,
+ context->original_manual_window_management,
+ context->prior_knowledge_http2,
+ context->original_initial_window_size,
+ context->alpn_string_map.p_impl == NULL ? NULL : &context->alpn_string_map,
+ &context->original_http1_options,
+ &context->original_http2_options,
+ context->original_user_data);
+ if (connection == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "static: Failed to create the client connection object, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: " PRInSTR " client connection established.",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(aws_http_version_to_str(connection->http_version)));
+
+ context->final_connection = connection;
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_do_final_proxied_channel_setup(struct aws_http_proxy_user_data *proxy_ud) {
+ if (proxy_ud->original_http_on_setup != NULL) {
+ /*
+ * If we're transitioning to http with http setup/shutdown callbacks, try to apply a new http connection to
+ * the channel
+ */
+ if (s_aws_http_apply_http_connection_to_proxied_channel(proxy_ud)) {
+ proxy_ud->error_code = aws_last_error();
+ s_aws_http_proxy_user_data_shutdown(proxy_ud);
+ return;
+ }
+
+ s_do_on_setup_callback(proxy_ud, proxy_ud->final_connection, AWS_ERROR_SUCCESS);
+ } else {
+ /*
+ * Otherwise invoke setup directly (which will end up being channel setup)
+ */
+ s_do_on_setup_callback(proxy_ud, proxy_ud->proxy_connection, AWS_ERROR_SUCCESS);
+ }
+
+ /* Tell user of successful connection. */
+ proxy_ud->state = AWS_PBS_SUCCESS;
+}
+
+/*
+ * Tls negotiation callback for tls proxy connections
+ */
+static void s_on_origin_server_tls_negotation_result(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ int error_code,
+ void *user_data) {
+
+ (void)handler;
+ (void)slot;
+
+ struct aws_http_proxy_user_data *context = user_data;
+ if (error_code != AWS_ERROR_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Proxy connection failed origin server TLS negotiation with error %d(%s)",
+ (void *)context->proxy_connection,
+ error_code,
+ aws_error_str(error_code));
+ context->error_code = error_code;
+ s_aws_http_proxy_user_data_shutdown(context);
+ return;
+ }
+
+ s_do_final_proxied_channel_setup(context);
+}
+
+static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_data);
+static int s_make_proxy_connect_request(struct aws_http_proxy_user_data *user_data);
+
+static void s_zero_callbacks(struct aws_http_proxy_user_data *proxy_ud) {
+ proxy_ud->original_http_on_shutdown = NULL;
+ proxy_ud->original_http_on_setup = NULL;
+ proxy_ud->original_channel_on_shutdown = NULL;
+ proxy_ud->original_channel_on_setup = NULL;
+}
+
+/*
+ * Stream done callback for the CONNECT request made during tls proxy connections
+ */
+static void s_aws_http_on_stream_complete_tunnel_proxy(
+ struct aws_http_stream *stream,
+ int error_code,
+ void *user_data) {
+ struct aws_http_proxy_user_data *context = user_data;
+ AWS_FATAL_ASSERT(stream == context->connect_stream);
+
+ if (context->error_code == AWS_ERROR_SUCCESS && error_code != AWS_ERROR_SUCCESS) {
+ context->error_code = error_code;
+ }
+
+ if (context->error_code != AWS_ERROR_SUCCESS) {
+ context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ if (context->connect_status_code == AWS_HTTP_STATUS_CODE_407_PROXY_AUTHENTICATION_REQUIRED) {
+ enum aws_http_proxy_negotiation_retry_directive retry_directive =
+ aws_http_proxy_negotiator_get_retry_directive(context->proxy_negotiator);
+
+ if (retry_directive == AWS_HPNRD_NEW_CONNECTION) {
+ struct aws_http_proxy_user_data *new_context =
+ aws_http_proxy_user_data_new_reset_clone(context->allocator, context);
+ if (new_context != NULL && s_create_tunneling_connection(new_context) == AWS_OP_SUCCESS) {
+ /*
+ * We successfully kicked off a new connection. By NULLing the callbacks on the old one, we can
+ * shut it down quietly without the user being notified. The new connection will notify the user
+ * based on its success or failure.
+ */
+ s_zero_callbacks(context);
+ context->error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE;
+ }
+ } else if (retry_directive == AWS_HPNRD_CURRENT_CONNECTION) {
+ context->error_code = AWS_ERROR_SUCCESS;
+ if (s_make_proxy_connect_request(context) == AWS_OP_SUCCESS) {
+ return;
+ }
+ }
+ }
+
+ s_aws_http_proxy_user_data_shutdown(context);
+ return;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Proxy connection made successful CONNECT request to \"%s\" via proxy",
+ (void *)context->proxy_connection,
+ context->original_host->bytes);
+
+ /*
+ * We're finished with these, let's release
+ */
+ aws_http_stream_release(stream);
+ context->connect_stream = NULL;
+ aws_http_message_destroy(context->connect_request);
+ context->connect_request = NULL;
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION, "(%p) Beginning TLS negotiation through proxy", (void *)context->proxy_connection);
+
+ if (context->original_tls_options != NULL) {
+ /*
+ * Perform TLS negotiation to the origin server through proxy
+ */
+ context->original_tls_options->on_negotiation_result = s_on_origin_server_tls_negotation_result;
+
+ context->state = AWS_PBS_TLS_NEGOTIATION;
+ struct aws_channel *channel = aws_http_connection_get_channel(context->proxy_connection);
+
+ struct aws_channel_slot *last_slot = aws_channel_get_first_slot(channel);
+ while (last_slot->adj_right != NULL) {
+ last_slot = last_slot->adj_right;
+ }
+
+ if (s_vtable->setup_client_tls(last_slot, context->original_tls_options)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Proxy connection failed to start TLS negotiation with error %d(%s)",
+ (void *)context->proxy_connection,
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+ s_aws_http_proxy_user_data_shutdown(context);
+ return;
+ }
+ } else {
+ s_do_final_proxied_channel_setup(context);
+ }
+}
+
+static void s_terminate_tunneling_connect(
+ struct aws_http_message *message,
+ int error_code,
+ void *internal_proxy_user_data) {
+ (void)message;
+
+ struct aws_http_proxy_user_data *proxy_ud = internal_proxy_user_data;
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(%p) Tunneling proxy connection failed to create request stream for CONNECT request with error %d(%s)",
+ (void *)proxy_ud->proxy_connection,
+ error_code,
+ aws_error_str(error_code));
+
+ proxy_ud->error_code = error_code;
+ s_aws_http_proxy_user_data_shutdown(proxy_ud);
+}
+
+static void s_continue_tunneling_connect(struct aws_http_message *message, void *internal_proxy_user_data) {
+ struct aws_http_proxy_user_data *proxy_ud = internal_proxy_user_data;
+
+ struct aws_http_make_request_options request_options = {
+ .self_size = sizeof(request_options),
+ .request = message,
+ .user_data = proxy_ud,
+ .on_response_headers = s_aws_http_on_response_headers_tunnel_proxy,
+ .on_response_header_block_done = s_aws_http_on_incoming_header_block_done_tunnel_proxy,
+ .on_response_body = s_aws_http_on_incoming_body_tunnel_proxy,
+ .on_complete = s_aws_http_on_stream_complete_tunnel_proxy,
+ };
+
+ if (proxy_ud->connect_stream != NULL) {
+ aws_http_stream_release(proxy_ud->connect_stream);
+ }
+
+ proxy_ud->connect_stream = aws_http_connection_make_request(proxy_ud->proxy_connection, &request_options);
+ if (proxy_ud->connect_stream == NULL) {
+ goto on_error;
+ }
+
+ aws_http_stream_activate(proxy_ud->connect_stream);
+
+ return;
+
+on_error:
+
+ s_aws_http_proxy_user_data_shutdown(proxy_ud);
+}
+
+/*
+ * Issues a CONNECT request on an http connection
+ */
+static int s_make_proxy_connect_request(struct aws_http_proxy_user_data *user_data) {
+ if (user_data->connect_request != NULL) {
+ aws_http_message_destroy(user_data->connect_request);
+ user_data->connect_request = NULL;
+ }
+
+ user_data->connect_request = s_build_proxy_connect_request(user_data);
+ if (user_data->connect_request == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ (*user_data->proxy_negotiator->strategy_vtable.tunnelling_vtable->connect_request_transform)(
+ user_data->proxy_negotiator,
+ user_data->connect_request,
+ s_terminate_tunneling_connect,
+ s_continue_tunneling_connect,
+ user_data);
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Connection setup callback for tunneling proxy connections.
+ */
+static void s_aws_http_on_client_connection_http_tunneling_proxy_setup_fn(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) {
+
+ struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+ proxy_ud->error_code = error_code;
+ if (error_code != AWS_ERROR_SUCCESS) {
+ goto on_error;
+ }
+
+ AWS_LOGF_INFO(AWS_LS_HTTP_CONNECTION, "(%p) Making CONNECT request to proxy", (void *)proxy_ud->proxy_connection);
+
+ proxy_ud->proxy_connection = connection;
+ proxy_ud->state = AWS_PBS_HTTP_CONNECT;
+ if (s_make_proxy_connect_request(proxy_ud)) {
+ goto on_error;
+ }
+
+ return;
+
+on_error:
+
+ s_aws_http_proxy_user_data_shutdown(proxy_ud);
+}
+
+/*
+ * Checks for the special case where a request is an OPTIONS request with '*'
+ * as its path and no query params
+ */
+static bool s_is_star_path_options_method(const struct aws_http_message *request) {
+ struct aws_byte_cursor method_cursor;
+ if (aws_http_message_get_request_method(request, &method_cursor)) {
+ return false;
+ }
+
+ struct aws_byte_cursor options_cursor = aws_byte_cursor_from_string(s_options_method);
+ if (!aws_byte_cursor_eq_ignore_case(&method_cursor, &options_cursor)) {
+ return false;
+ }
+
+ struct aws_byte_cursor path_cursor;
+ if (aws_http_message_get_request_path(request, &path_cursor)) {
+ return false;
+ }
+
+ struct aws_byte_cursor star_cursor = aws_byte_cursor_from_string(s_star_path);
+ if (!aws_byte_cursor_eq_ignore_case(&path_cursor, &star_cursor)) {
+ return false;
+ }
+
+ return true;
+}
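+
+/*
+ * For example, this matches the asterisk-form request line "OPTIONS * HTTP/1.1" (RFC 7230, Section 5.3.4),
+ * but not, e.g., "OPTIONS /index.html HTTP/1.1".
+ */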
+
+/*
+ * Modifies a request's URI by transforming it to absolute form according to
+ * Section 5.3.2 of RFC 7230
+ *
+ * We do this by parsing the existing uri and then rebuilding it as an
+ * absolute resource path (using the original connection options)
+ */
+int aws_http_rewrite_uri_for_proxy_request(
+ struct aws_http_message *request,
+ struct aws_http_proxy_user_data *proxy_user_data) {
+ int result = AWS_OP_ERR;
+
+ struct aws_uri target_uri;
+ AWS_ZERO_STRUCT(target_uri);
+
+ struct aws_byte_cursor path_cursor;
+ AWS_ZERO_STRUCT(path_cursor);
+
+ if (aws_http_message_get_request_path(request, &path_cursor)) {
+ goto done;
+ }
+
+ /* Pull out the original path/query */
+ struct aws_uri uri;
+ if (aws_uri_init_parse(&uri, proxy_user_data->allocator, &path_cursor)) {
+ goto done;
+ }
+
+ const struct aws_byte_cursor *actual_path_cursor = aws_uri_path(&uri);
+ const struct aws_byte_cursor *actual_query_cursor = aws_uri_query_string(&uri);
+
+ /* Now rebuild the URI with the scheme, host, and port substituted in from the original connection options */
+ struct aws_uri_builder_options target_uri_builder;
+ AWS_ZERO_STRUCT(target_uri_builder);
+ target_uri_builder.scheme = aws_http_scheme_http;
+ target_uri_builder.path = *actual_path_cursor;
+ target_uri_builder.host_name = aws_byte_cursor_from_string(proxy_user_data->original_host);
+ target_uri_builder.port = proxy_user_data->original_port;
+ target_uri_builder.query_string = *actual_query_cursor;
+
+ if (aws_uri_init_from_builder_options(&target_uri, proxy_user_data->allocator, &target_uri_builder)) {
+ goto done;
+ }
+
+ struct aws_byte_cursor full_target_uri =
+ aws_byte_cursor_from_array(target_uri.uri_str.buffer, target_uri.uri_str.len);
+
+ /*
+ * Per RFC 7230, Section 5.3.4, a star-pathed OPTIONS request made through a proxy MUST be transformed (at the last
+ * proxy) back into a star-pathed request if the proxy request has an empty path and no query string. We want to
+ * support this behavior, so on our side we need to make sure that star-pathed OPTIONS requests get translated into
+ * OPTIONS requests with the authority as the URI and an empty path-query.
+ *
+ * Our URI transform always ends with a '/', which is technically not an empty path. To address this, the easiest
+ * thing to do is to detect whether the request was originally a star-pathed OPTIONS request and, if so, drop the
+ * final '/' from the path.
+ */
+ if (s_is_star_path_options_method(request)) {
+ if (full_target_uri.len > 0 && *(full_target_uri.ptr + full_target_uri.len - 1) == '/') {
+ full_target_uri.len -= 1;
+ }
+ }
+
+ /* mutate the request with the new path value */
+ if (aws_http_message_set_request_path(request, full_target_uri)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_uri_clean_up(&target_uri);
+ aws_uri_clean_up(&uri);
+
+ return result;
+}
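+
+/*
+ * Illustrative example (hypothetical values): a request created with the origin-form target
+ * "/pub/WWW/TheProject.html?lang=en" on a connection originally made to "www.example.org" is rewritten to an
+ * absolute-form target along the lines of "http://www.example.org:80/pub/WWW/TheProject.html?lang=en" before
+ * being forwarded to the proxy; a star-pathed OPTIONS request instead ends up with just the scheme and
+ * authority, e.g. "http://www.example.org:80".
+ */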
+
+/*
+ * Plaintext proxy request transformation function
+ *
+ * Rewrites the target uri to absolute form and injects any desired headers
+ */
+static int s_proxy_http_request_transform(struct aws_http_message *request, void *user_data) {
+ struct aws_http_proxy_user_data *proxy_ud = user_data;
+
+ if (aws_http_rewrite_uri_for_proxy_request(request, proxy_ud)) {
+ return AWS_OP_ERR;
+ }
+
+ if ((*proxy_ud->proxy_negotiator->strategy_vtable.forwarding_vtable->forward_request_transform)(
+ proxy_ud->proxy_negotiator, request)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Top-level function to route a connection request through a proxy server, with no channel security
+ */
+static int s_aws_http_client_connect_via_forwarding_proxy(const struct aws_http_client_connection_options *options) {
+ AWS_FATAL_ASSERT(options->tls_options == NULL);
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "(STATIC) Connecting to \"" PRInSTR "\" via proxy \"" PRInSTR "\"",
+ AWS_BYTE_CURSOR_PRI(options->host_name),
+ AWS_BYTE_CURSOR_PRI(options->proxy_options->host));
+
+ /* Create a wrapper user data that contains all proxy-related information, state, and user-facing callbacks */
+ struct aws_http_proxy_user_data *proxy_user_data =
+ aws_http_proxy_user_data_new(options->allocator, options, NULL, NULL);
+ if (proxy_user_data == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+ /* Fill in a new connection options pointing at the proxy */
+ struct aws_http_client_connection_options options_copy = *options;
+
+ options_copy.proxy_options = NULL;
+ options_copy.host_name = options->proxy_options->host;
+ options_copy.port = options->proxy_options->port;
+ options_copy.user_data = proxy_user_data;
+ options_copy.on_setup = s_aws_http_on_client_connection_http_forwarding_proxy_setup_fn;
+ options_copy.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn;
+ options_copy.tls_options = options->proxy_options->tls_options;
+ options_copy.requested_event_loop = options->requested_event_loop;
+ options_copy.prior_knowledge_http2 = false; /* ToDo, expose the protocol specific config for proxy connection. */
+
+ int result = aws_http_client_connect_internal(&options_copy, s_proxy_http_request_transform);
+ if (result == AWS_OP_ERR) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(STATIC) Proxy http connection failed client connect with error %d(%s)",
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+
+ aws_http_proxy_user_data_destroy(proxy_user_data);
+ }
+
+ return result;
+}
+
+static int s_create_tunneling_connection(struct aws_http_proxy_user_data *user_data) {
+ struct aws_http_client_connection_options connect_options;
+ AWS_ZERO_STRUCT(connect_options);
+
+ connect_options.self_size = sizeof(struct aws_http_client_connection_options);
+ connect_options.allocator = user_data->allocator;
+ connect_options.bootstrap = user_data->original_bootstrap;
+ connect_options.host_name = aws_byte_cursor_from_buf(&user_data->proxy_config->host);
+ connect_options.port = user_data->proxy_config->port;
+ connect_options.socket_options = &user_data->original_socket_options;
+ connect_options.tls_options = user_data->proxy_config->tls_options;
+ connect_options.monitoring_options = NULL; /* ToDo */
+ connect_options.manual_window_management = user_data->original_manual_window_management;
+ connect_options.initial_window_size = user_data->original_initial_window_size;
+ connect_options.user_data = user_data;
+ connect_options.on_setup = s_aws_http_on_client_connection_http_tunneling_proxy_setup_fn;
+ connect_options.on_shutdown = s_aws_http_on_client_connection_http_proxy_shutdown_fn;
+ connect_options.http1_options = NULL; /* ToDo, expose the protocol specific config for proxy connection. */
+ connect_options.http2_options = NULL; /* ToDo */
+ connect_options.requested_event_loop = user_data->requested_event_loop;
+
+ int result = aws_http_client_connect(&connect_options);
+ if (result == AWS_OP_ERR) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "(STATIC) Proxy tunnel connection failed client connect with error %d(%s)",
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+ aws_http_proxy_user_data_destroy(user_data);
+ }
+
+ return result;
+}
+
+/*
+ * Top-level function to route a connection through a proxy server via a CONNECT request
+ */
+static int s_aws_http_client_connect_via_tunneling_proxy(
+ const struct aws_http_client_connection_options *options,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_setup,
+ aws_client_bootstrap_on_channel_event_fn *on_channel_shutdown) {
+ AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+ AWS_LOGF_INFO(
+ AWS_LS_HTTP_CONNECTION,
+ "(STATIC) Connecting to \"" PRInSTR "\" through a tunnel via proxy \"" PRInSTR "\"",
+ AWS_BYTE_CURSOR_PRI(options->host_name),
+ AWS_BYTE_CURSOR_PRI(options->proxy_options->host));
+
+ /* Create a wrapper user data that contains all proxy-related information, state, and user-facing callbacks */
+ struct aws_http_proxy_user_data *user_data =
+ aws_http_proxy_user_data_new(options->allocator, options, on_channel_setup, on_channel_shutdown);
+ if (user_data == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ return s_create_tunneling_connection(user_data);
+}
+
+static enum aws_http_proxy_connection_type s_determine_proxy_connection_type(
+ enum aws_http_proxy_connection_type proxy_connection_type,
+ bool is_tls_connection) {
+ if (proxy_connection_type != AWS_HPCT_HTTP_LEGACY) {
+ return proxy_connection_type;
+ }
+
+ if (is_tls_connection) {
+ return AWS_HPCT_HTTP_TUNNEL;
+ } else {
+ return AWS_HPCT_HTTP_FORWARD;
+ }
+}
+
+static int s_proxy_uri_init_from_env_variable(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *options,
+ struct aws_uri *proxy_uri,
+ bool *found) {
+ struct aws_string *proxy_uri_string = NULL;
+ *found = false;
+ if (options->tls_options) {
+ if (aws_get_environment_value(allocator, s_https_proxy_env_var_low, &proxy_uri_string) == AWS_OP_SUCCESS &&
+ proxy_uri_string != NULL) {
+ AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "https_proxy environment found");
+ } else if (
+ aws_get_environment_value(allocator, s_https_proxy_env_var, &proxy_uri_string) == AWS_OP_SUCCESS &&
+ proxy_uri_string != NULL) {
+ AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "HTTPS_PROXY environment found");
+ } else {
+ return AWS_OP_SUCCESS;
+ }
+ } else {
+ if (aws_get_environment_value(allocator, s_http_proxy_env_var_low, &proxy_uri_string) == AWS_OP_SUCCESS &&
+ proxy_uri_string != NULL) {
+ AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "http_proxy environment found");
+ } else if (
+ aws_get_environment_value(allocator, s_http_proxy_env_var, &proxy_uri_string) == AWS_OP_SUCCESS &&
+ proxy_uri_string != NULL) {
+ AWS_LOGF_DEBUG(AWS_LS_HTTP_CONNECTION, "HTTP_PROXY environment found");
+ } else {
+ return AWS_OP_SUCCESS;
+ }
+ }
+ struct aws_byte_cursor proxy_uri_cursor = aws_byte_cursor_from_string(proxy_uri_string);
+ if (aws_uri_init_parse(proxy_uri, allocator, &proxy_uri_cursor)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Could not parse found proxy URI.");
+ aws_string_destroy(proxy_uri_string);
+ return AWS_OP_ERR;
+ }
+ *found = true;
+ aws_string_destroy(proxy_uri_string);
+ return AWS_OP_SUCCESS;
+}
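+
+/*
+ * For example (hypothetical values), with the environment containing
+ *
+ *     https_proxy=http://user:pass@proxy.example.com:8080
+ *
+ * a TLS-enabled connection picks up that URI; when the resolved connection type is tunneling, the CONNECT goes
+ * through proxy.example.com:8080, and the embedded user:pass pair is turned into a basic-auth proxy strategy in
+ * s_connect_proxy_via_env_variable below.
+ */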
+
+static int s_connect_proxy(const struct aws_http_client_connection_options *options) {
+ if (aws_http_options_validate_proxy_configuration(options)) {
+ return AWS_OP_ERR;
+ }
+
+ enum aws_http_proxy_connection_type proxy_connection_type =
+ s_determine_proxy_connection_type(options->proxy_options->connection_type, options->tls_options != NULL);
+
+ switch (proxy_connection_type) {
+ case AWS_HPCT_HTTP_FORWARD:
+ return s_aws_http_client_connect_via_forwarding_proxy(options);
+
+ case AWS_HPCT_HTTP_TUNNEL:
+ return s_aws_http_client_connect_via_tunneling_proxy(options, NULL, NULL);
+
+ default:
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+}
+
+static int s_setup_proxy_tls_env_variable(
+ const struct aws_http_client_connection_options *options,
+ struct aws_tls_connection_options *default_tls_connection_options,
+ struct aws_http_proxy_options *proxy_options,
+ struct aws_uri *proxy_uri) {
+ (void)default_tls_connection_options;
+ (void)proxy_uri;
+ if (options->proxy_ev_settings->tls_options) {
+ proxy_options->tls_options = options->proxy_ev_settings->tls_options;
+ } else {
+#ifdef BYO_CRYPTO
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "Failed making default TLS context because of BYO_CRYPTO, set up the tls_options for proxy_env_settings to "
+ "make it work.");
+ return AWS_OP_ERR;
+#else
+ struct aws_tls_ctx *tls_ctx = NULL;
+ struct aws_tls_ctx_options tls_ctx_options;
+ AWS_ZERO_STRUCT(tls_ctx_options);
+ /* create default TLS options */
+ aws_tls_ctx_options_init_default_client(&tls_ctx_options, options->allocator);
+ struct aws_string *proxy_no_verify_peer_string = NULL;
+ if (aws_get_environment_value(
+ options->allocator, s_proxy_no_verify_peer_env_var, &proxy_no_verify_peer_string) == AWS_OP_SUCCESS &&
+ proxy_no_verify_peer_string != NULL) {
+ /* turn off peer verification if set via the environment variable. Mostly for testing. */
+ aws_tls_ctx_options_set_verify_peer(&tls_ctx_options, false);
+ aws_string_destroy(proxy_no_verify_peer_string);
+ }
+ tls_ctx = aws_tls_client_ctx_new(options->allocator, &tls_ctx_options);
+ aws_tls_ctx_options_clean_up(&tls_ctx_options);
+ if (!tls_ctx) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Failed to create default TLS context.");
+ return AWS_OP_ERR;
+ }
+ aws_tls_connection_options_init_from_ctx(default_tls_connection_options, tls_ctx);
+ /* tls options hold a ref to the ctx */
+ aws_tls_ctx_release(tls_ctx);
+ if (aws_tls_connection_options_set_server_name(
+ default_tls_connection_options, options->allocator, &proxy_uri->host_name)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_CONNECTION, "Failed set server name for TLS connection options.");
+ return AWS_OP_ERR;
+ }
+ proxy_options->tls_options = default_tls_connection_options;
+#endif
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_connect_proxy_via_env_variable(const struct aws_http_client_connection_options *options) {
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+ struct aws_uri proxy_uri;
+ AWS_ZERO_STRUCT(proxy_uri);
+ struct aws_tls_connection_options default_tls_connection_options;
+ AWS_ZERO_STRUCT(default_tls_connection_options);
+ bool found = false;
+ bool success = false;
+ if (s_proxy_uri_init_from_env_variable(options->allocator, options, &proxy_uri, &found)) {
+ /* Environment variable is set, but we failed to parse it */
+ goto done;
+ }
+ if (found) {
+ proxy_options.host = proxy_uri.host_name;
+ proxy_options.port = proxy_uri.port;
+ proxy_options.connection_type = options->proxy_ev_settings->connection_type;
+ if (proxy_options.connection_type == AWS_HPCT_HTTP_LEGACY) {
+ if (options->tls_options) {
+ /* Use tunneling when the main connection uses TLS. */
+ proxy_options.connection_type = AWS_HPCT_HTTP_TUNNEL;
+ } else {
+ /* Use a forwarding proxy when the main connection uses clear text. */
+ proxy_options.connection_type = AWS_HPCT_HTTP_FORWARD;
+ }
+ }
+ if (aws_byte_cursor_eq_ignore_case(&proxy_uri.scheme, &aws_http_scheme_https)) {
+ if (s_setup_proxy_tls_env_variable(options, &default_tls_connection_options, &proxy_options, &proxy_uri)) {
+ goto done;
+ }
+ }
+ /* Support basic authentication. */
+ if (proxy_uri.password.len) {
+ /* A non-empty password is set */
+ struct aws_http_proxy_strategy_basic_auth_options config = {
+ .proxy_connection_type = proxy_options.connection_type,
+ .user_name = proxy_uri.user,
+ .password = proxy_uri.password,
+ };
+ proxy_options.proxy_strategy = aws_http_proxy_strategy_new_basic_auth(options->allocator, &config);
+ }
+ } else {
+ success = true;
+ goto done;
+ }
+ struct aws_http_client_connection_options copied_options = *options;
+ copied_options.proxy_options = &proxy_options;
+ if (s_connect_proxy(&copied_options)) {
+ goto done;
+ }
+ success = true;
+done:
+ aws_tls_connection_options_clean_up(&default_tls_connection_options);
+ aws_http_proxy_strategy_release(proxy_options.proxy_strategy);
+ aws_uri_clean_up(&proxy_uri);
+ if (success && !found) {
+ /* Succeeded, but no environment variable was found. Connect without a proxy. */
+ return aws_http_client_connect_internal(options, NULL);
+ }
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+/*
+ * Dispatches a proxy-enabled connection request to the appropriate top-level connection function
+ */
+int aws_http_client_connect_via_proxy(const struct aws_http_client_connection_options *options) {
+ if (options->proxy_options == NULL && options->proxy_ev_settings &&
+ options->proxy_ev_settings->env_var_type == AWS_HPEV_ENABLE) {
+ return s_connect_proxy_via_env_variable(options);
+ }
+ return s_connect_proxy(options);
+}
+
+static struct aws_http_proxy_config *s_aws_http_proxy_config_new(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *proxy_options,
+ enum aws_http_proxy_connection_type override_proxy_connection_type) {
+ AWS_FATAL_ASSERT(proxy_options != NULL);
+
+ struct aws_http_proxy_config *config = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_config));
+ if (config == NULL) {
+ return NULL;
+ }
+
+ config->allocator = allocator;
+ config->connection_type = override_proxy_connection_type;
+
+ if (aws_byte_buf_init_copy_from_cursor(&config->host, allocator, proxy_options->host)) {
+ goto on_error;
+ }
+
+ if (proxy_options->tls_options) {
+ config->tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (aws_tls_connection_options_copy(config->tls_options, proxy_options->tls_options)) {
+ goto on_error;
+ }
+ }
+
+ config->port = proxy_options->port;
+
+ if (proxy_options->proxy_strategy != NULL) {
+ config->proxy_strategy = aws_http_proxy_strategy_acquire(proxy_options->proxy_strategy);
+ } else if (proxy_options->auth_type == AWS_HPAT_BASIC) {
+ struct aws_http_proxy_strategy_basic_auth_options basic_config;
+ AWS_ZERO_STRUCT(basic_config);
+
+ basic_config.proxy_connection_type = override_proxy_connection_type;
+ basic_config.user_name = proxy_options->auth_username;
+ basic_config.password = proxy_options->auth_password;
+
+ config->proxy_strategy = aws_http_proxy_strategy_new_basic_auth(allocator, &basic_config);
+ }
+
+ if (config->proxy_strategy == NULL) {
+ switch (override_proxy_connection_type) {
+ case AWS_HPCT_HTTP_FORWARD:
+ config->proxy_strategy = aws_http_proxy_strategy_new_forwarding_identity(allocator);
+ break;
+
+ case AWS_HPCT_HTTP_TUNNEL:
+ config->proxy_strategy = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator);
+ break;
+
+ default:
+ break;
+ }
+
+ if (config->proxy_strategy == NULL) {
+ goto on_error;
+ }
+ }
+
+ return config;
+
+on_error:
+
+ aws_http_proxy_config_destroy(config);
+
+ return NULL;
+}
+
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_connection_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_client_connection_options *options) {
+ AWS_FATAL_ASSERT(options != NULL);
+ AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+ return s_aws_http_proxy_config_new(
+ allocator,
+ options->proxy_options,
+ s_determine_proxy_connection_type(options->proxy_options->connection_type, options->tls_options != NULL));
+}
+
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_manager_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_connection_manager_options *options) {
+ AWS_FATAL_ASSERT(options != NULL);
+ AWS_FATAL_ASSERT(options->proxy_options != NULL);
+
+ return s_aws_http_proxy_config_new(
+ allocator,
+ options->proxy_options,
+ s_determine_proxy_connection_type(
+ options->proxy_options->connection_type, options->tls_connection_options != NULL));
+}
+
+struct aws_http_proxy_config *aws_http_proxy_config_new_tunneling_from_proxy_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *proxy_options) {
+
+ return s_aws_http_proxy_config_new(allocator, proxy_options, AWS_HPCT_HTTP_TUNNEL);
+}
+
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *proxy_options) {
+ if (proxy_options->connection_type == AWS_HPCT_HTTP_LEGACY) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_PROXY_NEGOTIATION, "LEGACY type is not supported to create proxy config");
+ return NULL;
+ }
+
+ return s_aws_http_proxy_config_new(allocator, proxy_options, proxy_options->connection_type);
+}
+
+struct aws_http_proxy_config *aws_http_proxy_config_new_from_proxy_options_with_tls_info(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_options *proxy_options,
+ bool is_tls_connection) {
+ AWS_FATAL_ASSERT(proxy_options != NULL);
+
+ return s_aws_http_proxy_config_new(
+ allocator, proxy_options, s_determine_proxy_connection_type(proxy_options->connection_type, is_tls_connection));
+}
+
+struct aws_http_proxy_config *aws_http_proxy_config_new_clone(
+ struct aws_allocator *allocator,
+ const struct aws_http_proxy_config *proxy_config) {
+
+ AWS_FATAL_ASSERT(proxy_config != NULL);
+
+ struct aws_http_proxy_config *config = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_config));
+ if (config == NULL) {
+ return NULL;
+ }
+
+ config->connection_type = proxy_config->connection_type;
+
+ if (aws_byte_buf_init_copy_from_cursor(&config->host, allocator, aws_byte_cursor_from_buf(&proxy_config->host))) {
+ goto on_error;
+ }
+
+ if (proxy_config->tls_options) {
+ config->tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (aws_tls_connection_options_copy(config->tls_options, proxy_config->tls_options)) {
+ goto on_error;
+ }
+ }
+
+ config->allocator = allocator;
+ config->port = proxy_config->port;
+ config->proxy_strategy = aws_http_proxy_strategy_acquire(proxy_config->proxy_strategy);
+
+ return config;
+
+on_error:
+
+ aws_http_proxy_config_destroy(config);
+
+ return NULL;
+}
+
+void aws_http_proxy_config_destroy(struct aws_http_proxy_config *config) {
+ if (config == NULL) {
+ return;
+ }
+
+ aws_byte_buf_clean_up(&config->host);
+
+ if (config->tls_options) {
+ aws_tls_connection_options_clean_up(config->tls_options);
+ aws_mem_release(config->allocator, config->tls_options);
+ }
+
+ aws_http_proxy_strategy_release(config->proxy_strategy);
+
+ aws_mem_release(config->allocator, config);
+}
+
+void aws_http_proxy_options_init_from_config(
+ struct aws_http_proxy_options *options,
+ const struct aws_http_proxy_config *config) {
+ AWS_FATAL_ASSERT(options && config);
+
+ options->connection_type = config->connection_type;
+ options->host = aws_byte_cursor_from_buf(&config->host);
+ options->port = config->port;
+ options->tls_options = config->tls_options;
+ options->proxy_strategy = config->proxy_strategy;
+}
+
+int aws_http_options_validate_proxy_configuration(const struct aws_http_client_connection_options *options) {
+ if (options == NULL || options->proxy_options == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ enum aws_http_proxy_connection_type proxy_type = options->proxy_options->connection_type;
+ if (proxy_type == AWS_HPCT_HTTP_FORWARD && options->tls_options != NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ struct aws_http_proxy_strategy *proxy_strategy = options->proxy_options->proxy_strategy;
+ if (proxy_strategy != NULL) {
+ if (proxy_strategy->proxy_connection_type != proxy_type) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_proxied_socket_channel_user_data {
+ struct aws_allocator *allocator;
+ struct aws_client_bootstrap *bootstrap;
+ struct aws_channel *channel;
+ aws_client_bootstrap_on_channel_event_fn *original_setup_callback;
+ aws_client_bootstrap_on_channel_event_fn *original_shutdown_callback;
+ void *original_user_data;
+};
+
+static void s_proxied_socket_channel_user_data_destroy(struct aws_proxied_socket_channel_user_data *user_data) {
+ if (user_data == NULL) {
+ return;
+ }
+
+ aws_client_bootstrap_release(user_data->bootstrap);
+
+ aws_mem_release(user_data->allocator, user_data);
+}
+
+static struct aws_proxied_socket_channel_user_data *s_proxied_socket_channel_user_data_new(
+ struct aws_allocator *allocator,
+ struct aws_socket_channel_bootstrap_options *channel_options) {
+ struct aws_proxied_socket_channel_user_data *user_data =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_proxied_socket_channel_user_data));
+ if (user_data == NULL) {
+ return NULL;
+ }
+
+ user_data->allocator = allocator;
+ user_data->original_setup_callback = channel_options->setup_callback;
+ user_data->original_shutdown_callback = channel_options->shutdown_callback;
+ user_data->original_user_data = channel_options->user_data;
+ user_data->bootstrap = aws_client_bootstrap_acquire(channel_options->bootstrap);
+
+ return user_data;
+}
+
+static void s_http_proxied_socket_channel_setup(
+ struct aws_client_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+ struct aws_proxied_socket_channel_user_data *proxied_user_data = user_data;
+
+ if (error_code != AWS_ERROR_SUCCESS || channel == NULL) {
+ proxied_user_data->original_setup_callback(
+ proxied_user_data->bootstrap, error_code, NULL, proxied_user_data->original_user_data);
+ s_proxied_socket_channel_user_data_destroy(proxied_user_data);
+ return;
+ }
+
+ proxied_user_data->channel = channel;
+
+ proxied_user_data->original_setup_callback(
+ proxied_user_data->bootstrap,
+ AWS_ERROR_SUCCESS,
+ proxied_user_data->channel,
+ proxied_user_data->original_user_data);
+}
+
+static void s_http_proxied_socket_channel_shutdown(
+ struct aws_client_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+ (void)bootstrap;
+ (void)channel;
+ struct aws_proxied_socket_channel_user_data *proxied_user_data = user_data;
+ proxied_user_data->original_shutdown_callback(
+ proxied_user_data->bootstrap, error_code, proxied_user_data->channel, proxied_user_data->original_user_data);
+
+ s_proxied_socket_channel_user_data_destroy(proxied_user_data);
+}
+
+int aws_http_proxy_new_socket_channel(
+ struct aws_socket_channel_bootstrap_options *channel_options,
+ const struct aws_http_proxy_options *proxy_options) {
+
+ AWS_FATAL_ASSERT(channel_options != NULL && channel_options->bootstrap != NULL);
+ AWS_FATAL_ASSERT(proxy_options != NULL);
+
+ if (proxy_options->connection_type != AWS_HPCT_HTTP_TUNNEL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_PROXY_NEGOTIATION,
+ "Creating a raw protocol channel through an http proxy requires a tunneling proxy "
+ "configuration");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (channel_options->tls_options == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_PROXY_NEGOTIATION,
+ "Creating a raw protocol channel through an http proxy requires tls to the endpoint");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_allocator *allocator = channel_options->bootstrap->allocator;
+ struct aws_proxied_socket_channel_user_data *user_data =
+ s_proxied_socket_channel_user_data_new(allocator, channel_options);
+
+ struct aws_http_client_connection_options http_connection_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT;
+ http_connection_options.allocator = allocator;
+ http_connection_options.bootstrap = channel_options->bootstrap;
+ http_connection_options.host_name = aws_byte_cursor_from_c_str(channel_options->host_name);
+ http_connection_options.port = channel_options->port;
+ http_connection_options.socket_options = channel_options->socket_options;
+ http_connection_options.tls_options = channel_options->tls_options;
+ http_connection_options.proxy_options = proxy_options;
+ http_connection_options.user_data = user_data;
+ http_connection_options.on_setup = NULL; /* use channel callbacks, not http callbacks */
+ http_connection_options.on_shutdown = NULL; /* use channel callbacks, not http callbacks */
+ http_connection_options.requested_event_loop = channel_options->requested_event_loop;
+
+ if (s_aws_http_client_connect_via_tunneling_proxy(
+ &http_connection_options, s_http_proxied_socket_channel_setup, s_http_proxied_socket_channel_shutdown)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ s_proxied_socket_channel_user_data_destroy(user_data);
+
+ return AWS_OP_ERR;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c b/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
new file mode 100644
index 0000000000..3130d91cc3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/proxy_strategy.c
@@ -0,0 +1,1703 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/proxy.h>
+
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/http/private/proxy_impl.h>
+
+#if defined(_MSC_VER)
+# pragma warning(push)
+# pragma warning(disable : 4221)
+#endif /* _MSC_VER */
+
+struct aws_http_proxy_negotiator *aws_http_proxy_negotiator_acquire(
+ struct aws_http_proxy_negotiator *proxy_negotiator) {
+ if (proxy_negotiator != NULL) {
+ aws_ref_count_acquire(&proxy_negotiator->ref_count);
+ }
+
+ return proxy_negotiator;
+}
+
+void aws_http_proxy_negotiator_release(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ if (proxy_negotiator != NULL) {
+ aws_ref_count_release(&proxy_negotiator->ref_count);
+ }
+}
+
+struct aws_http_proxy_negotiator *aws_http_proxy_strategy_create_negotiator(
+ struct aws_http_proxy_strategy *strategy,
+ struct aws_allocator *allocator) {
+ if (strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ return strategy->vtable->create_negotiator(strategy, allocator);
+}
+
+enum aws_http_proxy_negotiation_retry_directive aws_http_proxy_negotiator_get_retry_directive(
+ struct aws_http_proxy_negotiator *proxy_negotiator) {
+ if (proxy_negotiator != NULL) {
+ if (proxy_negotiator->strategy_vtable.tunnelling_vtable->get_retry_directive != NULL) {
+ return proxy_negotiator->strategy_vtable.tunnelling_vtable->get_retry_directive(proxy_negotiator);
+ }
+ }
+
+ return AWS_HPNRD_STOP;
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_acquire(struct aws_http_proxy_strategy *proxy_strategy) {
+ if (proxy_strategy != NULL) {
+ aws_ref_count_acquire(&proxy_strategy->ref_count);
+ }
+
+ return proxy_strategy;
+}
+
+void aws_http_proxy_strategy_release(struct aws_http_proxy_strategy *proxy_strategy) {
+ if (proxy_strategy != NULL) {
+ aws_ref_count_release(&proxy_strategy->ref_count);
+ }
+}
+
+/*****************************************************************************************************************/
+
+enum proxy_negotiator_connect_state {
+ AWS_PNCS_READY,
+ AWS_PNCS_IN_PROGRESS,
+ AWS_PNCS_SUCCESS,
+ AWS_PNCS_FAILURE,
+};
+
+/* Functions for basic auth strategy */
+
+struct aws_http_proxy_strategy_basic_auth {
+ struct aws_allocator *allocator;
+ struct aws_string *user_name;
+ struct aws_string *password;
+ struct aws_http_proxy_strategy strategy_base;
+};
+
+static void s_destroy_basic_auth_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = proxy_strategy->impl;
+
+ aws_string_destroy(basic_auth_strategy->user_name);
+ aws_string_destroy(basic_auth_strategy->password);
+
+ aws_mem_release(basic_auth_strategy->allocator, basic_auth_strategy);
+}
+
+struct aws_http_proxy_negotiator_basic_auth {
+ struct aws_allocator *allocator;
+
+ struct aws_http_proxy_strategy *strategy;
+
+ enum proxy_negotiator_connect_state connect_state;
+
+ struct aws_http_proxy_negotiator negotiator_base;
+};
+
+static void s_destroy_basic_auth_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+
+ aws_http_proxy_strategy_release(basic_auth_negotiator->strategy);
+
+ aws_mem_release(basic_auth_negotiator->allocator, basic_auth_negotiator);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_name, "Proxy-Authorization");
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_basic_prefix, "Basic ");
+
+/*
+ * Adds a Proxy-Authorization header using the Basic authentication scheme defined in RFC 7617
+ */
+static int s_add_basic_proxy_authentication_header(
+ struct aws_allocator *allocator,
+ struct aws_http_message *request,
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator) {
+
+ struct aws_byte_buf base64_input_value;
+ AWS_ZERO_STRUCT(base64_input_value);
+
+ struct aws_byte_buf header_value;
+ AWS_ZERO_STRUCT(header_value);
+
+ int result = AWS_OP_ERR;
+
+ struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy = basic_auth_negotiator->strategy->impl;
+
+ if (aws_byte_buf_init(
+ &base64_input_value,
+ allocator,
+ basic_auth_strategy->user_name->len + basic_auth_strategy->password->len + 1)) {
+ goto done;
+ }
+
+ /* First build a buffer with "username:password" in it */
+ struct aws_byte_cursor username_cursor = aws_byte_cursor_from_string(basic_auth_strategy->user_name);
+ if (aws_byte_buf_append(&base64_input_value, &username_cursor)) {
+ goto done;
+ }
+
+ struct aws_byte_cursor colon_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(":");
+ if (aws_byte_buf_append(&base64_input_value, &colon_cursor)) {
+ goto done;
+ }
+
+ struct aws_byte_cursor password_cursor = aws_byte_cursor_from_string(basic_auth_strategy->password);
+ if (aws_byte_buf_append(&base64_input_value, &password_cursor)) {
+ goto done;
+ }
+
+ struct aws_byte_cursor base64_source_cursor =
+ aws_byte_cursor_from_array(base64_input_value.buffer, base64_input_value.len);
+
+ /* Figure out how much room we need in our final header value buffer */
+ size_t required_size = 0;
+ if (aws_base64_compute_encoded_len(base64_source_cursor.len, &required_size)) {
+ goto done;
+ }
+
+ required_size += s_proxy_authorization_header_basic_prefix->len + 1;
+ if (aws_byte_buf_init(&header_value, allocator, required_size)) {
+ goto done;
+ }
+
+ /* Build the final header value by appending the authorization type and the base64-encoded string together */
+ struct aws_byte_cursor basic_prefix = aws_byte_cursor_from_string(s_proxy_authorization_header_basic_prefix);
+ if (aws_byte_buf_append_dynamic(&header_value, &basic_prefix)) {
+ goto done;
+ }
+
+ if (aws_base64_encode(&base64_source_cursor, &header_value)) {
+ goto done;
+ }
+
+ struct aws_http_header header = {
+ .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name),
+ .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len),
+ };
+
+ if (aws_http_message_add_header(request, header)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_byte_buf_clean_up(&header_value);
+ aws_byte_buf_clean_up(&base64_input_value);
+
+ return result;
+}
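+
+/*
+ * For illustration, using the well-known example credentials from RFC 7617 ("Aladdin" / "open sesame"),
+ * the function above produces the header:
+ *
+ *     Proxy-Authorization: Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==
+ */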
+
+int s_basic_auth_forward_add_header(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message) {
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+
+ return s_add_basic_proxy_authentication_header(basic_auth_negotiator->allocator, message, basic_auth_negotiator);
+}
+
+void s_basic_auth_tunnel_add_header(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+ if (basic_auth_negotiator->connect_state != AWS_PNCS_READY) {
+ negotiation_termination_callback(message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, internal_proxy_user_data);
+ return;
+ }
+
+ basic_auth_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+
+ if (s_add_basic_proxy_authentication_header(basic_auth_negotiator->allocator, message, basic_auth_negotiator)) {
+ negotiation_termination_callback(message, aws_last_error(), internal_proxy_user_data);
+ return;
+ }
+
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+}
+
+static int s_basic_auth_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator = proxy_negotiator->impl;
+
+ if (basic_auth_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ basic_auth_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ basic_auth_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_http_proxy_negotiator_forwarding_vtable s_basic_auth_proxy_negotiator_forwarding_vtable = {
+ .forward_request_transform = s_basic_auth_forward_add_header,
+};
+
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_basic_auth_proxy_negotiator_tunneling_vtable = {
+ .on_status_callback = s_basic_auth_on_connect_status,
+ .connect_request_transform = s_basic_auth_tunnel_add_header,
+};
+
+static struct aws_http_proxy_negotiator *s_create_basic_auth_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_basic_auth *basic_auth_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_basic_auth));
+ if (basic_auth_negotiator == NULL) {
+ return NULL;
+ }
+
+ basic_auth_negotiator->allocator = allocator;
+ basic_auth_negotiator->connect_state = AWS_PNCS_READY;
+ basic_auth_negotiator->negotiator_base.impl = basic_auth_negotiator;
+ aws_ref_count_init(
+ &basic_auth_negotiator->negotiator_base.ref_count,
+ &basic_auth_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_basic_auth_negotiator);
+
+ if (proxy_strategy->proxy_connection_type == AWS_HPCT_HTTP_FORWARD) {
+ basic_auth_negotiator->negotiator_base.strategy_vtable.forwarding_vtable =
+ &s_basic_auth_proxy_negotiator_forwarding_vtable;
+ } else {
+ basic_auth_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+ &s_basic_auth_proxy_negotiator_tunneling_vtable;
+ }
+
+ basic_auth_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);
+
+ return &basic_auth_negotiator->negotiator_base;
+}
+
+static struct aws_http_proxy_strategy_vtable s_basic_auth_proxy_strategy_vtable = {
+ .create_negotiator = s_create_basic_auth_negotiator,
+};
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_basic_auth(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_basic_auth_options *config) {
+ if (config == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (config->proxy_connection_type != AWS_HPCT_HTTP_FORWARD &&
+ config->proxy_connection_type != AWS_HPCT_HTTP_TUNNEL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_basic_auth *basic_auth_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_basic_auth));
+ if (basic_auth_strategy == NULL) {
+ return NULL;
+ }
+
+ basic_auth_strategy->strategy_base.impl = basic_auth_strategy;
+ basic_auth_strategy->strategy_base.vtable = &s_basic_auth_proxy_strategy_vtable;
+ basic_auth_strategy->allocator = allocator;
+ basic_auth_strategy->strategy_base.proxy_connection_type = config->proxy_connection_type;
+ aws_ref_count_init(
+ &basic_auth_strategy->strategy_base.ref_count,
+ &basic_auth_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_basic_auth_strategy);
+
+ basic_auth_strategy->user_name = aws_string_new_from_cursor(allocator, &config->user_name);
+ if (basic_auth_strategy->user_name == NULL) {
+ goto on_error;
+ }
+
+ basic_auth_strategy->password = aws_string_new_from_cursor(allocator, &config->password);
+ if (basic_auth_strategy->password == NULL) {
+ goto on_error;
+ }
+
+ return &basic_auth_strategy->strategy_base;
+
+on_error:
+
+ aws_http_proxy_strategy_release(&basic_auth_strategy->strategy_base);
+
+ return NULL;
+}
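+
+/*
+ * Minimal usage sketch (hypothetical host and credentials; the field names match those used elsewhere in this
+ * file, see s_connect_proxy_via_env_variable and aws_http_proxy_options_init_from_config):
+ *
+ *     struct aws_http_proxy_strategy_basic_auth_options config = {
+ *         .proxy_connection_type = AWS_HPCT_HTTP_TUNNEL,
+ *         .user_name = aws_byte_cursor_from_c_str("user"),
+ *         .password = aws_byte_cursor_from_c_str("password"),
+ *     };
+ *
+ *     struct aws_http_proxy_options proxy_options = {
+ *         .connection_type = AWS_HPCT_HTTP_TUNNEL,
+ *         .host = aws_byte_cursor_from_c_str("proxy.example.com"),
+ *         .port = 8080,
+ *         .proxy_strategy = aws_http_proxy_strategy_new_basic_auth(allocator, &config),
+ *     };
+ */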
+
+/*****************************************************************************************************************/
+
+struct aws_http_proxy_strategy_one_time_identity {
+ struct aws_allocator *allocator;
+
+ struct aws_http_proxy_strategy strategy_base;
+};
+
+struct aws_http_proxy_negotiator_one_time_identity {
+ struct aws_allocator *allocator;
+
+ enum proxy_negotiator_connect_state connect_state;
+
+ struct aws_http_proxy_negotiator negotiator_base;
+};
+
+static void s_destroy_one_time_identity_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_one_time_identity *identity_negotiator = proxy_negotiator->impl;
+
+ aws_mem_release(identity_negotiator->allocator, identity_negotiator);
+}
+
+void s_one_time_identity_connect_transform(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_one_time_identity *one_time_identity_negotiator = proxy_negotiator->impl;
+ if (one_time_identity_negotiator->connect_state != AWS_PNCS_READY) {
+ negotiation_termination_callback(message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, internal_proxy_user_data);
+ return;
+ }
+
+ one_time_identity_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+}
+
+static int s_one_time_identity_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+ struct aws_http_proxy_negotiator_one_time_identity *one_time_identity_negotiator = proxy_negotiator->impl;
+
+ if (one_time_identity_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ one_time_identity_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ one_time_identity_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_one_time_identity_proxy_negotiator_tunneling_vtable = {
+ .on_status_callback = s_one_time_identity_on_connect_status,
+ .connect_request_transform = s_one_time_identity_connect_transform,
+};
+
+static struct aws_http_proxy_negotiator *s_create_one_time_identity_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_one_time_identity *identity_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_one_time_identity));
+ if (identity_negotiator == NULL) {
+ return NULL;
+ }
+
+ identity_negotiator->allocator = allocator;
+ identity_negotiator->connect_state = AWS_PNCS_READY;
+ identity_negotiator->negotiator_base.impl = identity_negotiator;
+ aws_ref_count_init(
+ &identity_negotiator->negotiator_base.ref_count,
+ &identity_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_one_time_identity_negotiator);
+
+ identity_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+ &s_one_time_identity_proxy_negotiator_tunneling_vtable;
+
+ return &identity_negotiator->negotiator_base;
+}
+
+static struct aws_http_proxy_strategy_vtable s_one_time_identity_proxy_strategy_vtable = {
+ .create_negotiator = s_create_one_time_identity_negotiator,
+};
+
+static void s_destroy_one_time_identity_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_one_time_identity *identity_strategy = proxy_strategy->impl;
+
+ aws_mem_release(identity_strategy->allocator, identity_strategy);
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_one_time_identity(
+ struct aws_allocator *allocator) {
+ if (allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_one_time_identity *identity_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_one_time_identity));
+ if (identity_strategy == NULL) {
+ return NULL;
+ }
+
+ identity_strategy->strategy_base.impl = identity_strategy;
+ identity_strategy->strategy_base.vtable = &s_one_time_identity_proxy_strategy_vtable;
+ identity_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+ identity_strategy->allocator = allocator;
+
+ aws_ref_count_init(
+ &identity_strategy->strategy_base.ref_count,
+ &identity_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_one_time_identity_strategy);
+
+ return &identity_strategy->strategy_base;
+}
+
+/******************************************************************************************************************/
+
+struct aws_http_proxy_strategy_forwarding_identity {
+ struct aws_allocator *allocator;
+
+ struct aws_http_proxy_strategy strategy_base;
+};
+
+struct aws_http_proxy_negotiator_forwarding_identity {
+ struct aws_allocator *allocator;
+
+ struct aws_http_proxy_negotiator negotiator_base;
+};
+
+static void s_destroy_forwarding_identity_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_forwarding_identity *identity_negotiator = proxy_negotiator->impl;
+
+ aws_mem_release(identity_negotiator->allocator, identity_negotiator);
+}
+
+int s_forwarding_identity_connect_transform(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message) {
+
+ (void)message;
+ (void)proxy_negotiator;
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_http_proxy_negotiator_forwarding_vtable s_forwarding_identity_proxy_negotiator_tunneling_vtable = {
+ .forward_request_transform = s_forwarding_identity_connect_transform,
+};
+
+static struct aws_http_proxy_negotiator *s_create_forwarding_identity_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_forwarding_identity *identity_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_forwarding_identity));
+ if (identity_negotiator == NULL) {
+ return NULL;
+ }
+
+ identity_negotiator->allocator = allocator;
+ identity_negotiator->negotiator_base.impl = identity_negotiator;
+ aws_ref_count_init(
+ &identity_negotiator->negotiator_base.ref_count,
+ &identity_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_forwarding_identity_negotiator);
+
+ identity_negotiator->negotiator_base.strategy_vtable.forwarding_vtable =
+ &s_forwarding_identity_proxy_negotiator_tunneling_vtable;
+
+ return &identity_negotiator->negotiator_base;
+}
+
+static struct aws_http_proxy_strategy_vtable s_forwarding_identity_strategy_vtable = {
+ .create_negotiator = s_create_forwarding_identity_negotiator,
+};
+
+static void s_destroy_forwarding_identity_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_forwarding_identity *identity_strategy = proxy_strategy->impl;
+
+ aws_mem_release(identity_strategy->allocator, identity_strategy);
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_forwarding_identity(struct aws_allocator *allocator) {
+ if (allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_forwarding_identity *identity_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_forwarding_identity));
+ if (identity_strategy == NULL) {
+ return NULL;
+ }
+
+ identity_strategy->strategy_base.impl = identity_strategy;
+ identity_strategy->strategy_base.vtable = &s_forwarding_identity_strategy_vtable;
+ identity_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_FORWARD;
+ identity_strategy->allocator = allocator;
+
+ aws_ref_count_init(
+ &identity_strategy->strategy_base.ref_count,
+ &identity_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_forwarding_identity_strategy);
+
+ return &identity_strategy->strategy_base;
+}
+
+/******************************************************************************************************************/
+/* kerberos */
+
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_kerberos_prefix, "Negotiate ");
+
+struct aws_http_proxy_strategy_tunneling_kerberos {
+ struct aws_allocator *allocator;
+
+ aws_http_proxy_negotiation_get_token_sync_fn *get_token;
+
+ void *get_token_user_data;
+
+ struct aws_http_proxy_strategy strategy_base;
+};
+
+struct aws_http_proxy_negotiator_tunneling_kerberos {
+ struct aws_allocator *allocator;
+
+ struct aws_http_proxy_strategy *strategy;
+
+ enum proxy_negotiator_connect_state connect_state;
+
+ /*
+ * ToDo: make adaptive and add any state needed here
+ *
+ * Likely things include response code (from the vanilla CONNECT) and the appropriate headers in
+ * the response
+ */
+
+ struct aws_http_proxy_negotiator negotiator_base;
+};
+
+/*
+ * Adds a proxy authentication header based on the user-supplied Kerberos authentication token.
+ * The token is expected to already be base64-encoded.
+ */
+static int s_add_kerberos_proxy_usertoken_authentication_header(
+ struct aws_allocator *allocator,
+ struct aws_http_message *request,
+ struct aws_byte_cursor user_token) {
+
+ struct aws_byte_buf header_value;
+ AWS_ZERO_STRUCT(header_value);
+
+ int result = AWS_OP_ERR;
+
+ if (aws_byte_buf_init(
+ &header_value, allocator, s_proxy_authorization_header_kerberos_prefix->len + user_token.len)) {
+ goto done;
+ }
+
+ /* First append proxy authorization header kerberos prefix */
+ struct aws_byte_cursor auth_header_cursor =
+ aws_byte_cursor_from_string(s_proxy_authorization_header_kerberos_prefix);
+ if (aws_byte_buf_append(&header_value, &auth_header_cursor)) {
+ goto done;
+ }
+
+ /* Append token to it */
+ if (aws_byte_buf_append(&header_value, &user_token)) {
+ goto done;
+ }
+
+ struct aws_http_header header = {
+ .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name),
+ .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len),
+ };
+
+ if (aws_http_message_add_header(request, header)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_byte_buf_clean_up(&header_value);
+ return result;
+}
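+
+/*
+ * For illustration, the resulting header has the shape:
+ *
+ *     Proxy-Authorization: Negotiate <user-supplied, base64-encoded token>
+ */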
+
+static void s_kerberos_tunnel_transform_connect(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+ struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = kerberos_negotiator->strategy->impl;
+
+ int result = AWS_OP_ERR;
+ int error_code = AWS_ERROR_SUCCESS;
+ struct aws_string *kerberos_token = NULL;
+
+ if (kerberos_negotiator->connect_state == AWS_PNCS_FAILURE) {
+ error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ goto done;
+ }
+
+ if (kerberos_negotiator->connect_state != AWS_PNCS_READY) {
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto done;
+ }
+
+ kerberos_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+
+ kerberos_token = kerberos_strategy->get_token(kerberos_strategy->get_token_user_data, &error_code);
+ if (kerberos_token == NULL || error_code != AWS_ERROR_SUCCESS) {
+ goto done;
+ }
+
+ /* Add a Proxy-Authorization header with the "Negotiate" prefix and the Kerberos token */
+ if (s_add_kerberos_proxy_usertoken_authentication_header(
+ kerberos_negotiator->allocator, message, aws_byte_cursor_from_string(kerberos_token))) {
+ error_code = aws_last_error();
+ goto done;
+ }
+
+ kerberos_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+ negotiation_termination_callback(message, error_code, internal_proxy_user_data);
+ } else {
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+ }
+
+ aws_string_destroy(kerberos_token);
+}
+
+static int s_kerberos_on_incoming_header_adaptive(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers) {
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+ (void)kerberos_negotiator;
+ (void)header_block;
+ (void)header_array;
+ (void)num_headers;
+
+ /* TODO: process vanilla CONNECT response headers here to improve usage/application */
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_kerberos_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+
+ /* TODO: process status code of vanilla CONNECT request here to improve usage/application */
+
+ if (kerberos_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ kerberos_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ kerberos_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_kerberos_on_incoming_body(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ const struct aws_byte_cursor *data) {
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+ (void)kerberos_negotiator;
+ (void)data;
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_kerberos_proxy_negotiator_tunneling_vtable = {
+ .on_incoming_body_callback = s_kerberos_on_incoming_body,
+ .on_incoming_headers_callback = s_kerberos_on_incoming_header_adaptive,
+ .on_status_callback = s_kerberos_on_connect_status,
+ .connect_request_transform = s_kerberos_tunnel_transform_connect,
+};
+
+static void s_destroy_tunneling_kerberos_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator = proxy_negotiator->impl;
+
+ aws_http_proxy_strategy_release(kerberos_negotiator->strategy);
+
+ aws_mem_release(kerberos_negotiator->allocator, kerberos_negotiator);
+}
+
+static struct aws_http_proxy_negotiator *s_create_tunneling_kerberos_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_tunneling_kerberos *kerberos_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_kerberos));
+ if (kerberos_negotiator == NULL) {
+ return NULL;
+ }
+
+ kerberos_negotiator->allocator = allocator;
+ kerberos_negotiator->negotiator_base.impl = kerberos_negotiator;
+ aws_ref_count_init(
+ &kerberos_negotiator->negotiator_base.ref_count,
+ &kerberos_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_kerberos_negotiator);
+
+ kerberos_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+ &s_tunneling_kerberos_proxy_negotiator_tunneling_vtable;
+
+ kerberos_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);
+
+ return &kerberos_negotiator->negotiator_base;
+}
+
+static struct aws_http_proxy_strategy_vtable s_tunneling_kerberos_strategy_vtable = {
+ .create_negotiator = s_create_tunneling_kerberos_negotiator,
+};
+
+static void s_destroy_tunneling_kerberos_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy = proxy_strategy->impl;
+
+ aws_mem_release(kerberos_strategy->allocator, kerberos_strategy);
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_kerberos(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_kerberos_options *config) {
+
+ if (allocator == NULL || config == NULL || config->get_token == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_tunneling_kerberos *kerberos_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_kerberos));
+ if (kerberos_strategy == NULL) {
+ return NULL;
+ }
+
+ kerberos_strategy->strategy_base.impl = kerberos_strategy;
+ kerberos_strategy->strategy_base.vtable = &s_tunneling_kerberos_strategy_vtable;
+ kerberos_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+ kerberos_strategy->allocator = allocator;
+
+ aws_ref_count_init(
+ &kerberos_strategy->strategy_base.ref_count,
+ &kerberos_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_kerberos_strategy);
+
+ kerberos_strategy->get_token = config->get_token;
+ kerberos_strategy->get_token_user_data = config->get_token_user_data;
+
+ return &kerberos_strategy->strategy_base;
+}
+
+/******************************************************************************************************************/
+
+struct aws_http_proxy_strategy_tunneling_ntlm {
+ struct aws_allocator *allocator;
+
+ aws_http_proxy_negotiation_get_token_sync_fn *get_token;
+
+ aws_http_proxy_negotiation_get_challenge_token_sync_fn *get_challenge_token;
+
+ void *get_challenge_token_user_data;
+
+ struct aws_http_proxy_strategy strategy_base;
+};
+
+struct aws_http_proxy_negotiator_tunneling_ntlm {
+ struct aws_allocator *allocator;
+
+ struct aws_http_proxy_strategy *strategy;
+
+ enum proxy_negotiator_connect_state connect_state;
+
+ struct aws_string *challenge_token;
+
+ struct aws_http_proxy_negotiator negotiator_base;
+};
+
+AWS_STATIC_STRING_FROM_LITERAL(s_proxy_authorization_header_ntlm_prefix, "NTLM ");
+
+/*
+ * Adds a Proxy-Authorization header based on the NTLM credential or challenge response provided by the user.
+ */
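+/*
+ * Illustration (hypothetical token value): after a successful transform, the CONNECT request carries a header such as
+ *
+ *     Proxy-Authorization: NTLM TlRMTVNTUAAB...
+ *
+ * where everything after the "NTLM " prefix is the caller-supplied, base64-encoded NTLM message.
+ */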
+static int s_add_ntlm_proxy_usertoken_authentication_header(
+ struct aws_allocator *allocator,
+ struct aws_http_message *request,
+ struct aws_byte_cursor credential_response) {
+
+ struct aws_byte_buf header_value;
+ AWS_ZERO_STRUCT(header_value);
+
+ int result = AWS_OP_ERR;
+
+ if (aws_byte_buf_init(
+ &header_value, allocator, s_proxy_authorization_header_ntlm_prefix->len + credential_response.len)) {
+ goto done;
+ }
+
+ /* First append proxy authorization header prefix */
+ struct aws_byte_cursor auth_header_cursor = aws_byte_cursor_from_string(s_proxy_authorization_header_ntlm_prefix);
+ if (aws_byte_buf_append(&header_value, &auth_header_cursor)) {
+ goto done;
+ }
+
+    /* Append the credential response; it is assumed to already be properly base64-encoded */
+ if (aws_byte_buf_append(&header_value, &credential_response)) {
+ goto done;
+ }
+
+ struct aws_http_header header = {
+ .name = aws_byte_cursor_from_string(s_proxy_authorization_header_name),
+ .value = aws_byte_cursor_from_array(header_value.buffer, header_value.len),
+ };
+
+ if (aws_http_message_add_header(request, header)) {
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ aws_byte_buf_clean_up(&header_value);
+ return result;
+}
+
+static void s_ntlm_tunnel_transform_connect(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+ struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = ntlm_negotiator->strategy->impl;
+
+ int result = AWS_OP_ERR;
+ int error_code = AWS_ERROR_SUCCESS;
+ struct aws_string *challenge_answer_token = NULL;
+ struct aws_byte_cursor challenge_token;
+ AWS_ZERO_STRUCT(challenge_token);
+
+ if (ntlm_negotiator->connect_state == AWS_PNCS_FAILURE) {
+ error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ goto done;
+ }
+
+ if (ntlm_negotiator->connect_state != AWS_PNCS_READY) {
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto done;
+ }
+
+ if (ntlm_negotiator->challenge_token == NULL) {
+ error_code = AWS_ERROR_HTTP_PROXY_STRATEGY_NTLM_CHALLENGE_TOKEN_MISSING;
+ goto done;
+ }
+
+ ntlm_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ challenge_token = aws_byte_cursor_from_string(ntlm_negotiator->challenge_token);
+ challenge_answer_token =
+ ntlm_strategy->get_challenge_token(ntlm_strategy->get_challenge_token_user_data, &challenge_token, &error_code);
+
+ if (challenge_answer_token == NULL || error_code != AWS_ERROR_SUCCESS) {
+ goto done;
+ }
+
+    /* Add the Proxy-Authorization header carrying the NTLM challenge response */
+ if (s_add_ntlm_proxy_usertoken_authentication_header(
+ ntlm_negotiator->allocator, message, aws_byte_cursor_from_string(challenge_answer_token))) {
+ error_code = aws_last_error();
+ goto done;
+ }
+
+ ntlm_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+ negotiation_termination_callback(message, error_code, internal_proxy_user_data);
+ } else {
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+ }
+
+ aws_string_destroy(challenge_answer_token);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_ntlm_challenge_token_header, "Proxy-Authenticate");
+
+static int s_ntlm_on_incoming_header_adaptive(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers) {
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+
+    /*
+     * Only extract the challenge before we have started our own CONNECT attempt.
+     *
+     * TODO: we currently overwrite any previously seen challenge token, since it is unknown whether additional
+     * CONNECT requests cause new challenges to be issued that invalidate older ones, even if those were computed
+     * successfully.
+     */
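+    /*
+     * Note: in a typical NTLM proxy exchange the first CONNECT is rejected with 407 Proxy Authentication Required
+     * and a "Proxy-Authenticate: NTLM <challenge>" header; the challenge captured here is then answered on the
+     * retried CONNECT via get_challenge_token() (see s_ntlm_tunnel_transform_connect above).
+     */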
+ if (ntlm_negotiator->connect_state == AWS_PNCS_READY) {
+ if (header_block == AWS_HTTP_HEADER_BLOCK_MAIN) {
+ struct aws_byte_cursor proxy_authenticate_header_name =
+ aws_byte_cursor_from_string(s_ntlm_challenge_token_header);
+ for (size_t i = 0; i < num_headers; ++i) {
+ struct aws_byte_cursor header_name_cursor = header_array[i].name;
+ if (aws_byte_cursor_eq_ignore_case(&proxy_authenticate_header_name, &header_name_cursor)) {
+ aws_string_destroy(ntlm_negotiator->challenge_token);
+
+ struct aws_byte_cursor challenge_value_cursor = header_array[i].value;
+ ntlm_negotiator->challenge_token =
+ aws_string_new_from_cursor(ntlm_negotiator->allocator, &challenge_value_cursor);
+ break;
+ }
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_ntlm_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+
+ if (ntlm_negotiator->connect_state == AWS_PNCS_IN_PROGRESS) {
+ if (AWS_HTTP_STATUS_CODE_200_OK != status_code) {
+ ntlm_negotiator->connect_state = AWS_PNCS_FAILURE;
+ } else {
+ ntlm_negotiator->connect_state = AWS_PNCS_SUCCESS;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_ntlm_on_incoming_body(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ const struct aws_byte_cursor *data) {
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+ (void)ntlm_negotiator;
+ (void)data;
+
+ return AWS_OP_SUCCESS;
+}
+
+static enum aws_http_proxy_negotiation_retry_directive s_ntlm_tunnel_get_retry_directive(
+ struct aws_http_proxy_negotiator *proxy_negotiator) {
+ (void)proxy_negotiator;
+
+ return AWS_HPNRD_CURRENT_CONNECTION;
+}
+
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_ntlm_proxy_negotiator_tunneling_vtable = {
+ .on_incoming_body_callback = s_ntlm_on_incoming_body,
+ .on_incoming_headers_callback = s_ntlm_on_incoming_header_adaptive,
+ .on_status_callback = s_ntlm_on_connect_status,
+ .connect_request_transform = s_ntlm_tunnel_transform_connect,
+ .get_retry_directive = s_ntlm_tunnel_get_retry_directive,
+};
+
+static void s_destroy_tunneling_ntlm_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator = proxy_negotiator->impl;
+
+ aws_string_destroy(ntlm_negotiator->challenge_token);
+ aws_http_proxy_strategy_release(ntlm_negotiator->strategy);
+
+ aws_mem_release(ntlm_negotiator->allocator, ntlm_negotiator);
+}
+
+static struct aws_http_proxy_negotiator *s_create_tunneling_ntlm_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_ntlm));
+ if (ntlm_negotiator == NULL) {
+ return NULL;
+ }
+
+ ntlm_negotiator->allocator = allocator;
+ ntlm_negotiator->negotiator_base.impl = ntlm_negotiator;
+ aws_ref_count_init(
+ &ntlm_negotiator->negotiator_base.ref_count,
+ &ntlm_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_negotiator);
+
+ ntlm_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+ &s_tunneling_ntlm_proxy_negotiator_tunneling_vtable;
+
+ ntlm_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);
+
+ return &ntlm_negotiator->negotiator_base;
+}
+
+static struct aws_http_proxy_strategy_vtable s_tunneling_ntlm_strategy_vtable = {
+ .create_negotiator = s_create_tunneling_ntlm_negotiator,
+};
+
+static void s_destroy_tunneling_ntlm_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy = proxy_strategy->impl;
+
+ aws_mem_release(ntlm_strategy->allocator, ntlm_strategy);
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_ntlm_options *config) {
+
+ if (allocator == NULL || config == NULL || config->get_challenge_token == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_ntlm));
+ if (ntlm_strategy == NULL) {
+ return NULL;
+ }
+
+ ntlm_strategy->strategy_base.impl = ntlm_strategy;
+ ntlm_strategy->strategy_base.vtable = &s_tunneling_ntlm_strategy_vtable;
+ ntlm_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+
+ ntlm_strategy->allocator = allocator;
+
+ aws_ref_count_init(
+ &ntlm_strategy->strategy_base.ref_count,
+ &ntlm_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_strategy);
+
+ ntlm_strategy->get_challenge_token = config->get_challenge_token;
+ ntlm_strategy->get_challenge_token_user_data = config->get_challenge_token_user_data;
+
+ return &ntlm_strategy->strategy_base;
+}
+/******************************************************************************************************/
+
+static void s_ntlm_credential_tunnel_transform_connect(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = proxy_negotiator->impl;
+ struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy =
+ ntlm_credential_negotiator->strategy->impl;
+
+ int result = AWS_OP_ERR;
+ int error_code = AWS_ERROR_SUCCESS;
+ struct aws_string *token = NULL;
+
+ if (ntlm_credential_negotiator->connect_state == AWS_PNCS_FAILURE) {
+ error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ goto done;
+ }
+
+ if (ntlm_credential_negotiator->connect_state != AWS_PNCS_READY) {
+ error_code = AWS_ERROR_INVALID_STATE;
+ goto done;
+ }
+
+ ntlm_credential_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ token = ntlm_credential_strategy->get_token(ntlm_credential_strategy->get_challenge_token_user_data, &error_code);
+
+ if (token == NULL || error_code != AWS_ERROR_SUCCESS) {
+ goto done;
+ }
+
+    /* Add the Proxy-Authorization header carrying the NTLM credential token */
+ if (s_add_ntlm_proxy_usertoken_authentication_header(
+ ntlm_credential_negotiator->allocator, message, aws_byte_cursor_from_string(token))) {
+ error_code = aws_last_error();
+ goto done;
+ }
+
+ ntlm_credential_negotiator->connect_state = AWS_PNCS_IN_PROGRESS;
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+ negotiation_termination_callback(message, error_code, internal_proxy_user_data);
+ } else {
+ negotiation_http_request_forward_callback(message, internal_proxy_user_data);
+ }
+
+ aws_string_destroy(token);
+}
+
+static struct aws_http_proxy_negotiator_tunnelling_vtable
+ s_tunneling_ntlm_proxy_credential_negotiator_tunneling_vtable = {
+ .on_incoming_body_callback = s_ntlm_on_incoming_body,
+ .on_incoming_headers_callback = s_ntlm_on_incoming_header_adaptive,
+ .on_status_callback = s_ntlm_on_connect_status,
+ .connect_request_transform = s_ntlm_credential_tunnel_transform_connect,
+};
+
+static void s_destroy_tunneling_ntlm_credential_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator = proxy_negotiator->impl;
+
+ aws_string_destroy(ntlm_credential_negotiator->challenge_token);
+ aws_http_proxy_strategy_release(ntlm_credential_negotiator->strategy);
+
+ aws_mem_release(ntlm_credential_negotiator->allocator, ntlm_credential_negotiator);
+}
+
+static struct aws_http_proxy_negotiator *s_create_tunneling_ntlm_credential_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_tunneling_ntlm *ntlm_credential_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_ntlm));
+ if (ntlm_credential_negotiator == NULL) {
+ return NULL;
+ }
+
+ ntlm_credential_negotiator->allocator = allocator;
+ ntlm_credential_negotiator->negotiator_base.impl = ntlm_credential_negotiator;
+ aws_ref_count_init(
+ &ntlm_credential_negotiator->negotiator_base.ref_count,
+ &ntlm_credential_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_credential_negotiator);
+
+ ntlm_credential_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+ &s_tunneling_ntlm_proxy_credential_negotiator_tunneling_vtable;
+
+ ntlm_credential_negotiator->strategy = aws_http_proxy_strategy_acquire(proxy_strategy);
+
+ return &ntlm_credential_negotiator->negotiator_base;
+}
+
+static struct aws_http_proxy_strategy_vtable s_tunneling_ntlm_credential_strategy_vtable = {
+ .create_negotiator = s_create_tunneling_ntlm_credential_negotiator,
+};
+
+static void s_destroy_tunneling_ntlm_credential_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy = proxy_strategy->impl;
+
+ aws_mem_release(ntlm_credential_strategy->allocator, ntlm_credential_strategy);
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_ntlm_credential(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_ntlm_options *config) {
+
+ if (allocator == NULL || config == NULL || config->get_token == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_tunneling_ntlm *ntlm_credential_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_ntlm));
+ if (ntlm_credential_strategy == NULL) {
+ return NULL;
+ }
+
+ ntlm_credential_strategy->strategy_base.impl = ntlm_credential_strategy;
+ ntlm_credential_strategy->strategy_base.vtable = &s_tunneling_ntlm_credential_strategy_vtable;
+ ntlm_credential_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+
+ ntlm_credential_strategy->allocator = allocator;
+
+ aws_ref_count_init(
+ &ntlm_credential_strategy->strategy_base.ref_count,
+ &ntlm_credential_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_ntlm_credential_strategy);
+
+ ntlm_credential_strategy->get_token = config->get_token;
+ ntlm_credential_strategy->get_challenge_token_user_data = config->get_challenge_token_user_data;
+
+ return &ntlm_credential_strategy->strategy_base;
+}
+
+/******************************************************************************************************************/
+
+#define PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES 4
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_adaptive(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_adaptive_options *config) {
+
+ if (allocator == NULL || config == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy *strategies[PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES];
+
+ uint32_t strategy_count = 0;
+ struct aws_http_proxy_strategy *identity_strategy = NULL;
+ struct aws_http_proxy_strategy *kerberos_strategy = NULL;
+ struct aws_http_proxy_strategy *ntlm_credential_strategy = NULL;
+ struct aws_http_proxy_strategy *ntlm_strategy = NULL;
+ struct aws_http_proxy_strategy *adaptive_sequence_strategy = NULL;
+
+ identity_strategy = aws_http_proxy_strategy_new_tunneling_one_time_identity(allocator);
+ if (identity_strategy == NULL) {
+ goto done;
+ }
+ strategies[strategy_count++] = identity_strategy;
+
+ if (config->kerberos_options != NULL) {
+ kerberos_strategy = aws_http_proxy_strategy_new_tunneling_kerberos(allocator, config->kerberos_options);
+ if (kerberos_strategy == NULL) {
+ goto done;
+ }
+
+ strategies[strategy_count++] = kerberos_strategy;
+ }
+
+ if (config->ntlm_options != NULL) {
+ ntlm_credential_strategy =
+ aws_http_proxy_strategy_new_tunneling_ntlm_credential(allocator, config->ntlm_options);
+ if (ntlm_credential_strategy == NULL) {
+ goto done;
+ }
+
+ strategies[strategy_count++] = ntlm_credential_strategy;
+
+ ntlm_strategy = aws_http_proxy_strategy_new_tunneling_ntlm(allocator, config->ntlm_options);
+ if (ntlm_strategy == NULL) {
+ goto done;
+ }
+
+ strategies[strategy_count++] = ntlm_strategy;
+ }
+
+ AWS_FATAL_ASSERT(strategy_count <= PROXY_STRATEGY_MAX_ADAPTIVE_STRATEGIES);
+
+ struct aws_http_proxy_strategy_tunneling_sequence_options sequence_config = {
+ .strategies = strategies,
+ .strategy_count = strategy_count,
+ };
+
+ adaptive_sequence_strategy = aws_http_proxy_strategy_new_tunneling_sequence(allocator, &sequence_config);
+ if (adaptive_sequence_strategy == NULL) {
+ goto done;
+ }
+
+done:
+
+ aws_http_proxy_strategy_release(identity_strategy);
+ aws_http_proxy_strategy_release(kerberos_strategy);
+ aws_http_proxy_strategy_release(ntlm_credential_strategy);
+ aws_http_proxy_strategy_release(ntlm_strategy);
+
+ return adaptive_sequence_strategy;
+}
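+
+/*
+ * Usage sketch (hypothetical caller code); both option blocks are optional and may be NULL:
+ *
+ *     struct aws_http_proxy_strategy_tunneling_adaptive_options adaptive_options = {
+ *         .kerberos_options = &kerberos_options,
+ *         .ntlm_options = &ntlm_options,
+ *     };
+ *     struct aws_http_proxy_strategy *strategy =
+ *         aws_http_proxy_strategy_new_tunneling_adaptive(allocator, &adaptive_options);
+ *
+ *     ... attach the strategy to the proxy options and establish the connection ...
+ *
+ *     aws_http_proxy_strategy_release(strategy);
+ */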
+
+/******************************************************************************************************************/
+
+struct aws_http_proxy_strategy_tunneling_sequence {
+ struct aws_allocator *allocator;
+
+ struct aws_array_list strategies;
+
+ struct aws_http_proxy_strategy strategy_base;
+};
+
+struct aws_http_proxy_negotiator_tunneling_sequence {
+ struct aws_allocator *allocator;
+
+ struct aws_array_list negotiators;
+ size_t current_negotiator_transform_index;
+ void *original_internal_proxy_user_data;
+ aws_http_proxy_negotiation_terminate_fn *original_negotiation_termination_callback;
+ aws_http_proxy_negotiation_http_request_forward_fn *original_negotiation_http_request_forward_callback;
+
+ struct aws_http_proxy_negotiator negotiator_base;
+};
+
+static void s_sequence_tunnel_iteration_termination_callback(
+ struct aws_http_message *message,
+ int error_code,
+ void *user_data) {
+
+ struct aws_http_proxy_negotiator *proxy_negotiator = user_data;
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_PROXY_NEGOTIATION,
+ "(id=%p) Proxy negotiation step failed with error %d",
+ (void *)proxy_negotiator,
+ error_code);
+
+ int connection_error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED_RETRYABLE;
+ if (sequence_negotiator->current_negotiator_transform_index >=
+ aws_array_list_length(&sequence_negotiator->negotiators)) {
+ connection_error_code = AWS_ERROR_HTTP_PROXY_CONNECT_FAILED;
+ }
+
+ sequence_negotiator->original_negotiation_termination_callback(
+ message, connection_error_code, sequence_negotiator->original_internal_proxy_user_data);
+}
+
+static void s_sequence_tunnel_iteration_forward_callback(struct aws_http_message *message, void *user_data) {
+ struct aws_http_proxy_negotiator *proxy_negotiator = user_data;
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+ sequence_negotiator->original_negotiation_http_request_forward_callback(
+ message, sequence_negotiator->original_internal_proxy_user_data);
+}
+
+static void s_sequence_tunnel_try_next_negotiator(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message) {
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+ size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+ if (sequence_negotiator->current_negotiator_transform_index >= negotiator_count) {
+ goto on_error;
+ }
+
+ struct aws_http_proxy_negotiator *current_negotiator = NULL;
+ if (aws_array_list_get_at(
+ &sequence_negotiator->negotiators,
+ &current_negotiator,
+ sequence_negotiator->current_negotiator_transform_index++)) {
+ goto on_error;
+ }
+
+ current_negotiator->strategy_vtable.tunnelling_vtable->connect_request_transform(
+ current_negotiator,
+ message,
+ s_sequence_tunnel_iteration_termination_callback,
+ s_sequence_tunnel_iteration_forward_callback,
+ proxy_negotiator);
+
+ return;
+
+on_error:
+
+ sequence_negotiator->original_negotiation_termination_callback(
+ message, AWS_ERROR_HTTP_PROXY_CONNECT_FAILED, sequence_negotiator->original_internal_proxy_user_data);
+}
+
+static void s_sequence_tunnel_transform_connect(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ struct aws_http_message *message,
+ aws_http_proxy_negotiation_terminate_fn *negotiation_termination_callback,
+ aws_http_proxy_negotiation_http_request_forward_fn *negotiation_http_request_forward_callback,
+ void *internal_proxy_user_data) {
+
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+ sequence_negotiator->original_internal_proxy_user_data = internal_proxy_user_data;
+ sequence_negotiator->original_negotiation_termination_callback = negotiation_termination_callback;
+ sequence_negotiator->original_negotiation_http_request_forward_callback = negotiation_http_request_forward_callback;
+
+ s_sequence_tunnel_try_next_negotiator(proxy_negotiator, message);
+}
+
+static int s_sequence_on_incoming_headers(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers) {
+
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+ size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+ for (size_t i = 0; i < negotiator_count; ++i) {
+ struct aws_http_proxy_negotiator *negotiator = NULL;
+ if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+ continue;
+ }
+
+ aws_http_proxy_negotiation_connect_on_incoming_headers_fn *on_incoming_headers =
+ negotiator->strategy_vtable.tunnelling_vtable->on_incoming_headers_callback;
+ if (on_incoming_headers != NULL) {
+ (*on_incoming_headers)(negotiator, header_block, header_array, num_headers);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_sequence_on_connect_status(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ enum aws_http_status_code status_code) {
+
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+ size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+ for (size_t i = 0; i < negotiator_count; ++i) {
+ struct aws_http_proxy_negotiator *negotiator = NULL;
+ if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+ continue;
+ }
+
+ aws_http_proxy_negotiator_connect_status_fn *on_status =
+ negotiator->strategy_vtable.tunnelling_vtable->on_status_callback;
+ if (on_status != NULL) {
+ (*on_status)(negotiator, status_code);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_sequence_on_incoming_body(
+ struct aws_http_proxy_negotiator *proxy_negotiator,
+ const struct aws_byte_cursor *data) {
+
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+ size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+ for (size_t i = 0; i < negotiator_count; ++i) {
+ struct aws_http_proxy_negotiator *negotiator = NULL;
+ if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+ continue;
+ }
+
+ aws_http_proxy_negotiator_connect_on_incoming_body_fn *on_incoming_body =
+ negotiator->strategy_vtable.tunnelling_vtable->on_incoming_body_callback;
+ if (on_incoming_body != NULL) {
+ (*on_incoming_body)(negotiator, data);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static enum aws_http_proxy_negotiation_retry_directive s_sequence_get_retry_directive(
+ struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+ if (sequence_negotiator->current_negotiator_transform_index <
+ aws_array_list_length(&sequence_negotiator->negotiators)) {
+ struct aws_http_proxy_negotiator *next_negotiator = NULL;
+ aws_array_list_get_at(
+ &sequence_negotiator->negotiators,
+ &next_negotiator,
+ sequence_negotiator->current_negotiator_transform_index);
+
+ enum aws_http_proxy_negotiation_retry_directive next_negotiator_directive =
+ aws_http_proxy_negotiator_get_retry_directive(next_negotiator);
+ if (next_negotiator_directive == AWS_HPNRD_CURRENT_CONNECTION) {
+ return AWS_HPNRD_CURRENT_CONNECTION;
+ } else {
+ return AWS_HPNRD_NEW_CONNECTION;
+ }
+ }
+
+ return AWS_HPNRD_STOP;
+}
+
+static struct aws_http_proxy_negotiator_tunnelling_vtable s_tunneling_sequence_proxy_negotiator_tunneling_vtable = {
+ .on_incoming_body_callback = s_sequence_on_incoming_body,
+ .on_incoming_headers_callback = s_sequence_on_incoming_headers,
+ .on_status_callback = s_sequence_on_connect_status,
+ .connect_request_transform = s_sequence_tunnel_transform_connect,
+ .get_retry_directive = s_sequence_get_retry_directive,
+};
+
+static void s_destroy_tunneling_sequence_negotiator(struct aws_http_proxy_negotiator *proxy_negotiator) {
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator = proxy_negotiator->impl;
+
+ size_t negotiator_count = aws_array_list_length(&sequence_negotiator->negotiators);
+ for (size_t i = 0; i < negotiator_count; ++i) {
+ struct aws_http_proxy_negotiator *negotiator = NULL;
+ if (aws_array_list_get_at(&sequence_negotiator->negotiators, &negotiator, i)) {
+ continue;
+ }
+
+ aws_http_proxy_negotiator_release(negotiator);
+ }
+
+ aws_array_list_clean_up(&sequence_negotiator->negotiators);
+
+ aws_mem_release(sequence_negotiator->allocator, sequence_negotiator);
+}
+
+static struct aws_http_proxy_negotiator *s_create_tunneling_sequence_negotiator(
+ struct aws_http_proxy_strategy *proxy_strategy,
+ struct aws_allocator *allocator) {
+ if (proxy_strategy == NULL || allocator == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_negotiator_tunneling_sequence *sequence_negotiator =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_negotiator_tunneling_sequence));
+ if (sequence_negotiator == NULL) {
+ return NULL;
+ }
+
+ sequence_negotiator->allocator = allocator;
+ sequence_negotiator->negotiator_base.impl = sequence_negotiator;
+ aws_ref_count_init(
+ &sequence_negotiator->negotiator_base.ref_count,
+ &sequence_negotiator->negotiator_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_sequence_negotiator);
+
+ sequence_negotiator->negotiator_base.strategy_vtable.tunnelling_vtable =
+ &s_tunneling_sequence_proxy_negotiator_tunneling_vtable;
+
+ struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = proxy_strategy->impl;
+ size_t strategy_count = aws_array_list_length(&sequence_strategy->strategies);
+
+ if (aws_array_list_init_dynamic(
+ &sequence_negotiator->negotiators, allocator, strategy_count, sizeof(struct aws_http_proxy_negotiator *))) {
+ goto on_error;
+ }
+
+ for (size_t i = 0; i < strategy_count; ++i) {
+ struct aws_http_proxy_strategy *strategy = NULL;
+ if (aws_array_list_get_at(&sequence_strategy->strategies, &strategy, i)) {
+ goto on_error;
+ }
+
+ struct aws_http_proxy_negotiator *negotiator = aws_http_proxy_strategy_create_negotiator(strategy, allocator);
+ if (negotiator == NULL) {
+ goto on_error;
+ }
+
+ if (aws_array_list_push_back(&sequence_negotiator->negotiators, &negotiator)) {
+ aws_http_proxy_negotiator_release(negotiator);
+ goto on_error;
+ }
+ }
+
+ return &sequence_negotiator->negotiator_base;
+
+on_error:
+
+ aws_http_proxy_negotiator_release(&sequence_negotiator->negotiator_base);
+
+ return NULL;
+}
+
+static struct aws_http_proxy_strategy_vtable s_tunneling_sequence_strategy_vtable = {
+ .create_negotiator = s_create_tunneling_sequence_negotiator,
+};
+
+static void s_destroy_tunneling_sequence_strategy(struct aws_http_proxy_strategy *proxy_strategy) {
+ struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy = proxy_strategy->impl;
+
+ size_t strategy_count = aws_array_list_length(&sequence_strategy->strategies);
+ for (size_t i = 0; i < strategy_count; ++i) {
+ struct aws_http_proxy_strategy *strategy = NULL;
+ if (aws_array_list_get_at(&sequence_strategy->strategies, &strategy, i)) {
+ continue;
+ }
+
+ aws_http_proxy_strategy_release(strategy);
+ }
+
+ aws_array_list_clean_up(&sequence_strategy->strategies);
+
+ aws_mem_release(sequence_strategy->allocator, sequence_strategy);
+}
+
+struct aws_http_proxy_strategy *aws_http_proxy_strategy_new_tunneling_sequence(
+ struct aws_allocator *allocator,
+ struct aws_http_proxy_strategy_tunneling_sequence_options *config) {
+
+ if (allocator == NULL || config == NULL) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_proxy_strategy_tunneling_sequence *sequence_strategy =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_http_proxy_strategy_tunneling_sequence));
+ if (sequence_strategy == NULL) {
+ return NULL;
+ }
+
+ sequence_strategy->strategy_base.impl = sequence_strategy;
+ sequence_strategy->strategy_base.vtable = &s_tunneling_sequence_strategy_vtable;
+ sequence_strategy->strategy_base.proxy_connection_type = AWS_HPCT_HTTP_TUNNEL;
+ sequence_strategy->allocator = allocator;
+
+ aws_ref_count_init(
+ &sequence_strategy->strategy_base.ref_count,
+ &sequence_strategy->strategy_base,
+ (aws_simple_completion_callback *)s_destroy_tunneling_sequence_strategy);
+
+ if (aws_array_list_init_dynamic(
+ &sequence_strategy->strategies,
+ allocator,
+ config->strategy_count,
+ sizeof(struct aws_http_proxy_strategy *))) {
+ goto on_error;
+ }
+
+ for (size_t i = 0; i < config->strategy_count; ++i) {
+ struct aws_http_proxy_strategy *strategy = config->strategies[i];
+
+ if (aws_array_list_push_back(&sequence_strategy->strategies, &strategy)) {
+ goto on_error;
+ }
+
+ aws_http_proxy_strategy_acquire(strategy);
+ }
+
+ return &sequence_strategy->strategy_base;
+
+on_error:
+
+ aws_http_proxy_strategy_release(&sequence_strategy->strategy_base);
+
+ return NULL;
+}
+
+#if defined(_MSC_VER)
+# pragma warning(pop)
+#endif /* _MSC_VER */
diff --git a/contrib/restricted/aws/aws-c-http/source/random_access_set.c b/contrib/restricted/aws/aws-c-http/source/random_access_set.c
new file mode 100644
index 0000000000..20fc12309f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/random_access_set.c
@@ -0,0 +1,187 @@
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/allocator.h>
+#include <aws/common/device_random.h>
+#include <aws/http/private/random_access_set.h>
+
+struct aws_random_access_set_impl {
+ struct aws_allocator *allocator;
+    struct aws_array_list list; /* Always stores pointers to the elements. */
+    struct aws_hash_table map;  /* Maps each element to its index in the list. */
+ aws_hash_callback_destroy_fn *destroy_element_fn;
+};
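+
+/*
+ * Design note (summarizing the implementation below): pairing the array list with the hash table gives expected
+ * O(1) add/exist/random-get. Removal also stays O(1) by swapping the removed slot with the last list entry and
+ * fixing up that entry's index in the table, so the list never develops holes.
+ */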
+
+static void s_impl_destroy(struct aws_random_access_set_impl *impl) {
+ if (!impl) {
+ return;
+ }
+ aws_array_list_clean_up(&impl->list);
+ aws_hash_table_clean_up(&impl->map);
+ aws_mem_release(impl->allocator, impl);
+}
+
+static struct aws_random_access_set_impl *s_impl_new(
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_element_fn,
+ size_t initial_item_allocation) {
+ struct aws_random_access_set_impl *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_random_access_set_impl));
+ impl->allocator = allocator;
+    /* The list always stores pointers to the elements. */
+ if (aws_array_list_init_dynamic(&impl->list, allocator, initial_item_allocation, sizeof(void *))) {
+ s_impl_destroy(impl);
+ return NULL;
+ }
+
+ if (aws_hash_table_init(
+ &impl->map, allocator, initial_item_allocation, hash_fn, equals_fn, destroy_element_fn, NULL)) {
+ s_impl_destroy(impl);
+ return NULL;
+ }
+ impl->destroy_element_fn = destroy_element_fn;
+ return impl;
+}
+
+int aws_random_access_set_init(
+ struct aws_random_access_set *set,
+ struct aws_allocator *allocator,
+ aws_hash_fn *hash_fn,
+ aws_hash_callback_eq_fn *equals_fn,
+ aws_hash_callback_destroy_fn *destroy_element_fn,
+ size_t initial_item_allocation) {
+ AWS_FATAL_PRECONDITION(set);
+ AWS_FATAL_PRECONDITION(allocator);
+ AWS_FATAL_PRECONDITION(hash_fn);
+ AWS_FATAL_PRECONDITION(equals_fn);
+
+ struct aws_random_access_set_impl *impl =
+ s_impl_new(allocator, hash_fn, equals_fn, destroy_element_fn, initial_item_allocation);
+ if (!impl) {
+ return AWS_OP_ERR;
+ }
+ set->impl = impl;
+ return AWS_OP_SUCCESS;
+}
+
+void aws_random_access_set_clean_up(struct aws_random_access_set *set) {
+ if (!set) {
+ return;
+ }
+ s_impl_destroy(set->impl);
+}
+
+int aws_random_access_set_add(struct aws_random_access_set *set, const void *element, bool *added) {
+ AWS_PRECONDITION(set);
+ AWS_PRECONDITION(element);
+ AWS_PRECONDITION(added);
+ bool exist = false;
+ if (aws_random_access_set_exist(set, element, &exist) || exist) {
+ *added = false;
+ return AWS_OP_SUCCESS;
+ }
+    /* Store a copy of the element pointer in the array list */
+ if (aws_array_list_push_back(&set->impl->list, (void *)&element)) {
+ goto list_push_error;
+ }
+ if (aws_hash_table_put(&set->impl->map, element, (void *)(aws_array_list_length(&set->impl->list) - 1), NULL)) {
+ goto error;
+ }
+ *added = true;
+ return AWS_OP_SUCCESS;
+error:
+ aws_array_list_pop_back(&set->impl->list);
+list_push_error:
+ *added = false;
+ return AWS_OP_ERR;
+}
+
+int aws_random_access_set_remove(struct aws_random_access_set *set, const void *element) {
+ AWS_PRECONDITION(set);
+ AWS_PRECONDITION(element);
+ size_t current_length = aws_array_list_length(&set->impl->list);
+ if (current_length == 0) {
+ /* Nothing to remove */
+ return AWS_OP_SUCCESS;
+ }
+ struct aws_hash_element *find = NULL;
+    /* Find the element in the table and remove it */
+ if (aws_hash_table_find(&set->impl->map, element, &find)) {
+ return AWS_OP_ERR;
+ }
+ if (!find) {
+ /* It's removed already */
+ return AWS_OP_SUCCESS;
+ }
+
+ size_t index_to_remove = (size_t)find->value;
+ if (aws_hash_table_remove_element(&set->impl->map, find)) {
+ return AWS_OP_ERR;
+ }
+    /* If any of the asserted calls below fail, we cannot recover from the failure */
+ int assert_re = AWS_OP_SUCCESS;
+ (void)assert_re;
+ /* Nothing else can fail after here. */
+ if (index_to_remove != current_length - 1) {
+        /* It's not the last element; swap it with the last entry so the tail of the list can be popped below */
+ void *last_element = NULL;
+        /* last_element points at the list slot, which itself holds the element pointer. */
+ assert_re = aws_array_list_get_at_ptr(&set->impl->list, &last_element, current_length - 1);
+ AWS_ASSERT(assert_re == AWS_OP_SUCCESS);
+ /* Update the last element index in the table */
+ struct aws_hash_element *element_to_update = NULL;
+ assert_re = aws_hash_table_find(&set->impl->map, *(void **)last_element, &element_to_update);
+ AWS_ASSERT(assert_re == AWS_OP_SUCCESS);
+ AWS_ASSERT(element_to_update != NULL);
+ element_to_update->value = (void *)index_to_remove;
+ /* Swap the last element with the element to remove in the list */
+ aws_array_list_swap(&set->impl->list, index_to_remove, current_length - 1);
+ }
+ /* Remove the current last element from the list */
+ assert_re = aws_array_list_pop_back(&set->impl->list);
+ AWS_ASSERT(assert_re == AWS_OP_SUCCESS);
+ if (set->impl->destroy_element_fn) {
+ set->impl->destroy_element_fn((void *)element);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+int aws_random_access_set_random_get_ptr(const struct aws_random_access_set *set, void **out) {
+ AWS_PRECONDITION(set);
+ AWS_PRECONDITION(out != NULL);
+ size_t length = aws_array_list_length(&set->impl->list);
+ if (length == 0) {
+ return aws_raise_error(AWS_ERROR_LIST_EMPTY);
+ }
+
+ uint64_t random_64_bit_num = 0;
+ aws_device_random_u64(&random_64_bit_num);
+
+ size_t index = (size_t)random_64_bit_num % length;
+    /* The array list stores pointers to the elements. */
+ return aws_array_list_get_at(&set->impl->list, (void *)out, index);
+}
+
+size_t aws_random_access_set_get_size(const struct aws_random_access_set *set) {
+ return aws_array_list_length(&set->impl->list);
+}
+
+int aws_random_access_set_exist(const struct aws_random_access_set *set, const void *element, bool *exist) {
+ AWS_PRECONDITION(set);
+ AWS_PRECONDITION(element);
+ AWS_PRECONDITION(exist);
+ struct aws_hash_element *find = NULL;
+ int re = aws_hash_table_find(&set->impl->map, element, &find);
+ *exist = find != NULL;
+ return re;
+}
+
+int aws_random_access_set_random_get_ptr_index(const struct aws_random_access_set *set, void **out, size_t index) {
+ AWS_PRECONDITION(set);
+ AWS_PRECONDITION(out != NULL);
+ return aws_array_list_get_at(&set->impl->list, (void *)out, index);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/request_response.c b/contrib/restricted/aws/aws-c-http/source/request_response.c
new file mode 100644
index 0000000000..c382a3a4d0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/request_response.c
@@ -0,0 +1,1228 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/mutex.h>
+#include <aws/common/string.h>
+#include <aws/http/private/connection_impl.h>
+#include <aws/http/private/request_response_impl.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/server.h>
+#include <aws/http/status_code.h>
+#include <aws/io/logging.h>
+#include <aws/io/stream.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+enum {
+ /* Initial capacity for the aws_http_message.headers array_list. */
+ AWS_HTTP_REQUEST_NUM_RESERVED_HEADERS = 16,
+};
+
+bool aws_http_header_name_eq(struct aws_byte_cursor name_a, struct aws_byte_cursor name_b) {
+ return aws_byte_cursor_eq_ignore_case(&name_a, &name_b);
+}
+
+/**
+ * -- Data Structure Notes --
+ * Headers are stored in a linear array, rather than a hash-table of arrays.
+ * The linear array was simpler to implement and may be faster due to having fewer allocations.
+ * The API has been designed so we can swap out the implementation later if desired.
+ *
+ * -- String Storage Notes --
+ * We use a single allocation to hold the name and value of each aws_http_header.
+ * We could optimize storage by using something like a string pool. If we do this, be sure to maintain
+ * the address of existing strings when adding new strings (a dynamic aws_byte_buf would not suffice).
+ */
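+/*
+ * Practical consequence of the storage scheme (as implemented below): the name/value cursors returned by the getters
+ * point into a per-header allocation, so they remain valid until that header is erased or the headers object is
+ * cleared or released.
+ */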
+struct aws_http_headers {
+ struct aws_allocator *alloc;
+ struct aws_array_list array_list; /* Contains aws_http_header */
+ struct aws_atomic_var refcount;
+};
+
+struct aws_http_headers *aws_http_headers_new(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_http_headers *headers = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_headers));
+ if (!headers) {
+ goto alloc_failed;
+ }
+
+ headers->alloc = allocator;
+ aws_atomic_init_int(&headers->refcount, 1);
+
+ if (aws_array_list_init_dynamic(
+ &headers->array_list, allocator, AWS_HTTP_REQUEST_NUM_RESERVED_HEADERS, sizeof(struct aws_http_header))) {
+ goto array_list_failed;
+ }
+
+ return headers;
+
+array_list_failed:
+ aws_mem_release(headers->alloc, headers);
+alloc_failed:
+ return NULL;
+}
+
+void aws_http_headers_release(struct aws_http_headers *headers) {
+ AWS_PRECONDITION(!headers || headers->alloc);
+ if (!headers) {
+ return;
+ }
+
+ size_t prev_refcount = aws_atomic_fetch_sub(&headers->refcount, 1);
+ if (prev_refcount == 1) {
+ aws_http_headers_clear(headers);
+ aws_array_list_clean_up(&headers->array_list);
+ aws_mem_release(headers->alloc, headers);
+ } else {
+ AWS_ASSERT(prev_refcount != 0);
+ }
+}
+
+void aws_http_headers_acquire(struct aws_http_headers *headers) {
+ AWS_PRECONDITION(headers);
+ aws_atomic_fetch_add(&headers->refcount, 1);
+}
+
+static int s_http_headers_add_header_impl(
+ struct aws_http_headers *headers,
+ const struct aws_http_header *header_orig,
+ bool front) {
+
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(header_orig);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&header_orig->name) && aws_byte_cursor_is_valid(&header_orig->value));
+
+ struct aws_http_header header_copy = *header_orig;
+
+ if (header_copy.name.len == 0) {
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_HEADER_NAME);
+ }
+
+ /* Whitespace around header values is ignored (RFC-7230 - Section 3.2).
+ * Trim it off here, so anyone querying this value has an easier time. */
+ header_copy.value = aws_strutil_trim_http_whitespace(header_copy.value);
+
+ size_t total_len;
+ if (aws_add_size_checked(header_copy.name.len, header_copy.value.len, &total_len)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Store our own copy of the strings.
+ * We put the name and value into the same allocation. */
+ uint8_t *strmem = aws_mem_acquire(headers->alloc, total_len);
+
+ struct aws_byte_buf strbuf = aws_byte_buf_from_empty_array(strmem, total_len);
+ aws_byte_buf_append_and_update(&strbuf, &header_copy.name);
+ aws_byte_buf_append_and_update(&strbuf, &header_copy.value);
+ if (front) {
+ if (aws_array_list_push_front(&headers->array_list, &header_copy)) {
+ goto error;
+ }
+ } else {
+ if (aws_array_list_push_back(&headers->array_list, &header_copy)) {
+ goto error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_mem_release(headers->alloc, strmem);
+ return AWS_OP_ERR;
+}
+
+int aws_http_headers_add_header(struct aws_http_headers *headers, const struct aws_http_header *header) {
+    /* Add pseudo-headers to the front; violations are not checked until the headers are sent on the wire */
+ bool pseudo = aws_strutil_is_http_pseudo_header_name(header->name);
+ bool front = false;
+ if (pseudo && aws_http_headers_count(headers)) {
+ struct aws_http_header last_header;
+        /* TODO: instead of checking the last header, we could add new pseudo-headers right after the existing
+         * pseudo-headers, which requires inserting into the middle of the array list. */
+ AWS_ZERO_STRUCT(last_header);
+ aws_http_headers_get_index(headers, aws_http_headers_count(headers) - 1, &last_header);
+ front = !aws_strutil_is_http_pseudo_header_name(last_header.name);
+ }
+ return s_http_headers_add_header_impl(headers, header, front);
+}
+
+int aws_http_headers_add(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) {
+ struct aws_http_header header = {.name = name, .value = value};
+ return aws_http_headers_add_header(headers, &header);
+}
+
+void aws_http_headers_clear(struct aws_http_headers *headers) {
+ AWS_PRECONDITION(headers);
+
+ struct aws_http_header *header = NULL;
+ const size_t count = aws_http_headers_count(headers);
+ for (size_t i = 0; i < count; ++i) {
+ aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+ AWS_ASSUME(header);
+
+ /* Storage for name & value is in the same allocation */
+ aws_mem_release(headers->alloc, header->name.ptr);
+ }
+
+ aws_array_list_clear(&headers->array_list);
+}
+
+/* Does not check index */
+static void s_http_headers_erase_index(struct aws_http_headers *headers, size_t index) {
+ struct aws_http_header *header = NULL;
+ aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, index);
+ AWS_ASSUME(header);
+
+ /* Storage for name & value is in the same allocation */
+ aws_mem_release(headers->alloc, header->name.ptr);
+
+ aws_array_list_erase(&headers->array_list, index);
+}
+
+int aws_http_headers_erase_index(struct aws_http_headers *headers, size_t index) {
+ AWS_PRECONDITION(headers);
+
+ if (index >= aws_http_headers_count(headers)) {
+ return aws_raise_error(AWS_ERROR_INVALID_INDEX);
+ }
+
+ s_http_headers_erase_index(headers, index);
+ return AWS_OP_SUCCESS;
+}
+
+/* Erase entries with name, stop at end_index */
+static int s_http_headers_erase(
+ struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ size_t start_index,
+ size_t end_index) {
+ bool erased_any = false;
+ struct aws_http_header *header = NULL;
+
+ /* Iterating in reverse is simpler */
+ for (size_t n = end_index; n > start_index; --n) {
+ const size_t i = n - 1;
+
+ aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+ AWS_ASSUME(header);
+
+ if (aws_http_header_name_eq(header->name, name)) {
+ s_http_headers_erase_index(headers, i);
+ erased_any = true;
+ }
+ }
+
+ if (!erased_any) {
+ return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_http_headers_erase(struct aws_http_headers *headers, struct aws_byte_cursor name) {
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&name));
+
+ return s_http_headers_erase(headers, name, 0, aws_http_headers_count(headers));
+}
+
+int aws_http_headers_erase_value(
+ struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
+
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value));
+
+ struct aws_http_header *header = NULL;
+ const size_t count = aws_http_headers_count(headers);
+ for (size_t i = 0; i < count; ++i) {
+ aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+ AWS_ASSUME(header);
+
+ if (aws_http_header_name_eq(header->name, name) && aws_byte_cursor_eq(&header->value, &value)) {
+ s_http_headers_erase_index(headers, i);
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+}
+
+int aws_http_headers_add_array(struct aws_http_headers *headers, const struct aws_http_header *array, size_t count) {
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(AWS_MEM_IS_READABLE(array, count));
+
+ const size_t orig_count = aws_http_headers_count(headers);
+
+ for (size_t i = 0; i < count; ++i) {
+ if (aws_http_headers_add_header(headers, &array[i])) {
+ goto error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ /* Erase headers from the end until we're back to our previous state */
+ for (size_t new_count = aws_http_headers_count(headers); new_count > orig_count; --new_count) {
+ s_http_headers_erase_index(headers, new_count - 1);
+ }
+
+ return AWS_OP_ERR;
+}
+
+int aws_http_headers_set(struct aws_http_headers *headers, struct aws_byte_cursor name, struct aws_byte_cursor value) {
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&name) && aws_byte_cursor_is_valid(&value));
+
+ const size_t prev_count = aws_http_headers_count(headers);
+ bool pseudo = aws_strutil_is_http_pseudo_header_name(name);
+ const size_t start = pseudo ? 1 : 0;
+ struct aws_http_header header = {.name = name, .value = value};
+ if (s_http_headers_add_header_impl(headers, &header, pseudo)) {
+ return AWS_OP_ERR;
+ }
+ /* Erase pre-existing headers AFTER add, in case name or value was referencing their memory. */
+ s_http_headers_erase(headers, name, start, prev_count);
+ return AWS_OP_SUCCESS;
+}
+
+size_t aws_http_headers_count(const struct aws_http_headers *headers) {
+ AWS_PRECONDITION(headers);
+
+ return aws_array_list_length(&headers->array_list);
+}
+
+int aws_http_headers_get_index(
+ const struct aws_http_headers *headers,
+ size_t index,
+ struct aws_http_header *out_header) {
+
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(out_header);
+
+ return aws_array_list_get_at(&headers->array_list, out_header, index);
+}
+
+/* RFC-9110 - 5.3
+ * A recipient MAY combine multiple field lines within a field section that
+ * have the same field name into one field line, without changing the semantics
+ * of the message, by appending each subsequent field line value to the initial
+ * field line value in order, separated by a comma (",") and optional whitespace
+ * (OWS, defined in Section 5.6.3). For consistency, use comma SP. */
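+/*
+ * Example (illustrative values): for two stored headers "x-custom: a" and "x-custom: b",
+ * aws_http_headers_get_all(headers, "x-custom") returns the string "a, b".
+ */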
+AWS_HTTP_API
+struct aws_string *aws_http_headers_get_all(const struct aws_http_headers *headers, struct aws_byte_cursor name) {
+
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&name));
+
+ struct aws_string *value_str = NULL;
+
+ const struct aws_byte_cursor separator = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(", ");
+
+ struct aws_byte_buf value_builder;
+ aws_byte_buf_init(&value_builder, headers->alloc, 0);
+ bool found = false;
+ struct aws_http_header *header = NULL;
+ const size_t count = aws_http_headers_count(headers);
+ for (size_t i = 0; i < count; ++i) {
+ aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+ if (aws_http_header_name_eq(name, header->name)) {
+ if (!found) {
+ found = true;
+ } else {
+ aws_byte_buf_append_dynamic(&value_builder, &separator);
+ }
+ aws_byte_buf_append_dynamic(&value_builder, &header->value);
+ }
+ }
+
+ if (found) {
+ value_str = aws_string_new_from_buf(headers->alloc, &value_builder);
+ } else {
+ aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+ }
+
+ aws_byte_buf_clean_up(&value_builder);
+ return value_str;
+}
+
+int aws_http_headers_get(
+ const struct aws_http_headers *headers,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor *out_value) {
+
+ AWS_PRECONDITION(headers);
+ AWS_PRECONDITION(out_value);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&name));
+
+ struct aws_http_header *header = NULL;
+ const size_t count = aws_http_headers_count(headers);
+ for (size_t i = 0; i < count; ++i) {
+ aws_array_list_get_at_ptr(&headers->array_list, (void **)&header, i);
+ AWS_ASSUME(header);
+
+ if (aws_http_header_name_eq(header->name, name)) {
+ *out_value = header->value;
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+}
+
+bool aws_http_headers_has(const struct aws_http_headers *headers, struct aws_byte_cursor name) {
+
+ struct aws_byte_cursor out_value;
+ if (aws_http_headers_get(headers, name, &out_value)) {
+ return false;
+ }
+ return true;
+}
+
+int aws_http2_headers_get_request_method(
+ const struct aws_http_headers *h2_headers,
+ struct aws_byte_cursor *out_method) {
+ return aws_http_headers_get(h2_headers, aws_http_header_method, out_method);
+}
+
+int aws_http2_headers_get_request_scheme(
+ const struct aws_http_headers *h2_headers,
+ struct aws_byte_cursor *out_scheme) {
+ return aws_http_headers_get(h2_headers, aws_http_header_scheme, out_scheme);
+}
+
+int aws_http2_headers_get_request_authority(
+ const struct aws_http_headers *h2_headers,
+ struct aws_byte_cursor *out_authority) {
+ return aws_http_headers_get(h2_headers, aws_http_header_authority, out_authority);
+}
+
+int aws_http2_headers_get_request_path(const struct aws_http_headers *h2_headers, struct aws_byte_cursor *out_path) {
+ return aws_http_headers_get(h2_headers, aws_http_header_path, out_path);
+}
+
+int aws_http2_headers_get_response_status(const struct aws_http_headers *h2_headers, int *out_status_code) {
+ struct aws_byte_cursor status_code_cur;
+ int return_code = aws_http_headers_get(h2_headers, aws_http_header_status, &status_code_cur);
+ if (return_code == AWS_OP_SUCCESS) {
+ uint64_t code_val_u64;
+ if (aws_byte_cursor_utf8_parse_u64(status_code_cur, &code_val_u64)) {
+ return AWS_OP_ERR;
+ }
+ *out_status_code = (int)code_val_u64;
+ }
+ return return_code;
+}
+
+int aws_http2_headers_set_request_method(struct aws_http_headers *h2_headers, struct aws_byte_cursor method) {
+ return aws_http_headers_set(h2_headers, aws_http_header_method, method);
+}
+
+int aws_http2_headers_set_request_scheme(struct aws_http_headers *h2_headers, struct aws_byte_cursor scheme) {
+ return aws_http_headers_set(h2_headers, aws_http_header_scheme, scheme);
+}
+
+int aws_http2_headers_set_request_authority(struct aws_http_headers *h2_headers, struct aws_byte_cursor authority) {
+ return aws_http_headers_set(h2_headers, aws_http_header_authority, authority);
+}
+
+int aws_http2_headers_set_request_path(struct aws_http_headers *h2_headers, struct aws_byte_cursor path) {
+ return aws_http_headers_set(h2_headers, aws_http_header_path, path);
+}
+
+int aws_http2_headers_set_response_status(struct aws_http_headers *h2_headers, int status_code) {
+ /* Status code must fit in 3 digits */
+ if (status_code < 0 || status_code > 999) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ char status_code_str[4] = "000";
+ snprintf(status_code_str, sizeof(status_code_str), "%03d", status_code);
+ struct aws_byte_cursor status_code_cur = aws_byte_cursor_from_c_str(status_code_str);
+ return aws_http_headers_set(h2_headers, aws_http_header_status, status_code_cur);
+}
+
+struct aws_http_message {
+ struct aws_allocator *allocator;
+ struct aws_http_headers *headers;
+ struct aws_input_stream *body_stream;
+ struct aws_atomic_var refcount;
+ enum aws_http_version http_version;
+
+ /* Data specific to the request or response subclasses */
+ union {
+ struct aws_http_message_request_data {
+ struct aws_string *method;
+ struct aws_string *path;
+ } request;
+ struct aws_http_message_response_data {
+ int status;
+ } response;
+ } subclass_data;
+
+ struct aws_http_message_request_data *request_data;
+ struct aws_http_message_response_data *response_data;
+};
+
+static int s_set_string_from_cursor(
+ struct aws_string **dst,
+ struct aws_byte_cursor cursor,
+ struct aws_allocator *alloc) {
+
+ AWS_PRECONDITION(dst);
+
+ /* If the cursor is empty, set dst to NULL */
+ struct aws_string *new_str;
+ if (cursor.len) {
+ new_str = aws_string_new_from_cursor(alloc, &cursor);
+ if (!new_str) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ new_str = NULL;
+ }
+
+ /* Replace existing value */
+ aws_string_destroy(*dst);
+
+ *dst = new_str;
+ return AWS_OP_SUCCESS;
+}
+static struct aws_http_message *s_message_new_common(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *existing_headers) {
+
+ /* allocation cannot fail */
+ struct aws_http_message *message = aws_mem_calloc(allocator, 1, sizeof(struct aws_http_message));
+
+ message->allocator = allocator;
+ aws_atomic_init_int(&message->refcount, 1);
+
+ if (existing_headers) {
+ message->headers = existing_headers;
+ aws_http_headers_acquire(message->headers);
+ } else {
+ message->headers = aws_http_headers_new(allocator);
+ if (!message->headers) {
+ goto error;
+ }
+ }
+
+ return message;
+error:
+ aws_http_message_destroy(message);
+ return NULL;
+}
+
+static struct aws_http_message *s_message_new_request_common(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *existing_headers,
+ enum aws_http_version version) {
+
+ struct aws_http_message *message = s_message_new_common(allocator, existing_headers);
+ if (message) {
+ message->request_data = &message->subclass_data.request;
+ message->http_version = version;
+ }
+ return message;
+}
+
+struct aws_http_message *aws_http_message_new_request_with_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *existing_headers) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(existing_headers);
+
+ return s_message_new_request_common(allocator, existing_headers, AWS_HTTP_VERSION_1_1);
+}
+
+struct aws_http_message *aws_http_message_new_request(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+ return s_message_new_request_common(allocator, NULL, AWS_HTTP_VERSION_1_1);
+}
+
+struct aws_http_message *aws_http2_message_new_request(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+ return s_message_new_request_common(allocator, NULL, AWS_HTTP_VERSION_2);
+}
+
+static struct aws_http_message *s_http_message_new_response_common(
+ struct aws_allocator *allocator,
+ enum aws_http_version version) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_http_message *message = s_message_new_common(allocator, NULL);
+ if (message) {
+ message->response_data = &message->subclass_data.response;
+ message->response_data->status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ message->http_version = version;
+ }
+ return message;
+}
+
+struct aws_http_message *aws_http_message_new_response(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+ return s_http_message_new_response_common(allocator, AWS_HTTP_VERSION_1_1);
+}
+
+struct aws_http_message *aws_http2_message_new_response(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+ return s_http_message_new_response_common(allocator, AWS_HTTP_VERSION_2);
+}
+
+void aws_http_message_destroy(struct aws_http_message *message) {
+ aws_http_message_release(message);
+}
+
+struct aws_http_message *aws_http_message_release(struct aws_http_message *message) {
+ /* Note that release() may also be used by new() functions to clean up if something goes wrong */
+ AWS_PRECONDITION(!message || message->allocator);
+ if (!message) {
+ return NULL;
+ }
+
+ size_t prev_refcount = aws_atomic_fetch_sub(&message->refcount, 1);
+ if (prev_refcount == 1) {
+ if (message->request_data) {
+ aws_string_destroy(message->request_data->method);
+ aws_string_destroy(message->request_data->path);
+ }
+
+ aws_http_headers_release(message->headers);
+ aws_input_stream_release(message->body_stream);
+ aws_mem_release(message->allocator, message);
+ } else {
+ AWS_ASSERT(prev_refcount != 0);
+ }
+
+ return NULL;
+}
+
+struct aws_http_message *aws_http_message_acquire(struct aws_http_message *message) {
+ if (message != NULL) {
+ aws_atomic_fetch_add(&message->refcount, 1);
+ }
+
+ return message;
+}
+
+bool aws_http_message_is_request(const struct aws_http_message *message) {
+ AWS_PRECONDITION(message);
+ return message->request_data;
+}
+
+bool aws_http_message_is_response(const struct aws_http_message *message) {
+ AWS_PRECONDITION(message);
+ return message->response_data;
+}
+
+enum aws_http_version aws_http_message_get_protocol_version(const struct aws_http_message *message) {
+ AWS_PRECONDITION(message);
+ return message->http_version;
+}
+
+int aws_http_message_set_request_method(struct aws_http_message *request_message, struct aws_byte_cursor method) {
+ AWS_PRECONDITION(request_message);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&method));
+ AWS_PRECONDITION(request_message->request_data);
+
+ if (request_message->request_data) {
+ switch (request_message->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ return s_set_string_from_cursor(
+ &request_message->request_data->method, method, request_message->allocator);
+ case AWS_HTTP_VERSION_2:
+ return aws_http2_headers_set_request_method(request_message->headers, method);
+ default:
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+ }
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+int aws_http_message_get_request_method(
+ const struct aws_http_message *request_message,
+ struct aws_byte_cursor *out_method) {
+
+ AWS_PRECONDITION(request_message);
+ AWS_PRECONDITION(out_method);
+ AWS_PRECONDITION(request_message->request_data);
+ int error = AWS_ERROR_HTTP_DATA_NOT_AVAILABLE;
+ if (request_message->request_data) {
+ switch (request_message->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ if (request_message->request_data->method) {
+ *out_method = aws_byte_cursor_from_string(request_message->request_data->method);
+ return AWS_OP_SUCCESS;
+ }
+ break;
+ case AWS_HTTP_VERSION_2:
+ return aws_http2_headers_get_request_method(request_message->headers, out_method);
+ default:
+ error = AWS_ERROR_UNIMPLEMENTED;
+ }
+ }
+
+ AWS_ZERO_STRUCT(*out_method);
+ return aws_raise_error(error);
+}
+
+int aws_http_message_set_request_path(struct aws_http_message *request_message, struct aws_byte_cursor path) {
+ AWS_PRECONDITION(request_message);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&path));
+ AWS_PRECONDITION(request_message->request_data);
+
+ if (request_message->request_data) {
+ switch (request_message->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ return s_set_string_from_cursor(&request_message->request_data->path, path, request_message->allocator);
+ case AWS_HTTP_VERSION_2:
+ return aws_http2_headers_set_request_path(request_message->headers, path);
+ default:
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+int aws_http_message_get_request_path(
+ const struct aws_http_message *request_message,
+ struct aws_byte_cursor *out_path) {
+
+ AWS_PRECONDITION(request_message);
+ AWS_PRECONDITION(out_path);
+ AWS_PRECONDITION(request_message->request_data);
+
+ if (request_message->request_data) {
+ switch (request_message->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ if (request_message->request_data->path) {
+ *out_path = aws_byte_cursor_from_string(request_message->request_data->path);
+ return AWS_OP_SUCCESS;
+ }
+ break;
+ case AWS_HTTP_VERSION_2:
+ return aws_http2_headers_get_request_path(request_message->headers, out_path);
+ default:
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+ }
+
+ AWS_ZERO_STRUCT(*out_path);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+}
+
+int aws_http_message_get_response_status(const struct aws_http_message *response_message, int *out_status_code) {
+ AWS_PRECONDITION(response_message);
+ AWS_PRECONDITION(out_status_code);
+ AWS_PRECONDITION(response_message->response_data);
+
+ *out_status_code = AWS_HTTP_STATUS_CODE_UNKNOWN;
+
+ if (response_message->response_data) {
+ switch (response_message->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ if (response_message->response_data->status != AWS_HTTP_STATUS_CODE_UNKNOWN) {
+ *out_status_code = response_message->response_data->status;
+ return AWS_OP_SUCCESS;
+ }
+ break;
+ case AWS_HTTP_VERSION_2:
+ return aws_http2_headers_get_response_status(response_message->headers, out_status_code);
+ default:
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+}
+
+int aws_http_message_set_response_status(struct aws_http_message *response_message, int status_code) {
+ AWS_PRECONDITION(response_message);
+ AWS_PRECONDITION(response_message->response_data);
+
+ if (response_message->response_data) {
+ /* Status code must be printable with exactly 3 digits */
+ if (status_code >= 0 && status_code <= 999) {
+ switch (response_message->http_version) {
+ case AWS_HTTP_VERSION_1_1:
+ response_message->response_data->status = status_code;
+ return AWS_OP_SUCCESS;
+ case AWS_HTTP_VERSION_2:
+ return aws_http2_headers_set_response_status(response_message->headers, status_code);
+ default:
+ return aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ }
+ }
+
+ return aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE);
+ }
+
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+}
+
+void aws_http_message_set_body_stream(struct aws_http_message *message, struct aws_input_stream *body_stream) {
+ AWS_PRECONDITION(message);
+ /* release previous stream, if any */
+ aws_input_stream_release(message->body_stream);
+
+ message->body_stream = body_stream;
+ if (message->body_stream) {
+ aws_input_stream_acquire(message->body_stream);
+ }
+}
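+
+/*
+ * Usage sketch (illustration only): assembling a request with the setters above. Error
+ * handling and the body stream are omitted for brevity; "alloc" stands for any
+ * struct aws_allocator the caller already owns.
+ *
+ *     struct aws_http_message *request = aws_http_message_new_request(alloc);
+ *     aws_http_message_set_request_method(request, aws_byte_cursor_from_c_str("GET"));
+ *     aws_http_message_set_request_path(request, aws_byte_cursor_from_c_str("/index.html"));
+ *     struct aws_http_header host_header = {
+ *         .name = aws_byte_cursor_from_c_str("host"),
+ *         .value = aws_byte_cursor_from_c_str("example.com"),
+ *     };
+ *     aws_http_message_add_header(request, host_header);
+ *     aws_http_message_release(request);
+ */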
+
+int aws_http1_stream_write_chunk(struct aws_http_stream *http1_stream, const struct aws_http1_chunk_options *options) {
+ AWS_PRECONDITION(http1_stream);
+ AWS_PRECONDITION(http1_stream->vtable);
+ AWS_PRECONDITION(options);
+ if (!http1_stream->vtable->http1_write_chunk) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/1 stream only function invoked on other stream, ignoring call.",
+ (void *)http1_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ return http1_stream->vtable->http1_write_chunk(http1_stream, options);
+}
+
+int aws_http2_stream_write_data(
+ struct aws_http_stream *http2_stream,
+ const struct aws_http2_stream_write_data_options *options) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ AWS_PRECONDITION(http2_stream->vtable->http2_write_data);
+ AWS_PRECONDITION(options);
+
+ return http2_stream->vtable->http2_write_data(http2_stream, options);
+}
+
+int aws_http1_stream_add_chunked_trailer(
+ struct aws_http_stream *http1_stream,
+ const struct aws_http_headers *trailing_headers) {
+ AWS_PRECONDITION(http1_stream);
+ AWS_PRECONDITION(http1_stream->vtable);
+ AWS_PRECONDITION(trailing_headers);
+ if (!http1_stream->vtable->http1_add_trailer) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/1 stream only function invoked on other stream, ignoring call.",
+ (void *)http1_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ return http1_stream->vtable->http1_add_trailer(http1_stream, trailing_headers);
+}
+
+struct aws_input_stream *aws_http_message_get_body_stream(const struct aws_http_message *message) {
+ AWS_PRECONDITION(message);
+ return message->body_stream;
+}
+
+struct aws_http_headers *aws_http_message_get_headers(const struct aws_http_message *message) {
+ AWS_PRECONDITION(message);
+ return message->headers;
+}
+
+const struct aws_http_headers *aws_http_message_get_const_headers(const struct aws_http_message *message) {
+ AWS_PRECONDITION(message);
+ return message->headers;
+}
+
+int aws_http_message_add_header(struct aws_http_message *message, struct aws_http_header header) {
+ return aws_http_headers_add(message->headers, header.name, header.value);
+}
+
+int aws_http_message_add_header_array(
+ struct aws_http_message *message,
+ const struct aws_http_header *headers,
+ size_t num_headers) {
+
+ return aws_http_headers_add_array(message->headers, headers, num_headers);
+}
+
+int aws_http_message_erase_header(struct aws_http_message *message, size_t index) {
+ return aws_http_headers_erase_index(message->headers, index);
+}
+
+size_t aws_http_message_get_header_count(const struct aws_http_message *message) {
+ return aws_http_headers_count(message->headers);
+}
+
+int aws_http_message_get_header(
+ const struct aws_http_message *message,
+ struct aws_http_header *out_header,
+ size_t index) {
+
+ return aws_http_headers_get_index(message->headers, index, out_header);
+}
+
+struct aws_http_stream *aws_http_connection_make_request(
+ struct aws_http_connection *client_connection,
+ const struct aws_http_make_request_options *options) {
+
+ AWS_PRECONDITION(client_connection);
+ AWS_PRECONDITION(aws_http_connection_is_client(client_connection));
+ AWS_PRECONDITION(options);
+ if (options->self_size == 0 || !options->request || !aws_http_message_is_request(options->request)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot create client request, options are invalid.",
+ (void *)client_connection);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ /* Connection owns stream, and must outlive stream */
+ aws_http_connection_acquire(client_connection);
+
+ struct aws_http_stream *stream = client_connection->vtable->make_request(client_connection, options);
+ if (!stream) {
+ aws_http_connection_release(client_connection);
+ return NULL;
+ }
+
+ return stream;
+}
+
+struct aws_http_message *aws_http2_message_new_from_http1(
+ struct aws_allocator *alloc,
+ const struct aws_http_message *http1_msg) {
+
+ struct aws_http_headers *old_headers = aws_http_message_get_headers(http1_msg);
+ struct aws_http_header header_iter;
+ struct aws_byte_buf lower_name_buf;
+ AWS_ZERO_STRUCT(lower_name_buf);
+ struct aws_http_message *message = aws_http_message_is_request(http1_msg) ? aws_http2_message_new_request(alloc)
+ : aws_http2_message_new_response(alloc);
+ if (!message) {
+ return NULL;
+ }
+ struct aws_http_headers *copied_headers = message->headers;
+ AWS_LOGF_TRACE(AWS_LS_HTTP_GENERAL, "Creating HTTP/2 message from HTTP/1 message id: %p", (void *)http1_msg);
+
+ /* Set pseudo headers from HTTP/1.1 message */
+ if (aws_http_message_is_request(http1_msg)) {
+ struct aws_byte_cursor method;
+ if (aws_http_message_get_request_method(http1_msg, &method)) {
+            AWS_LOGF_ERROR(
+                AWS_LS_HTTP_GENERAL,
+                "Failed to create HTTP/2 message from HTTP/1 message, id: %p, because the request has no method set.",
+                (void *)http1_msg);
+            /* this error occurs when the request is invalid */
+            aws_raise_error(AWS_ERROR_HTTP_INVALID_METHOD);
+            goto error;
+        }
+        /* Use add instead of set to avoid pushing the entry to the front of the array list */
+ if (aws_http_headers_add(copied_headers, aws_http_header_method, method)) {
+ goto error;
+ }
+        AWS_LOGF_TRACE(
+            AWS_LS_HTTP_GENERAL,
+            "Added header to new HTTP/2 message - \"%.*s\": \"%.*s\" ",
+            (int)aws_http_header_method.len,
+            aws_http_header_method.ptr,
+            (int)method.len,
+            method.ptr);
+        /**
+         * We set a default value, "https", for now.
+         * TODO: once prior-knowledge HTTP/2 is supported, we may also want to support "http".
+         */
+ struct aws_byte_cursor scheme_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https");
+ if (aws_http_headers_add(copied_headers, aws_http_header_scheme, scheme_cursor)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+            "Added header to new HTTP/2 message - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_scheme.len,
+ aws_http_header_scheme.ptr,
+ (int)scheme_cursor.len,
+ scheme_cursor.ptr);
+
+ /**
+ * An intermediary that forwards a request over HTTP/2 MUST construct an ":authority" pseudo-header field using
+         * the authority information from the control data of the original request. (RFC 9113, Section 8.3.1)
+ */
+ struct aws_byte_cursor host_value;
+ AWS_ZERO_STRUCT(host_value);
+ if (aws_http_headers_get(http1_msg->headers, aws_byte_cursor_from_c_str("host"), &host_value) ==
+ AWS_OP_SUCCESS) {
+ if (aws_http_headers_add(copied_headers, aws_http_header_authority, host_value)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+                "Added header to new HTTP/2 message - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_authority.len,
+ aws_http_header_authority.ptr,
+ (int)host_value.len,
+ host_value.ptr);
+ }
+        /* TODO: If the host header is missing, the target URI could be the other source of the authority information
+         */
+
+ struct aws_byte_cursor path_cursor;
+ if (aws_http_message_get_request_path(http1_msg, &path_cursor)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+                "Failed to create HTTP/2 message from HTTP/1 message, id: %p, because the request has no path set.",
+ (void *)http1_msg);
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_PATH);
+ goto error;
+ }
+ if (aws_http_headers_add(copied_headers, aws_http_header_path, path_cursor)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+            "Added header to new HTTP/2 message - \"%.*s\": \"%.*s\" ",
+ (int)aws_http_header_path.len,
+ aws_http_header_path.ptr,
+ (int)path_cursor.len,
+ path_cursor.ptr);
+ } else {
+ int status = 0;
+ if (aws_http_message_get_response_status(http1_msg, &status)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+                "Failed to create HTTP/2 response message from HTTP/1 response message, id: %p, because no status "
+                "code is set.",
+                (void *)http1_msg);
+            /* this error occurs when the response is invalid */
+ aws_raise_error(AWS_ERROR_HTTP_INVALID_STATUS_CODE);
+ goto error;
+ }
+ if (aws_http2_headers_set_response_status(copied_headers, status)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+            "Added header to new HTTP/2 message - \"%.*s\": \"%d\" ",
+ (int)aws_http_header_status.len,
+ aws_http_header_status.ptr,
+ status);
+ }
+
+ if (aws_byte_buf_init(&lower_name_buf, alloc, 256)) {
+ goto error;
+ }
+ for (size_t iter = 0; iter < aws_http_headers_count(old_headers); iter++) {
+ aws_byte_buf_reset(&lower_name_buf, false);
+ bool copy_header = true;
+ /* name should be converted to lower case */
+ if (aws_http_headers_get_index(old_headers, iter, &header_iter)) {
+ goto error;
+ }
+ /* append lower case name to the buffer */
+ aws_byte_buf_append_with_lookup(&lower_name_buf, &header_iter.name, aws_lookup_table_to_lower_get());
+ struct aws_byte_cursor lower_name_cursor = aws_byte_cursor_from_buf(&lower_name_buf);
+ enum aws_http_header_name name_enum = aws_http_lowercase_str_to_header_name(lower_name_cursor);
+ switch (name_enum) {
+ case AWS_HTTP_HEADER_TRANSFER_ENCODING:
+ case AWS_HTTP_HEADER_UPGRADE:
+ case AWS_HTTP_HEADER_KEEP_ALIVE:
+ case AWS_HTTP_HEADER_PROXY_CONNECTION:
+ case AWS_HTTP_HEADER_HOST:
+ /**
+ * An intermediary transforming an HTTP/1.x message to HTTP/2 MUST remove connection-specific header
+                 * fields as discussed in Section 7.6.1 of [HTTP]. (RFC 9113, Section 8.2.2)
+ */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+ "Skip connection-specific headers - \"%.*s\" ",
+ (int)lower_name_cursor.len,
+ lower_name_cursor.ptr);
+ copy_header = false;
+ break;
+
+ default:
+ break;
+ }
+ if (copy_header) {
+ if (aws_http_headers_add(copied_headers, lower_name_cursor, header_iter.value)) {
+ goto error;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_GENERAL,
+                "Added header to new HTTP/2 message - \"%.*s\": \"%.*s\" ",
+ (int)lower_name_cursor.len,
+ lower_name_cursor.ptr,
+ (int)header_iter.value.len,
+ header_iter.value.ptr);
+ }
+ }
+ aws_byte_buf_clean_up(&lower_name_buf);
+ aws_http_message_set_body_stream(message, aws_http_message_get_body_stream(http1_msg));
+
+ return message;
+error:
+ aws_http_message_release(message);
+ aws_byte_buf_clean_up(&lower_name_buf);
+ return NULL;
+}
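+
+/*
+ * Usage sketch (illustration only): converting an existing HTTP/1.1 request into its HTTP/2
+ * form before sending it over an HTTP/2 connection. "alloc" and "h1_request" stand for the
+ * caller's allocator and HTTP/1.1 message; the body stream, if any, is shared with the
+ * original message via aws_http_message_set_body_stream() above.
+ *
+ *     struct aws_http_message *h2_request = aws_http2_message_new_from_http1(alloc, h1_request);
+ *     if (!h2_request) {
+ *         return AWS_OP_ERR;  (conversion failed, aws_last_error() holds the reason)
+ *     }
+ *     ... send the request ...
+ *     aws_http_message_release(h2_request);
+ */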
+
+int aws_http_stream_activate(struct aws_http_stream *stream) {
+ AWS_PRECONDITION(stream);
+ AWS_PRECONDITION(stream->vtable);
+ AWS_PRECONDITION(stream->vtable->activate);
+    /* Make sure it's actually a client calling us; anything else is a programmer bug, so just assert and die. */
+ AWS_PRECONDITION(aws_http_connection_is_client(stream->owning_connection));
+
+ return stream->vtable->activate(stream);
+}
+
+struct aws_http_stream *aws_http_stream_new_server_request_handler(
+ const struct aws_http_request_handler_options *options) {
+ AWS_PRECONDITION(options);
+ if (options->self_size == 0 || !options->server_connection ||
+ !aws_http_connection_is_server(options->server_connection)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_CONNECTION,
+ "id=%p: Cannot create server request handler stream, options are invalid.",
+ (void *)options->server_connection);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ return options->server_connection->vtable->new_server_request_handler_stream(options);
+}
+
+int aws_http_stream_send_response(struct aws_http_stream *stream, struct aws_http_message *response) {
+ AWS_PRECONDITION(stream);
+ AWS_PRECONDITION(response);
+ AWS_PRECONDITION(aws_http_message_is_response(response));
+ return stream->owning_connection->vtable->stream_send_response(stream, response);
+}
+
+void aws_http_stream_release(struct aws_http_stream *stream) {
+ if (!stream) {
+ return;
+ }
+
+ size_t prev_refcount = aws_atomic_fetch_sub(&stream->refcount, 1);
+ if (prev_refcount == 1) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_STREAM, "id=%p: Final stream refcount released.", (void *)stream);
+
+ void *user_data = stream->user_data;
+ aws_http_on_stream_destroy_fn *on_destroy_callback = stream->on_destroy;
+
+ struct aws_http_connection *owning_connection = stream->owning_connection;
+ stream->vtable->destroy(stream);
+
+ if (on_destroy_callback) {
+            /* inform user that destroy completed. */
+ on_destroy_callback(user_data);
+ }
+ /* Connection needed to outlive stream, but it's free to go now */
+ aws_http_connection_release(owning_connection);
+ } else {
+ AWS_ASSERT(prev_refcount != 0);
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM, "id=%p: Stream refcount released, %zu remaining.", (void *)stream, prev_refcount - 1);
+ }
+}
+
+struct aws_http_connection *aws_http_stream_get_connection(const struct aws_http_stream *stream) {
+ AWS_ASSERT(stream);
+ return stream->owning_connection;
+}
+
+int aws_http_stream_get_incoming_response_status(const struct aws_http_stream *stream, int *out_status) {
+ AWS_ASSERT(stream && stream->client_data);
+
+ if (stream->client_data->response_status == (int)AWS_HTTP_STATUS_CODE_UNKNOWN) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Status code not yet received.", (void *)stream);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+
+ *out_status = stream->client_data->response_status;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_http_stream_get_incoming_request_method(
+ const struct aws_http_stream *stream,
+ struct aws_byte_cursor *out_method) {
+ AWS_ASSERT(stream && stream->server_data);
+
+ if (!stream->server_data->request_method_str.ptr) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Request method not yet received.", (void *)stream);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+
+ *out_method = stream->server_data->request_method_str;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_http_stream_get_incoming_request_uri(const struct aws_http_stream *stream, struct aws_byte_cursor *out_uri) {
+ AWS_ASSERT(stream && stream->server_data);
+
+ if (!stream->server_data->request_path.ptr) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_STREAM, "id=%p: Request URI not yet received.", (void *)stream);
+ return aws_raise_error(AWS_ERROR_HTTP_DATA_NOT_AVAILABLE);
+ }
+
+ *out_uri = stream->server_data->request_path;
+ return AWS_OP_SUCCESS;
+}
+
+void aws_http_stream_update_window(struct aws_http_stream *stream, size_t increment_size) {
+ stream->vtable->update_window(stream, increment_size);
+}
+
+uint32_t aws_http_stream_get_id(const struct aws_http_stream *stream) {
+ return stream->id;
+}
+
+int aws_http2_stream_reset(struct aws_http_stream *http2_stream, uint32_t http2_error) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ if (!http2_stream->vtable->http2_reset_stream) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.",
+ (void *)http2_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ return http2_stream->vtable->http2_reset_stream(http2_stream, http2_error);
+}
+
+int aws_http2_stream_get_received_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ AWS_PRECONDITION(out_http2_error);
+ if (!http2_stream->vtable->http2_get_received_error_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.",
+ (void *)http2_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ return http2_stream->vtable->http2_get_received_error_code(http2_stream, out_http2_error);
+}
+
+int aws_http2_stream_get_sent_reset_error_code(struct aws_http_stream *http2_stream, uint32_t *out_http2_error) {
+ AWS_PRECONDITION(http2_stream);
+ AWS_PRECONDITION(http2_stream->vtable);
+ AWS_PRECONDITION(out_http2_error);
+ if (!http2_stream->vtable->http2_get_sent_error_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_STREAM,
+ "id=%p: HTTP/2 stream only function invoked on other stream, ignoring call.",
+ (void *)http2_stream);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ return http2_stream->vtable->http2_get_sent_error_code(http2_stream, out_http2_error);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/statistics.c b/contrib/restricted/aws/aws-c-http/source/statistics.c
new file mode 100644
index 0000000000..ea4e65c1dd
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/statistics.c
@@ -0,0 +1,35 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/statistics.h>
+
+int aws_crt_statistics_http1_channel_init(struct aws_crt_statistics_http1_channel *stats) {
+ AWS_ZERO_STRUCT(*stats);
+ stats->category = AWSCRT_STAT_CAT_HTTP1_CHANNEL;
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_crt_statistics_http1_channel_cleanup(struct aws_crt_statistics_http1_channel *stats) {
+ (void)stats;
+}
+
+void aws_crt_statistics_http1_channel_reset(struct aws_crt_statistics_http1_channel *stats) {
+ stats->pending_outgoing_stream_ms = 0;
+ stats->pending_incoming_stream_ms = 0;
+ stats->current_outgoing_stream_id = 0;
+ stats->current_incoming_stream_id = 0;
+}
+
+void aws_crt_statistics_http2_channel_init(struct aws_crt_statistics_http2_channel *stats) {
+ AWS_ZERO_STRUCT(*stats);
+ stats->category = AWSCRT_STAT_CAT_HTTP2_CHANNEL;
+}
+
+void aws_crt_statistics_http2_channel_reset(struct aws_crt_statistics_http2_channel *stats) {
+ stats->pending_outgoing_stream_ms = 0;
+ stats->pending_incoming_stream_ms = 0;
+ stats->was_inactive = false;
+}
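+
+/*
+ * Usage sketch (illustration only): a channel handler typically keeps one of these stats
+ * structs, initializes it once, and resets it after each reporting interval.
+ *
+ *     struct aws_crt_statistics_http1_channel stats;
+ *     aws_crt_statistics_http1_channel_init(&stats);
+ *     ... accumulate pending_outgoing_stream_ms / pending_incoming_stream_ms ...
+ *     aws_crt_statistics_http1_channel_reset(&stats);
+ *     aws_crt_statistics_http1_channel_cleanup(&stats);
+ */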
diff --git a/contrib/restricted/aws/aws-c-http/source/strutil.c b/contrib/restricted/aws/aws-c-http/source/strutil.c
new file mode 100644
index 0000000000..552535f46d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/strutil.c
@@ -0,0 +1,232 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/private/strutil.h>
+
+static struct aws_byte_cursor s_trim(struct aws_byte_cursor cursor, const bool trim_table[256]) {
+ /* trim leading whitespace */
+ size_t i;
+ for (i = 0; i < cursor.len; ++i) {
+ const uint8_t c = cursor.ptr[i];
+ if (!trim_table[c]) {
+ break;
+ }
+ }
+ cursor.ptr += i;
+ cursor.len -= i;
+
+ /* trim trailing whitespace */
+ for (; cursor.len; --cursor.len) {
+ const uint8_t c = cursor.ptr[cursor.len - 1];
+ if (!trim_table[c]) {
+ break;
+ }
+ }
+
+ return cursor;
+}
+
+static const bool s_http_whitespace_table[256] = {
+ [' '] = true,
+ ['\t'] = true,
+};
+
+struct aws_byte_cursor aws_strutil_trim_http_whitespace(struct aws_byte_cursor cursor) {
+ return s_trim(cursor, s_http_whitespace_table);
+}
+
+/* RFC7230 section 3.2.6:
+ * token = 1*tchar
+ * tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
+ * / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
+ * / DIGIT / ALPHA
+ */
+static const bool s_http_token_table[256] = {
+ ['!'] = true, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, ['\''] = true, ['*'] = true, ['+'] = true,
+ ['-'] = true, ['.'] = true, ['^'] = true, ['_'] = true, ['`'] = true, ['|'] = true, ['~'] = true,
+
+ ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, ['5'] = true, ['6'] = true, ['7'] = true,
+ ['8'] = true, ['9'] = true,
+
+ ['A'] = true, ['B'] = true, ['C'] = true, ['D'] = true, ['E'] = true, ['F'] = true, ['G'] = true, ['H'] = true,
+ ['I'] = true, ['J'] = true, ['K'] = true, ['L'] = true, ['M'] = true, ['N'] = true, ['O'] = true, ['P'] = true,
+ ['Q'] = true, ['R'] = true, ['S'] = true, ['T'] = true, ['U'] = true, ['V'] = true, ['W'] = true, ['X'] = true,
+ ['Y'] = true, ['Z'] = true,
+
+ ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, ['f'] = true, ['g'] = true, ['h'] = true,
+ ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true,
+ ['q'] = true, ['r'] = true, ['s'] = true, ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true,
+ ['y'] = true, ['z'] = true,
+};
+
+/* Same as above, but with uppercase characters removed */
+static const bool s_http_lowercase_token_table[256] = {
+ ['!'] = true, ['#'] = true, ['$'] = true, ['%'] = true, ['&'] = true, ['\''] = true, ['*'] = true, ['+'] = true,
+ ['-'] = true, ['.'] = true, ['^'] = true, ['_'] = true, ['`'] = true, ['|'] = true, ['~'] = true,
+
+ ['0'] = true, ['1'] = true, ['2'] = true, ['3'] = true, ['4'] = true, ['5'] = true, ['6'] = true, ['7'] = true,
+ ['8'] = true, ['9'] = true,
+
+ ['a'] = true, ['b'] = true, ['c'] = true, ['d'] = true, ['e'] = true, ['f'] = true, ['g'] = true, ['h'] = true,
+ ['i'] = true, ['j'] = true, ['k'] = true, ['l'] = true, ['m'] = true, ['n'] = true, ['o'] = true, ['p'] = true,
+ ['q'] = true, ['r'] = true, ['s'] = true, ['t'] = true, ['u'] = true, ['v'] = true, ['w'] = true, ['x'] = true,
+ ['y'] = true, ['z'] = true,
+};
+
+static bool s_is_token(struct aws_byte_cursor token, const bool token_table[256]) {
+ if (token.len == 0) {
+ return false;
+ }
+
+ for (size_t i = 0; i < token.len; ++i) {
+ const uint8_t c = token.ptr[i];
+ if (token_table[c] == false) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+bool aws_strutil_is_http_token(struct aws_byte_cursor token) {
+ return s_is_token(token, s_http_token_table);
+}
+
+bool aws_strutil_is_lowercase_http_token(struct aws_byte_cursor token) {
+ return s_is_token(token, s_http_lowercase_token_table);
+}
+
+/* clang-format off */
+/**
+ * Table with true for all octets allowed in field-content,
+ * as defined in RFC7230 section 3.2 and 3.2.6 and RFC5234 appendix-B.1:
+ *
+ * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+ * field-vchar = VCHAR / obs-text
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ */
+static const bool s_http_field_content_table[256] = {
+ /* clang-format off */
+
+ /* whitespace */
+ ['\t'] = true, [' '] = true,
+
+ /* VCHAR = 0x21-7E */
+ [0x21] = true, [0x22] = true, [0x23] = true, [0x24] = true, [0x25] = true, [0x26] = true, [0x27] = true,
+ [0x28] = true, [0x29] = true, [0x2A] = true, [0x2B] = true, [0x2C] = true, [0x2D] = true, [0x2E] = true,
+ [0x2F] = true, [0x30] = true, [0x31] = true, [0x32] = true, [0x33] = true, [0x34] = true, [0x35] = true,
+ [0x36] = true, [0x37] = true, [0x38] = true, [0x39] = true, [0x3A] = true, [0x3B] = true, [0x3C] = true,
+ [0x3D] = true, [0x3E] = true, [0x3F] = true, [0x40] = true, [0x41] = true, [0x42] = true, [0x43] = true,
+ [0x44] = true, [0x45] = true, [0x46] = true, [0x47] = true, [0x48] = true, [0x49] = true, [0x4A] = true,
+ [0x4B] = true, [0x4C] = true, [0x4D] = true, [0x4E] = true, [0x4F] = true, [0x50] = true, [0x51] = true,
+ [0x52] = true, [0x53] = true, [0x54] = true, [0x55] = true, [0x56] = true, [0x57] = true, [0x58] = true,
+ [0x59] = true, [0x5A] = true, [0x5B] = true, [0x5C] = true, [0x5D] = true, [0x5E] = true, [0x5F] = true,
+ [0x60] = true, [0x61] = true, [0x62] = true, [0x63] = true, [0x64] = true, [0x65] = true, [0x66] = true,
+ [0x67] = true, [0x68] = true, [0x69] = true, [0x6A] = true, [0x6B] = true, [0x6C] = true, [0x6D] = true,
+ [0x6E] = true, [0x6F] = true, [0x70] = true, [0x71] = true, [0x72] = true, [0x73] = true, [0x74] = true,
+ [0x75] = true, [0x76] = true, [0x77] = true, [0x78] = true, [0x79] = true, [0x7A] = true, [0x7B] = true,
+ [0x7C] = true, [0x7D] = true, [0x7E] = true,
+
+ /* obs-text = %x80-FF */
+ [0x80] = true, [0x81] = true, [0x82] = true, [0x83] = true, [0x84] = true, [0x85] = true, [0x86] = true,
+ [0x87] = true, [0x88] = true, [0x89] = true, [0x8A] = true, [0x8B] = true, [0x8C] = true, [0x8D] = true,
+ [0x8E] = true, [0x8F] = true, [0x90] = true, [0x91] = true, [0x92] = true, [0x93] = true, [0x94] = true,
+ [0x95] = true, [0x96] = true, [0x97] = true, [0x98] = true, [0x99] = true, [0x9A] = true, [0x9B] = true,
+ [0x9C] = true, [0x9D] = true, [0x9E] = true, [0x9F] = true, [0xA0] = true, [0xA1] = true, [0xA2] = true,
+ [0xA3] = true, [0xA4] = true, [0xA5] = true, [0xA6] = true, [0xA7] = true, [0xA8] = true, [0xA9] = true,
+ [0xAA] = true, [0xAB] = true, [0xAC] = true, [0xAD] = true, [0xAE] = true, [0xAF] = true, [0xB0] = true,
+ [0xB1] = true, [0xB2] = true, [0xB3] = true, [0xB4] = true, [0xB5] = true, [0xB6] = true, [0xB7] = true,
+ [0xB8] = true, [0xB9] = true, [0xBA] = true, [0xBB] = true, [0xBC] = true, [0xBD] = true, [0xBE] = true,
+ [0xBF] = true, [0xC0] = true, [0xC1] = true, [0xC2] = true, [0xC3] = true, [0xC4] = true, [0xC5] = true,
+ [0xC6] = true, [0xC7] = true, [0xC8] = true, [0xC9] = true, [0xCA] = true, [0xCB] = true, [0xCC] = true,
+ [0xCD] = true, [0xCE] = true, [0xCF] = true, [0xD0] = true, [0xD1] = true, [0xD2] = true, [0xD3] = true,
+ [0xD4] = true, [0xD5] = true, [0xD6] = true, [0xD7] = true, [0xD8] = true, [0xD9] = true, [0xDA] = true,
+ [0xDB] = true, [0xDC] = true, [0xDD] = true, [0xDE] = true, [0xDF] = true, [0xE0] = true, [0xE1] = true,
+ [0xE2] = true, [0xE3] = true, [0xE4] = true, [0xE5] = true, [0xE6] = true, [0xE7] = true, [0xE8] = true,
+ [0xE9] = true, [0xEA] = true, [0xEB] = true, [0xEC] = true, [0xED] = true, [0xEE] = true, [0xEF] = true,
+ [0xF0] = true, [0xF1] = true, [0xF2] = true, [0xF3] = true, [0xF4] = true, [0xF5] = true, [0xF6] = true,
+ [0xF7] = true, [0xF8] = true, [0xF9] = true, [0xFA] = true, [0xFB] = true, [0xFC] = true, [0xFD] = true,
+ [0xFE] = true, [0xFF] = true,
+ /* clang-format on */
+};
+
+/**
+ * From RFC7230 section 3.2:
+ * field-value = *( field-content / obs-fold )
+ * field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ]
+ *
+ * But we're forbidding obs-fold
+ */
+bool aws_strutil_is_http_field_value(struct aws_byte_cursor cursor) {
+ if (cursor.len == 0) {
+ return true;
+ }
+
+ /* first and last char cannot be whitespace */
+ const uint8_t first_c = cursor.ptr[0];
+ const uint8_t last_c = cursor.ptr[cursor.len - 1];
+ if (s_http_whitespace_table[first_c] || s_http_whitespace_table[last_c]) {
+ return false;
+ }
+
+ /* ensure every char is legal field-content */
+ size_t i = 0;
+ do {
+ const uint8_t c = cursor.ptr[i++];
+ if (s_http_field_content_table[c] == false) {
+ return false;
+ }
+ } while (i < cursor.len);
+
+ return true;
+}
+
+/**
+ * From RFC7230 section 3.1.2:
+ * reason-phrase = *( HTAB / SP / VCHAR / obs-text )
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * obs-text = %x80-FF
+ */
+bool aws_strutil_is_http_reason_phrase(struct aws_byte_cursor cursor) {
+ for (size_t i = 0; i < cursor.len; ++i) {
+ const uint8_t c = cursor.ptr[i];
+ /* the field-content table happens to allow the exact same characters as reason-phrase */
+ if (s_http_field_content_table[c] == false) {
+ return false;
+ }
+ }
+ return true;
+}
+
+bool aws_strutil_is_http_request_target(struct aws_byte_cursor cursor) {
+ if (cursor.len == 0) {
+ return false;
+ }
+
+ /* TODO: Actually check the complete grammar as defined in RFC7230 5.3 and
+ * RFC3986. Currently this just checks whether the sequence is blatantly illegal */
+ size_t i = 0;
+ do {
+ const uint8_t c = cursor.ptr[i++];
+        /* everything <= ' ' is non-visible ASCII */
+ if (c <= ' ') {
+ return false;
+ }
+ } while (i < cursor.len);
+
+ return true;
+}
+
+bool aws_strutil_is_http_pseudo_header_name(struct aws_byte_cursor cursor) {
+ if (cursor.len == 0) {
+ return false;
+ }
+ const uint8_t c = cursor.ptr[0];
+ if (c != ':') {
+        /* shortcut: pseudo-header names must start with ':' */
+ return false;
+ }
+ return true;
+}
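+
+/*
+ * Usage sketch (illustration only): validating a header with the predicates above before
+ * accepting it. A value may carry optional surrounding whitespace, which is trimmed first.
+ *
+ *     struct aws_byte_cursor name = aws_byte_cursor_from_c_str("Content-Type");
+ *     struct aws_byte_cursor value = aws_byte_cursor_from_c_str("  text/html ");
+ *     value = aws_strutil_trim_http_whitespace(value);
+ *     bool ok = aws_strutil_is_http_token(name) && aws_strutil_is_http_field_value(value);
+ */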
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket.c b/contrib/restricted/aws/aws-c-http/source/websocket.c
new file mode 100644
index 0000000000..8b57953624
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket.c
@@ -0,0 +1,1790 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_impl.h>
+
+#include <aws/common/atomics.h>
+#include <aws/common/device_random.h>
+#include <aws/common/encoding.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/http/private/websocket_decoder.h>
+#include <aws/http/private/websocket_encoder.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel.h>
+#include <aws/io/logging.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/* TODO: If something goes wrong during normal shutdown, do I change the error_code? */
+
+struct outgoing_frame {
+ struct aws_websocket_send_frame_options def;
+ struct aws_linked_list_node node;
+};
+
+struct aws_websocket {
+ struct aws_allocator *alloc;
+ struct aws_ref_count ref_count;
+ struct aws_channel_handler channel_handler;
+ struct aws_channel_slot *channel_slot;
+ size_t initial_window_size;
+ bool manual_window_update;
+
+ void *user_data;
+ aws_websocket_on_incoming_frame_begin_fn *on_incoming_frame_begin;
+ aws_websocket_on_incoming_frame_payload_fn *on_incoming_frame_payload;
+ aws_websocket_on_incoming_frame_complete_fn *on_incoming_frame_complete;
+
+ struct aws_channel_task move_synced_data_to_thread_task;
+ struct aws_channel_task shutdown_channel_task;
+ struct aws_channel_task increment_read_window_task;
+ struct aws_channel_task waiting_on_payload_stream_task;
+ struct aws_channel_task close_timeout_task;
+ bool is_server;
+
+ /* Data that should only be accessed from the websocket's channel thread. */
+ struct {
+ struct aws_websocket_encoder encoder;
+
+ /* list of outbound frames that have yet to be encoded and sent to the socket */
+ struct aws_linked_list outgoing_frame_list;
+
+ /* current outbound frame being encoded and sent to the socket */
+ struct outgoing_frame *current_outgoing_frame;
+
+ /*
+ * list of outbound frames that have been completely written to the io message heading to the socket.
+ * When the socket write completes we can in turn invoke completion callbacks for all of these frames
+ */
+ struct aws_linked_list write_completion_frames;
+
+ struct aws_websocket_decoder decoder;
+ struct aws_websocket_incoming_frame *current_incoming_frame;
+ struct aws_websocket_incoming_frame incoming_frame_storage;
+
+ /* Payload of incoming PING frame.
+ * The PONG frame we send in response must have an identical payload */
+ struct aws_byte_buf incoming_ping_payload;
+
+ /* If current incoming frame is CONTINUATION, this is the data type it is a continuation of. */
+ enum aws_websocket_opcode continuation_of_opcode;
+
+ /* Amount to increment window after a channel message has been processed. */
+ size_t incoming_message_window_update;
+
+        /* Cached slot to the right */
+ struct aws_channel_slot *last_known_right_slot;
+
+ /* True when no more frames will be read, due to:
+ * - a CLOSE frame was received
+ * - decoder error
+ * - channel shutdown in read-dir */
+ bool is_reading_stopped;
+
+ /* True when no more frames will be written, due to:
+ * - a CLOSE frame was sent
+ * - encoder error
+ * - channel shutdown in write-dir */
+ bool is_writing_stopped;
+
+ /* During normal shutdown websocket ensures that a CLOSE frame is sent */
+ bool is_shutting_down_and_waiting_for_close_frame_to_be_written;
+ int channel_shutdown_error_code;
+ bool channel_shutdown_free_scarce_resources_immediately;
+
+ /* Wait until each aws_io_message is completely written to
+ * the socket before sending the next aws_io_message */
+ bool is_waiting_for_write_completion;
+
+ /* If, while writing out data from a payload stream, we experience "read would block",
+ * schedule a task to try again in the near-future. */
+ bool is_waiting_on_payload_stream_task;
+
+ /* True if this websocket is being used as a dumb mid-channel handler.
+ * The websocket will no longer respond to its public API or invoke callbacks. */
+ bool is_midchannel_handler;
+ } thread_data;
+
+ /* Data that may be touched from any thread (lock must be held). */
+ struct {
+ struct aws_mutex lock;
+
+ struct aws_linked_list outgoing_frame_list;
+
+ /* If non-zero, then increment_read_window_task is scheduled */
+ size_t window_increment_size;
+
+ /* Error-code returned by aws_websocket_send_frame() when is_writing_stopped is true */
+ int send_frame_error_code;
+
+ /* Use a task to issue a channel shutdown. */
+ int shutdown_channel_task_error_code;
+ bool is_shutdown_channel_task_scheduled;
+
+ bool is_move_synced_data_to_thread_task_scheduled;
+
+ /* Mirrors variable from thread_data */
+ bool is_midchannel_handler;
+ } synced_data;
+};
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message);
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size);
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately);
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler);
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler);
+static void s_handler_destroy(struct aws_channel_handler *handler);
+static void s_websocket_on_refcount_zero(void *user_data);
+
+static int s_encoder_stream_outgoing_payload(struct aws_byte_buf *out_buf, void *user_data);
+
+static int s_decoder_on_frame(const struct aws_websocket_frame *frame, void *user_data);
+static int s_decoder_on_payload(struct aws_byte_cursor data, void *user_data);
+static int s_decoder_on_user_payload(struct aws_websocket *websocket, struct aws_byte_cursor data);
+static int s_decoder_on_midchannel_payload(struct aws_websocket *websocket, struct aws_byte_cursor data);
+
+static void s_destroy_outgoing_frame(struct aws_websocket *websocket, struct outgoing_frame *frame, int error_code);
+static void s_complete_frame_list(struct aws_websocket *websocket, struct aws_linked_list *frames, int error_code);
+static void s_complete_incoming_frame(struct aws_websocket *websocket, int error_code, bool *out_callback_result);
+static void s_finish_shutdown(struct aws_websocket *websocket);
+static void s_io_message_write_completed(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data);
+static int s_send_frame(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_send_frame_options *options,
+ bool from_public_api);
+static bool s_midchannel_send_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data);
+static void s_midchannel_send_complete(struct aws_websocket *websocket, int error_code, void *user_data);
+static void s_move_synced_data_to_thread_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_shutdown_channel_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_waiting_on_payload_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_close_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status);
+static void s_schedule_channel_shutdown(struct aws_websocket *websocket, int error_code);
+static void s_shutdown_due_to_write_err(struct aws_websocket *websocket, int error_code);
+static void s_shutdown_due_to_read_err(struct aws_websocket *websocket, int error_code);
+static void s_stop_writing(struct aws_websocket *websocket, int send_frame_error_code);
+static void s_try_write_outgoing_frames(struct aws_websocket *websocket);
+
+static struct aws_channel_handler_vtable s_channel_handler_vtable = {
+ .process_read_message = s_handler_process_read_message,
+ .process_write_message = s_handler_process_write_message,
+ .increment_read_window = s_handler_increment_read_window,
+ .shutdown = s_handler_shutdown,
+ .initial_window_size = s_handler_initial_window_size,
+ .message_overhead = s_handler_message_overhead,
+ .destroy = s_handler_destroy,
+};
+
+const char *aws_websocket_opcode_str(uint8_t opcode) {
+ switch (opcode) {
+ case AWS_WEBSOCKET_OPCODE_CONTINUATION:
+ return "continuation";
+ case AWS_WEBSOCKET_OPCODE_TEXT:
+ return "text";
+ case AWS_WEBSOCKET_OPCODE_BINARY:
+ return "binary";
+ case AWS_WEBSOCKET_OPCODE_CLOSE:
+ return "close";
+ case AWS_WEBSOCKET_OPCODE_PING:
+ return "ping";
+ case AWS_WEBSOCKET_OPCODE_PONG:
+ return "pong";
+ default:
+ return "";
+ }
+}
+
+bool aws_websocket_is_data_frame(uint8_t opcode) {
+ /* RFC-6455 Section 5.6: Most significant bit of (4 bit) data frame opcode is 0 */
+ return !(opcode & 0x08);
+}
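+
+/*
+ * Usage sketch (illustration only): distinguishing data frames from control frames when
+ * handling an incoming frame. "frame" stands for a received struct aws_websocket_frame.
+ *
+ *     if (aws_websocket_is_data_frame(frame->opcode)) {
+ *         (text, binary, or continuation frame: deliver the payload to the user)
+ *     } else {
+ *         (control frame such as close, ping, or pong: handle internally)
+ *     }
+ *     AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "opcode=%s", aws_websocket_opcode_str(frame->opcode));
+ */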
+
+static void s_lock_synced_data(struct aws_websocket *websocket) {
+ int err = aws_mutex_lock(&websocket->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+static void s_unlock_synced_data(struct aws_websocket *websocket) {
+ int err = aws_mutex_unlock(&websocket->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+struct aws_websocket *aws_websocket_handler_new(const struct aws_websocket_handler_options *options) {
+ struct aws_channel_slot *slot = NULL;
+ struct aws_websocket *websocket = NULL;
+ int err;
+
+ slot = aws_channel_slot_new(options->channel);
+ if (!slot) {
+ goto error;
+ }
+
+ err = aws_channel_slot_insert_end(options->channel, slot);
+ if (err) {
+ goto error;
+ }
+
+ websocket = aws_mem_calloc(options->allocator, 1, sizeof(struct aws_websocket));
+ if (!websocket) {
+ goto error;
+ }
+
+ websocket->alloc = options->allocator;
+ aws_ref_count_init(&websocket->ref_count, websocket, s_websocket_on_refcount_zero);
+ websocket->channel_handler.vtable = &s_channel_handler_vtable;
+ websocket->channel_handler.alloc = options->allocator;
+ websocket->channel_handler.impl = websocket;
+
+ websocket->channel_slot = slot;
+
+ websocket->initial_window_size = options->initial_window_size;
+ websocket->manual_window_update = options->manual_window_update;
+
+ websocket->user_data = options->user_data;
+ websocket->on_incoming_frame_begin = options->on_incoming_frame_begin;
+ websocket->on_incoming_frame_payload = options->on_incoming_frame_payload;
+ websocket->on_incoming_frame_complete = options->on_incoming_frame_complete;
+
+ websocket->is_server = options->is_server;
+
+ aws_channel_task_init(
+ &websocket->move_synced_data_to_thread_task,
+ s_move_synced_data_to_thread_task,
+ websocket,
+ "websocket_move_synced_data_to_thread");
+ aws_channel_task_init(
+ &websocket->shutdown_channel_task, s_shutdown_channel_task, websocket, "websocket_shutdown_channel");
+ aws_channel_task_init(
+ &websocket->increment_read_window_task,
+ s_increment_read_window_task,
+ websocket,
+ "websocket_increment_read_window");
+ aws_channel_task_init(
+ &websocket->waiting_on_payload_stream_task,
+ s_waiting_on_payload_stream_task,
+ websocket,
+ "websocket_waiting_on_payload_stream");
+ aws_channel_task_init(&websocket->close_timeout_task, s_close_timeout_task, websocket, "websocket_close_timeout");
+
+ aws_linked_list_init(&websocket->thread_data.outgoing_frame_list);
+ aws_linked_list_init(&websocket->thread_data.write_completion_frames);
+ aws_byte_buf_init(&websocket->thread_data.incoming_ping_payload, websocket->alloc, 0);
+
+ aws_websocket_encoder_init(&websocket->thread_data.encoder, s_encoder_stream_outgoing_payload, websocket);
+
+ aws_websocket_decoder_init(
+ &websocket->thread_data.decoder, options->allocator, s_decoder_on_frame, s_decoder_on_payload, websocket);
+
+ aws_linked_list_init(&websocket->synced_data.outgoing_frame_list);
+
+ err = aws_mutex_init(&websocket->synced_data.lock);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "static: Failed to initialize mutex, error %d (%s).",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ err = aws_channel_slot_set_handler(slot, &websocket->channel_handler);
+ if (err) {
+ goto error;
+ }
+
+ /* Ensure websocket (and the rest of the channel) can't be destroyed until aws_websocket_release() is called */
+ aws_channel_acquire_hold(options->channel);
+
+ return websocket;
+
+error:
+ if (slot) {
+ if (websocket && !slot->handler) {
+ websocket->channel_handler.vtable->destroy(&websocket->channel_handler);
+ }
+ aws_channel_slot_remove(slot);
+ }
+ return NULL;
+}
+
+static void s_handler_destroy(struct aws_channel_handler *handler) {
+ struct aws_websocket *websocket = handler->impl;
+ AWS_ASSERT(!websocket->thread_data.current_outgoing_frame);
+ AWS_ASSERT(!websocket->thread_data.current_incoming_frame);
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Destroying websocket.", (void *)websocket);
+
+ aws_websocket_decoder_clean_up(&websocket->thread_data.decoder);
+ aws_byte_buf_clean_up(&websocket->thread_data.incoming_ping_payload);
+ aws_mutex_clean_up(&websocket->synced_data.lock);
+ aws_mem_release(websocket->alloc, websocket);
+}
+
+struct aws_websocket *aws_websocket_acquire(struct aws_websocket *websocket) {
+ AWS_PRECONDITION(websocket);
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Acquiring websocket ref-count.", (void *)websocket);
+ aws_ref_count_acquire(&websocket->ref_count);
+ return websocket;
+}
+
+void aws_websocket_release(struct aws_websocket *websocket) {
+ if (!websocket) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Releasing websocket ref-count.", (void *)websocket);
+ aws_ref_count_release(&websocket->ref_count);
+}
+
+static void s_websocket_on_refcount_zero(void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(websocket->channel_slot);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket ref-count is zero, shut down if necessary.", (void *)websocket);
+
+ /* Channel might already be shut down, but make sure */
+ s_schedule_channel_shutdown(websocket, AWS_ERROR_SUCCESS);
+
+ /* Channel won't destroy its slots/handlers until its refcount reaches 0 */
+ aws_channel_release_hold(websocket->channel_slot->channel);
+}
+
+struct aws_channel *aws_websocket_get_channel(const struct aws_websocket *websocket) {
+ return websocket->channel_slot->channel;
+}
+
+int aws_websocket_convert_to_midchannel_handler(struct aws_websocket *websocket) {
+ if (!aws_channel_thread_is_callers_thread(websocket->channel_slot->channel)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Cannot convert to midchannel handler on this thread.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_IO_EVENT_LOOP_THREAD_ONLY);
+ }
+
+ if (websocket->thread_data.is_midchannel_handler) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket has already converted to midchannel handler.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER);
+ }
+
+ if (websocket->thread_data.is_reading_stopped || websocket->thread_data.is_writing_stopped) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Cannot convert websocket to midchannel handler because it is closed or closing.",
+ (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ }
+
+ if (websocket->thread_data.current_incoming_frame) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Cannot convert to midchannel handler in the middle of an incoming frame.",
+ (void *)websocket);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ websocket->thread_data.is_midchannel_handler = true;
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_send_frame(
+ struct aws_websocket *websocket,
+ const struct aws_websocket_send_frame_options *options,
+ bool from_public_api) {
+
+ AWS_ASSERT(websocket);
+ AWS_ASSERT(options);
+
+ /* Check for bad input. Log about non-obvious errors. */
+ if (options->payload_length > 0 && !options->stream_outgoing_payload) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Invalid frame options, payload streaming function required when payload length is non-zero.",
+ (void *)websocket);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct outgoing_frame *frame = aws_mem_calloc(websocket->alloc, 1, sizeof(struct outgoing_frame));
+ if (!frame) {
+ return AWS_OP_ERR;
+ }
+
+ frame->def = *options;
+
+ /* Enqueue frame, unless no further sending is allowed. */
+ int send_error = 0;
+ bool should_schedule_task = false;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ if (websocket->synced_data.is_midchannel_handler && from_public_api) {
+ send_error = AWS_ERROR_HTTP_WEBSOCKET_IS_MIDCHANNEL_HANDLER;
+ } else if (websocket->synced_data.send_frame_error_code) {
+ send_error = websocket->synced_data.send_frame_error_code;
+ } else {
+ aws_linked_list_push_back(&websocket->synced_data.outgoing_frame_list, &frame->node);
+ if (!websocket->synced_data.is_move_synced_data_to_thread_task_scheduled) {
+ websocket->synced_data.is_move_synced_data_to_thread_task_scheduled = true;
+ should_schedule_task = true;
+ }
+ }
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (send_error) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Cannot send frame, error %d (%s).",
+ (void *)websocket,
+ send_error,
+ aws_error_name(send_error));
+
+ aws_mem_release(websocket->alloc, frame);
+ return aws_raise_error(send_error);
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Enqueuing outgoing frame with opcode=%" PRIu8 "(%s) length=%" PRIu64 " fin=%s",
+ (void *)websocket,
+ options->opcode,
+ aws_websocket_opcode_str(options->opcode),
+ options->payload_length,
+ options->fin ? "T" : "F");
+
+ if (should_schedule_task) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Scheduling synced data task.", (void *)websocket);
+ aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->move_synced_data_to_thread_task);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_websocket_send_frame(struct aws_websocket *websocket, const struct aws_websocket_send_frame_options *options) {
+ return s_send_frame(websocket, options, true);
+}
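+
+/*
+ * Usage sketch (illustration only): sending an empty PING frame. "websocket" stands for the
+ * caller's established aws_websocket. Frames with a payload must also set payload_length and
+ * a stream_outgoing_payload callback, as validated in s_send_frame() above.
+ *
+ *     struct aws_websocket_send_frame_options ping = {
+ *         .opcode = AWS_WEBSOCKET_OPCODE_PING,
+ *         .fin = true,
+ *         .payload_length = 0,
+ *     };
+ *     if (aws_websocket_send_frame(websocket, &ping)) {
+ *         (failed; aws_last_error() holds the reason)
+ *     }
+ */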
+
+static void s_move_synced_data_to_thread_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ struct aws_linked_list tmp_list;
+ aws_linked_list_init(&tmp_list);
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ aws_linked_list_swap_contents(&websocket->synced_data.outgoing_frame_list, &tmp_list);
+
+ websocket->synced_data.is_move_synced_data_to_thread_task_scheduled = false;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (!aws_linked_list_empty(&tmp_list)) {
+ aws_linked_list_move_all_back(&websocket->thread_data.outgoing_frame_list, &tmp_list);
+ s_try_write_outgoing_frames(websocket);
+ }
+}
+
+static void s_try_write_outgoing_frames(struct aws_websocket *websocket) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ int err;
+
+ /* Check whether we should be writing data */
+ if (!websocket->thread_data.current_outgoing_frame &&
+ aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) {
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: No data to write at this time.", (void *)websocket);
+ return;
+ }
+
+ if (websocket->thread_data.is_waiting_for_write_completion) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Waiting until outstanding aws_io_message is written to socket before sending more data.",
+ (void *)websocket);
+ return;
+ }
+
+ if (websocket->thread_data.is_writing_stopped) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Websocket is no longer sending data.", (void *)websocket);
+ return;
+ }
+
+ /* Acquire aws_io_message */
+ struct aws_io_message *io_msg = aws_channel_slot_acquire_max_message_for_write(websocket->channel_slot);
+ if (!io_msg) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+            "id=%p: Failed to acquire message from pool, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ io_msg->user_data = websocket;
+ io_msg->on_completion = s_io_message_write_completed;
+
+ /* Loop through frames, writing their data into the io_msg */
+ bool wrote_close_frame = false;
+ while (!websocket->thread_data.is_writing_stopped) {
+ if (websocket->thread_data.current_outgoing_frame) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Resuming write of frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 ".",
+ (void *)websocket,
+ (void *)websocket->thread_data.current_outgoing_frame,
+ websocket->thread_data.current_outgoing_frame->def.opcode,
+ aws_websocket_opcode_str(websocket->thread_data.current_outgoing_frame->def.opcode),
+ websocket->thread_data.current_outgoing_frame->def.payload_length);
+
+ } else {
+ /* We're not in the middle of encoding a frame, so pop off the next one to encode. */
+ if (aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: No more frames to write.", (void *)websocket);
+ break;
+ }
+
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->thread_data.outgoing_frame_list);
+ websocket->thread_data.current_outgoing_frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node);
+
+ struct aws_websocket_frame frame = {
+ .fin = websocket->thread_data.current_outgoing_frame->def.fin,
+ .opcode = websocket->thread_data.current_outgoing_frame->def.opcode,
+ .payload_length = websocket->thread_data.current_outgoing_frame->def.payload_length,
+ };
+
+ /* RFC-6455 Section 5.3 Client-to-Server Masking
+ * Clients must mask payload with key derived from an unpredictable source of entropy. */
+ if (!websocket->is_server) {
+ frame.masked = true;
+ /* TODO: faster source of random (but still seeded by device_random) */
+ struct aws_byte_buf masking_key_buf = aws_byte_buf_from_empty_array(frame.masking_key, 4);
+ err = aws_device_random_buffer(&masking_key_buf);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to derive masking key, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+ }
+
+ err = aws_websocket_encoder_start_frame(&websocket->thread_data.encoder, &frame);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to start frame encoding, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Start writing frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 ".",
+ (void *)websocket,
+ (void *)websocket->thread_data.current_outgoing_frame,
+ websocket->thread_data.current_outgoing_frame->def.opcode,
+ aws_websocket_opcode_str(websocket->thread_data.current_outgoing_frame->def.opcode),
+ websocket->thread_data.current_outgoing_frame->def.payload_length);
+ }
+
+ err = aws_websocket_encoder_process(&websocket->thread_data.encoder, &io_msg->message_data);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Frame encoding failed with error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_websocket_encoder_is_frame_in_progress(&websocket->thread_data.encoder)) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Outgoing frame still in progress, but no more data can be written at this time.",
+ (void *)websocket);
+ break;
+ }
+
+ if (websocket->thread_data.current_outgoing_frame->def.opcode == AWS_WEBSOCKET_OPCODE_CLOSE) {
+ wrote_close_frame = true;
+ }
+
+ /*
+ * a completely-written frame gets added to the write completion list so that when the socket write completes
+ * we can complete all of the outbound frames that were finished as part of the io message
+ */
+ aws_linked_list_push_back(
+ &websocket->thread_data.write_completion_frames, &websocket->thread_data.current_outgoing_frame->node);
+
+ websocket->thread_data.current_outgoing_frame = NULL;
+
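+ /* Per RFC-6455, no frames may follow a CLOSE frame, so stop encoding once one has been written. */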
+ if (wrote_close_frame) {
+ break;
+ }
+ }
+
+ /* If the payload stream didn't have any bytes available to read right now, then the aws_io_message might be empty.
+ * If this is the case, schedule a task to try again in the future. */
+ if (io_msg->message_data.len == 0) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Reading from payload stream would block, will try again later.",
+ (void *)websocket);
+
+ if (!websocket->thread_data.is_waiting_on_payload_stream_task) {
+ websocket->thread_data.is_waiting_on_payload_stream_task = true;
+
+ /* Future Optimization Idea: Minimize work while we wait. Use some kind of backoff for the retry timing,
+ * or have some way for the stream to notify us when more data is available. */
+ aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->waiting_on_payload_stream_task);
+ }
+
+ aws_mem_release(io_msg->allocator, io_msg);
+ return;
+ }
+
+ /* Prepare to send aws_io_message up the channel. */
+
+ /* If CLOSE frame was written, that's the last data we'll write */
+ if (wrote_close_frame) {
+ s_stop_writing(websocket, AWS_ERROR_HTTP_WEBSOCKET_CLOSE_FRAME_SENT);
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Sending aws_io_message of size %zu in write direction.",
+ (void *)websocket,
+ io_msg->message_data.len);
+
+ websocket->thread_data.is_waiting_for_write_completion = true;
+ err = aws_channel_slot_send_message(websocket->channel_slot, io_msg, AWS_CHANNEL_DIR_WRITE);
+ if (err) {
+ websocket->thread_data.is_waiting_for_write_completion = false;
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to send message in write direction, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Finish shutdown if we were waiting for the CLOSE frame to be written */
+ if (wrote_close_frame && websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: CLOSE frame sent, finishing handler shutdown sequence.", (void *)websocket);
+
+ s_finish_shutdown(websocket);
+ }
+
+ return;
+
+error:
+ if (io_msg) {
+ aws_mem_release(io_msg->allocator, io_msg);
+ }
+
+ s_shutdown_due_to_write_err(websocket, aws_last_error());
+}
+
+/* Encoder's outgoing_payload callback invokes current frame's callback */
+static int s_encoder_stream_outgoing_payload(struct aws_byte_buf *out_buf, void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.current_outgoing_frame);
+
+ struct outgoing_frame *current_frame = websocket->thread_data.current_outgoing_frame;
+ AWS_ASSERT(current_frame->def.stream_outgoing_payload);
+
+ bool callback_result = current_frame->def.stream_outgoing_payload(websocket, out_buf, current_frame->def.user_data);
+ if (!callback_result) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Outgoing payload callback has reported a failure.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_waiting_on_payload_stream_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ /* If channel has shut down, don't need to resume sending payload */
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Done waiting for payload stream, sending more data...", (void *)websocket);
+
+ websocket->thread_data.is_waiting_on_payload_stream_task = false;
+ s_try_write_outgoing_frames(websocket);
+}
+
+static void s_io_message_write_completed(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int err_code,
+ void *user_data) {
+
+ (void)channel;
+ (void)message;
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(channel));
+
+ /*
+ * Invoke the completion callbacks (and then destroy) for all the frames that were completely written as
+ * part of this message completion at the socket layer
+ */
+ s_complete_frame_list(websocket, &websocket->thread_data.write_completion_frames, err_code);
+
+ if (err_code == AWS_ERROR_SUCCESS) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: aws_io_message written to socket, sending more data...", (void *)websocket);
+
+ websocket->thread_data.is_waiting_for_write_completion = false;
+ s_try_write_outgoing_frames(websocket);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: aws_io_message did not finish writing to socket, error %d (%s).",
+ (void *)websocket,
+ err_code,
+ aws_error_name(err_code));
+
+ s_shutdown_due_to_write_err(websocket, err_code);
+ }
+}
+
+static int s_handler_process_write_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ (void)slot;
+ struct aws_websocket *websocket = handler->impl;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ /* For each aws_io_message headed in the write direction, send a BINARY frame,
+ * where the frame's payload is the data from this aws_io_message. */
+ struct aws_websocket_send_frame_options options = {
+ .payload_length = message->message_data.len,
+ .user_data = message,
+ .stream_outgoing_payload = s_midchannel_send_payload,
+ .on_complete = s_midchannel_send_complete,
+ .opcode = AWS_WEBSOCKET_OPCODE_BINARY,
+ .fin = true,
+ };
+
+ /* Use copy_mark to track progress as the data is streamed out */
+ message->copy_mark = 0;
+
+ int err = s_send_frame(websocket, &options, false);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Callback for writing data from downstream aws_io_messages into payload of BINARY frames headed upstream */
+static bool s_midchannel_send_payload(struct aws_websocket *websocket, struct aws_byte_buf *out_buf, void *user_data) {
+ (void)websocket;
+ struct aws_io_message *io_msg = user_data;
+
+ /* copy_mark is used to track progress */
+ size_t src_available = io_msg->message_data.len - io_msg->copy_mark;
+ size_t dst_available = out_buf->capacity - out_buf->len;
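+ /* Copy the lesser of the bytes remaining in the source message and the space left in out_buf */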
+ size_t sending = dst_available < src_available ? dst_available : src_available;
+
+ bool success = aws_byte_buf_write(out_buf, io_msg->message_data.buffer + io_msg->copy_mark, sending);
+
+ io_msg->copy_mark += sending;
+ return success;
+}
+
+ /* Callback for when data from downstream aws_io_messages finishes being sent as a BINARY frame upstream. */
+static void s_midchannel_send_complete(struct aws_websocket *websocket, int error_code, void *user_data) {
+ (void)websocket;
+ struct aws_io_message *io_msg = user_data;
+
+ if (io_msg->on_completion) {
+ io_msg->on_completion(io_msg->owning_channel, io_msg, error_code, io_msg->user_data);
+ }
+
+ aws_mem_release(io_msg->allocator, io_msg);
+}
+
+static void s_destroy_outgoing_frame(struct aws_websocket *websocket, struct outgoing_frame *frame, int error_code) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Completed outgoing frame=%p opcode=%" PRIu8 "(%s) payload-length=%" PRIu64 " with error_code %d (%s).",
+ (void *)websocket,
+ (void *)frame,
+ frame->def.opcode,
+ aws_websocket_opcode_str(frame->def.opcode),
+ frame->def.payload_length,
+ error_code,
+ aws_error_name(error_code));
+
+ if (frame->def.on_complete) {
+ frame->def.on_complete(websocket, error_code, frame->def.user_data);
+ }
+
+ aws_mem_release(websocket->alloc, frame);
+}
+
+static void s_complete_frame_list(struct aws_websocket *websocket, struct aws_linked_list *frames, int error_code) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(frames);
+ while (node != aws_linked_list_end(frames)) {
+ struct outgoing_frame *frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node);
+
+ node = aws_linked_list_next(node);
+ s_destroy_outgoing_frame(websocket, frame, error_code);
+ }
+
+ /* we've released everything, so reset the list to empty */
+ aws_linked_list_init(frames);
+}
+
+static void s_stop_writing(struct aws_websocket *websocket, int send_frame_error_code) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(send_frame_error_code != AWS_ERROR_SUCCESS);
+
+ if (websocket->thread_data.is_writing_stopped) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Websocket will send no more data, future attempts to send will get error %d (%s).",
+ (void *)websocket,
+ send_frame_error_code,
+ aws_error_name(send_frame_error_code));
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
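+ /* Future send attempts will observe this non-zero error code and be rejected */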
+ websocket->synced_data.send_frame_error_code = send_frame_error_code;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ websocket->thread_data.is_writing_stopped = true;
+}
+
+static void s_shutdown_due_to_write_err(struct aws_websocket *websocket, int error_code) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ /* No more writing allowed (it's ok to call this redundantly). */
+ s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+
+ /* If there's a current outgoing frame, complete it with the specific error code.
+ * Any other pending frames will complete with the generic CONNECTION_CLOSED error. */
+ if (websocket->thread_data.current_outgoing_frame) {
+ s_destroy_outgoing_frame(websocket, websocket->thread_data.current_outgoing_frame, error_code);
+ websocket->thread_data.current_outgoing_frame = NULL;
+ }
+
+ /* If we're in the final stages of shutdown, ensure shutdown completes.
+ * Otherwise tell the channel to shutdown (it's ok to shutdown the channel redundantly). */
+ if (websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) {
+ s_finish_shutdown(websocket);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Closing websocket due to failure during write, error %d (%s).",
+ (void *)websocket,
+ error_code,
+ aws_error_name(error_code));
+ s_schedule_channel_shutdown(websocket, error_code);
+ }
+}
+
+static void s_shutdown_due_to_read_err(struct aws_websocket *websocket, int error_code) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Closing websocket due to failure during read, error %d (%s).",
+ (void *)websocket,
+ error_code,
+ aws_error_name(error_code));
+
+ websocket->thread_data.is_reading_stopped = true;
+
+ /* If there's a current incoming frame, complete it with the specific error code. */
+ if (websocket->thread_data.current_incoming_frame) {
+ s_complete_incoming_frame(websocket, error_code, NULL);
+ }
+
+ /* Tell channel to shutdown (it's ok to call this redundantly) */
+ s_schedule_channel_shutdown(websocket, error_code);
+}
+
+static void s_shutdown_channel_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ int error_code;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ error_code = websocket->synced_data.shutdown_channel_task_error_code;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ aws_channel_shutdown(websocket->channel_slot->channel, error_code);
+}
+
+/* Tell the channel to shut down. It is safe to call this multiple times.
+ * The call to aws_channel_shutdown() is delayed so that a user invoking aws_websocket_close doesn't
+ * have completion callbacks firing before the function call even returns */
+static void s_schedule_channel_shutdown(struct aws_websocket *websocket, int error_code) {
+ bool schedule_shutdown = false;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ if (!websocket->synced_data.is_shutdown_channel_task_scheduled) {
+ schedule_shutdown = true;
+ websocket->synced_data.is_shutdown_channel_task_scheduled = true;
+ websocket->synced_data.shutdown_channel_task_error_code = error_code;
+ }
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (schedule_shutdown) {
+ aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->shutdown_channel_task);
+ }
+}
+
+void aws_websocket_close(struct aws_websocket *websocket, bool free_scarce_resources_immediately) {
+ bool is_midchannel_handler;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+ is_midchannel_handler = websocket->synced_data.is_midchannel_handler;
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (is_midchannel_handler) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Ignoring close call, websocket has converted to midchannel handler.",
+ (void *)websocket);
+ return;
+ }
+
+ /* TODO: aws_channel_shutdown() should let users specify error_code and "immediate" as separate parameters.
+ * Currently, any non-zero error_code results in "immediate" shutdown */
+ int error_code = AWS_ERROR_SUCCESS;
+ if (free_scarce_resources_immediately) {
+ error_code = AWS_ERROR_HTTP_CONNECTION_CLOSED;
+ }
+
+ s_schedule_channel_shutdown(websocket, error_code);
+}
+
+static int s_handler_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately) {
+
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel));
+ struct aws_websocket *websocket = handler->impl;
+ int err;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Websocket handler shutting down dir=%s error_code=%d immediate=%d.",
+ (void *)websocket,
+ dir == AWS_CHANNEL_DIR_READ ? "READ" : "WRITE",
+ error_code,
+ free_scarce_resources_immediately);
+
+ if (dir == AWS_CHANNEL_DIR_READ) {
+ /* Shutdown in the read direction is immediate and simple. */
+ websocket->thread_data.is_reading_stopped = true;
+ aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately);
+
+ } else {
+ websocket->thread_data.channel_shutdown_error_code = error_code;
+ websocket->thread_data.channel_shutdown_free_scarce_resources_immediately = free_scarce_resources_immediately;
+ websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written = true;
+
+ if (websocket->thread_data.channel_shutdown_free_scarce_resources_immediately ||
+ websocket->thread_data.is_writing_stopped) {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Finishing handler shutdown immediately, without ensuring a CLOSE frame was sent.",
+ (void *)websocket);
+
+ s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ s_finish_shutdown(websocket);
+ } else {
+ /* Attempt to queue a CLOSE frame, then wait for it to send before finishing shutdown. */
+ struct aws_websocket_send_frame_options close_frame = {
+ .opcode = AWS_WEBSOCKET_OPCODE_CLOSE,
+ .fin = true,
+ };
+ err = s_send_frame(websocket, &close_frame, false);
+ if (err) {
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to send CLOSE frame, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ s_finish_shutdown(websocket);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Outgoing CLOSE frame queued, handler will finish shutdown once it's sent.",
+ (void *)websocket);
+ /* Schedule a task to run after 1 sec. If the CLOSE frame still hasn't been sent by then, we should just
+ * cancel sending it and shut down the channel. */
+ uint64_t schedule_time = 0;
+ aws_channel_current_clock_time(websocket->channel_slot->channel, &schedule_time);
+ schedule_time += AWS_WEBSOCKET_CLOSE_TIMEOUT;
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: websocket_close_timeout task will be run at timestamp %" PRIu64,
+ (void *)websocket,
+ schedule_time);
+ aws_channel_schedule_task_future(
+ websocket->channel_slot->channel, &websocket->close_timeout_task, schedule_time);
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_close_timeout_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ /* If channel has shut down, don't need to resume sending payload */
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ if (!websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written) {
+ /* Not waiting for a write to complete, which means the CLOSE frame has been sent; nothing to do */
+ return;
+ }
+
+ AWS_LOGF_WARN(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to send CLOSE frame, timeout happened, shutdown the channel",
+ (void *)websocket);
+
+ s_stop_writing(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ s_finish_shutdown(websocket);
+}
+
+static void s_finish_shutdown(struct aws_websocket *websocket) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.is_writing_stopped);
+ AWS_ASSERT(websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written);
+
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Finishing websocket handler shutdown.", (void *)websocket);
+
+ websocket->thread_data.is_shutting_down_and_waiting_for_close_frame_to_be_written = false;
+
+ /* Cancel all incomplete frames */
+ if (websocket->thread_data.current_incoming_frame) {
+ s_complete_incoming_frame(websocket, AWS_ERROR_HTTP_CONNECTION_CLOSED, NULL);
+ }
+
+ if (websocket->thread_data.current_outgoing_frame) {
+ s_destroy_outgoing_frame(
+ websocket, websocket->thread_data.current_outgoing_frame, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ websocket->thread_data.current_outgoing_frame = NULL;
+ }
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ while (!aws_linked_list_empty(&websocket->synced_data.outgoing_frame_list)) {
+ /* Move frames from synced_data to thread_data, then cancel them together outside critical section */
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->synced_data.outgoing_frame_list);
+ aws_linked_list_push_back(&websocket->thread_data.outgoing_frame_list, node);
+ }
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ s_complete_frame_list(websocket, &websocket->thread_data.write_completion_frames, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+
+ while (!aws_linked_list_empty(&websocket->thread_data.outgoing_frame_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&websocket->thread_data.outgoing_frame_list);
+ struct outgoing_frame *frame = AWS_CONTAINER_OF(node, struct outgoing_frame, node);
+ s_destroy_outgoing_frame(websocket, frame, AWS_ERROR_HTTP_CONNECTION_CLOSED);
+ }
+
+ aws_channel_slot_on_handler_shutdown_complete(
+ websocket->channel_slot,
+ AWS_CHANNEL_DIR_WRITE,
+ websocket->thread_data.channel_shutdown_error_code,
+ websocket->thread_data.channel_shutdown_free_scarce_resources_immediately);
+}
+
+static int s_handler_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ AWS_ASSERT(message);
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel));
+ struct aws_websocket *websocket = handler->impl;
+ struct aws_byte_cursor cursor = aws_byte_cursor_from_buf(&message->message_data);
+ int err;
+
+ /* At the end of this function we'll bump the window back up by this amount.
+ * We start off assuming we'll re-open the window by the whole amount,
+ * but this number will go down if we process any payload data that ought to shrink the window */
+ websocket->thread_data.incoming_message_window_update = message->message_data.len;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Begin processing incoming message of size %zu.",
+ (void *)websocket,
+ message->message_data.len);
+
+ while (cursor.len) {
+ if (websocket->thread_data.is_reading_stopped) {
+ goto clean_up;
+ }
+
+ bool frame_complete;
+ err = aws_websocket_decoder_process(&websocket->thread_data.decoder, &cursor, &frame_complete);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed processing incoming message, error %d (%s). Closing connection.",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ if (frame_complete) {
+ bool callback_result;
+ s_complete_incoming_frame(websocket, AWS_ERROR_SUCCESS, &callback_result);
+ if (!callback_result) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Incoming frame completion callback has reported a failure. Closing connection",
+ (void *)websocket);
+
+ aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ goto error;
+ }
+ }
+ }
+
+ if (websocket->thread_data.incoming_message_window_update > 0) {
+ err = aws_channel_slot_increment_read_window(slot, websocket->thread_data.incoming_message_window_update);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to increment read window after message processing, error %d (%s). Closing "
+ "connection.",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+ }
+
+ goto clean_up;
+
+error:
+ s_shutdown_due_to_read_err(websocket, aws_last_error());
+
+clean_up:
+ if (cursor.len > 0) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Done processing incoming message, final %zu bytes ignored.",
+ (void *)websocket,
+ cursor.len);
+ } else {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Done processing incoming message.", (void *)websocket);
+ }
+ aws_mem_release(message->allocator, message);
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decoder_on_frame(const struct aws_websocket_frame *frame, void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(!websocket->thread_data.current_incoming_frame);
+ AWS_ASSERT(!websocket->thread_data.is_reading_stopped);
+
+ websocket->thread_data.current_incoming_frame = &websocket->thread_data.incoming_frame_storage;
+
+ websocket->thread_data.current_incoming_frame->payload_length = frame->payload_length;
+ websocket->thread_data.current_incoming_frame->opcode = frame->opcode;
+ websocket->thread_data.current_incoming_frame->fin = frame->fin;
+
+ /* If CONTINUATION frames are expected, remember which type of data is being continued.
+ * RFC-6455 Section 5.4 Fragmentation */
+ if (aws_websocket_is_data_frame(frame->opcode)) {
+ if (frame->opcode != AWS_WEBSOCKET_OPCODE_CONTINUATION) {
+ if (frame->fin) {
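+ /* Unfragmented message: no CONTINUATION frames expected */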
+ websocket->thread_data.continuation_of_opcode = 0;
+ } else {
+ websocket->thread_data.continuation_of_opcode = frame->opcode;
+ }
+ }
+ } else if (frame->opcode == AWS_WEBSOCKET_OPCODE_PING) {
+ /* Prepare to store payload of PING so we can echo it back in the PONG */
+ aws_byte_buf_reset(&websocket->thread_data.incoming_ping_payload, false /*zero_contents*/);
+ /* Note: we are NOT calling aws_byte_buf_reserve().
+ * This works around an attack where a malicious peer CLAIMS they'll send a huge frame,
+ * which would cause OOM if we did the reserve immediately.
+ * If a malicious peer wants to run us out of memory, they'll need to do
+ * it the costly way and actually send a billion bytes.
+ * Or we could impose our own internal limits, but for now this is simpler */
+ }
+
+ /* Invoke user cb */
+ bool callback_result = true;
+ if (websocket->on_incoming_frame_begin && !websocket->thread_data.is_midchannel_handler) {
+ callback_result = websocket->on_incoming_frame_begin(
+ websocket, websocket->thread_data.current_incoming_frame, websocket->user_data);
+ }
+
+ if (!callback_result) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming frame callback has reported a failure.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decoder_on_payload(struct aws_byte_cursor data, void *user_data) {
+ struct aws_websocket *websocket = user_data;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.current_incoming_frame);
+ AWS_ASSERT(!websocket->thread_data.is_reading_stopped);
+
+ /* Store payload of PING so we can echo it back in the PONG */
+ if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_PING) {
+ aws_byte_buf_append_dynamic(&websocket->thread_data.incoming_ping_payload, &data);
+ }
+
+ if (websocket->thread_data.is_midchannel_handler) {
+ return s_decoder_on_midchannel_payload(websocket, data);
+ }
+
+ return s_decoder_on_user_payload(websocket, data);
+}
+
+/* Invoke user cb */
+static int s_decoder_on_user_payload(struct aws_websocket *websocket, struct aws_byte_cursor data) {
+ if (websocket->on_incoming_frame_payload) {
+ if (!websocket->on_incoming_frame_payload(
+ websocket, websocket->thread_data.current_incoming_frame, data, websocket->user_data)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Incoming payload callback has reported a failure.", (void *)websocket);
+ return aws_raise_error(AWS_ERROR_HTTP_CALLBACK_FAILURE);
+ }
+ }
+
+ /* If this is a "data" frame's payload, let the window shrink */
+ if (aws_websocket_is_data_frame(websocket->thread_data.current_incoming_frame->opcode) &&
+ websocket->manual_window_update) {
+
+ websocket->thread_data.incoming_message_window_update -= data.len;
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: The read window is shrinking by %zu due to incoming payload from 'data' frame.",
+ (void *)websocket,
+ data.len);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Pass data to channel handler on the right */
+static int s_decoder_on_midchannel_payload(struct aws_websocket *websocket, struct aws_byte_cursor data) {
+ struct aws_io_message *io_msg = NULL;
+
+ /* Only pass data to next handler if it's from a BINARY frame (or the CONTINUATION of a BINARY frame) */
+ bool is_binary_data = websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_BINARY ||
+ (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_CONTINUATION &&
+ websocket->thread_data.continuation_of_opcode == AWS_WEBSOCKET_OPCODE_BINARY);
+ if (!is_binary_data) {
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_ASSERT(websocket->channel_slot->adj_right); /* Expected another slot in the read direction */
+
+ /* Note that current implementation of websocket handler does not buffer data travelling in the "read" direction,
+ * so the downstream read window needs to be large enough to immediately receive incoming data. */
+ if (aws_channel_slot_downstream_read_window(websocket->channel_slot) < data.len) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Cannot send entire message without exceeding read window.",
+ (void *)websocket);
+ aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW);
+ goto error;
+ }
+
+ io_msg = aws_channel_acquire_message_from_pool(
+ websocket->channel_slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, data.len);
+ if (!io_msg) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to acquire message.", (void *)websocket);
+ goto error;
+ }
+
+ if (io_msg->message_data.capacity < data.len) {
+ /* Probably can't happen. Data is coming from an aws_io_message, so we should be able to acquire another just as big */
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to acquire sufficiently large message.", (void *)websocket);
+ aws_raise_error(AWS_ERROR_UNKNOWN);
+ goto error;
+ }
+
+ if (!aws_byte_buf_write_from_whole_cursor(&io_msg->message_data, data)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Unexpected error while copying data.", (void *)websocket);
+ aws_raise_error(AWS_ERROR_UNKNOWN);
+ goto error;
+ }
+
+ int err = aws_channel_slot_send_message(websocket->channel_slot, io_msg, AWS_CHANNEL_DIR_READ);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to send read message, error %d (%s).",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Reduce amount by which websocket will update its read window */
+ AWS_ASSERT(websocket->thread_data.incoming_message_window_update >= data.len);
+ websocket->thread_data.incoming_message_window_update -= data.len;
+
+ return AWS_OP_SUCCESS;
+
+error:
+ if (io_msg) {
+ aws_mem_release(io_msg->allocator, io_msg);
+ }
+ return AWS_OP_ERR;
+}
+
+/* When the websocket sends a frame automatically (PONG, CLOSE),
+ * this holds the payload. */
+struct aws_websocket_autopayload {
+ struct aws_allocator *alloc;
+ struct aws_byte_buf buf;
+ struct aws_byte_cursor advancing_cursor;
+};
+
+static struct aws_websocket_autopayload *s_autopayload_new(
+ struct aws_allocator *alloc,
+ const struct aws_byte_buf *src) {
+
+ struct aws_websocket_autopayload *autopayload = aws_mem_calloc(alloc, 1, sizeof(struct aws_websocket_autopayload));
+ autopayload->alloc = alloc;
+ if (src->len > 0) {
+ aws_byte_buf_init_copy(&autopayload->buf, alloc, src);
+ autopayload->advancing_cursor = aws_byte_cursor_from_buf(&autopayload->buf);
+ }
+
+ return autopayload;
+}
+
+static void s_autopayload_destroy(struct aws_websocket_autopayload *autopayload) {
+ aws_byte_buf_clean_up(&autopayload->buf);
+ aws_mem_release(autopayload->alloc, autopayload);
+}
+
+static void s_autopayload_send_complete(struct aws_websocket *websocket, int error_code, void *user_data) {
+ (void)websocket;
+ (void)error_code;
+
+ struct aws_websocket_autopayload *autopayload = user_data;
+ s_autopayload_destroy(autopayload);
+}
+
+static bool s_autopayload_stream_outgoing_payload(
+ struct aws_websocket *websocket,
+ struct aws_byte_buf *out_buf,
+ void *user_data) {
+
+ (void)websocket;
+ struct aws_websocket_autopayload *autopayload = user_data;
+ aws_byte_buf_write_to_capacity(out_buf, &autopayload->advancing_cursor);
+ return true;
+}
+
+static void s_complete_incoming_frame(struct aws_websocket *websocket, int error_code, bool *out_callback_result) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+ AWS_ASSERT(websocket->thread_data.current_incoming_frame);
+
+ if (error_code == 0) {
+ /* If this was a CLOSE frame, don't read any more data. */
+ if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_CLOSE) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Close frame received, any further data received will be ignored.",
+ (void *)websocket);
+ websocket->thread_data.is_reading_stopped = true;
+
+ /* TODO: auto-close if there's a channel-handler to the right */
+
+ } else if (websocket->thread_data.current_incoming_frame->opcode == AWS_WEBSOCKET_OPCODE_PING) {
+ /* Automatically respond to a PING with a PONG */
+ if (!websocket->thread_data.is_writing_stopped) {
+ /* Optimization idea: avoid allocations/copies each time we send an auto-PONG.
+ * Maybe have a small autopayload pool, instead of allocating one each time.
+ * Maybe encode directly to aws_io_message, instead of copying to a buf, that's copied to a msg later.
+ * Maybe "std::move()" the aws_byte_bufs around instead of copying them. */
+ struct aws_websocket_autopayload *autopong =
+ s_autopayload_new(websocket->alloc, &websocket->thread_data.incoming_ping_payload);
+
+ struct aws_websocket_send_frame_options pong_frame = {
+ .opcode = AWS_WEBSOCKET_OPCODE_PONG,
+ .fin = true,
+ .payload_length = autopong->buf.len,
+ .stream_outgoing_payload = s_autopayload_stream_outgoing_payload,
+ .on_complete = s_autopayload_send_complete,
+ .user_data = autopong,
+ };
+
+ int send_err = s_send_frame(websocket, &pong_frame, false /*from_public_api*/);
+ /* Failure should be impossible. We already checked that writing is not stopped */
+ AWS_FATAL_ASSERT(!send_err && "Unexpected failure sending websocket PONG");
+ }
+ }
+ }
+
+ /* Invoke user cb */
+ bool callback_result = true;
+ if (websocket->on_incoming_frame_complete && !websocket->thread_data.is_midchannel_handler) {
+ callback_result = websocket->on_incoming_frame_complete(
+ websocket, websocket->thread_data.current_incoming_frame, error_code, websocket->user_data);
+ }
+
+ if (out_callback_result) {
+ *out_callback_result = callback_result;
+ }
+
+ websocket->thread_data.current_incoming_frame = NULL;
+}
+
+static size_t s_handler_initial_window_size(struct aws_channel_handler *handler) {
+ struct aws_websocket *websocket = handler->impl;
+ return websocket->initial_window_size;
+}
+
+static size_t s_handler_message_overhead(struct aws_channel_handler *handler) {
+ (void)handler;
+ return AWS_WEBSOCKET_MAX_FRAME_OVERHEAD;
+}
+
+static int s_handler_increment_read_window(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ size_t size) {
+
+ struct aws_websocket *websocket = handler->impl;
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(slot->channel));
+ AWS_ASSERT(websocket->thread_data.is_midchannel_handler);
+
+ /* NOTE: This is pretty hacky and should change if it ever causes issues.
+ *
+ * Currently, all read messages are processed the moment they're received.
+ * If the downstream read window is open enough to accept this data, we can send it right along.
+ * BUT if the downstream window were too small, we'd need to buffer the data and wait until
+ * the downstream window opened again to finish sending.
+ *
+ * To avoid that complexity, we go to pains here to ensure that the websocket's window exactly
+ * matches the window to the right, allowing us to avoid buffering in the read direction.
+ */
+ size_t increment = size;
+ if (websocket->thread_data.last_known_right_slot != slot->adj_right) {
+ if (size < slot->window_size) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: The websocket does not support downstream handlers with a smaller window.",
+ (void *)websocket);
+ aws_raise_error(AWS_IO_CHANNEL_READ_WOULD_EXCEED_WINDOW);
+ goto error;
+ }
+
+ /* New handler to the right, make sure websocket's window matches its window. */
+ websocket->thread_data.last_known_right_slot = slot->adj_right;
+ increment = size - slot->window_size;
+ }
+
+ if (increment != 0) {
+ int err = aws_channel_slot_increment_read_window(slot, increment);
+ if (err) {
+ goto error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ websocket->thread_data.is_reading_stopped = true;
+ /* Shutting down channel because I know that no one ever checks these errors */
+ s_shutdown_due_to_read_err(websocket, aws_last_error());
+ return AWS_OP_ERR;
+}
+
+static void s_increment_read_window_action(struct aws_websocket *websocket, size_t size) {
+ AWS_ASSERT(aws_channel_thread_is_callers_thread(websocket->channel_slot->channel));
+
+ int err = aws_channel_slot_increment_read_window(websocket->channel_slot, size);
+ if (err) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Failed to increment read window, error %d (%s). Closing websocket.",
+ (void *)websocket,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ s_schedule_channel_shutdown(websocket, aws_last_error());
+ }
+}
+
+static void s_increment_read_window_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_websocket *websocket = arg;
+ size_t size;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
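+ /* Consume the accumulated increment; resetting it to 0 lets the next increment call schedule a fresh task */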
+ size = websocket->synced_data.window_increment_size;
+ websocket->synced_data.window_increment_size = 0;
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Running task to increment read window by %zu.", (void *)websocket, size);
+
+ s_increment_read_window_action(websocket, size);
+}
+
+void aws_websocket_increment_read_window(struct aws_websocket *websocket, size_t size) {
+ if (size == 0) {
+ AWS_LOGF_TRACE(AWS_LS_HTTP_WEBSOCKET, "id=%p: Ignoring window increment of size 0.", (void *)websocket);
+ return;
+ }
+
+ if (!websocket->manual_window_update) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Ignoring window increment. Manual window management (aka read backpressure) is not enabled.",
+ (void *)websocket);
+ return;
+ }
+
+ /* Schedule a task to do the increment.
+ * If task is already scheduled, just increase size to be incremented */
+ bool is_midchannel_handler = false;
+ bool should_schedule_task = false;
+
+ /* BEGIN CRITICAL SECTION */
+ s_lock_synced_data(websocket);
+
+ if (websocket->synced_data.is_midchannel_handler) {
+ is_midchannel_handler = true;
+ } else if (websocket->synced_data.window_increment_size == 0) {
+ should_schedule_task = true;
+ websocket->synced_data.window_increment_size = size;
+ } else {
+ websocket->synced_data.window_increment_size =
+ aws_add_size_saturating(websocket->synced_data.window_increment_size, size);
+ }
+
+ s_unlock_synced_data(websocket);
+ /* END CRITICAL SECTION */
+
+ if (is_midchannel_handler) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Ignoring window increment call, websocket has converted to midchannel handler.",
+ (void *)websocket);
+ } else if (should_schedule_task) {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET, "id=%p: Scheduling task to increment read window by %zu.", (void *)websocket, size);
+ aws_channel_schedule_task_now(websocket->channel_slot->channel, &websocket->increment_read_window_task);
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Task to increment read window already scheduled, increasing scheduled size by %zu.",
+ (void *)websocket,
+ size);
+ }
+}
+
+int aws_websocket_random_handshake_key(struct aws_byte_buf *dst) {
+ /* RFC-6455 Section 4.1.
+ * Derive random 16-byte value, base64-encoded, for the Sec-WebSocket-Key header */
+ uint8_t key_random_storage[16] = {0};
+ struct aws_byte_buf key_random_buf = aws_byte_buf_from_empty_array(key_random_storage, sizeof(key_random_storage));
+ int err = aws_device_random_buffer(&key_random_buf);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
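+ /* Base64-encode the 16 random bytes into dst; this becomes the Sec-WebSocket-Key value */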
+ struct aws_byte_cursor key_random_cur = aws_byte_cursor_from_buf(&key_random_buf);
+ err = aws_base64_encode(&key_random_cur, dst);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_http_message *aws_http_message_new_websocket_handshake_request(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_cursor host) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&path));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&host));
+
+ struct aws_http_message *request = aws_http_message_new_request(allocator);
+ if (!request) {
+ goto error;
+ }
+
+ int err = aws_http_message_set_request_method(request, aws_http_method_get);
+ if (err) {
+ goto error;
+ }
+
+ err = aws_http_message_set_request_path(request, path);
+ if (err) {
+ goto error;
+ }
+
+ uint8_t key_storage[AWS_WEBSOCKET_MAX_HANDSHAKE_KEY_LENGTH];
+ struct aws_byte_buf key_buf = aws_byte_buf_from_empty_array(key_storage, sizeof(key_storage));
+ err = aws_websocket_random_handshake_key(&key_buf);
+ if (err) {
+ goto error;
+ }
+
+ struct aws_http_header required_headers[] = {
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host"),
+ .value = host,
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("websocket"),
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Connection"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Upgrade"),
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Key"),
+ .value = aws_byte_cursor_from_buf(&key_buf),
+ },
+ {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Version"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("13"),
+ },
+ };
+
+ for (size_t i = 0; i < AWS_ARRAY_SIZE(required_headers); ++i) {
+ err = aws_http_message_add_header(request, required_headers[i]);
+ if (err) {
+ goto error;
+ }
+ }
+
+ return request;
+
+error:
+ aws_http_message_destroy(request);
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c b/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
new file mode 100644
index 0000000000..b522587305
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket_bootstrap.c
@@ -0,0 +1,866 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/cal/hash.h>
+#include <aws/common/encoding.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+#include <aws/http/connection.h>
+#include <aws/http/private/http_impl.h>
+#include <aws/http/private/strutil.h>
+#include <aws/http/private/websocket_impl.h>
+#include <aws/http/request_response.h>
+#include <aws/http/status_code.h>
+#include <aws/io/uri.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204) /* non-constant aggregate initializer */
+#endif
+
+/**
+ * Allow unit-tests to mock interactions with external systems.
+ */
+static const struct aws_websocket_client_bootstrap_system_vtable s_default_system_vtable = {
+ .aws_http_client_connect = aws_http_client_connect,
+ .aws_http_connection_release = aws_http_connection_release,
+ .aws_http_connection_close = aws_http_connection_close,
+ .aws_http_connection_get_channel = aws_http_connection_get_channel,
+ .aws_http_connection_make_request = aws_http_connection_make_request,
+ .aws_http_stream_activate = aws_http_stream_activate,
+ .aws_http_stream_release = aws_http_stream_release,
+ .aws_http_stream_get_connection = aws_http_stream_get_connection,
+ .aws_http_stream_update_window = aws_http_stream_update_window,
+ .aws_http_stream_get_incoming_response_status = aws_http_stream_get_incoming_response_status,
+ .aws_websocket_handler_new = aws_websocket_handler_new,
+};
+
+static const struct aws_websocket_client_bootstrap_system_vtable *s_system_vtable = &s_default_system_vtable;
+
+void aws_websocket_client_bootstrap_set_system_vtable(
+ const struct aws_websocket_client_bootstrap_system_vtable *system_vtable) {
+
+ s_system_vtable = system_vtable;
+}
+
+/**
+ * The websocket bootstrap brings a websocket connection into this world, and sees it out again.
+ * Spins up an HTTP client, performs the opening handshake (HTTP Upgrade request),
+ * creates the websocket handler, and inserts it into the channel.
+ * The bootstrap is responsible for firing the on_connection_setup and on_connection_shutdown callbacks.
+ */
+struct aws_websocket_client_bootstrap {
+ /* Settings copied in from aws_websocket_client_connection_options */
+ struct aws_allocator *alloc;
+ size_t initial_window_size;
+ bool manual_window_update;
+ void *user_data;
+ /* Setup callback will be set to NULL once it's invoked.
+ * This is used to determine whether setup or shutdown should be invoked
+ * from the HTTP-shutdown callback. */
+ aws_websocket_on_connection_setup_fn *websocket_setup_callback;
+ aws_websocket_on_connection_shutdown_fn *websocket_shutdown_callback;
+ aws_websocket_on_incoming_frame_begin_fn *websocket_frame_begin_callback;
+ aws_websocket_on_incoming_frame_payload_fn *websocket_frame_payload_callback;
+ aws_websocket_on_incoming_frame_complete_fn *websocket_frame_complete_callback;
+
+ /* Handshake request data */
+ struct aws_http_message *handshake_request;
+
+ /* Given the "Sec-WebSocket-Key" from the request,
+ * this is what we expect the response's "Sec-WebSocket-Accept" to be */
+ struct aws_byte_buf expected_sec_websocket_accept;
+
+ /* Comma-separated values from the request's "Sec-WebSocket-Protocol" (or NULL if none) */
+ struct aws_string *expected_sec_websocket_protocols;
+
+ /* Handshake response data */
+ int response_status;
+ struct aws_http_headers *response_headers;
+ bool got_full_response_headers;
+ struct aws_byte_buf response_body;
+ bool got_full_response_body;
+
+ int setup_error_code;
+ struct aws_websocket *websocket;
+};
+
+static void s_ws_bootstrap_destroy(struct aws_websocket_client_bootstrap *ws_bootstrap);
+static int s_ws_bootstrap_calculate_sec_websocket_accept(
+ struct aws_byte_cursor sec_websocket_key,
+ struct aws_byte_buf *out_buf,
+ struct aws_allocator *alloc);
+static void s_ws_bootstrap_cancel_setup_due_to_err(
+ struct aws_websocket_client_bootstrap *ws_bootstrap,
+ struct aws_http_connection *http_connection,
+ int error_code);
+static void s_ws_bootstrap_on_http_setup(struct aws_http_connection *http_connection, int error_code, void *user_data);
+static void s_ws_bootstrap_on_http_shutdown(
+ struct aws_http_connection *http_connection,
+ int error_code,
+ void *user_data);
+static int s_ws_bootstrap_on_handshake_response_headers(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data);
+static int s_ws_bootstrap_on_handshake_response_header_block_done(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data);
+static int s_ws_bootstrap_on_handshake_response_body(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data);
+static void s_ws_bootstrap_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data);
+
+int aws_websocket_client_connect(const struct aws_websocket_client_connection_options *options) {
+ aws_http_fatal_assert_library_initialized();
+ AWS_ASSERT(options);
+
+ /* Validate options */
+ struct aws_byte_cursor path;
+ aws_http_message_get_request_path(options->handshake_request, &path);
+ if (!options->allocator || !options->bootstrap || !options->socket_options || !options->host.len || !path.len ||
+ !options->on_connection_setup) {
+
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Missing required websocket connection options.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_byte_cursor method;
+ aws_http_message_get_request_method(options->handshake_request, &method);
+ if (aws_http_str_to_method(method) != AWS_HTTP_METHOD_GET) {
+
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: Websocket request must have method be 'GET'.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (!options->handshake_request) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Invalid connection options, missing required request for websocket client handshake.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ const struct aws_http_headers *request_headers = aws_http_message_get_headers(options->handshake_request);
+ struct aws_byte_cursor sec_websocket_key;
+ if (aws_http_headers_get(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Key"), &sec_websocket_key)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Websocket handshake request is missing required 'Sec-WebSocket-Key' header");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* Extensions are not currently supported */
+ if (aws_http_headers_has(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Extensions"))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP, "id=static: 'Sec-WebSocket-Extensions' are not currently supported");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* Create bootstrap */
+ struct aws_websocket_client_bootstrap *ws_bootstrap =
+ aws_mem_calloc(options->allocator, 1, sizeof(struct aws_websocket_client_bootstrap));
+
+ ws_bootstrap->alloc = options->allocator;
+ ws_bootstrap->initial_window_size = options->initial_window_size;
+ ws_bootstrap->manual_window_update = options->manual_window_management;
+ ws_bootstrap->user_data = options->user_data;
+ ws_bootstrap->websocket_setup_callback = options->on_connection_setup;
+ ws_bootstrap->websocket_shutdown_callback = options->on_connection_shutdown;
+ ws_bootstrap->websocket_frame_begin_callback = options->on_incoming_frame_begin;
+ ws_bootstrap->websocket_frame_payload_callback = options->on_incoming_frame_payload;
+ ws_bootstrap->websocket_frame_complete_callback = options->on_incoming_frame_complete;
+ ws_bootstrap->handshake_request = aws_http_message_acquire(options->handshake_request);
+ ws_bootstrap->response_status = AWS_HTTP_STATUS_CODE_UNKNOWN;
+ ws_bootstrap->response_headers = aws_http_headers_new(ws_bootstrap->alloc);
+ aws_byte_buf_init(&ws_bootstrap->response_body, ws_bootstrap->alloc, 0);
+
+ if (s_ws_bootstrap_calculate_sec_websocket_accept(
+ sec_websocket_key, &ws_bootstrap->expected_sec_websocket_accept, ws_bootstrap->alloc)) {
+ goto error;
+ }
+
+ ws_bootstrap->expected_sec_websocket_protocols =
+ aws_http_headers_get_all(request_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"));
+
+ /* Initiate HTTP connection */
+ struct aws_http_client_connection_options http_options = AWS_HTTP_CLIENT_CONNECTION_OPTIONS_INIT;
+ http_options.allocator = ws_bootstrap->alloc;
+ http_options.bootstrap = options->bootstrap;
+ http_options.host_name = options->host;
+ http_options.socket_options = options->socket_options;
+ http_options.tls_options = options->tls_options;
+ http_options.proxy_options = options->proxy_options;
+
+ if (options->manual_window_management) {
+ http_options.manual_window_management = true;
+
+ /* Give HTTP handler enough window to comfortably receive the handshake response.
+ *
+ * If the upgrade is unsuccessful, the HTTP window will shrink as the response body is received.
+ * In this case, we'll keep incrementing the window back to its original size so data keeps arriving.
+ *
+ * If the upgrade is successful, then the websocket handler is installed, and
+ * the HTTP handler will take over its own window management. */
+ http_options.initial_window_size = 1024;
+ }
+
+ http_options.user_data = ws_bootstrap;
+ http_options.on_setup = s_ws_bootstrap_on_http_setup;
+ http_options.on_shutdown = s_ws_bootstrap_on_http_shutdown;
+ http_options.requested_event_loop = options->requested_event_loop;
+
+ /* Infer port, if not explicitly specified in URI */
+ http_options.port = options->port;
+ if (!http_options.port) {
+ http_options.port = options->tls_options ? 443 : 80;
+ }
+
+ if (s_system_vtable->aws_http_client_connect(&http_options)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Websocket failed to initiate HTTP connection, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Success! (so far) */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Websocket setup begun, connecting to " PRInSTR ":%" PRIu16 PRInSTR,
+ (void *)ws_bootstrap,
+ AWS_BYTE_CURSOR_PRI(options->host),
+ options->port,
+ AWS_BYTE_CURSOR_PRI(path));
+
+ return AWS_OP_SUCCESS;
+
+error:
+ s_ws_bootstrap_destroy(ws_bootstrap);
+ return AWS_OP_ERR;
+}
+
+static void s_ws_bootstrap_destroy(struct aws_websocket_client_bootstrap *ws_bootstrap) {
+ if (!ws_bootstrap) {
+ return;
+ }
+
+ aws_http_message_release(ws_bootstrap->handshake_request);
+ aws_http_headers_release(ws_bootstrap->response_headers);
+ aws_byte_buf_clean_up(&ws_bootstrap->expected_sec_websocket_accept);
+ aws_string_destroy(ws_bootstrap->expected_sec_websocket_protocols);
+ aws_byte_buf_clean_up(&ws_bootstrap->response_body);
+
+ aws_mem_release(ws_bootstrap->alloc, ws_bootstrap);
+}
+
+/* Given the handshake request's "Sec-WebSocket-Key" value,
+ * calculate the expected value for the response's "Sec-WebSocket-Accept".
+ * RFC-6455 Section 4.1:
+ * base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket-Key|
+ * (as a string, not base64-decoded) with the string
+ * "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and
+ * trailing whitespace
+ */
+static int s_ws_bootstrap_calculate_sec_websocket_accept(
+ struct aws_byte_cursor sec_websocket_key,
+ struct aws_byte_buf *out_buf,
+ struct aws_allocator *alloc) {
+
+ AWS_ASSERT(out_buf && !out_buf->allocator && out_buf->len == 0); /* expect buf to be uninitialized */
+
+ /* note: leading and trailing whitespace was already trimmed by aws_http_headers */
+
+ /* optimization: skip concatenating Sec-WebSocket-Key and the magic string.
+ * just run the SHA1 over the first string, and then the 2nd. */
+
+ bool success = false;
+ struct aws_byte_cursor magic_string = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("258EAFA5-E914-47DA-95CA-C5AB0DC85B11");
+
+ /* SHA-1 */
+ struct aws_hash *sha1 = aws_sha1_new(alloc);
+ if (!sha1) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Failed to initiate SHA1, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto cleanup;
+ }
+
+ if (aws_hash_update(sha1, &sec_websocket_key) || aws_hash_update(sha1, &magic_string)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Failed to update SHA1, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto cleanup;
+ }
+
+ uint8_t sha1_storage[AWS_SHA1_LEN];
+ struct aws_byte_buf sha1_buf = aws_byte_buf_from_empty_array(sha1_storage, sizeof(sha1_storage));
+ if (aws_hash_finalize(sha1, &sha1_buf, 0)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Failed to finalize SHA1, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto cleanup;
+ }
+
+ /* base64-encoded SHA-1 (clear out_buf, and write to it again) */
+ size_t base64_encode_sha1_len;
+ if (aws_base64_compute_encoded_len(sha1_buf.len, &base64_encode_sha1_len)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Failed to determine Base64-encoded length, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto cleanup;
+ }
+ aws_byte_buf_init(out_buf, alloc, base64_encode_sha1_len);
+
+ struct aws_byte_cursor sha1_cursor = aws_byte_cursor_from_buf(&sha1_buf);
+ if (aws_base64_encode(&sha1_cursor, out_buf)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=static: Failed to Base64-encode, error %d (%s)",
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto cleanup;
+ }
+
+ success = true;
+cleanup:
+ if (sha1) {
+ aws_hash_destroy(sha1);
+ }
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
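+
+/* Minimal usage sketch for the helper above, using the sample key from
+ * RFC-6455 Section 1.3 (the expected accept value is quoted from the RFC;
+ * "alloc" is whichever allocator the caller already has):
+ *
+ *     struct aws_byte_buf accept_buf;
+ *     AWS_ZERO_STRUCT(accept_buf);
+ *     struct aws_byte_cursor key = aws_byte_cursor_from_c_str("dGhlIHNhbXBsZSBub25jZQ==");
+ *     if (s_ws_bootstrap_calculate_sec_websocket_accept(key, &accept_buf, alloc) == AWS_OP_SUCCESS) {
+ *         // accept_buf now holds "s3pPLMBiTxaQ9kYGzzhZRbK+xOo="
+ *         aws_byte_buf_clean_up(&accept_buf);
+ *     }
+ */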
+
+/* Called if something goes wrong after an HTTP connection is established.
+ * The HTTP connection is closed, and we must wait for its shutdown to complete
+ * before informing the user of the failed websocket setup. */
+static void s_ws_bootstrap_cancel_setup_due_to_err(
+ struct aws_websocket_client_bootstrap *ws_bootstrap,
+ struct aws_http_connection *http_connection,
+ int error_code) {
+
+ AWS_ASSERT(error_code);
+ AWS_ASSERT(http_connection);
+
+ if (!ws_bootstrap->setup_error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Canceling websocket setup due to error %d (%s).",
+ (void *)ws_bootstrap,
+ error_code,
+ aws_error_name(error_code));
+
+ ws_bootstrap->setup_error_code = error_code;
+
+ s_system_vtable->aws_http_connection_close(http_connection);
+ }
+}
+
+static void s_ws_bootstrap_invoke_setup_callback(struct aws_websocket_client_bootstrap *ws_bootstrap, int error_code) {
+
+    /* sanity check: exactly one of websocket or error_code is set (never both, never neither) */
+ AWS_FATAL_ASSERT((error_code != 0) ^ (ws_bootstrap->websocket != NULL));
+
+ /* Report things about the response, if we received them */
+ int *response_status_ptr = NULL;
+ struct aws_http_header *response_header_array = NULL;
+ size_t num_response_headers = 0;
+ struct aws_byte_cursor *response_body_ptr = NULL;
+ struct aws_byte_cursor response_body_cursor = {.len = 0};
+
+ if (ws_bootstrap->got_full_response_headers) {
+ response_status_ptr = &ws_bootstrap->response_status;
+
+ num_response_headers = aws_http_headers_count(ws_bootstrap->response_headers);
+
+ response_header_array =
+ aws_mem_calloc(ws_bootstrap->alloc, aws_max_size(1, num_response_headers), sizeof(struct aws_http_header));
+
+ for (size_t i = 0; i < num_response_headers; ++i) {
+ aws_http_headers_get_index(ws_bootstrap->response_headers, i, &response_header_array[i]);
+ }
+
+ if (ws_bootstrap->got_full_response_body) {
+ response_body_cursor = aws_byte_cursor_from_buf(&ws_bootstrap->response_body);
+ response_body_ptr = &response_body_cursor;
+ }
+ }
+
+ struct aws_websocket_on_connection_setup_data setup_data = {
+ .error_code = error_code,
+ .websocket = ws_bootstrap->websocket,
+ .handshake_response_status = response_status_ptr,
+ .handshake_response_header_array = response_header_array,
+ .num_handshake_response_headers = num_response_headers,
+ .handshake_response_body = response_body_ptr,
+ };
+
+ ws_bootstrap->websocket_setup_callback(&setup_data, ws_bootstrap->user_data);
+
+ /* Clear setup callback so that we know that it's been invoked. */
+ ws_bootstrap->websocket_setup_callback = NULL;
+
+ if (response_header_array) {
+ aws_mem_release(ws_bootstrap->alloc, response_header_array);
+ }
+}
+
+/* Invoked when HTTP connection has been established (or failed to be established) */
+static void s_ws_bootstrap_on_http_setup(struct aws_http_connection *http_connection, int error_code, void *user_data) {
+
+ struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+ /* Setup callback contract is: if error_code is non-zero then connection is NULL. */
+ AWS_FATAL_ASSERT((error_code != 0) == (http_connection == NULL));
+
+ /* If http connection failed, inform the user immediately and clean up the websocket bootstrapper. */
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Websocket setup failed to establish HTTP connection, error %d (%s).",
+ (void *)ws_bootstrap,
+ error_code,
+ aws_error_name(error_code));
+
+ s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, error_code);
+
+ s_ws_bootstrap_destroy(ws_bootstrap);
+ return;
+ }
+
+ /* Connection exists!
+ * Note that if anything goes wrong with websocket setup from hereon out, we must close the http connection
+ * first and wait for shutdown to complete before informing the user of setup failure. */
+
+ /* Send the handshake request */
+ struct aws_http_make_request_options options = {
+ .self_size = sizeof(options),
+ .request = ws_bootstrap->handshake_request,
+ .user_data = ws_bootstrap,
+ .on_response_headers = s_ws_bootstrap_on_handshake_response_headers,
+ .on_response_header_block_done = s_ws_bootstrap_on_handshake_response_header_block_done,
+ .on_response_body = s_ws_bootstrap_on_handshake_response_body,
+ .on_complete = s_ws_bootstrap_on_stream_complete,
+ };
+
+ struct aws_http_stream *handshake_stream =
+ s_system_vtable->aws_http_connection_make_request(http_connection, &options);
+
+ if (!handshake_stream) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Failed to make websocket upgrade request, error %d (%s).",
+ (void *)ws_bootstrap,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (s_system_vtable->aws_http_stream_activate(handshake_stream)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Failed to activate websocket upgrade request, error %d (%s).",
+ (void *)ws_bootstrap,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ /* Success! (so far) */
+ AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: HTTP connection established, sending websocket upgrade request.",
+ (void *)ws_bootstrap);
+ return;
+
+error:
+ s_system_vtable->aws_http_stream_release(handshake_stream);
+ s_ws_bootstrap_cancel_setup_due_to_err(ws_bootstrap, http_connection, aws_last_error());
+}
+
+/* Invoked when the HTTP connection has shut down.
+ * This is never called if the HTTP connection failed its setup */
+static void s_ws_bootstrap_on_http_shutdown(
+ struct aws_http_connection *http_connection,
+ int error_code,
+ void *user_data) {
+
+ struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+ /* Inform user that connection has completely shut down.
+ * If setup callback still hasn't fired, invoke it now and indicate failure.
+ * Otherwise, invoke shutdown callback. */
+ if (ws_bootstrap->websocket_setup_callback) {
+ AWS_ASSERT(!ws_bootstrap->websocket);
+
+ /* If there's already a setup_error_code, use that */
+ if (ws_bootstrap->setup_error_code) {
+ error_code = ws_bootstrap->setup_error_code;
+ }
+
+ /* Ensure non-zero error_code is passed */
+ if (!error_code) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Websocket setup failed, error %d (%s).",
+ (void *)ws_bootstrap,
+ error_code,
+ aws_error_name(error_code));
+
+ s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, error_code);
+
+ } else if (ws_bootstrap->websocket_shutdown_callback) {
+ AWS_ASSERT(ws_bootstrap->websocket);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Websocket client connection shut down with error %d (%s).",
+ (void *)ws_bootstrap->websocket,
+ error_code,
+ aws_error_name(error_code));
+
+ ws_bootstrap->websocket_shutdown_callback(ws_bootstrap->websocket, error_code, ws_bootstrap->user_data);
+ }
+
+ /* Clean up HTTP connection and websocket-bootstrap.
+ * It's still up to the user to release the websocket itself. */
+ s_system_vtable->aws_http_connection_release(http_connection);
+
+ s_ws_bootstrap_destroy(ws_bootstrap);
+}
+
+/* Invoked repeatedly as handshake response headers arrive */
+static int s_ws_bootstrap_on_handshake_response_headers(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *user_data) {
+
+ (void)stream;
+ (void)header_block;
+
+ struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+ /* Deep-copy headers into ws_bootstrap */
+ aws_http_headers_add_array(ws_bootstrap->response_headers, header_array, num_headers);
+
+ /* Don't report a partially-received response */
+ ws_bootstrap->got_full_response_headers = false;
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_ws_bootstrap_validate_header(
+ struct aws_websocket_client_bootstrap *ws_bootstrap,
+ const char *name,
+ struct aws_byte_cursor expected_value,
+ bool case_sensitive) {
+
+ struct aws_byte_cursor actual_value;
+ if (aws_http_headers_get(ws_bootstrap->response_headers, aws_byte_cursor_from_c_str(name), &actual_value)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP, "id=%p: Response lacks required '%s' header", (void *)ws_bootstrap, name);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+ }
+
+ bool matches = case_sensitive ? aws_byte_cursor_eq(&expected_value, &actual_value)
+ : aws_byte_cursor_eq_ignore_case(&expected_value, &actual_value);
+ if (!matches) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Response '%s' header has wrong value. Expected '" PRInSTR "'. Received '" PRInSTR "'",
+ (void *)ws_bootstrap,
+ name,
+ AWS_BYTE_CURSOR_PRI(expected_value),
+ AWS_BYTE_CURSOR_PRI(actual_value));
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_ws_bootstrap_validate_sec_websocket_protocol(const struct aws_websocket_client_bootstrap *ws_bootstrap) {
+ /* First handle the easy case:
+ * If client requested no protocols, then the response should not pick any */
+ if (ws_bootstrap->expected_sec_websocket_protocols == NULL) {
+ if (aws_http_headers_has(
+ ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"))) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Response has 'Sec-WebSocket-Protocol' header, no protocol was requested",
+ (void *)ws_bootstrap);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+ } else {
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ /* Check that server has picked one of the protocols listed in the request */
+ struct aws_byte_cursor response_protocol;
+ if (aws_http_headers_get(
+ ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Protocol"), &response_protocol)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Response lacks required 'Sec-WebSocket-Protocol' header",
+ (void *)ws_bootstrap);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+ }
+
+ struct aws_byte_cursor request_protocols =
+ aws_byte_cursor_from_string(ws_bootstrap->expected_sec_websocket_protocols);
+ struct aws_byte_cursor request_protocol_i;
+ AWS_ZERO_STRUCT(request_protocol_i);
+ while (aws_byte_cursor_next_split(&request_protocols, ',', &request_protocol_i)) {
+ struct aws_byte_cursor request_protocol = aws_strutil_trim_http_whitespace(request_protocol_i);
+ if (aws_byte_cursor_eq(&response_protocol, &request_protocol)) {
+ /* Success! */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Server selected Sec-WebSocket-Protocol: " PRInSTR,
+ (void *)ws_bootstrap,
+ AWS_BYTE_CURSOR_PRI(response_protocol));
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Response 'Sec-WebSocket-Protocol' header has wrong value. Received '" PRInSTR
+ "'. Expected one of '" PRInSTR "'",
+ (void *)ws_bootstrap,
+ AWS_BYTE_CURSOR_PRI(response_protocol),
+ AWS_BYTE_CURSOR_PRI(request_protocols));
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+}
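+
+/* Illustrative example of the negotiation rule enforced above (protocol names
+ * taken from RFC-6455): if the handshake request sent
+ * "Sec-WebSocket-Protocol: chat, superchat", a response carrying
+ * "Sec-WebSocket-Protocol: chat" passes, while a response naming any protocol
+ * that was not in the request list fails with
+ * AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE. */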
+
+/* OK, we've got all the headers for the 101 Switching Protocols response.
+ * Validate the handshake response, install the websocket handler into the channel,
+ * and invoke the on_connection_setup callback. */
+static int s_ws_bootstrap_validate_response_and_install_websocket_handler(
+ struct aws_websocket_client_bootstrap *ws_bootstrap,
+ struct aws_http_connection *http_connection) {
+
+ /* RFC-6455 Section 4.1 - The client MUST validate the server's response as follows... */
+
+    /* (step 1 was already checked: the status code is 101) */
+ AWS_FATAL_ASSERT(ws_bootstrap->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS);
+
+ /* 2. If the response lacks an |Upgrade| header field or the |Upgrade|
+ * header field contains a value that is not an ASCII case-
+ * insensitive match for the value "websocket", the client MUST
+ * _Fail the WebSocket Connection_. */
+ if (s_ws_bootstrap_validate_header(
+ ws_bootstrap, "Upgrade", aws_byte_cursor_from_c_str("websocket"), false /*case_sensitive*/)) {
+ goto error;
+ }
+
+ /* 3. If the response lacks a |Connection| header field or the
+ * |Connection| header field doesn't contain a token that is an
+ * ASCII case-insensitive match for the value "Upgrade", the client
+ * MUST _Fail the WebSocket Connection_. */
+ if (s_ws_bootstrap_validate_header(
+ ws_bootstrap, "Connection", aws_byte_cursor_from_c_str("Upgrade"), false /*case_sensitive*/)) {
+ goto error;
+ }
+
+ /* 4. If the response lacks a |Sec-WebSocket-Accept| header field or
+ * the |Sec-WebSocket-Accept| contains a value other than the
+ * base64-encoded SHA-1 of the concatenation of the |Sec-WebSocket-
+ * Key| (as a string, not base64-decoded) with the string "258EAFA5-
+ * E914-47DA-95CA-C5AB0DC85B11" but ignoring any leading and
+ * trailing whitespace, the client MUST _Fail the WebSocket
+ * Connection_. */
+ if (s_ws_bootstrap_validate_header(
+ ws_bootstrap,
+ "Sec-WebSocket-Accept",
+ aws_byte_cursor_from_buf(&ws_bootstrap->expected_sec_websocket_accept),
+ true /*case_sensitive*/)) {
+ goto error;
+ }
+
+ /* (step 5 is about validating Sec-WebSocket-Extensions, but we don't support extensions) */
+ if (aws_http_headers_has(ws_bootstrap->response_headers, aws_byte_cursor_from_c_str("Sec-WebSocket-Extensions"))) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Response has 'Sec-WebSocket-Extensions' header, but client does not support extensions.",
+ (void *)ws_bootstrap);
+ aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE);
+ goto error;
+ }
+
+ /* 6. If the response includes a |Sec-WebSocket-Protocol| header field
+ * and this header field indicates the use of a subprotocol that was
+ * not present in the client's handshake (the server has indicated a
+ * subprotocol not requested by the client), the client MUST _Fail
+ * the WebSocket Connection_. */
+ if (s_ws_bootstrap_validate_sec_websocket_protocol(ws_bootstrap)) {
+ goto error;
+ }
+
+ /* Insert websocket handler into channel */
+ struct aws_channel *channel = s_system_vtable->aws_http_connection_get_channel(http_connection);
+ AWS_ASSERT(channel);
+
+ struct aws_websocket_handler_options ws_options = {
+ .allocator = ws_bootstrap->alloc,
+ .channel = channel,
+ .initial_window_size = ws_bootstrap->initial_window_size,
+ .user_data = ws_bootstrap->user_data,
+ .on_incoming_frame_begin = ws_bootstrap->websocket_frame_begin_callback,
+ .on_incoming_frame_payload = ws_bootstrap->websocket_frame_payload_callback,
+ .on_incoming_frame_complete = ws_bootstrap->websocket_frame_complete_callback,
+ .is_server = false,
+ .manual_window_update = ws_bootstrap->manual_window_update,
+ };
+
+ ws_bootstrap->websocket = s_system_vtable->aws_websocket_handler_new(&ws_options);
+ if (!ws_bootstrap->websocket) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Failed to create websocket handler, error %d (%s)",
+ (void *)ws_bootstrap,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ /* Success! Setup complete! */
+    /* Trace log tying the setup id to the new websocket id. */
+    AWS_LOGF_TRACE(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Setup success, created websocket=%p",
+ (void *)ws_bootstrap,
+ (void *)ws_bootstrap->websocket);
+
+    /* Debug log announcing creation of the websocket. */
+    AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Websocket client connection established.",
+ (void *)ws_bootstrap->websocket);
+
+ s_ws_bootstrap_invoke_setup_callback(ws_bootstrap, 0 /*error_code*/);
+ return AWS_OP_SUCCESS;
+
+error:
+ s_ws_bootstrap_cancel_setup_due_to_err(ws_bootstrap, http_connection, aws_last_error());
+ /* Returning error stops HTTP from processing any further data */
+ return AWS_OP_ERR;
+}
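+
+/* For reference, a handshake response that passes the checks above looks like
+ * this (the Sec-WebSocket-Accept value corresponds to the sample key in
+ * RFC-6455 Section 1.3):
+ *
+ *     HTTP/1.1 101 Switching Protocols
+ *     Upgrade: websocket
+ *     Connection: Upgrade
+ *     Sec-WebSocket-Accept: s3pPLMBiTxaQ9kYGzzhZRbK+xOo=
+ */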
+
+/**
+ * Invoked each time we reach the end of a block of response headers.
+ * If we got a valid 101 Switching Protocols response, we insert the websocket handler.
+ * Note:
+ * In HTTP, 1xx responses are "interim" responses. So a 101 Switching Protocols
+ * response does not "complete" the stream. Once the connection has switched
+ * protocols, the stream does not end until the whole connection is closed.
+ */
+static int s_ws_bootstrap_on_handshake_response_header_block_done(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ void *user_data) {
+
+ struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+ struct aws_http_connection *http_connection = s_system_vtable->aws_http_stream_get_connection(stream);
+ AWS_ASSERT(http_connection);
+
+ /* Get status code from stream */
+ s_system_vtable->aws_http_stream_get_incoming_response_status(stream, &ws_bootstrap->response_status);
+
+ ws_bootstrap->got_full_response_headers = true;
+
+ if (header_block == AWS_HTTP_HEADER_BLOCK_INFORMATIONAL) {
+ if (ws_bootstrap->response_status == AWS_HTTP_STATUS_CODE_101_SWITCHING_PROTOCOLS) {
+ /* OK, got 101 response, proceed with upgrade! */
+ return s_ws_bootstrap_validate_response_and_install_websocket_handler(ws_bootstrap, http_connection);
+
+ } else {
+ /* It would be weird to get any other kind of 1xx response, but anything is possible.
+ * Another response should come eventually. Just ignore the headers from this one... */
+ AWS_LOGF_DEBUG(
+ AWS_LS_HTTP_WEBSOCKET_SETUP,
+ "id=%p: Server sent interim response with status code %d",
+ (void *)ws_bootstrap,
+ ws_bootstrap->response_status);
+
+ aws_http_headers_clear(ws_bootstrap->response_headers);
+ ws_bootstrap->got_full_response_headers = false;
+ return AWS_OP_SUCCESS;
+ }
+ }
+
+ /* Otherwise, we got normal headers (from a non-1xx response), or trailing headers.
+ * This can only happen if the handshake did not succeed. Keep the connection going.
+ * We'll report failed setup to the user after we've received the complete response */
+ ws_bootstrap->setup_error_code = AWS_ERROR_HTTP_WEBSOCKET_UPGRADE_FAILURE;
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Invoked as we receive the body of a failed response.
+ * This is never invoked if the handshake succeeds.
+ */
+static int s_ws_bootstrap_on_handshake_response_body(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data) {
+
+ struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+
+ aws_byte_buf_append_dynamic(&ws_bootstrap->response_body, data);
+
+ /* If we're managing the read window...
+ * bump the HTTP window back to its starting size, so that we keep receiving the whole response. */
+ if (ws_bootstrap->manual_window_update) {
+ s_system_vtable->aws_http_stream_update_window(stream, data->len);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * Invoked when the stream completes.
+ *
+ * If the handshake succeeded and the websocket was installed,
+ * then this is invoked at the end of the websocket connection.
+ *
+ * If the handshake response was not 101, then this is invoked
+ * after we've received the whole response.
+ *
+ * Or this is invoked because the connection failed unexpectedly before the handshake could complete,
+ * (or we killed the connection because the 101 response didn't pass validation).
+ */
+static void s_ws_bootstrap_on_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
+ struct aws_websocket_client_bootstrap *ws_bootstrap = user_data;
+ struct aws_http_connection *http_connection = s_system_vtable->aws_http_stream_get_connection(stream);
+
+ /* Only report the body if we received a complete response */
+ if (error_code == 0) {
+ ws_bootstrap->got_full_response_body = true;
+ }
+
+ /* Make sure the connection closes.
+ * We'll deal with finishing setup or shutdown from the http-shutdown callback */
+ s_system_vtable->aws_http_connection_close(http_connection);
+
+ /* Done with stream, let it be cleaned up */
+ s_system_vtable->aws_http_stream_release(stream);
+}
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c b/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
new file mode 100644
index 0000000000..bcaa3c6912
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket_decoder.c
@@ -0,0 +1,387 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_decoder.h>
+
+#include <aws/common/encoding.h>
+
+#include <inttypes.h>
+
+typedef int(state_fn)(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data);
+
+/* STATE_INIT: Resets things, consumes no data */
+static int s_state_init(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ (void)data;
+ AWS_ZERO_STRUCT(decoder->current_frame);
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_OPCODE_BYTE;
+ return AWS_OP_SUCCESS;
+}
+
+/* STATE_OPCODE_BYTE: Decode first byte of frame, which has all kinds of goodies in it. */
+static int s_state_opcode_byte(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ if (data->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ uint8_t byte = data->ptr[0];
+ aws_byte_cursor_advance(data, 1);
+
+ /* first 4 bits are all bools */
+ decoder->current_frame.fin = byte & 0x80;
+ decoder->current_frame.rsv[0] = byte & 0x40;
+ decoder->current_frame.rsv[1] = byte & 0x20;
+ decoder->current_frame.rsv[2] = byte & 0x10;
+
+ /* next 4 bits are opcode */
+ decoder->current_frame.opcode = byte & 0x0F;
+
+ /* RFC-6455 Section 5.2 - Opcode
+ * If an unknown opcode is received, the receiving endpoint MUST _Fail the WebSocket Connection_. */
+ switch (decoder->current_frame.opcode) {
+ case AWS_WEBSOCKET_OPCODE_CONTINUATION:
+ case AWS_WEBSOCKET_OPCODE_TEXT:
+ case AWS_WEBSOCKET_OPCODE_BINARY:
+ case AWS_WEBSOCKET_OPCODE_CLOSE:
+ case AWS_WEBSOCKET_OPCODE_PING:
+ case AWS_WEBSOCKET_OPCODE_PONG:
+ break;
+ default:
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Received frame with unknown opcode 0x%" PRIx8,
+ (void *)decoder->user_data,
+ decoder->current_frame.opcode);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+ }
+
+ /* RFC-6455 Section 5.2 Fragmentation
+ *
+ * Data frames with the FIN bit clear are considered fragmented and must be followed by
+ * 1+ CONTINUATION frames, where only the final CONTINUATION frame's FIN bit is set.
+ *
+ * Control frames may be injected in the middle of a fragmented message,
+ * but control frames may not be fragmented themselves.
+ */
+ if (aws_websocket_is_data_frame(decoder->current_frame.opcode)) {
+ bool is_continuation_frame = AWS_WEBSOCKET_OPCODE_CONTINUATION == decoder->current_frame.opcode;
+
+ if (decoder->expecting_continuation_data_frame != is_continuation_frame) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Fragmentation error. Received start of new message before end of previous message",
+ (void *)decoder->user_data);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+ }
+
+ decoder->expecting_continuation_data_frame = !decoder->current_frame.fin;
+
+ } else {
+ /* Control frames themselves MUST NOT be fragmented. */
+ if (!decoder->current_frame.fin) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Received fragmented control frame. This is illegal",
+ (void *)decoder->user_data);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+ }
+ }
+
+ if (decoder->current_frame.opcode == AWS_WEBSOCKET_OPCODE_TEXT) {
+ decoder->processing_text_message = true;
+ }
+
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_LENGTH_BYTE;
+ return AWS_OP_SUCCESS;
+}
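+
+/* Worked example of the first-byte decoding above (RFC-6455 Section 5.2):
+ * byte 0x81 -> fin=1, opcode=0x1 (an unfragmented TEXT frame);
+ * byte 0x01 -> fin=0, opcode=0x1 (first fragment of a TEXT message, so
+ * CONTINUATION frames must follow);
+ * byte 0x88 -> fin=1, opcode=0x8 (a CLOSE control frame). */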
+
+/* STATE_LENGTH_BYTE: Decode byte containing length, determine if we need to decode extended length. */
+static int s_state_length_byte(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ if (data->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ uint8_t byte = data->ptr[0];
+ aws_byte_cursor_advance(data, 1);
+
+ /* first bit is a bool */
+ decoder->current_frame.masked = byte & 0x80;
+
+ /* remaining 7 bits are payload length */
+ decoder->current_frame.payload_length = byte & 0x7F;
+
+ if (decoder->current_frame.payload_length >= AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH) {
+ /* If 7bit payload length has a high value, then the next few bytes contain the real payload length */
+ decoder->state_bytes_processed = 0;
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_EXTENDED_LENGTH;
+ } else {
+ /* If 7bit payload length has low value, that's the actual payload size, jump past EXTENDED_LENGTH state */
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* STATE_EXTENDED_LENGTH: Decode extended length (state skipped if no extended length). */
+static int s_state_extended_length(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ if (data->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ /* The 7bit payload value loaded during the previous state indicated that
+ * actual payload length is encoded across the next 2 or 8 bytes. */
+ uint8_t total_bytes_extended_length;
+ uint64_t min_acceptable_value;
+ uint64_t max_acceptable_value;
+ if (decoder->current_frame.payload_length == AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH) {
+ total_bytes_extended_length = 2;
+ min_acceptable_value = AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE;
+ max_acceptable_value = AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE;
+ } else {
+ AWS_ASSERT(decoder->current_frame.payload_length == AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH);
+
+ total_bytes_extended_length = 8;
+ min_acceptable_value = AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE;
+ max_acceptable_value = AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE;
+ }
+
+ /* Copy bytes of extended-length to state_cache, we'll process them later.*/
+ AWS_ASSERT(total_bytes_extended_length > decoder->state_bytes_processed);
+
+ size_t remaining_bytes = (size_t)(total_bytes_extended_length - decoder->state_bytes_processed);
+ size_t bytes_to_consume = remaining_bytes <= data->len ? remaining_bytes : data->len;
+
+ AWS_ASSERT(bytes_to_consume + decoder->state_bytes_processed <= sizeof(decoder->state_cache));
+
+ memcpy(decoder->state_cache + decoder->state_bytes_processed, data->ptr, bytes_to_consume);
+
+ aws_byte_cursor_advance(data, bytes_to_consume);
+ decoder->state_bytes_processed += bytes_to_consume;
+
+ /* Return, still waiting on more bytes */
+ if (decoder->state_bytes_processed < total_bytes_extended_length) {
+ return AWS_OP_SUCCESS;
+ }
+
+ /* All bytes have been copied into state_cache, now read them together as one number,
+ * transforming from network byte order (big endian) to native endianness. */
+ struct aws_byte_cursor cache_cursor = aws_byte_cursor_from_array(decoder->state_cache, total_bytes_extended_length);
+ if (total_bytes_extended_length == 2) {
+ uint16_t val;
+ aws_byte_cursor_read_be16(&cache_cursor, &val);
+ decoder->current_frame.payload_length = val;
+ } else {
+ aws_byte_cursor_read_be64(&cache_cursor, &decoder->current_frame.payload_length);
+ }
+
+ if (decoder->current_frame.payload_length < min_acceptable_value ||
+ decoder->current_frame.payload_length > max_acceptable_value) {
+
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Failed to decode payload length", (void *)decoder->user_data);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+ }
+
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY_CHECK;
+ return AWS_OP_SUCCESS;
+}
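+
+/* Worked example of the length encoding handled by the two states above
+ * (RFC-6455 Section 5.2): a 7-bit value of 0..125 is the payload length
+ * itself; 126 means the real length follows in the next 2 bytes (e.g. a
+ * 500-byte payload is encoded as 126 followed by 0x01F4); 127 means the
+ * real length follows in the next 8 bytes (payloads of 65536 bytes and up). */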
+
+/* MASKING_KEY_CHECK: Determine if we need to decode masking-key. Consumes no data. */
+static int s_state_masking_key_check(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ (void)data;
+
+ /* If mask bit was set, move to next state to process 4 bytes of masking key.
+ * Otherwise skip next step, there is no masking key. */
+ if (decoder->current_frame.masked) {
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_MASKING_KEY;
+ decoder->state_bytes_processed = 0;
+ } else {
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY: Decode masking-key (state skipped if no masking key). */
+static int s_state_masking_key(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ if (data->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_ASSERT(4 > decoder->state_bytes_processed);
+ size_t bytes_remaining = 4 - (size_t)decoder->state_bytes_processed;
+ size_t bytes_to_consume = bytes_remaining < data->len ? bytes_remaining : data->len;
+
+ memcpy(decoder->current_frame.masking_key + decoder->state_bytes_processed, data->ptr, bytes_to_consume);
+
+ aws_byte_cursor_advance(data, bytes_to_consume);
+ decoder->state_bytes_processed += bytes_to_consume;
+
+ /* If all bytes consumed, proceed to next state */
+ if (decoder->state_bytes_processed == 4) {
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD_CHECK;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD_CHECK: Determine if we need to decode a payload. Consumes no data. */
+static int s_state_payload_check(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ (void)data;
+
+ /* Invoke on_frame() callback to inform user of non-payload data. */
+ int err = decoder->on_frame(&decoder->current_frame, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ /* Choose next state: either we have payload to process or we don't. */
+ if (decoder->current_frame.payload_length > 0) {
+ decoder->state_bytes_processed = 0;
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_PAYLOAD;
+ } else {
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_FRAME_END;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD: Decode payload until we're done (state skipped if no payload). */
+static int s_state_payload(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ if (data->len == 0) {
+ return AWS_OP_SUCCESS;
+ }
+
+ AWS_ASSERT(decoder->current_frame.payload_length > decoder->state_bytes_processed);
+ uint64_t bytes_remaining = decoder->current_frame.payload_length - decoder->state_bytes_processed;
+ size_t bytes_to_consume = bytes_remaining < data->len ? (size_t)bytes_remaining : data->len;
+
+ struct aws_byte_cursor payload = aws_byte_cursor_advance(data, bytes_to_consume);
+
+ /* Unmask data, if necessary.
+ * RFC-6455 Section 5.3 Client-to-Server Masking
+ * Each byte of payload is XOR against a byte of the masking-key */
+ if (decoder->current_frame.masked) {
+ uint64_t mask_index = decoder->state_bytes_processed;
+
+ /* Optimization idea: don't do this 1 byte at a time */
+ uint8_t *current_byte = payload.ptr;
+ uint8_t *end_byte = payload.ptr + payload.len;
+ while (current_byte != end_byte) {
+ *current_byte++ ^= decoder->current_frame.masking_key[mask_index++ % 4];
+ }
+ }
+
+ /* TODO: validate payload of CLOSE frame */
+
+ /* Validate the UTF-8 for TEXT messages (a TEXT frame and any subsequent CONTINUATION frames) */
+ if (decoder->processing_text_message && aws_websocket_is_data_frame(decoder->current_frame.opcode)) {
+ if (aws_utf8_decoder_update(decoder->text_message_validator, payload)) {
+ AWS_LOGF_ERROR(AWS_LS_HTTP_WEBSOCKET, "id=%p: Received invalid UTF-8", (void *)decoder->user_data);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+ }
+ }
+
+ /* Invoke on_payload() callback to inform user of payload data */
+ int err = decoder->on_payload(payload, decoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ decoder->state_bytes_processed += payload.len;
+ AWS_ASSERT(decoder->state_bytes_processed <= decoder->current_frame.payload_length);
+
+ /* If all data consumed, proceed to next state. */
+ if (decoder->state_bytes_processed == decoder->current_frame.payload_length) {
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_FRAME_END;
+ }
+
+ return AWS_OP_SUCCESS;
+}
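+
+/* Minimal sketch of the client-to-server masking applied above (RFC-6455
+ * Section 5.3); the XOR is symmetric, so the same loop masks and unmasks:
+ *
+ *     for (size_t i = 0; i < payload_len; ++i) {
+ *         payload[i] ^= masking_key[i % 4];
+ *     }
+ *
+ * The mask_index bookkeeping above exists because one frame's payload may be
+ * delivered across several aws_websocket_decoder_process() calls, so the key
+ * offset has to survive between calls. */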
+
+/* FRAME_END: Perform checks once we reach the end of the frame. */
+static int s_state_frame_end(struct aws_websocket_decoder *decoder, struct aws_byte_cursor *data) {
+ (void)data;
+
+ /* If we're done processing a text message (a TEXT frame and any subsequent CONTINUATION frames),
+ * complete the UTF-8 validation */
+ if (decoder->processing_text_message && aws_websocket_is_data_frame(decoder->current_frame.opcode) &&
+ decoder->current_frame.fin) {
+
+ if (aws_utf8_decoder_finalize(decoder->text_message_validator)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Received invalid UTF-8 (incomplete encoding)",
+ (void *)decoder->user_data);
+ return aws_raise_error(AWS_ERROR_HTTP_WEBSOCKET_PROTOCOL_ERROR);
+ }
+
+ decoder->processing_text_message = false;
+ }
+
+ /* Done! */
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_DONE;
+ return AWS_OP_SUCCESS;
+}
+
+static state_fn *s_state_functions[AWS_WEBSOCKET_DECODER_STATE_DONE] = {
+ s_state_init,
+ s_state_opcode_byte,
+ s_state_length_byte,
+ s_state_extended_length,
+ s_state_masking_key_check,
+ s_state_masking_key,
+ s_state_payload_check,
+ s_state_payload,
+ s_state_frame_end,
+};
+
+int aws_websocket_decoder_process(
+ struct aws_websocket_decoder *decoder,
+ struct aws_byte_cursor *data,
+ bool *frame_complete) {
+
+ /* Run state machine until frame is completely decoded, or the state stops changing.
+ * Note that we don't stop looping when data->len reaches zero, because some states consume no data. */
+ while (decoder->state != AWS_WEBSOCKET_DECODER_STATE_DONE) {
+ enum aws_websocket_decoder_state prev_state = decoder->state;
+
+ int err = s_state_functions[decoder->state](decoder, data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ if (decoder->state == prev_state) {
+ AWS_ASSERT(data->len == 0); /* If no more work to do, all possible data should have been consumed */
+ break;
+ }
+ }
+
+ if (decoder->state == AWS_WEBSOCKET_DECODER_STATE_DONE) {
+ decoder->state = AWS_WEBSOCKET_DECODER_STATE_INIT;
+ *frame_complete = true;
+ return AWS_OP_SUCCESS;
+ }
+
+ *frame_complete = false;
+ return AWS_OP_SUCCESS;
+}
+
+void aws_websocket_decoder_init(
+ struct aws_websocket_decoder *decoder,
+ struct aws_allocator *alloc,
+ aws_websocket_decoder_frame_fn *on_frame,
+ aws_websocket_decoder_payload_fn *on_payload,
+ void *user_data) {
+
+ AWS_ZERO_STRUCT(*decoder);
+ decoder->user_data = user_data;
+ decoder->on_frame = on_frame;
+ decoder->on_payload = on_payload;
+ decoder->text_message_validator = aws_utf8_decoder_new(alloc, NULL /*options*/);
+}
+
+void aws_websocket_decoder_clean_up(struct aws_websocket_decoder *decoder) {
+ aws_utf8_decoder_destroy(decoder->text_message_validator);
+ AWS_ZERO_STRUCT(*decoder);
+}
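+
+/* Minimal usage sketch for the decoder (callback signatures inferred from the
+ * call sites in s_state_payload_check() and s_state_payload(); names are
+ * illustrative):
+ *
+ *     static int s_on_frame(const struct aws_websocket_frame *frame, void *user_data) {
+ *         (void)frame; (void)user_data;  // inspect opcode, fin, payload_length here
+ *         return AWS_OP_SUCCESS;
+ *     }
+ *     static int s_on_payload(struct aws_byte_cursor data, void *user_data) {
+ *         (void)data; (void)user_data;  // consume (already unmasked) payload bytes here
+ *         return AWS_OP_SUCCESS;
+ *     }
+ *
+ *     struct aws_websocket_decoder decoder;
+ *     aws_websocket_decoder_init(&decoder, allocator, s_on_frame, s_on_payload, NULL);
+ *
+ *     // "incoming" is a cursor over bytes just read from the socket
+ *     bool frame_complete = false;
+ *     while (incoming.len > 0) {
+ *         if (aws_websocket_decoder_process(&decoder, &incoming, &frame_complete)) {
+ *             break;  // protocol error; caller is expected to shut the connection down
+ *         }
+ *     }
+ *     aws_websocket_decoder_clean_up(&decoder);
+ */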
diff --git a/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c b/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
new file mode 100644
index 0000000000..a2fd1989a7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/source/websocket_encoder.c
@@ -0,0 +1,375 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/http/private/websocket_encoder.h>
+
+#include <inttypes.h>
+
+typedef int(state_fn)(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf);
+
+/* STATE_INIT: Outputs no data */
+static int s_state_init(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+ (void)out_buf;
+
+ if (!encoder->is_frame_in_progress) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_OPCODE_BYTE;
+ return AWS_OP_SUCCESS;
+}
+
+/* STATE_OPCODE_BYTE: Outputs 1st byte of frame, which is packed with goodies. */
+static int s_state_opcode_byte(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+
+ AWS_ASSERT((encoder->frame.opcode & 0xF0) == 0); /* Should be impossible, the opcode was checked in start_frame() */
+
+ /* Right 4 bits are opcode, left 4 bits are fin|rsv1|rsv2|rsv3 */
+ uint8_t byte = encoder->frame.opcode;
+ byte |= (encoder->frame.fin << 7);
+ byte |= (encoder->frame.rsv[0] << 6);
+ byte |= (encoder->frame.rsv[1] << 5);
+ byte |= (encoder->frame.rsv[2] << 4);
+
+ /* If buffer has room to write, proceed to next state */
+ if (aws_byte_buf_write_u8(out_buf, byte)) {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_LENGTH_BYTE;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* STATE_LENGTH_BYTE: Output 2nd byte of frame, which indicates payload length */
+static int s_state_length_byte(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+ /* First bit is masking bool */
+ uint8_t byte = (uint8_t)(encoder->frame.masked << 7);
+
+ /* Next 7bits are length, if length is small.
+ * Otherwise next 7bits are a magic number indicating how many bytes will be required to encode actual length */
+ bool extended_length_required;
+
+ if (encoder->frame.payload_length < AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE) {
+ byte |= (uint8_t)encoder->frame.payload_length;
+ extended_length_required = false;
+ } else if (encoder->frame.payload_length <= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE) {
+ byte |= AWS_WEBSOCKET_7BIT_VALUE_FOR_2BYTE_EXTENDED_LENGTH;
+ extended_length_required = true;
+ } else {
+ AWS_ASSERT(encoder->frame.payload_length <= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE);
+ byte |= AWS_WEBSOCKET_7BIT_VALUE_FOR_8BYTE_EXTENDED_LENGTH;
+ extended_length_required = true;
+ }
+
+ /* If buffer has room to write, proceed to next appropriate state */
+ if (aws_byte_buf_write_u8(out_buf, byte)) {
+ if (extended_length_required) {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_EXTENDED_LENGTH;
+ encoder->state_bytes_processed = 0;
+ } else {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* STATE_EXTENDED_LENGTH: Output extended length (state skipped if not using extended length). */
+static int s_state_extended_length(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+ /* Fill tmp buffer with extended-length in network byte order */
+ uint8_t network_bytes_array[8] = {0};
+ struct aws_byte_buf network_bytes_buf =
+ aws_byte_buf_from_empty_array(network_bytes_array, sizeof(network_bytes_array));
+ if (encoder->frame.payload_length <= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MAX_VALUE) {
+ aws_byte_buf_write_be16(&network_bytes_buf, (uint16_t)encoder->frame.payload_length);
+ } else {
+ aws_byte_buf_write_be64(&network_bytes_buf, encoder->frame.payload_length);
+ }
+
+ /* Use cursor to iterate over tmp buffer */
+ struct aws_byte_cursor network_bytes_cursor = aws_byte_cursor_from_buf(&network_bytes_buf);
+
+ /* Advance cursor if some bytes already written */
+ aws_byte_cursor_advance(&network_bytes_cursor, (size_t)encoder->state_bytes_processed);
+
+ /* Shorten cursor if it won't all fit in out_buf */
+ bool all_data_written = true;
+ size_t space_available = out_buf->capacity - out_buf->len;
+ if (network_bytes_cursor.len > space_available) {
+ network_bytes_cursor.len = space_available;
+ all_data_written = false;
+ }
+
+ aws_byte_buf_write_from_whole_cursor(out_buf, network_bytes_cursor);
+ encoder->state_bytes_processed += network_bytes_cursor.len;
+
+ /* If all bytes written, advance to next state */
+ if (all_data_written) {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY_CHECK;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY_CHECK: Outputs no data. Gets things ready for (or decides to skip) the STATE_MASKING_KEY */
+static int s_state_masking_key_check(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+ (void)out_buf;
+
+ if (encoder->frame.masked) {
+ encoder->state_bytes_processed = 0;
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_MASKING_KEY;
+ } else {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* MASKING_KEY: Output masking-key (state skipped if no masking key). */
+static int s_state_masking_key(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+ /* Prepare cursor to iterate over masking-key bytes */
+ struct aws_byte_cursor cursor =
+ aws_byte_cursor_from_array(encoder->frame.masking_key, sizeof(encoder->frame.masking_key));
+
+ /* Advance cursor if some bytes already written (moves ptr forward but shortens len so end stays in place) */
+ aws_byte_cursor_advance(&cursor, (size_t)encoder->state_bytes_processed);
+
+ /* Shorten cursor if it won't all fit in out_buf */
+ bool all_data_written = true;
+ size_t space_available = out_buf->capacity - out_buf->len;
+ if (cursor.len > space_available) {
+ cursor.len = space_available;
+ all_data_written = false;
+ }
+
+ aws_byte_buf_write_from_whole_cursor(out_buf, cursor);
+ encoder->state_bytes_processed += cursor.len;
+
+ /* If all bytes written, advance to next state */
+ if (all_data_written) {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD_CHECK;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD_CHECK: Outputs no data. Gets things ready for (or decides to skip) STATE_PAYLOAD */
+static int s_state_payload_check(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+ (void)out_buf;
+
+ if (encoder->frame.payload_length > 0) {
+ encoder->state_bytes_processed = 0;
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD;
+ } else {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_DONE;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* PAYLOAD: Output payload until we're done (state skipped if no payload). */
+static int s_state_payload(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+
+ /* Bail early if out_buf has no space for writing */
+ if (out_buf->len >= out_buf->capacity) {
+ return AWS_OP_SUCCESS;
+ }
+
+ const uint64_t prev_bytes_processed = encoder->state_bytes_processed;
+ const struct aws_byte_buf prev_buf = *out_buf;
+
+ /* Invoke callback which will write to buffer */
+ int err = encoder->stream_outgoing_payload(out_buf, encoder->user_data);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ /* Ensure that user did not commit forbidden acts upon the out_buf */
+ AWS_FATAL_ASSERT(
+ (out_buf->buffer == prev_buf.buffer) && (out_buf->capacity == prev_buf.capacity) &&
+ (out_buf->len >= prev_buf.len));
+
+ size_t bytes_written = out_buf->len - prev_buf.len;
+
+ err = aws_add_u64_checked(encoder->state_bytes_processed, bytes_written, &encoder->state_bytes_processed);
+ if (err) {
+ return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
+ }
+
+ /* Mask data, if necessary.
+ * RFC-6455 Section 5.3 Client-to-Server Masking
+ * Each byte of payload is XOR against a byte of the masking-key */
+ if (encoder->frame.masked) {
+ uint64_t mask_index = prev_bytes_processed;
+
+ /* Optimization idea: don't do this 1 byte at a time */
+ uint8_t *current_byte = out_buf->buffer + prev_buf.len;
+ uint8_t *end_byte = out_buf->buffer + out_buf->len;
+ while (current_byte != end_byte) {
+ *current_byte++ ^= encoder->frame.masking_key[mask_index++ % 4];
+ }
+ }
+
+ /* If done writing payload, proceed to next state */
+ if (encoder->state_bytes_processed == encoder->frame.payload_length) {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_DONE;
+ } else {
+ /* Some more error-checking... */
+ if (encoder->state_bytes_processed > encoder->frame.payload_length) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Outgoing stream has exceeded stated payload length of %" PRIu64,
+ (void *)encoder->user_data,
+ encoder->frame.payload_length);
+ return aws_raise_error(AWS_ERROR_HTTP_OUTGOING_STREAM_LENGTH_INCORRECT);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static state_fn *s_state_functions[AWS_WEBSOCKET_ENCODER_STATE_DONE] = {
+ s_state_init,
+ s_state_opcode_byte,
+ s_state_length_byte,
+ s_state_extended_length,
+ s_state_masking_key_check,
+ s_state_masking_key,
+ s_state_payload_check,
+ s_state_payload,
+};
+
+int aws_websocket_encoder_process(struct aws_websocket_encoder *encoder, struct aws_byte_buf *out_buf) {
+
+    /* Run state machine until frame is completely encoded, or the state stops changing.
+ * Note that we don't necessarily stop looping when out_buf is full, because not all states need to output data */
+ while (encoder->state != AWS_WEBSOCKET_ENCODER_STATE_DONE) {
+ const enum aws_websocket_encoder_state prev_state = encoder->state;
+
+ int err = s_state_functions[encoder->state](encoder, out_buf);
+ if (err) {
+ return AWS_OP_ERR;
+ }
+
+ if (prev_state == encoder->state) {
+ /* dev-assert: Check that each state is doing as much work as it possibly can.
+ * Except for the PAYLOAD state, where it's up to the user to fill the buffer. */
+ AWS_ASSERT((out_buf->len == out_buf->capacity) || (encoder->state == AWS_WEBSOCKET_ENCODER_STATE_PAYLOAD));
+
+ break;
+ }
+ }
+
+ if (encoder->state == AWS_WEBSOCKET_ENCODER_STATE_DONE) {
+ encoder->state = AWS_WEBSOCKET_ENCODER_STATE_INIT;
+ encoder->is_frame_in_progress = false;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_websocket_encoder_start_frame(struct aws_websocket_encoder *encoder, const struct aws_websocket_frame *frame) {
+ /* Error-check as much as possible before accepting next frame */
+ if (encoder->is_frame_in_progress) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ /* RFC-6455 Section 5.2 contains all these rules... */
+
+ /* Opcode must fit in 4bits */
+ if (frame->opcode != (frame->opcode & 0x0F)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Outgoing frame has unknown opcode 0x%" PRIx8,
+ (void *)encoder->user_data,
+ frame->opcode);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* High bit of 8byte length must be clear */
+ if (frame->payload_length > AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Outgoing frame's payload length exceeds the max",
+ (void *)encoder->user_data);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ /* Data frames with the FIN bit clear are considered fragmented and must be followed by
+ * 1+ CONTINUATION frames, where only the final CONTINUATION frame's FIN bit is set.
+ *
+ * Control frames may be injected in the middle of a fragmented message,
+ * but control frames may not be fragmented themselves. */
+ bool keep_expecting_continuation_data_frame = encoder->expecting_continuation_data_frame;
+ if (aws_websocket_is_data_frame(frame->opcode)) {
+ bool is_continuation_frame = (AWS_WEBSOCKET_OPCODE_CONTINUATION == frame->opcode);
+
+ if (encoder->expecting_continuation_data_frame != is_continuation_frame) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: Fragmentation error. Outgoing frame starts a new message but previous message has not ended",
+ (void *)encoder->user_data);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ keep_expecting_continuation_data_frame = !frame->fin;
+ } else {
+ /* Control frames themselves MUST NOT be fragmented. */
+ if (!frame->fin) {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_WEBSOCKET,
+ "id=%p: It is illegal to send a fragmented control frame",
+ (void *)encoder->user_data);
+
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ }
+
+ /* Frame accepted */
+ encoder->frame = *frame;
+ encoder->is_frame_in_progress = true;
+ encoder->expecting_continuation_data_frame = keep_expecting_continuation_data_frame;
+
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_websocket_encoder_is_frame_in_progress(const struct aws_websocket_encoder *encoder) {
+ return encoder->is_frame_in_progress;
+}
+
+void aws_websocket_encoder_init(
+ struct aws_websocket_encoder *encoder,
+ aws_websocket_encoder_payload_fn *stream_outgoing_payload,
+ void *user_data) {
+
+ AWS_ZERO_STRUCT(*encoder);
+ encoder->user_data = user_data;
+ encoder->stream_outgoing_payload = stream_outgoing_payload;
+}
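+
+/* Minimal usage sketch for the encoder (callback signature inferred from the
+ * call site in s_state_payload(); buffer and key values are illustrative):
+ *
+ *     static int s_stream_payload(struct aws_byte_buf *out_buf, void *user_data) {
+ *         (void)user_data;
+ *         // Append payload bytes; across the whole frame, exactly
+ *         // frame.payload_length bytes must be written in total.
+ *         aws_byte_buf_write(out_buf, (const uint8_t *)"hello", 5);
+ *         return AWS_OP_SUCCESS;
+ *     }
+ *
+ *     struct aws_websocket_encoder encoder;
+ *     aws_websocket_encoder_init(&encoder, s_stream_payload, NULL);
+ *
+ *     struct aws_websocket_frame frame = {
+ *         .opcode = AWS_WEBSOCKET_OPCODE_TEXT,
+ *         .fin = true,
+ *         .masked = true,
+ *         .masking_key = {0x12, 0x34, 0x56, 0x78},
+ *         .payload_length = 5,
+ *     };
+ *     if (aws_websocket_encoder_start_frame(&encoder, &frame) == AWS_OP_SUCCESS) {
+ *         while (aws_websocket_encoder_is_frame_in_progress(&encoder)) {
+ *             // "out_buf" is a caller-owned aws_byte_buf that is flushed to the
+ *             // socket (and reset) between iterations
+ *             if (aws_websocket_encoder_process(&encoder, &out_buf)) {
+ *                 break;  // encoding error
+ *             }
+ *         }
+ *     }
+ */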
+
+uint64_t aws_websocket_frame_encoded_size(const struct aws_websocket_frame *frame) {
+ /* This is an internal function, so asserts are sufficient error handling */
+ AWS_ASSERT(frame);
+ AWS_ASSERT(frame->payload_length <= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MAX_VALUE);
+
+ /* All frames start with at least 2 bytes */
+ uint64_t total = 2;
+
+ /* If masked, add 4 bytes for masking-key */
+ if (frame->masked) {
+ total += 4;
+ }
+
+ /* If extended payload length, add 2 or 8 bytes */
+ if (frame->payload_length >= AWS_WEBSOCKET_8BYTE_EXTENDED_LENGTH_MIN_VALUE) {
+ total += 8;
+ } else if (frame->payload_length >= AWS_WEBSOCKET_2BYTE_EXTENDED_LENGTH_MIN_VALUE) {
+ total += 2;
+ }
+
+ /* Plus payload itself */
+ total += frame->payload_length;
+
+ return total;
+}
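+
+/* Worked examples of the computation above: an unmasked PING with no payload
+ * encodes to 2 bytes; a masked TEXT frame with a 5-byte payload takes
+ * 2 + 4 + 5 = 11 bytes; a masked frame with a 500-byte payload needs the
+ * 2-byte extended length, so 2 + 2 + 4 + 500 = 508 bytes. */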
diff --git a/contrib/restricted/aws/aws-c-http/ya.make b/contrib/restricted/aws/aws-c-http/ya.make
new file mode 100644
index 0000000000..766d17d996
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-http/ya.make
@@ -0,0 +1,80 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.7.6)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-http/archive/v0.7.6.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-cal
+ contrib/restricted/aws/aws-c-common
+ contrib/restricted/aws/aws-c-compression
+ contrib/restricted/aws/aws-c-io
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-http/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+)
+
+SRCS(
+ source/connection.c
+ source/connection_manager.c
+ source/connection_monitor.c
+ source/h1_connection.c
+ source/h1_decoder.c
+ source/h1_encoder.c
+ source/h1_stream.c
+ source/h2_connection.c
+ source/h2_decoder.c
+ source/h2_frames.c
+ source/h2_stream.c
+ source/hpack.c
+ source/hpack_decoder.c
+ source/hpack_encoder.c
+ source/hpack_huffman_static.c
+ source/http.c
+ source/http2_stream_manager.c
+ source/proxy_connection.c
+ source/proxy_strategy.c
+ source/random_access_set.c
+ source/request_response.c
+ source/statistics.c
+ source/strutil.c
+ source/websocket.c
+ source/websocket_bootstrap.c
+ source/websocket_decoder.c
+ source/websocket_encoder.c
+)
+
+END()
diff --git a/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h b/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h
new file mode 100644
index 0000000000..1153abb4d2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-io/include/aws/io/uri.h
@@ -0,0 +1,11 @@
+#ifndef AWS_IO_URI_H
+#define AWS_IO_URI_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/io/io.h>
+
+#include <aws/common/uri.h>
+
+#endif /* AWS_IO_URI_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..f6f7676670
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,61 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-mqtt)
+target_compile_options(restricted-aws-aws-c-mqtt PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-mqtt PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/include
+)
+target_link_libraries(restricted-aws-aws-c-mqtt PUBLIC
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-mqtt PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/packets.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
+)
diff --git a/contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..f6f7676670
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,61 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-mqtt)
+target_compile_options(restricted-aws-aws-c-mqtt PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-mqtt PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/include
+)
+target_link_libraries(restricted-aws-aws-c-mqtt PUBLIC
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-mqtt PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/packets.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
+)
diff --git a/contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..664aafb94e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,62 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-mqtt)
+target_compile_options(restricted-aws-aws-c-mqtt PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-mqtt PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/include
+)
+target_link_libraries(restricted-aws-aws-c-mqtt PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-mqtt PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/packets.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
+)
diff --git a/contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..664aafb94e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,62 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-mqtt)
+target_compile_options(restricted-aws-aws-c-mqtt PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-mqtt PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/include
+)
+target_link_libraries(restricted-aws-aws-c-mqtt PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-mqtt PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/packets.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
+)
diff --git a/contrib/restricted/aws/aws-c-mqtt/CMakeLists.txt b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-mqtt/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..f6f7676670
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,61 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-mqtt)
+target_compile_options(restricted-aws-aws-c-mqtt PRIVATE
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-mqtt PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/include
+)
+target_link_libraries(restricted-aws-aws-c-mqtt PUBLIC
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+)
+target_sources(restricted-aws-aws-c-mqtt PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/packets.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
+)
diff --git a/contrib/restricted/aws/aws-c-mqtt/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-mqtt/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..3b64466870
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-mqtt/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-mqtt/CONTRIBUTING.md
new file mode 100644
index 0000000000..809c37bec6
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-mqtt/issues), or [recently closed](https://github.com/awslabs/aws-c-mqtt/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-mqtt/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-c-mqtt/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-mqtt/LICENSE b/contrib/restricted/aws/aws-c-mqtt/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/restricted/aws/aws-c-mqtt/NOTICE b/contrib/restricted/aws/aws-c-mqtt/NOTICE
new file mode 100644
index 0000000000..5f56b8d0d8
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/NOTICE
@@ -0,0 +1,3 @@
+AWS C Mqtt
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0.
diff --git a/contrib/restricted/aws/aws-c-mqtt/README.md b/contrib/restricted/aws/aws-c-mqtt/README.md
new file mode 100644
index 0000000000..070aab53b5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/README.md
@@ -0,0 +1,208 @@
+## AWS C MQTT
+
+C99 implementation of the MQTT 3.1.1 specification.
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.1+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
+
+```
+git clone git@github.com:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone git@github.com:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
+```
+
+#### Building aws-c-mqtt and Remaining Dependencies
+
+```
+git clone git@github.com:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone git@github.com:awslabs/aws-c-cal.git
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-cal/build --target install
+
+git clone git@github.com:awslabs/aws-c-io.git
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-io/build --target install
+
+git clone git@github.com:awslabs/aws-c-compression.git
+cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-compression/build --target install
+
+git clone git@github.com:awslabs/aws-c-http.git
+cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-http/build --target install
+
+git clone git@github.com:awslabs/aws-c-mqtt.git
+cmake -S aws-c-mqtt -B aws-c-mqtt/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-mqtt/build --target install
+```
+
+### Overview
+
+This library contains an MQTT implementation that is simple and easy to use, yet powerful, and avoids
+unnecessary copies. Here is a general overview of the API:
+
+### `struct aws_mqtt_client;`
+
+`aws_mqtt_client` is meant to be created once per application to pool common resources required for opening MQTT
+connections. The instance does not need to be allocated, and may be managed by the user.
+
+```c
+int aws_mqtt_client_init(
+ struct aws_mqtt_client *client,
+ struct aws_allocator *allocator,
+ struct aws_event_loop_group *elg);
+```
+Initializes an instance of `aws_mqtt_client` with the required parameters.
+* `client` is effectively the `this` parameter.
+* `allocator` will be used to initialize the client (note that the client itself is NOT allocated).
+ *This resource must outlive `client`*.
+* `bootstrap` will be used to initiate new MQTT socket connections.
+ *This resource must outlive `client`*.
+ See [aws-c-io][aws-c-io] for more information about `aws_client_bootstrap`.
+
+```c
+void aws_mqtt_client_clean_up(struct aws_mqtt_client *client);
+```
+Cleans up a client and frees all owned resources.
+
+**NOTE**: DO NOT CALL THIS FUNCTION UNTIL ALL OUTSTANDING CONNECTIONS ARE CLOSED.
+
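+A minimal lifecycle sketch following the signatures above. The `allocator` and `elg` arguments are assumed to have been
+created already (see [aws-c-io][aws-c-io]); error handling is elided.
+
+```c
+#include <aws/mqtt/client.h>
+
+static int s_run_client(struct aws_allocator *allocator, struct aws_event_loop_group *elg) {
+    struct aws_mqtt_client client;
+    if (aws_mqtt_client_init(&client, allocator, elg)) {
+        return AWS_OP_ERR; /* aws_last_error() describes the failure */
+    }
+
+    /* ... open connections and do work ... */
+
+    /* Only once every outstanding connection has been closed: */
+    aws_mqtt_client_clean_up(&client);
+    return AWS_OP_SUCCESS;
+}
+```
+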
+### `struct aws_mqtt_client_connection;`
+
+```c
+struct aws_mqtt_client_connection *aws_mqtt_client_connection_new(
+ struct aws_mqtt_client *client,
+ struct aws_mqtt_client_connection_callbacks callbacks,
+ const struct aws_byte_cursor *host_name,
+ uint16_t port,
+ struct aws_socket_options *socket_options,
+ struct aws_tls_ctx_options *tls_options);
+```
+Allocates and initializes a new connection object (does NOT actually connect). You may use the returned object to
+configure connection parameters, and then call `aws_mqtt_client_connection_connect` to actually open the connection.
+* `client` is required in order to use an existing DNS resolver, event loop group, and allocator.
+* `callbacks` provides the connection-level (not operation level) callbacks and the userdata to be given back.
+* `host_name` lists the endpoint to connect to. This may be a DNS address or an IP address.
+ *This resource may be freed immediately after return.*
+* `port` the port to connect to on `host_name`.
+* `socket_options` describes how to open the connection.
+ See [aws-c-io][aws-c-io] for more information about `aws_socket_options`.
+* `tls_options` provides TLS credentials to connect with. Pass `NULL` to not use TLS (**NOT RECOMMENDED**).
+ See [aws-c-io][aws-c-io] for more information about `aws_tls_ctx_options`.
+
+```c
+void aws_mqtt_client_connection_destroy(struct aws_mqtt_client_connection *connection);
+```
+Destroys a connection and frees all outstanding resources.
+
+**NOTE**: DO NOT CALL THIS FUNCTION UNTIL THE CONNECTION IS CLOSED.
+
+```c
+int aws_mqtt_client_connection_set_will(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ const struct aws_byte_cursor *payload);
+```
+Sets the last will and testament to be distributed by the server upon client disconnection. Must be called before
+`aws_mqtt_client_connection_connect`. See `aws_mqtt_client_connection_publish` for information on the parameters.
+`topic` and `payload` must persist past the call to `aws_mqtt_client_connection_connect`.
+
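+For example, a brief sketch of configuring a will (the `connection` is assumed to have been created with
+`aws_mqtt_client_connection_new`; the topic and payload values are placeholders):
+
+```c
+static int s_set_offline_will(struct aws_mqtt_client_connection *connection) {
+    /* Both cursors reference string literals, so they persist past connect as required. */
+    struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("devices/thing1/status");
+    struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("offline");
+    return aws_mqtt_client_connection_set_will(
+        connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/, &payload);
+}
+```
+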
+```c
+int aws_mqtt_client_connection_set_login(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *username,
+ const struct aws_byte_cursor *password);
+```
+Sets the username and password to be sent to the server on connection. Must be called before
+`aws_mqtt_client_connection_connect`. `username` and `password` must persist past the call to
+`aws_mqtt_client_connection_connect`.
+
+```c
+int aws_mqtt_client_connection_set_reconnect_timeout(
+ struct aws_mqtt_client_connection *connection,
+ uint64_t min_timeout,
+ uint64_t max_timeout);
+```
+Sets the minimum and maximum reconnect timeouts. The time between reconnect attempts will start at min and multiply by 2
+until max is reached.
+
+```c
+int aws_mqtt_client_connection_connect(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *client_id,
+ bool clean_session,
+ uint16_t keep_alive_time);
+```
+Connects to the remote endpoint. The parameters here are set in the MQTT CONNECT packet directly. `client_id` must persist until the `on_connack` connection callback is called.
+
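+Putting the pieces above together, a sketch of configuring a connection and then connecting (placeholder values,
+error handling elided; the signatures used are the ones shown above):
+
+```c
+static int s_configure_and_connect(struct aws_mqtt_client_connection *connection) {
+    struct aws_byte_cursor username = aws_byte_cursor_from_c_str("user");
+    struct aws_byte_cursor password = aws_byte_cursor_from_c_str("secret");
+    aws_mqtt_client_connection_set_login(connection, &username, &password);
+
+    /* Back off between reconnect attempts, starting at min and doubling up to max. */
+    aws_mqtt_client_connection_set_reconnect_timeout(connection, 1, 60);
+
+    struct aws_byte_cursor client_id = aws_byte_cursor_from_c_str("my-client-id");
+    return aws_mqtt_client_connection_connect(
+        connection, &client_id, true /*clean_session*/, 30 /*keep_alive_time*/);
+}
+```
+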
+```c
+int aws_mqtt_client_connection_disconnect(struct aws_mqtt_client_connection *connection);
+```
+Closes an open connection. Does not clean up any resources; that is done by `aws_mqtt_client_connection_destroy`,
+probably from the `on_disconnected` connection callback.
+
+```c
+uint16_t aws_mqtt_client_connection_subscribe_single(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_client_publish_received_fn *on_publish,
+ void *on_publish_ud,
+ aws_mqtt_suback_single_fn *on_suback,
+ void *on_suback_ud);
+```
+Subscribes to the topic filter given with the given QoS. `on_publish` will be called whenever a packet matching
+`topic_filter` arrives. `on_suback` will be called when the SUBACK packet has been received. `topic_filter` must persist until `on_suback` is called. The packet_id of the SUBSCRIBE packet will be returned, or 0 on error.
+
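+A sketch of a subscription (here `s_on_publish` stands in for any user-supplied
+`aws_mqtt_client_publish_received_fn`; SUBACK handling is omitted for brevity):
+
+```c
+static uint16_t s_subscribe(struct aws_mqtt_client_connection *connection) {
+    struct aws_byte_cursor topic_filter = aws_byte_cursor_from_c_str("sensors/+/temperature");
+    return aws_mqtt_client_connection_subscribe_single(
+        connection, &topic_filter, AWS_MQTT_QOS_AT_LEAST_ONCE,
+        s_on_publish, NULL /*on_publish_ud*/,
+        NULL /*on_suback*/, NULL /*on_suback_ud*/);
+}
+```
+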
+```c
+uint16_t aws_mqtt_client_connection_unsubscribe(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ aws_mqtt_op_complete_fn *on_unsuback,
+ void *on_unsuback_ud);
+```
+Unsubscribes from the topic filter given. `topic_filter` must persist until `on_unsuback` is called. The packet_id of
+the UNSUBSCRIBE packet will be returned, or 0 on error.
+
+```c
+uint16_t aws_mqtt_client_connection_publish(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ const struct aws_byte_cursor *payload,
+ aws_mqtt_op_complete_fn *on_complete,
+ void *userdata);
+```
+Publishes a payload to the specified topic. For QoS 0, `on_complete` will be called as soon as the packet is sent over
+the wire. For QoS 1, as soon as PUBACK comes back. For QoS 2, PUBCOMP. `topic` and `payload` must persist until
+`on_complete`.
+
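+A sketch of a QoS 1 publish (here `s_on_puback` stands in for any user-supplied `aws_mqtt_op_complete_fn`;
+the topic and payload values are placeholders):
+
+```c
+static uint16_t s_publish_reading(struct aws_mqtt_client_connection *connection) {
+    /* Both cursors reference string literals, so they persist until on_complete as required. */
+    struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("sensors/thing1/temperature");
+    struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("{\"celsius\":21.5}");
+    return aws_mqtt_client_connection_publish(
+        connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/,
+        &payload, s_on_puback, NULL /*userdata*/);
+}
+```
+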
+```c
+int aws_mqtt_client_connection_ping(struct aws_mqtt_client_connection *connection);
+```
+Sends a PINGREQ packet to the server.
+
+[aws-c-io]: https://github.com/awslabs/aws-c-io
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/client.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/client.h
new file mode 100644
index 0000000000..f12a5c19b5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/client.h
@@ -0,0 +1,647 @@
+#ifndef AWS_MQTT_CLIENT_H
+#define AWS_MQTT_CLIENT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/hash_table.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+
+#include <aws/io/event_loop.h>
+#include <aws/io/host_resolver.h>
+
+#include <aws/mqtt/mqtt.h>
+
+/* forward declares */
+struct aws_client_bootstrap;
+struct aws_http_header;
+struct aws_http_message;
+struct aws_http_proxy_options;
+struct aws_socket_options;
+struct aws_tls_connection_options;
+
+/**
+ * Empty struct that is passed when on_connection_closed is called.
+ * Currently holds nothing but will allow expanding in the future should it be needed.
+ */
+struct on_connection_closed_data;
+
+struct aws_mqtt_client {
+ struct aws_allocator *allocator;
+ struct aws_client_bootstrap *bootstrap;
+ struct aws_ref_count ref_count;
+};
+
+struct aws_mqtt_client_connection;
+
+/**
+ * Callback called when a request roundtrip is complete (QoS0 immediately, QoS1 on PUBACK, QoS2 on PUBCOMP),
+ * whether it succeeded or not.
+ */
+typedef void(aws_mqtt_op_complete_fn)(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata);
+
+/**
+ * Called when a connection attempt is completed, either in success or error.
+ *
+ * If error code is AWS_ERROR_SUCCESS, then a CONNACK has been received from the server and return_code and
+ * session_present contain the values received. If error_code is not AWS_ERROR_SUCCESS, it refers to the internal error
+ * that occurred during connection, and return_code and session_present are invalid.
+ */
+typedef void(aws_mqtt_client_on_connection_complete_fn)(
+ struct aws_mqtt_client_connection *connection,
+ int error_code,
+ enum aws_mqtt_connect_return_code return_code,
+ bool session_present,
+ void *userdata);
+
+/* Called if the connection to the server is lost. */
+typedef void(aws_mqtt_client_on_connection_interrupted_fn)(
+ struct aws_mqtt_client_connection *connection,
+ int error_code,
+ void *userdata);
+
+/**
+ * Called if the connection to the server is closed by user request
+ * Note: Currently the "data" argument is always NULL, but this may change in the future if additional data needs to
+ * be sent.
+ */
+typedef void(aws_mqtt_client_on_connection_closed_fn)(
+ struct aws_mqtt_client_connection *connection,
+ struct on_connection_closed_data *data,
+ void *userdata);
+
+/**
+ * Called when a connection to the server is resumed
+ * (if clean_session is true, calling aws_mqtt_resubscribe_existing_topics is suggested)
+ */
+typedef void(aws_mqtt_client_on_connection_resumed_fn)(
+ struct aws_mqtt_client_connection *connection,
+ enum aws_mqtt_connect_return_code return_code,
+ bool session_present,
+ void *userdata);
+
+/**
+ * Called when a multi-topic subscription request is complete.
+ * Note: If any topic_suback's qos value is AWS_MQTT_QOS_FAILURE,
+ * then that topic subscription was rejected by the broker.
+ */
+typedef void(aws_mqtt_suback_multi_fn)(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ const struct aws_array_list *topic_subacks, /* contains aws_mqtt_topic_subscription pointers */
+ int error_code,
+ void *userdata);
+
+/**
+ * Called when a single-topic subscription request is complete.
+ * Note: If the qos value is AWS_MQTT_QOS_FAILURE,
+ * then the subscription was rejected by the broker.
+ */
+typedef void(aws_mqtt_suback_fn)(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ int error_code,
+ void *userdata);
+
+/**
+ * Called when a publish message is received.
+ *
+ * \param[in] connection The connection object
+ * \param[in] topic The information channel to which the payload data was published.
+ * \param[in] payload The payload data.
+ * \param[in] dup DUP flag. If true, this might be re-delivery of an earlier attempt to send the message.
+ * \param[in] qos Quality of Service used to deliver the message.
+ * \param[in] retain Retain flag. If true, the message was sent as a result of a new subscription being
+ * made by the client.
+ */
+typedef void(aws_mqtt_client_publish_received_fn)(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ const struct aws_byte_cursor *payload,
+ bool dup,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ void *userdata);
+
+/** Called when a connection is closed, right before any resources are deleted */
+typedef void(aws_mqtt_client_on_disconnect_fn)(struct aws_mqtt_client_connection *connection, void *userdata);
+
+/**
+ * Function to invoke when the websocket handshake request transformation completes.
+ * This function MUST be invoked or the application will soft-lock.
+ *
+ * `request` and `complete_ctx` must be the same pointers provided to the `aws_mqtt_transform_websocket_handshake_fn`.
+ * `error_code` should be AWS_ERROR_SUCCESS if transformation was successful,
+ * otherwise pass a different AWS_ERROR_X value.
+ */
+typedef void(aws_mqtt_transform_websocket_handshake_complete_fn)(
+ struct aws_http_message *request,
+ int error_code,
+ void *complete_ctx);
+
+/**
+ * Function that may transform the websocket handshake request.
+ * Called each time a websocket connection is attempted.
+ *
+ * The default request uses path "/mqtt". All required headers are present,
+ * plus the optional header "Sec-WebSocket-Protocol: mqtt".
+ *
+ * The user MUST invoke the `complete_fn` when transformation is complete or the application will soft-lock.
+ * When invoking the `complete_fn`, pass along the `request` and `complete_ctx` provided here and an error code.
+ * The error code should be AWS_ERROR_SUCCESS if transformation was successful,
+ * otherwise pass a different AWS_ERROR_X value.
+ */
+typedef void(aws_mqtt_transform_websocket_handshake_fn)(
+ struct aws_http_message *request,
+ void *user_data,
+ aws_mqtt_transform_websocket_handshake_complete_fn *complete_fn,
+ void *complete_ctx);
+
+/**
+ * Function that may accept or reject a websocket handshake response.
+ * Called each time a valid websocket connection is established.
+ *
+ * All required headers have been checked already (ex: "Sec-Websocket-Accept").
+ *
+ * Return AWS_OP_SUCCESS to accept the connection or AWS_OP_ERR to stop the connection attempt.
+ */
+typedef int aws_mqtt_validate_websocket_handshake_fn(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_http_header *header_array,
+ size_t num_headers,
+ void *userdata);
+
+/** Passed to subscribe() and suback callbacks */
+struct aws_mqtt_topic_subscription {
+ struct aws_byte_cursor topic;
+ enum aws_mqtt_qos qos;
+
+ aws_mqtt_client_publish_received_fn *on_publish;
+ aws_mqtt_userdata_cleanup_fn *on_cleanup;
+ void *on_publish_ud;
+};
+
+/**
+ * host_name The server name to connect to. This resource may be freed immediately on return.
+ * port The port on the server to connect to
+ * client_id The clientid to place in the CONNECT packet.
+ * socket_options The socket options to pass to the aws_client_bootstrap functions.
+ * This is copied into the connection
+ * tls_options TLS settings to use when opening a connection.
+ * This is copied into the connection
+ * Pass NULL to connect without TLS (NOT RECOMMENDED)
+ * clean_session True to discard all server session data and start fresh
+ * keep_alive_time_secs The keep alive value to place in the CONNECT packet; a PING will automatically
+ * be sent at this interval as well. If you specify 0, defaults will be used
+ * and a ping will be sent once per 20 minutes.
+ * This duration must be longer than ping_timeout_ms.
+ * ping_timeout_ms Network connection is re-established if a ping response is not received
+ * within this amount of time (milliseconds). If you specify 0, a default value of 3 seconds
+ * is used. Alternatively, TCP keep-alive may be a way to accomplish this in a more efficient
+ * (low-power) scenario, but keep-alive options may not work the same way on every platform
+ * and OS version. This duration must be shorter than keep_alive_time_secs.
+ * protocol_operation_timeout_ms
+ * Timeout when waiting for the response to an operation that requires a response by protocol.
+ * Set to zero to disable timeout. Otherwise, the operation will fail with error
+ * AWS_ERROR_MQTT_TIMEOUT if no response is received within this amount of time after
+ * the packet is written to the socket. The timer is reset if the connection is interrupted.
+ * It currently applies to PUBLISH (QoS>0) and UNSUBSCRIBE.
+ * Note: While the MQTT 3 specification states that a broker MUST respond,
+ * some brokers are known to ignore publish packets in exceptional circumstances
+ * (e.g. AWS IoT Core will not respond if the publish quota is exceeded).
+ * on_connection_complete The callback to fire when the connection attempt completes
+ * user_data Passed to the userdata param of on_connection_complete
+ */
+struct aws_mqtt_connection_options {
+ struct aws_byte_cursor host_name;
+ uint16_t port;
+ struct aws_socket_options *socket_options;
+ struct aws_tls_connection_options *tls_options;
+ struct aws_byte_cursor client_id;
+ uint16_t keep_alive_time_secs;
+ uint32_t ping_timeout_ms;
+ uint32_t protocol_operation_timeout_ms;
+ aws_mqtt_client_on_connection_complete_fn *on_connection_complete;
+ void *user_data;
+ bool clean_session;
+};
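+/*
+ * Illustrative sketch (placeholder values; the socket/TLS options and the completion
+ * callback are assumed to be set up elsewhere):
+ *
+ *     struct aws_mqtt_connection_options options = {
+ *         .host_name = aws_byte_cursor_from_c_str("broker.example.com"),
+ *         .port = 8883,
+ *         .socket_options = &socket_options,
+ *         .tls_options = &tls_connection_options,
+ *         .client_id = aws_byte_cursor_from_c_str("my-client-id"),
+ *         .keep_alive_time_secs = 30,
+ *         .ping_timeout_ms = 3000,
+ *         .protocol_operation_timeout_ms = 0,
+ *         .on_connection_complete = s_on_connection_complete,
+ *         .user_data = NULL,
+ *         .clean_session = true,
+ *     };
+ *     aws_mqtt_client_connection_connect(connection, &options);
+ */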
+
+/**
+ * Contains some simple statistics about the current state of the connection's queue of operations
+ */
+struct aws_mqtt_connection_operation_statistics {
+ /**
+ * total number of operations submitted to the connection that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ uint64_t incomplete_operation_count;
+
+ /**
+ * total packet size of operations submitted to the connection that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ uint64_t incomplete_operation_size;
+
+ /**
+ * total number of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ uint64_t unacked_operation_count;
+
+ /**
+ * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ uint64_t unacked_operation_size;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates an instance of aws_mqtt_client.
+ *
+ * \param[in] allocator The allocator the client will use for all future allocations
+ * \param[in] bootstrap The client bootstrap to use to initiate new socket connections
+ *
+ * \returns a new instance of an aws_mqtt_client if successful, NULL otherwise
+ */
+AWS_MQTT_API
+struct aws_mqtt_client *aws_mqtt_client_new(struct aws_allocator *allocator, struct aws_client_bootstrap *bootstrap);
+
+/**
+ * Increments the ref count to an mqtt client, allowing the caller to take a reference to it
+ *
+ * \param[in] client The client to increment the ref count on
+ *
+ * \returns the mqtt client
+ */
+AWS_MQTT_API
+struct aws_mqtt_client *aws_mqtt_client_acquire(struct aws_mqtt_client *client);
+
+/**
+ * Decrements the ref count on an mqtt client. If the ref count drops to zero, the client is cleaned up.
+ *
+ * \param[in] client The client to release a ref count on
+ */
+AWS_MQTT_API
+void aws_mqtt_client_release(struct aws_mqtt_client *client);
+
+/**
+ * Spawns a new connection object.
+ *
+ * \param[in] client The client to spawn the connection from
+ *
+ * \returns a new mqtt connection on success, NULL otherwise
+ */
+AWS_MQTT_API
+struct aws_mqtt_client_connection *aws_mqtt_client_connection_new(struct aws_mqtt_client *client);
+
+/**
+ * Increments the ref count to an mqtt client connection, allowing the caller to take a reference to it
+ *
+ * \param[in] connection The connection object
+ *
+ * \returns the mqtt connection
+ */
+AWS_MQTT_API
+struct aws_mqtt_client_connection *aws_mqtt_client_connection_acquire(struct aws_mqtt_client_connection *connection);
+
+/**
+ * Decrements the ref count on an mqtt connection. If the ref count drops to zero, the connection is cleaned up.
+ * Note: cannot call this with the lock held, since it will start the destroy process and cause a deadlock.
+ *
+ * \param[in] connection The connection object
+ */
+AWS_MQTT_API
+void aws_mqtt_client_connection_release(struct aws_mqtt_client_connection *connection);
+
+/**
+ * Sets the will message to send with the CONNECT packet.
+ *
+ * \param[in] connection The connection object
+ * \param[in] topic The topic to publish the will on
+ * \param[in] qos The QoS to publish the will with
+ * \param[in] retain The retain flag to publish the will with
+ * \param[in] payload The data of the will message
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_will(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ const struct aws_byte_cursor *payload);
+
+/**
+ * Sets the username and/or password to send with the CONNECT packet.
+ *
+ * \param[in] connection The connection object
+ * \param[in] username The username to connect with
+ * \param[in] password [optional] The password to connect with
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_login(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *username,
+ const struct aws_byte_cursor *password);
+
+/**
+ * Use MQTT over websockets when connecting.
+ * Requires the MQTT_WITH_WEBSOCKETS build option.
+ *
+ * In this scenario, an HTTP connection is established, which is then upgraded to a websocket connection,
+ * which is then used to send MQTT data.
+ *
+ * \param[in] connection The connection object.
+ * \param[in] transformer [optional] Function that may transform the websocket handshake request.
+ * See `aws_mqtt_transform_websocket_handshake_fn` for more info.
+ * \param[in] transformer_ud [optional] Userdata for request_transformer.
+ * \param[in] validator [optional] Function that may reject the websocket handshake response.
+ * \param[in] validator_ud [optional] Userdata for response_validator.
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_use_websockets(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_transform_websocket_handshake_fn *transformer,
+ void *transformer_ud,
+ aws_mqtt_validate_websocket_handshake_fn *validator,
+ void *validator_ud);
+
+/**
+ * Set http proxy options for the connection.
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_http_proxy_options(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_http_proxy_options *proxy_options);
+
+/**
+ * Sets the minimum and maximum reconnect timeouts.
+ *
+ * The time between reconnect attempts will start at min and multiply by 2 until max is reached.
+ *
+ * \param[in] connection The connection object
+ * \param[in] min_timeout The timeout to start with
+ * \param[in] max_timeout The highest allowable wait time between reconnect attempts
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_reconnect_timeout(
+ struct aws_mqtt_client_connection *connection,
+ uint64_t min_timeout,
+ uint64_t max_timeout);
+
+/**
+ * Sets the callbacks to call when a connection is interrupted and resumed.
+ *
+ * \param[in] connection The connection object
+ * \param[in] on_interrupted The function to call when a connection is lost
+ * \param[in] on_interrupted_ud Userdata for on_interrupted
+ * \param[in] on_resumed The function to call when a connection is resumed
+ (if clean_session is true, calling aws_mqtt_resubscribe_existing_topics is suggested)
+ * \param[in] on_resumed_ud Userdata for on_resumed
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_connection_interruption_handlers(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_interrupted_fn *on_interrupted,
+ void *on_interrupted_ud,
+ aws_mqtt_client_on_connection_resumed_fn *on_resumed,
+ void *on_resumed_ud);
+
+/**
+ * Sets the callback to call when the connection is closed normally by user request.
+ * This is different than the connection interrupted or lost, this only covers successful
+ * closure.
+ *
+ * \param[in] connection The connection object
+ * \param[in] on_closed The function to call when a connection is closed
+ * \param[in] on_closed_ud Userdata for on_closed
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_connection_closed_handler(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_closed_fn *on_closed,
+ void *on_closed_ud);
+
+/**
+ * Sets the callback to call whenever ANY publish packet is received. Only safe to set when connection is not connected.
+ *
+ * \param[in] connection The connection object
+ * \param[in] on_any_publish The function to call when a publish is received (pass NULL to unset)
+ * \param[in] on_any_publish_ud Userdata for on_any_publish
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_set_on_any_publish_handler(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_publish_received_fn *on_any_publish,
+ void *on_any_publish_ud);
+
+/**
+ * Opens the actual connection defined by aws_mqtt_client_connection_new.
+ * Once the connection is opened, on_connack will be called. May only be called while the connection is disconnected.
+ *
+ * \param[in] connection The connection object
+ * \param[in] connection_options Configuration information for the connection attempt
+ *
+ * \returns AWS_OP_SUCCESS if the connection has been successfully initiated,
+ * otherwise AWS_OP_ERR and aws_last_error() will be set.
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_connect(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_mqtt_connection_options *connection_options);
+
+/**
+ * DEPRECATED
+ * Opens the actual connection defined by aws_mqtt_client_connection_new.
+ * Once the connection is opened, on_connack will be called.
+ *
+ * Must be called on a connection that has previously been open,
+ * as the parameters passed during the last connection will be reused.
+ *
+ * \param[in] connection The connection object
+ * \param[in] on_connection_complete The callback to fire when the connection attempt completes
+ * \param[in] userdata (nullable) Passed to the userdata param of on_connection_complete
+ *
+ * \returns AWS_OP_SUCCESS if the connection has been successfully initiated,
+ * otherwise AWS_OP_ERR and aws_last_error() will be set.
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_reconnect(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_complete_fn *on_connection_complete,
+ void *userdata);
+
+/**
+ * Closes the connection asynchronously, calls the on_disconnect callback.
+ * All uncompleted requests (publish/subscribe/unsubscribe) will be cancelled, regardless of the status of
+ * clean_session. DISCONNECT packet will be sent, which deletes the will message from server.
+ *
+ * \param[in] connection The connection to close
+ * \param[in] on_disconnect (nullable) Callback function to invoke when the connection is completely disconnected.
+ * \param[in] userdata (nullable) passed to on_disconnect
+ *
+ * \returns AWS_OP_SUCCESS if the connection is open and is being shutdown,
+ * otherwise AWS_OP_ERR and aws_last_error() is set.
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_disconnect(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_disconnect_fn *on_disconnect,
+ void *userdata);
+
+/**
+ * Subscribe to topic filters. on_publish will be called when a PUBLISH matching each topic_filter is received.
+ *
+ * \param[in] connection The connection to subscribe on
+ * \param[in] topic_filters An array_list of aws_mqtt_topic_subscription (NOT pointers) describing the requests.
+ * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription
+ * is complete. Broker may fail one of the topics, check the qos in
+ * aws_mqtt_topic_subscription from the callback
+ * \param[in] on_suback_ud (nullable) Passed to on_suback
+ *
+ * \returns The packet id of the subscribe packet if successfully sent, otherwise 0.
+ */
+AWS_MQTT_API
+uint16_t aws_mqtt_client_connection_subscribe_multiple(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_array_list *topic_filters,
+ aws_mqtt_suback_multi_fn *on_suback,
+ void *on_suback_ud);
+
+/**
+ * Subscribe to a single topic filter. on_publish will be called when a PUBLISH matching topic_filter is received.
+ *
+ * \param[in] connection The connection to subscribe on
+ * \param[in] topic_filter The topic filter to subscribe on. This resource must persist until on_suback.
+ * \param[in] qos The maximum QoS of messages to receive
+ * \param[in] on_publish (nullable) Called when a PUBLISH packet matching topic_filter is received
+ * \param[in] on_publish_ud (nullable) Passed to on_publish
+ * \param[in] on_ud_cleanup (nullable) Called when a subscription is removed, on_publish_ud is passed.
+ * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription is
+ * complete
+ * \param[in] on_suback_ud (nullable) Passed to on_suback
+ *
+ * \returns The packet id of the subscribe packet if successfully sent, otherwise 0.
+ */
+AWS_MQTT_API
+uint16_t aws_mqtt_client_connection_subscribe(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_client_publish_received_fn *on_publish,
+ void *on_publish_ud,
+ aws_mqtt_userdata_cleanup_fn *on_ud_cleanup,
+ aws_mqtt_suback_fn *on_suback,
+ void *on_suback_ud);
+
+/**
+ * Subscribe to a single topic filter WITHOUT sending a SUBSCRIBE packet.
+ * This is useful if you expect the broker to send PUBLISHes to the client without the client first subscribing.
+ * on_publish will be called when a PUBLISH matching topic_filter is received.
+ *
+ * \param[in] connection The connection to subscribe on
+ * \param[in] topic_filter The topic filter to subscribe on. This resource must persist until on_suback.
+ * \param[in] on_publish (nullable) Called when a PUBLISH packet matching topic_filter is received
+ * \param[in] on_publish_ud (nullable) Passed to on_publish
+ * \param[in] on_ud_cleanup (nullable) Called when a subscription is removed, on_publish_ud is passed.
+ * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription is
+ * complete
+ * \param[in] on_suback_ud (nullable) Passed to on_suback
+ *
+ * \returns The "packet id" of the operation if successfully initiated, otherwise 0.
+ */
+AWS_MQTT_API
+uint16_t aws_mqtt_client_connection_subscribe_local(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ aws_mqtt_client_publish_received_fn *on_publish,
+ void *on_publish_ud,
+ aws_mqtt_userdata_cleanup_fn *on_ud_cleanup,
+ aws_mqtt_suback_fn *on_suback,
+ void *on_suback_ud);
+
+/**
+ * Resubscribe to all topics currently subscribed to. This is to help when resuming a connection with a clean session.
+ *
+ * \param[in] connection The connection to subscribe on
+ * \param[in] on_suback (nullable) Called when a SUBACK has been received from the server and the subscription is
+ * complete
+ * \param[in] on_suback_ud (nullable) Passed to on_suback
+ *
+ * \returns The packet id of the subscribe packet if successfully sent, otherwise 0 (and aws_last_error() will be set).
+ */
+AWS_MQTT_API
+uint16_t aws_mqtt_resubscribe_existing_topics(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_suback_multi_fn *on_suback,
+ void *on_suback_ud);
+
+/**
+ * Unsubscribe from a topic filter.
+ *
+ * \param[in] connection The connection to unsubscribe on
+ * \param[in] topic_filter The topic filter to unsubscribe from. This resource must persist until on_unsuback.
+ * \param[in] on_unsuback (nullable) Called when an UNSUBACK has been received from the server and the subscription
+ * is removed
+ * \param[in] on_unsuback_ud (nullable) Passed to on_unsuback
+ *
+ * \returns The packet id of the unsubscribe packet if successfully sent, otherwise 0.
+ */
+AWS_MQTT_API
+uint16_t aws_mqtt_client_connection_unsubscribe(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ aws_mqtt_op_complete_fn *on_unsuback,
+ void *on_unsuback_ud);
+
+/**
+ * Send a PUBLISH packet over connection.
+ *
+ * \param[in] connection The connection to publish on
+ * \param[in] topic The topic to publish on
+ * \param[in] qos The requested QoS of the packet
+ * \param[in] retain True to have the server save the packet, and send to all new subscriptions matching topic
+ * \param[in] payload The data to send as the payload of the publish
+ * \param[in] on_complete (nullable) For QoS 0, called as soon as the packet is sent
+ * For QoS 1, called when PUBACK is received
+ * For QoS 2, called when PUBCOMP is received
+ * \param[in] user_data (nullable) Passed to on_complete
+ *
+ * \returns The packet id of the publish packet if successfully sent, otherwise 0.
+ */
+AWS_MQTT_API
+uint16_t aws_mqtt_client_connection_publish(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ const struct aws_byte_cursor *payload,
+ aws_mqtt_op_complete_fn *on_complete,
+ void *userdata);
+
+/**
+ * Queries the connection's internal statistics for incomplete/unacked operations.
+ * \param connection connection to get statistics for
+ * \param stats set of incomplete/unacked operation statistics
+ * \returns AWS_OP_SUCCESS if getting the operation statistics was successful, AWS_OP_ERR otherwise
+ */
+AWS_MQTT_API
+int aws_mqtt_client_connection_get_stats(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_mqtt_connection_operation_statistics *stats);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_CLIENT_H */
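
For orientation, a minimal usage sketch of the public connection API declared above. This is a hedged example, not part of the patch: it assumes a connection that has already been created and connected, leaves every nullable callback NULL, and uses an illustrative helper name and topic strings.

    #include <aws/mqtt/client.h>

    /* Sketch: subscribe, publish and disconnect on an already-connected connection.
     * Per the docs above, the topic filter must stay valid until on_suback fires;
     * here the cursors point at string literals. A packet id of 0 means failure. */
    static int s_demo_publish_and_disconnect(struct aws_mqtt_client_connection *connection) {
        struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("sample/topic");
        struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("hello");

        uint16_t sub_id = aws_mqtt_client_connection_subscribe(
            connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, NULL, NULL, NULL, NULL, NULL);
        if (sub_id == 0) {
            return AWS_OP_ERR;
        }

        uint16_t pub_id = aws_mqtt_client_connection_publish(
            connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false /*retain*/, &payload, NULL, NULL);
        if (pub_id == 0) {
            return AWS_OP_ERR;
        }

        /* Asynchronous shutdown; the on_disconnect callback is optional. */
        return aws_mqtt_client_connection_disconnect(connection, NULL, NULL);
    }
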
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/exports.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/exports.h
new file mode 100644
index 0000000000..d87f4760f0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/exports.h
@@ -0,0 +1,27 @@
+#ifndef AWS_MQTT_EXPORTS_H
+#define AWS_MQTT_EXPORTS_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_MQTT_USE_IMPORT_EXPORT
+# ifdef AWS_MQTT_EXPORTS
+# define AWS_MQTT_API __declspec(dllexport)
+# else
+# define AWS_MQTT_API __declspec(dllimport)
+# endif /* AWS_MQTT_EXPORTS */
+# else
+# define AWS_MQTT_API
+# endif /* USE_IMPORT_EXPORT */
+
+#else /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_MQTT_USE_IMPORT_EXPORT) && defined(AWS_MQTT_EXPORTS)
+# define AWS_MQTT_API __attribute__((visibility("default")))
+# else
+# define AWS_MQTT_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32) */
+
+#endif /* AWS_MQTT_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/mqtt.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/mqtt.h
new file mode 100644
index 0000000000..22a63ce225
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/mqtt.h
@@ -0,0 +1,120 @@
+#ifndef AWS_MQTT_MQTT_H
+#define AWS_MQTT_MQTT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/logging.h>
+
+#include <aws/mqtt/exports.h>
+
+#define AWS_C_MQTT_PACKAGE_ID 5
+
+/* Quality of Service associated with a publish action or subscription [MQTT-4.3]. */
+enum aws_mqtt_qos {
+ AWS_MQTT_QOS_AT_MOST_ONCE = 0x0,
+ AWS_MQTT_QOS_AT_LEAST_ONCE = 0x1,
+ AWS_MQTT_QOS_EXACTLY_ONCE = 0x2,
+ /* reserved = 3 */
+ AWS_MQTT_QOS_FAILURE = 0x80, /* Only used in SUBACK packets */
+};
+
+/* Result of a connect request [MQTT-3.2.2.3]. */
+enum aws_mqtt_connect_return_code {
+ AWS_MQTT_CONNECT_ACCEPTED,
+ AWS_MQTT_CONNECT_UNACCEPTABLE_PROTOCOL_VERSION,
+ AWS_MQTT_CONNECT_IDENTIFIER_REJECTED,
+ AWS_MQTT_CONNECT_SERVER_UNAVAILABLE,
+ AWS_MQTT_CONNECT_BAD_USERNAME_OR_PASSWORD,
+ AWS_MQTT_CONNECT_NOT_AUTHORIZED,
+ /* reserved = 6 - 255 */
+};
+
+enum aws_mqtt_error {
+ AWS_ERROR_MQTT_INVALID_RESERVED_BITS = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_MQTT_PACKAGE_ID),
+ AWS_ERROR_MQTT_BUFFER_TOO_BIG,
+ AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH,
+ AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME,
+ AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_LEVEL,
+ AWS_ERROR_MQTT_INVALID_CREDENTIALS,
+ AWS_ERROR_MQTT_INVALID_QOS,
+ AWS_ERROR_MQTT_INVALID_PACKET_TYPE,
+ AWS_ERROR_MQTT_INVALID_TOPIC,
+ AWS_ERROR_MQTT_TIMEOUT,
+ AWS_ERROR_MQTT_PROTOCOL_ERROR,
+ AWS_ERROR_MQTT_NOT_CONNECTED,
+ AWS_ERROR_MQTT_ALREADY_CONNECTED,
+ AWS_ERROR_MQTT_BUILT_WITHOUT_WEBSOCKETS,
+ AWS_ERROR_MQTT_UNEXPECTED_HANGUP,
+ AWS_ERROR_MQTT_CONNECTION_SHUTDOWN,
+ AWS_ERROR_MQTT_CONNECTION_DESTROYED,
+ AWS_ERROR_MQTT_CONNECTION_DISCONNECTING,
+ AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION,
+ AWS_ERROR_MQTT_QUEUE_FULL,
+ AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION,
+ AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION,
+ AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION,
+ AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION,
+ AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION,
+ AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION,
+ AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION,
+ AWS_ERROR_MQTT5_PACKET_VALIDATION,
+ AWS_ERROR_MQTT5_ENCODE_FAILURE,
+ AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR,
+ AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED,
+ AWS_ERROR_MQTT5_CONNACK_TIMEOUT,
+ AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT,
+ AWS_ERROR_MQTT5_USER_REQUESTED_STOP,
+ AWS_ERROR_MQTT5_DISCONNECT_RECEIVED,
+ AWS_ERROR_MQTT5_CLIENT_TERMINATED,
+ AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY,
+ AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE,
+ AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE,
+ AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS,
+ AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS,
+ AWS_ERROR_MQTT5_INVALID_UTF8_STRING,
+
+ AWS_ERROR_END_MQTT_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_MQTT_PACKAGE_ID),
+};
+
+enum aws_mqtt_log_subject {
+ AWS_LS_MQTT_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_MQTT_PACKAGE_ID),
+ AWS_LS_MQTT_CLIENT,
+ AWS_LS_MQTT_TOPIC_TREE,
+ AWS_LS_MQTT5_GENERAL,
+ AWS_LS_MQTT5_CLIENT,
+ AWS_LS_MQTT5_CANARY,
+};
+
+/** Function called on cleanup of a userdata. */
+typedef void(aws_mqtt_userdata_cleanup_fn)(void *userdata);
+
+AWS_EXTERN_C_BEGIN
+
+AWS_MQTT_API
+bool aws_mqtt_is_valid_topic(const struct aws_byte_cursor *topic);
+AWS_MQTT_API
+bool aws_mqtt_is_valid_topic_filter(const struct aws_byte_cursor *topic_filter);
+
+/**
+ * Initializes internal datastructures used by aws-c-mqtt.
+ * Must be called before using any functionality in aws-c-mqtt.
+ */
+AWS_MQTT_API
+void aws_mqtt_library_init(struct aws_allocator *allocator);
+
+/**
+ * Shuts down the internal datastructures used by aws-c-mqtt.
+ */
+AWS_MQTT_API
+void aws_mqtt_library_clean_up(void);
+
+AWS_MQTT_API
+void aws_mqtt_fatal_assert_library_initialized(void);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT_H */
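
As a rough sketch of how the functions above fit together (an assumption about typical usage, not part of the patch): initialize the library once, validate topics and filters, and clean up on shutdown. aws_default_allocator() comes from aws-c-common.

    #include <aws/common/allocator.h>
    #include <aws/mqtt/mqtt.h>

    int main(void) {
        struct aws_allocator *allocator = aws_default_allocator();
        aws_mqtt_library_init(allocator);

        /* Filters may contain wildcards; concrete topics may not. */
        struct aws_byte_cursor filter = aws_byte_cursor_from_c_str("sensors/+/temperature");
        struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("sensors/room1/temperature");
        bool ok = aws_mqtt_is_valid_topic_filter(&filter) && aws_mqtt_is_valid_topic(&topic);

        aws_mqtt_library_clean_up();
        return ok ? 0 : 1;
    }
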
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/client_impl.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/client_impl.h
new file mode 100644
index 0000000000..6bdfe749c9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/client_impl.h
@@ -0,0 +1,399 @@
+#ifndef AWS_MQTT_PRIVATE_CLIENT_IMPL_H
+#define AWS_MQTT_PRIVATE_CLIENT_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/client.h>
+
+#include <aws/mqtt/private/fixed_header.h>
+#include <aws/mqtt/private/topic_tree.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+#include <aws/common/task_scheduler.h>
+
+#include <aws/io/channel.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/message_pool.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+
+#define MQTT_CLIENT_CALL_CALLBACK(client_ptr, callback) \
+ do { \
+ if ((client_ptr)->callback) { \
+ (client_ptr)->callback((client_ptr), (client_ptr)->callback##_ud); \
+ } \
+ } while (false)
+#define MQTT_CLIENT_CALL_CALLBACK_ARGS(client_ptr, callback, ...) \
+ do { \
+ if ((client_ptr)->callback) { \
+ (client_ptr)->callback((client_ptr), __VA_ARGS__, (client_ptr)->callback##_ud); \
+ } \
+ } while (false)
+
+#if ASSERT_LOCK_HELD
+# define ASSERT_SYNCED_DATA_LOCK_HELD(object) \
+ { \
+ int cached_error = aws_last_error(); \
+ AWS_ASSERT(aws_mutex_try_lock(&(object)->synced_data.lock) == AWS_OP_ERR); \
+ aws_raise_error(cached_error); \
+ }
+#else
+# define ASSERT_SYNCED_DATA_LOCK_HELD(object)
+#endif
+
+enum aws_mqtt_client_connection_state {
+ AWS_MQTT_CLIENT_STATE_CONNECTING,
+ AWS_MQTT_CLIENT_STATE_CONNECTED,
+ AWS_MQTT_CLIENT_STATE_RECONNECTING,
+ AWS_MQTT_CLIENT_STATE_DISCONNECTING,
+ AWS_MQTT_CLIENT_STATE_DISCONNECTED,
+};
+
+enum aws_mqtt_client_request_state {
+ AWS_MQTT_CLIENT_REQUEST_ONGOING,
+ AWS_MQTT_CLIENT_REQUEST_COMPLETE,
+ AWS_MQTT_CLIENT_REQUEST_ERROR,
+};
+
+/**
+ * Contains some simple statistics about the current state of the connection's queue of operations
+ */
+struct aws_mqtt_connection_operation_statistics_impl {
+ /**
+ * total number of operations submitted to the connection that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ struct aws_atomic_var incomplete_operation_count_atomic;
+
+ /**
+ * total packet size of operations submitted to the connection that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ struct aws_atomic_var incomplete_operation_size_atomic;
+
+ /**
+ * total number of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ struct aws_atomic_var unacked_operation_count_atomic;
+
+ /**
+ * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ struct aws_atomic_var unacked_operation_size_atomic;
+};
+
+/**
+ * Called when the request packet is first attempted to be sent, with is_first_attempt set to true,
+ * or called after the timeout if a matching ack packet hasn't arrived, with is_first_attempt set to false.
+ * Return AWS_MQTT_CLIENT_REQUEST_ONGOING to check on the task later.
+ * Return AWS_MQTT_CLIENT_REQUEST_COMPLETE to consider the request complete.
+ * Return AWS_MQTT_CLIENT_REQUEST_ERROR to cancel the task and report an error to the caller.
+ */
+typedef enum aws_mqtt_client_request_state(
+ aws_mqtt_send_request_fn)(uint16_t packet_id, bool is_first_attempt, void *userdata);
+
+/**
+ * Called when the operation statistics change.
+ */
+typedef void(aws_mqtt_on_operation_statistics_fn)(struct aws_mqtt_client_connection *connection, void *userdata);
+
+/* Flags that indicate the way in which an operation is currently affecting the statistics of the connection */
+enum aws_mqtt_operation_statistic_state_flags {
+ /* The operation is not affecting the connection's statistics at all */
+ AWS_MQTT_OSS_NONE = 0,
+
+ /* The operation is affecting the connection's "incomplete operation" statistics */
+ AWS_MQTT_OSS_INCOMPLETE = 1 << 0,
+
+ /* The operation is affecting the connection's "unacked operation" statistics */
+ AWS_MQTT_OSS_UNACKED = 1 << 1,
+};
+
+struct aws_mqtt_request {
+ struct aws_linked_list_node list_node;
+
+ struct aws_allocator *allocator;
+ struct aws_mqtt_client_connection *connection;
+
+ struct aws_channel_task outgoing_task;
+
+ /* How this operation is currently affecting the statistics of the connection */
+ enum aws_mqtt_operation_statistic_state_flags statistic_state_flags;
+ /* The encoded size of the packet - used for operation statistics tracking */
+ uint64_t packet_size;
+
+ uint16_t packet_id;
+ bool retryable;
+ bool initiated;
+ aws_mqtt_send_request_fn *send_request;
+ void *send_request_ud;
+ aws_mqtt_op_complete_fn *on_complete;
+ void *on_complete_ud;
+};
+
+struct aws_mqtt_reconnect_task {
+ struct aws_task task;
+ struct aws_atomic_var connection_ptr;
+ struct aws_allocator *allocator;
+};
+
+/* The lifetime of this struct is from subscribe -> suback */
+struct subscribe_task_arg {
+
+ struct aws_mqtt_client_connection *connection;
+
+ /* list of pointer of subscribe_task_topics */
+ struct aws_array_list topics;
+
+ /* Packet to populate */
+ struct aws_mqtt_packet_subscribe subscribe;
+
+ /* true if the transaction was committed to the topic tree; false means a retry is required */
+ bool tree_updated;
+
+ struct {
+ aws_mqtt_suback_multi_fn *multi;
+ aws_mqtt_suback_fn *single;
+ } on_suback;
+ void *on_suback_ud;
+};
+
+/* The lifetime of this struct is the same as the lifetime of the subscription */
+struct subscribe_task_topic {
+ struct aws_mqtt_client_connection *connection;
+
+ struct aws_mqtt_topic_subscription request;
+ struct aws_string *filter;
+ bool is_local;
+
+ struct aws_ref_count ref_count;
+};
+
+struct aws_mqtt_client_connection {
+
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+ struct aws_mqtt_client *client;
+
+ /* Channel handler information */
+ struct aws_channel_handler handler;
+ struct aws_channel_slot *slot;
+
+ /* The host information, changed by user when state is AWS_MQTT_CLIENT_STATE_DISCONNECTED */
+ struct aws_string *host_name;
+ uint16_t port;
+ struct aws_tls_connection_options tls_options;
+ struct aws_socket_options socket_options;
+ struct aws_http_proxy_config *http_proxy_config;
+ struct aws_event_loop *loop;
+
+ /* Connect parameters */
+ struct aws_byte_buf client_id;
+ bool clean_session;
+ uint16_t keep_alive_time_secs;
+ uint64_t ping_timeout_ns;
+ uint64_t operation_timeout_ns;
+ struct aws_string *username;
+ struct aws_string *password;
+ struct {
+ struct aws_byte_buf topic;
+ enum aws_mqtt_qos qos;
+ bool retain;
+ struct aws_byte_buf payload;
+ } will;
+ struct {
+ uint64_t current_sec; /* seconds */
+ uint64_t min_sec; /* seconds */
+ uint64_t max_sec; /* seconds */
+
+ /*
+ * Invariant: this is always zero except when the current MQTT channel has received a successful connack
+ * and is not yet shutdown. During that interval, it is the timestamp the connack was received.
+ */
+ uint64_t channel_successful_connack_timestamp_ns;
+ } reconnect_timeouts;
+
+ /* User connection callbacks */
+ aws_mqtt_client_on_connection_complete_fn *on_connection_complete;
+ void *on_connection_complete_ud;
+ aws_mqtt_client_on_connection_interrupted_fn *on_interrupted;
+ void *on_interrupted_ud;
+ aws_mqtt_client_on_connection_resumed_fn *on_resumed;
+ void *on_resumed_ud;
+ aws_mqtt_client_on_connection_closed_fn *on_closed;
+ void *on_closed_ud;
+ aws_mqtt_client_publish_received_fn *on_any_publish;
+ void *on_any_publish_ud;
+ aws_mqtt_client_on_disconnect_fn *on_disconnect;
+ void *on_disconnect_ud;
+ aws_mqtt_on_operation_statistics_fn *on_any_operation_statistics;
+ void *on_any_operation_statistics_ud;
+
+ /* Connection tasks. */
+ struct aws_mqtt_reconnect_task *reconnect_task;
+ struct aws_channel_task ping_task;
+
+ /**
+ * Number of times this connection has successfully CONNACK-ed, used
+ * to ensure on_connection_completed is sent on the first completed
+ * CONNECT/CONNACK cycle
+ */
+ size_t connection_count;
+ bool use_tls; /* Only used by main thread */
+
+ /* Only the event-loop thread may touch this data */
+ struct {
+ /* If an incomplete packet arrives, store the data here. */
+ struct aws_byte_buf pending_packet;
+
+ bool waiting_on_ping_response;
+
+ /* Keeps track of all open subscriptions */
+ /* TODO: The subscriptions live with the connection object. So if the connection disconnects from one
+ * endpoint and connects to another endpoint, the subscription tree will still be the same as before. */
+ struct aws_mqtt_topic_tree subscriptions;
+
+ /**
+ * List of all requests waiting for response.
+ */
+ struct aws_linked_list ongoing_requests_list;
+ } thread_data;
+
+ /* Any thread may touch this data, but the lock must be held (unless it's an atomic) */
+ struct {
+ /* Note: never fire a user callback while holding the lock. */
+ struct aws_mutex lock;
+
+ /* The state of the connection */
+ enum aws_mqtt_client_connection_state state;
+
+ /**
+ * Memory pool for all aws_mqtt_request.
+ */
+ struct aws_memory_pool requests_pool;
+
+ /**
+ * Stores all requests that are not completed, including the pending requests.
+ *
+ * hash table from uint16_t (packet_id) to aws_mqtt_outstanding_request
+ */
+ struct aws_hash_table outstanding_requests_table;
+
+ /**
+ * List of all requests that cannot be scheduled until the connection comes online.
+ */
+ struct aws_linked_list pending_requests_list;
+
+ /**
+ * Remember the last packet ID assigned.
+ * Helps us find the next free ID faster.
+ */
+ uint16_t packet_id;
+
+ } synced_data;
+
+ struct {
+ aws_mqtt_transform_websocket_handshake_fn *handshake_transformer;
+ void *handshake_transformer_ud;
+ aws_mqtt_validate_websocket_handshake_fn *handshake_validator;
+ void *handshake_validator_ud;
+ bool enabled;
+
+ struct aws_http_message *handshake_request;
+ } websocket;
+
+ /**
+ * Statistics tracking operational state
+ */
+ struct aws_mqtt_connection_operation_statistics_impl operation_statistics_impl;
+};
+
+struct aws_channel_handler_vtable *aws_mqtt_get_client_channel_vtable(void);
+
+/* Helper for getting a message object for a packet */
+struct aws_io_message *mqtt_get_message_for_packet(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_mqtt_fixed_header *header);
+
+void mqtt_connection_lock_synced_data(struct aws_mqtt_client_connection *connection);
+void mqtt_connection_unlock_synced_data(struct aws_mqtt_client_connection *connection);
+
+/* Note: needs to be called with lock held. */
+void mqtt_connection_set_state(
+ struct aws_mqtt_client_connection *connection,
+ enum aws_mqtt_client_connection_state state);
+
+/**
+ * This function registers a new outstanding request and returns the message identifier to use (or 0 on error).
+ * send_request will be called from request_timeout_task if everything succeeds. It is not called on error.
+ * on_complete will be called once the request completes, either with success or with an error.
+ * noRetry is true for packets that will never be retried or queued while offline.
+ */
+AWS_MQTT_API uint16_t mqtt_create_request(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_send_request_fn *send_request,
+ void *send_request_ud,
+ aws_mqtt_op_complete_fn *on_complete,
+ void *on_complete_ud,
+ bool noRetry,
+ uint64_t packet_size);
+
+/* Call when an ack packet comes back from the server. */
+AWS_MQTT_API void mqtt_request_complete(
+ struct aws_mqtt_client_connection *connection,
+ int error_code,
+ uint16_t packet_id);
+
+/* Call to close the connection with an error code */
+AWS_MQTT_API void mqtt_disconnect_impl(struct aws_mqtt_client_connection *connection, int error_code);
+
+/* Creates the task used to reestablish a broken connection */
+AWS_MQTT_API void aws_create_reconnect_task(struct aws_mqtt_client_connection *connection);
+
+/**
+ * Sets the callback to call whenever the operation statistics change.
+ *
+ * \param[in] connection The connection object
+ * \param[in] on_operation_statistics The function to call when the operation statistics change (pass NULL to unset)
+ * \param[in] on_operation_statistics_ud Userdata for on_operation_statistics
+ */
+AWS_MQTT_API int aws_mqtt_client_connection_set_on_operation_statistics_handler(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_on_operation_statistics_fn *on_operation_statistics,
+ void *on_operation_statistics_ud);
+
+/*
+ * Sends a PINGREQ packet to the server to keep the connection alive. This is not exported and should not ever
+ * be called directly. This function is driven by the timeout values passed to aws_mqtt_client_connect().
+ * If a PINGRESP is not received within a reasonable period of time, the connection will be closed.
+ *
+ * \param[in] connection The connection to ping on
+ *
+ * \returns AWS_OP_SUCCESS if the connection is open and the PINGREQ is sent or queued to send,
+ * otherwise AWS_OP_ERR and aws_last_error() is set.
+ */
+int aws_mqtt_client_connection_ping(struct aws_mqtt_client_connection *connection);
+
+/**
+ * Changes the operation statistics for the passed-in aws_mqtt_request. Used for tracking
+ * whether operations have been completed or not.
+ *
+ * NOTE: This function will lock the synced data! Do NOT call it with the synced data lock already
+ * held or the function will deadlock trying to acquire the lock.
+ *
+ * @param connection The connection whose operations are being tracked
+ * @param request The request to change the state of
+ * @param new_state_flags The new state to use
+ */
+void aws_mqtt_connection_statistics_change_operation_statistic_state(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_mqtt_request *request,
+ enum aws_mqtt_operation_statistic_state_flags new_state_flags);
+
+#endif /* AWS_MQTT_PRIVATE_CLIENT_IMPL_H */
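
To make the request state machine documented above concrete, here is a hedged sketch of an aws_mqtt_send_request_fn of the kind that could be registered through mqtt_create_request. The my_request_state type and the retry limit are illustrative assumptions, not part of the library.

    #include <aws/mqtt/private/client_impl.h>

    struct my_request_state {
        int attempts;
    };

    static enum aws_mqtt_client_request_state s_my_send_request(
        uint16_t packet_id,
        bool is_first_attempt,
        void *userdata) {

        (void)packet_id;
        struct my_request_state *state = userdata;

        if (is_first_attempt) {
            state->attempts = 0;
        }

        if (++state->attempts > 3) {
            /* Give up: the task is cancelled and on_complete fires with an error. */
            return AWS_MQTT_CLIENT_REQUEST_ERROR;
        }

        /* A real implementation would encode and send the packet here, then return
         * ONGOING so the timeout task re-checks for the matching ack later. */
        return AWS_MQTT_CLIENT_REQUEST_ONGOING;
    }
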
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/fixed_header.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/fixed_header.h
new file mode 100644
index 0000000000..4944c86fe9
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/fixed_header.h
@@ -0,0 +1,62 @@
+#ifndef AWS_MQTT_PRIVATE_FIXED_HEADER_H
+#define AWS_MQTT_PRIVATE_FIXED_HEADER_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+
+#include <aws/mqtt/mqtt.h>
+
+/* Represents the types of the MQTT control packets [MQTT-2.2.1]. */
+enum aws_mqtt_packet_type {
+ /* reserved = 0, */
+ AWS_MQTT_PACKET_CONNECT = 1,
+ AWS_MQTT_PACKET_CONNACK,
+ AWS_MQTT_PACKET_PUBLISH,
+ AWS_MQTT_PACKET_PUBACK,
+ AWS_MQTT_PACKET_PUBREC,
+ AWS_MQTT_PACKET_PUBREL,
+ AWS_MQTT_PACKET_PUBCOMP,
+ AWS_MQTT_PACKET_SUBSCRIBE,
+ AWS_MQTT_PACKET_SUBACK,
+ AWS_MQTT_PACKET_UNSUBSCRIBE,
+ AWS_MQTT_PACKET_UNSUBACK,
+ AWS_MQTT_PACKET_PINGREQ,
+ AWS_MQTT_PACKET_PINGRESP,
+ AWS_MQTT_PACKET_DISCONNECT,
+ /* reserved = 15, */
+};
+
+/**
+ * Represents the fixed header [MQTT-2.2].
+ */
+struct aws_mqtt_fixed_header {
+ enum aws_mqtt_packet_type packet_type;
+ size_t remaining_length;
+ uint8_t flags;
+};
+
+/**
+ * Get the type of packet from the first byte of the buffer [MQTT-2.2.1].
+ */
+AWS_MQTT_API enum aws_mqtt_packet_type aws_mqtt_get_packet_type(const uint8_t *buffer);
+
+/**
+ * Get traits of the packet described by header [MQTT-2.2.2].
+ */
+AWS_MQTT_API bool aws_mqtt_packet_has_flags(const struct aws_mqtt_fixed_header *header);
+
+/**
+ * Write a fixed header to a byte stream.
+ */
+AWS_MQTT_API int aws_mqtt_fixed_header_encode(struct aws_byte_buf *buf, const struct aws_mqtt_fixed_header *header);
+
+/**
+ * Read a fixed header from a byte stream.
+ */
+AWS_MQTT_API int aws_mqtt_fixed_header_decode(struct aws_byte_cursor *cur, struct aws_mqtt_fixed_header *header);
+
+#endif /* AWS_MQTT_PRIVATE_FIXED_HEADER_H */
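
A small sketch of the encode/decode round trip these declarations imply. Assumptions: a caller-provided allocator and the buffer helpers from aws-c-common; the PINGREQ-shaped header is only an example.

    #include <aws/common/byte_buf.h>
    #include <aws/mqtt/private/fixed_header.h>

    static int s_roundtrip_fixed_header(struct aws_allocator *allocator) {
        struct aws_mqtt_fixed_header header = {
            .packet_type = AWS_MQTT_PACKET_PINGREQ,
            .remaining_length = 0,
            .flags = 0,
        };

        struct aws_byte_buf buf;
        if (aws_byte_buf_init(&buf, allocator, 16)) {
            return AWS_OP_ERR;
        }

        int result = aws_mqtt_fixed_header_encode(&buf, &header);
        if (result == AWS_OP_SUCCESS) {
            /* Decode back from a cursor over the encoded bytes. */
            struct aws_byte_cursor cur = aws_byte_cursor_from_buf(&buf);
            struct aws_mqtt_fixed_header decoded;
            result = aws_mqtt_fixed_header_decode(&cur, &decoded);
        }

        aws_byte_buf_clean_up(&buf);
        return result;
    }
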
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/mqtt_client_test_helper.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/mqtt_client_test_helper.h
new file mode 100644
index 0000000000..9dc2f12996
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/mqtt_client_test_helper.h
@@ -0,0 +1,37 @@
+#ifndef AWS_MQTT_CLIENT_TEST_HELPER_H
+#define AWS_MQTT_CLIENT_TEST_HELPER_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/stdint.h>
+#include <aws/mqtt/exports.h>
+
+struct aws_allocator;
+struct aws_byte_buf;
+struct aws_byte_cursor;
+struct aws_mqtt_client_connection;
+struct aws_string;
+
+AWS_EXTERN_C_BEGIN
+
+/** This is for testing applications sending MQTT payloads. Don't ever include this file outside of a unit test. */
+
+/** result buffer will be initialized and payload will be written into it */
+AWS_MQTT_API
+int aws_mqtt_client_get_payload_for_outstanding_publish_packet(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *result);
+
+AWS_MQTT_API
+int aws_mqtt_client_get_topic_for_outstanding_publish_packet(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ struct aws_allocator *allocator,
+ struct aws_string **result);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_CLIENT_TEST_HELPER_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/packets.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/packets.h
new file mode 100644
index 0000000000..94a7591257
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/packets.h
@@ -0,0 +1,351 @@
+#ifndef AWS_MQTT_PRIVATE_PACKETS_H
+#define AWS_MQTT_PRIVATE_PACKETS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/mqtt.h>
+#include <aws/mqtt/private/fixed_header.h>
+
+#include <aws/common/array_list.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/string.h>
+
+/*
+ * General MQTT Control Packet Format [MQTT-2]:
+ * 1. Fixed header, present in all packets
+ * 2. Variable header, present in some packets
+ * 3. Payload, present in some packets
+ */
+
+/* Struct used internally for representing subscriptions */
+struct aws_mqtt_subscription {
+ /* Topic filter to subscribe to [MQTT-4.7]. */
+ struct aws_byte_cursor topic_filter;
+ /* Maximum QoS of messages to receive [MQTT-4.3]. */
+ enum aws_mqtt_qos qos;
+};
+
+/**
+ * Used to represent the following MQTT packets:
+ * - PUBACK
+ * - PUBREC
+ * - PUBREL
+ * - PUBCOMP
+ * - UNSUBACK
+ */
+struct aws_mqtt_packet_ack {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ uint16_t packet_identifier;
+};
+
+/**
+ * Represents the MQTT SUBACK packet
+ */
+struct aws_mqtt_packet_suback {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ uint16_t packet_identifier;
+
+ /* Payload */
+ /* List of uint8_t return code */
+ struct aws_array_list return_codes;
+};
+
+/* Represents the MQTT CONNECT packet */
+struct aws_mqtt_packet_connect {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ bool clean_session;
+ bool has_will;
+ bool will_retain;
+ bool has_password;
+ bool has_username;
+ uint16_t keep_alive_timeout;
+ enum aws_mqtt_qos will_qos;
+ struct aws_byte_cursor client_identifier;
+
+ /* Payload */
+ struct aws_byte_cursor will_topic;
+ struct aws_byte_cursor will_message;
+ struct aws_byte_cursor username;
+ struct aws_byte_cursor password;
+};
+
+/* Represents the MQTT CONNACK packet */
+struct aws_mqtt_packet_connack {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ bool session_present;
+ uint8_t connect_return_code;
+};
+
+/* Represents the MQTT PUBLISH packet */
+struct aws_mqtt_packet_publish {
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ uint16_t packet_identifier;
+ struct aws_byte_cursor topic_name;
+
+ /* Payload */
+ struct aws_byte_cursor payload;
+};
+
+/* Represents the MQTT SUBSCRIBE packet */
+struct aws_mqtt_packet_subscribe {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ uint16_t packet_identifier;
+
+ /* Payload */
+ /* List of aws_mqtt_subscription */
+ struct aws_array_list topic_filters;
+};
+
+/* Represents the MQTT UNSUBSCRIBE packet */
+struct aws_mqtt_packet_unsubscribe {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+
+ /* Variable header */
+ uint16_t packet_identifier;
+
+ /* Payload */
+ /* List of aws_byte_cursors */
+ struct aws_array_list topic_filters;
+};
+/**
+ * Used to represent the following MQTT packets:
+ * - PINGREQ
+ * - PINGRESP
+ * - DISCONNECT
+ */
+struct aws_mqtt_packet_connection {
+ /* Fixed header */
+ struct aws_mqtt_fixed_header fixed_header;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*****************************************************************************/
+/* Ack */
+
+AWS_MQTT_API
+int aws_mqtt_packet_ack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_ack *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_ack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_ack *packet);
+
+/*****************************************************************************/
+/* Connect */
+
+AWS_MQTT_API
+int aws_mqtt_packet_connect_init(
+ struct aws_mqtt_packet_connect *packet,
+ struct aws_byte_cursor client_identifier,
+ bool clean_session,
+ uint16_t keep_alive);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connect_add_will(
+ struct aws_mqtt_packet_connect *packet,
+ struct aws_byte_cursor topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ struct aws_byte_cursor payload);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connect_add_credentials(
+ struct aws_mqtt_packet_connect *packet,
+ struct aws_byte_cursor username,
+ struct aws_byte_cursor password);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connect_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connect *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connect_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connect *packet);
+
+/*****************************************************************************/
+/* Connack */
+
+AWS_MQTT_API
+int aws_mqtt_packet_connack_init(
+ struct aws_mqtt_packet_connack *packet,
+ bool session_present,
+ enum aws_mqtt_connect_return_code return_code);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connack *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connack *packet);
+
+/*****************************************************************************/
+/* Publish */
+
+AWS_MQTT_API
+int aws_mqtt_packet_publish_init(
+ struct aws_mqtt_packet_publish *packet,
+ bool retain,
+ enum aws_mqtt_qos qos,
+ bool dup,
+ struct aws_byte_cursor topic_name,
+ uint16_t packet_identifier,
+ struct aws_byte_cursor payload);
+
+AWS_MQTT_API
+int aws_mqtt_packet_publish_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_publish_encode_headers(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_publish_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_publish *packet);
+
+AWS_MQTT_API
+bool aws_mqtt_packet_publish_get_dup(const struct aws_mqtt_packet_publish *packet);
+
+AWS_MQTT_API
+enum aws_mqtt_qos aws_mqtt_packet_publish_get_qos(const struct aws_mqtt_packet_publish *packet);
+
+AWS_MQTT_API
+bool aws_mqtt_packet_publish_get_retain(const struct aws_mqtt_packet_publish *packet);
+
+/*****************************************************************************/
+/* Puback */
+
+AWS_MQTT_API
+int aws_mqtt_packet_puback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier);
+
+/*****************************************************************************/
+/* Pubrec */
+
+AWS_MQTT_API
+int aws_mqtt_packet_pubrec_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier);
+
+/*****************************************************************************/
+/* Pubrel */
+
+AWS_MQTT_API
+int aws_mqtt_packet_pubrel_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier);
+
+/*****************************************************************************/
+/* Pubcomp */
+
+AWS_MQTT_API
+int aws_mqtt_packet_pubcomp_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier);
+
+/*****************************************************************************/
+/* Subscribe */
+
+AWS_MQTT_API
+int aws_mqtt_packet_subscribe_init(
+ struct aws_mqtt_packet_subscribe *packet,
+ struct aws_allocator *allocator,
+ uint16_t packet_identifier);
+
+AWS_MQTT_API
+void aws_mqtt_packet_subscribe_clean_up(struct aws_mqtt_packet_subscribe *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_subscribe_add_topic(
+ struct aws_mqtt_packet_subscribe *packet,
+ struct aws_byte_cursor topic_filter,
+ enum aws_mqtt_qos qos);
+
+AWS_MQTT_API
+int aws_mqtt_packet_subscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_subscribe *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_subscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_subscribe *packet);
+
+/*****************************************************************************/
+/* Suback */
+
+AWS_MQTT_API
+int aws_mqtt_packet_suback_init(
+ struct aws_mqtt_packet_suback *packet,
+ struct aws_allocator *allocator,
+ uint16_t packet_identifier);
+
+AWS_MQTT_API
+void aws_mqtt_packet_suback_clean_up(struct aws_mqtt_packet_suback *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_suback_add_return_code(struct aws_mqtt_packet_suback *packet, uint8_t return_code);
+
+AWS_MQTT_API
+int aws_mqtt_packet_suback_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_suback *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_suback_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_suback *packet);
+
+/*****************************************************************************/
+/* Unsubscribe */
+
+AWS_MQTT_API
+int aws_mqtt_packet_unsubscribe_init(
+ struct aws_mqtt_packet_unsubscribe *packet,
+ struct aws_allocator *allocator,
+ uint16_t packet_identifier);
+
+AWS_MQTT_API
+void aws_mqtt_packet_unsubscribe_clean_up(struct aws_mqtt_packet_unsubscribe *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_unsubscribe_add_topic(
+ struct aws_mqtt_packet_unsubscribe *packet,
+ struct aws_byte_cursor topic_filter);
+
+AWS_MQTT_API
+int aws_mqtt_packet_unsubscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_unsubscribe *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_unsubscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_unsubscribe *packet);
+
+/*****************************************************************************/
+/* Unsuback */
+
+AWS_MQTT_API
+int aws_mqtt_packet_unsuback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier);
+
+/*****************************************************************************/
+/* Ping request/response, disconnect */
+
+AWS_MQTT_API
+int aws_mqtt_packet_pingreq_init(struct aws_mqtt_packet_connection *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_pingresp_init(struct aws_mqtt_packet_connection *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_disconnect_init(struct aws_mqtt_packet_connection *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connection_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connection *packet);
+
+AWS_MQTT_API
+int aws_mqtt_packet_connection_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connection *packet);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* AWS_MQTT_PRIVATE_PACKETS_H */
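
As an illustration of the CONNECT helpers above, a sketch that builds a CONNECT packet with credentials and encodes it into a caller-provided buffer. The client id, credentials, and buffer size are placeholder assumptions.

    #include <aws/common/byte_buf.h>
    #include <aws/mqtt/private/packets.h>

    static int s_encode_connect(struct aws_allocator *allocator, struct aws_byte_buf *out_buf) {
        struct aws_mqtt_packet_connect packet;

        if (aws_mqtt_packet_connect_init(
                &packet,
                aws_byte_cursor_from_c_str("example-client"),
                true /*clean_session*/,
                30 /*keep_alive seconds*/)) {
            return AWS_OP_ERR;
        }

        if (aws_mqtt_packet_connect_add_credentials(
                &packet, aws_byte_cursor_from_c_str("user"), aws_byte_cursor_from_c_str("pass"))) {
            return AWS_OP_ERR;
        }

        if (aws_byte_buf_init(out_buf, allocator, 256)) {
            return AWS_OP_ERR;
        }

        /* On failure the caller should not use out_buf. */
        return aws_mqtt_packet_connect_encode(out_buf, &packet);
    }
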
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/shared_constants.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/shared_constants.h
new file mode 100644
index 0000000000..0a835942a5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/shared_constants.h
@@ -0,0 +1,18 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_MQTT_SHARED_CONSTANTS_H
+#define AWS_MQTT_SHARED_CONSTANTS_H
+
+#include <aws/mqtt/mqtt.h>
+
+AWS_EXTERN_C_BEGIN
+
+AWS_MQTT_API extern const struct aws_byte_cursor *g_websocket_handshake_default_path;
+AWS_MQTT_API extern const struct aws_http_header *g_websocket_handshake_default_protocol_header;
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_SHARED_CONSTANTS_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/topic_tree.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/topic_tree.h
new file mode 100644
index 0000000000..9444eafecc
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/topic_tree.h
@@ -0,0 +1,174 @@
+#ifndef AWS_MQTT_PRIVATE_TOPIC_TREE_H
+#define AWS_MQTT_PRIVATE_TOPIC_TREE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/hash_table.h>
+
+#include <aws/mqtt/private/packets.h>
+
+/** Type of function called when a publish received matches a subscription */
+typedef void(aws_mqtt_publish_received_fn)(
+ const struct aws_byte_cursor *topic,
+ const struct aws_byte_cursor *payload,
+ bool dup,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ void *user_data);
+
+/**
+ * Function called per subscription when iterating through subscriptions.
+ * Return true to continue iteration, or false to stop.
+ */
+typedef bool(
+ aws_mqtt_topic_tree_iterator_fn)(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data);
+
+struct aws_mqtt_topic_node {
+
+ /* This node's part of the topic filter. If in another node's subtopics, this is the key. */
+ struct aws_byte_cursor topic;
+
+ /**
+ * aws_byte_cursor -> aws_mqtt_topic_node
+ * '#' and '+' are special values in here
+ */
+ struct aws_hash_table subtopics;
+
+ /* The entire topic filter. If !owns_topic_filter, this topic_filter belongs to someone else. */
+ const struct aws_string *topic_filter;
+ bool owns_topic_filter;
+
+ /* The following will only be populated if the node IS a subscription */
+ /* Max QoS to deliver. */
+ enum aws_mqtt_qos qos;
+ /* Callback to call on message received */
+ aws_mqtt_publish_received_fn *callback;
+ aws_mqtt_userdata_cleanup_fn *cleanup;
+ void *userdata;
+};
+
+struct aws_mqtt_topic_tree {
+ struct aws_mqtt_topic_node *root;
+ struct aws_allocator *allocator;
+};
+
+/**
+ * The size of transaction instances.
+ * When you initialize an aws_array_list for use as a transaction, pass this as the item size.
+ */
+extern AWS_MQTT_API size_t aws_mqtt_topic_tree_action_size;
+
+/**
+ * Initialize a topic tree with an allocator to later use.
+ * Note that calling init allocates root.
+ */
+AWS_MQTT_API int aws_mqtt_topic_tree_init(struct aws_mqtt_topic_tree *tree, struct aws_allocator *allocator);
+/**
+ * Cleanup and deallocate an entire topic tree.
+ */
+AWS_MQTT_API void aws_mqtt_topic_tree_clean_up(struct aws_mqtt_topic_tree *tree);
+
+/**
+ * Iterates through all registered subscriptions, and calls iterator.
+ *
+ * Iterator may return false to stop iterating, or true to continue.
+ */
+AWS_MQTT_API void aws_mqtt_topic_tree_iterate(
+ const struct aws_mqtt_topic_tree *tree,
+ aws_mqtt_topic_tree_iterator_fn *iterator,
+ void *user_data);
+
+/**
+ * Gets the total number of subscriptions in the tree.
+ */
+AWS_MQTT_API size_t aws_mqtt_topic_tree_get_sub_count(const struct aws_mqtt_topic_tree *tree);
+
+/**
+ * Insert a new topic filter into the subscription tree (subscribe).
+ *
+ * \param[in] tree The tree to insert into.
+ * \param[in] transaction The transaction to add the insert action to.
+ * Must be initialized with aws_mqtt_topic_tree_action_size as item size.
+ * \param[in] topic_filter The topic filter to subscribe on. May contain wildcards.
+ * \param[in] callback The callback to call on a publish with a matching topic.
+ * \param[in] cleanup (nullable) Called when the subscription is removed; userdata is passed to it.
+ * \param[in] userdata The userdata to pass to callback (typically the connection object; a void * is used to
+ * support client and server connections in the future).
+ *
+ * \returns AWS_OP_SUCCESS on successful insertion, AWS_OP_ERR with aws_last_error() populated on failure.
+ * If AWS_OP_ERR is returned, aws_mqtt_topic_tree_transaction_rollback should be called to prevent leaks.
+ */
+AWS_MQTT_API int aws_mqtt_topic_tree_transaction_insert(
+ struct aws_mqtt_topic_tree *tree,
+ struct aws_array_list *transaction,
+ const struct aws_string *topic_filter,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_publish_received_fn *callback,
+ aws_mqtt_userdata_cleanup_fn *cleanup,
+ void *userdata);
+
+/**
+ * Remove a topic filter from the subscription tree (unsubscribe).
+ *
+ * \param[in] tree The tree to remove from.
+ * \param[in] transaction The transaction to add the insert action to.
+ * Must be initialized with aws_mqtt_topic_tree_action_size as item size.
+ * \param[in] topic_filter The filter to remove (must be exactly the same as the topic_filter passed to insert).
+ * \param[out] old_userdata If not NULL, receives the userdata assigned to this subscription.
+ * \note Once the transaction is committed, old_userdata may be destroyed
+ * if a cleanup callback was set on insert.
+ *
+ * \returns AWS_OP_SUCCESS on successful removal, AWS_OP_ERR with aws_last_error() populated on failure.
+ * If AWS_OP_ERR is returned, aws_mqtt_topic_tree_transaction_rollback should be called to prevent leaks.
+ */
+AWS_MQTT_API int aws_mqtt_topic_tree_transaction_remove(
+ struct aws_mqtt_topic_tree *tree,
+ struct aws_array_list *transaction,
+ const struct aws_byte_cursor *topic_filter,
+ void **old_userdata);
+
+AWS_MQTT_API void aws_mqtt_topic_tree_transaction_commit(
+ struct aws_mqtt_topic_tree *tree,
+ struct aws_array_list *transaction);
+
+AWS_MQTT_API void aws_mqtt_topic_tree_transaction_roll_back(
+ struct aws_mqtt_topic_tree *tree,
+ struct aws_array_list *transaction);
+
+/**
+ * Insert a new topic filter into the subscription tree (subscribe).
+ *
+ * \param[in] tree The tree to insert into.
+ * \param[in] topic_filter The topic filter to subscribe on. May contain wildcards.
+ * \param[in] callback The callback to call on a publish with a matching topic.
+ * \param[in] cleanup (nullable) Called when the subscription is removed; userdata is passed to it.
+ * \param[in] userdata The userdata to pass to callback (typically the connection object; a void * is used to
+ * support client and server connections in the future).
+ *
+ * \returns AWS_OP_SUCCESS on successful insertion, AWS_OP_ERR with aws_last_error() populated on failure.
+ */
+AWS_MQTT_API
+int aws_mqtt_topic_tree_insert(
+ struct aws_mqtt_topic_tree *tree,
+ const struct aws_string *topic_filter,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_publish_received_fn *callback,
+ aws_mqtt_userdata_cleanup_fn *cleanup,
+ void *userdata);
+
+AWS_MQTT_API
+int aws_mqtt_topic_tree_remove(struct aws_mqtt_topic_tree *tree, const struct aws_byte_cursor *topic_filter);
+
+/**
+ * Dispatches a publish packet to all subscriptions matching the publish topic.
+ *
+ * \param[in] tree The tree to publish on.
+ * \param[in] pub The publish packet to dispatch. The topic MUST NOT contain wildcards.
+ */
+void AWS_MQTT_API
+ aws_mqtt_topic_tree_publish(const struct aws_mqtt_topic_tree *tree, struct aws_mqtt_packet_publish *pub);
+
+#endif /* AWS_MQTT_PRIVATE_TOPIC_TREE_H */
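
To show how the transactional insert/commit pair above is intended to be used, a sketch that registers a subscription callback in a tree. The s_on_publish stub, the filter string, and the single-action transaction size are illustrative assumptions.

    #include <aws/common/array_list.h>
    #include <aws/common/string.h>
    #include <aws/mqtt/private/topic_tree.h>

    static void s_on_publish(
        const struct aws_byte_cursor *topic,
        const struct aws_byte_cursor *payload,
        bool dup,
        enum aws_mqtt_qos qos,
        bool retain,
        void *user_data) {
        (void)topic; (void)payload; (void)dup; (void)qos; (void)retain; (void)user_data;
    }

    static int s_subscribe_in_tree(struct aws_mqtt_topic_tree *tree, struct aws_allocator *allocator) {
        const struct aws_string *filter = aws_string_new_from_c_str(allocator, "devices/+/status");
        if (!filter) {
            return AWS_OP_ERR;
        }

        /* A transaction is an array list whose item size is aws_mqtt_topic_tree_action_size. */
        struct aws_array_list transaction;
        if (aws_array_list_init_dynamic(&transaction, allocator, 1, aws_mqtt_topic_tree_action_size)) {
            return AWS_OP_ERR;
        }

        int result = aws_mqtt_topic_tree_transaction_insert(
            tree, &transaction, filter, AWS_MQTT_QOS_AT_MOST_ONCE, s_on_publish, NULL, NULL);
        if (result == AWS_OP_SUCCESS) {
            aws_mqtt_topic_tree_transaction_commit(tree, &transaction);
        } else {
            aws_mqtt_topic_tree_transaction_roll_back(tree, &transaction);
        }

        aws_array_list_clean_up(&transaction);
        return result;
    }
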
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_callbacks.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_callbacks.h
new file mode 100644
index 0000000000..2ff4fb67e2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_callbacks.h
@@ -0,0 +1,90 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_MQTT_MQTT5_CALLBACKS_H
+#define AWS_MQTT_MQTT5_CALLBACKS_H
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/linked_list.h>
+#include <aws/mqtt/v5/mqtt5_client.h>
+
+struct aws_mqtt5_callback_set;
+
+/*
+ * An internal type for managing chains of callbacks attached to an mqtt5 client. Supports chains for
+ * lifecycle event handling and incoming publish packet handling.
+ *
+ * Assumed to be owned and used only by an MQTT5 client.
+ */
+struct aws_mqtt5_callback_set_manager {
+ struct aws_mqtt5_client *client;
+
+ struct aws_linked_list callback_set_entries;
+
+ uint64_t next_callback_set_entry_id;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * Initializes a callback set manager
+ */
+AWS_MQTT_API
+void aws_mqtt5_callback_set_manager_init(
+ struct aws_mqtt5_callback_set_manager *manager,
+ struct aws_mqtt5_client *client);
+
+/*
+ * Cleans up a callback set manager.
+ *
+ * aws_mqtt5_callback_set_manager_init must have been previously called or this will crash.
+ */
+AWS_MQTT_API
+void aws_mqtt5_callback_set_manager_clean_up(struct aws_mqtt5_callback_set_manager *manager);
+
+/*
+ * Adds a callback set to the front of the handler chain. Returns an integer id that can be used to selectively
+ * remove the callback set from the manager.
+ *
+ * May only be called on the client's event loop thread.
+ */
+AWS_MQTT_API
+uint64_t aws_mqtt5_callback_set_manager_push_front(
+ struct aws_mqtt5_callback_set_manager *manager,
+ struct aws_mqtt5_callback_set *callback_set);
+
+/*
+ * Removes a callback set from the handler chain.
+ *
+ * May only be called on the client's event loop thread.
+ */
+AWS_MQTT_API
+void aws_mqtt5_callback_set_manager_remove(struct aws_mqtt5_callback_set_manager *manager, uint64_t callback_set_id);
+
+/*
+ * Walks the handler chain for an MQTT5 client's incoming publish messages. The chain's callbacks will be invoked
+ * until either the end is reached or one of the callbacks returns true.
+ *
+ * May only be called on the client's event loop thread.
+ */
+AWS_MQTT_API
+void aws_mqtt5_callback_set_manager_on_publish_received(
+ struct aws_mqtt5_callback_set_manager *manager,
+ const struct aws_mqtt5_packet_publish_view *publish_view);
+
+/*
+ * Walks the handler chain for an MQTT5 client's lifecycle events.
+ *
+ * May only be called on the client's event loop thread.
+ */
+AWS_MQTT_API
+void aws_mqtt5_callback_set_manager_on_lifecycle_event(
+ struct aws_mqtt5_callback_set_manager *manager,
+ const struct aws_mqtt5_client_lifecycle_event *lifecycle_event);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_CALLBACKS_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_client_impl.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_client_impl.h
new file mode 100644
index 0000000000..7c08354963
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_client_impl.h
@@ -0,0 +1,648 @@
+#ifndef AWS_MQTT_MQTT5_CLIENT_IMPL_H
+#define AWS_MQTT_MQTT5_CLIENT_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/hash_table.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/io/channel.h>
+#include <aws/mqtt/private/v5/mqtt5_callbacks.h>
+#include <aws/mqtt/private/v5/mqtt5_decoder.h>
+#include <aws/mqtt/private/v5/mqtt5_encoder.h>
+#include <aws/mqtt/private/v5/mqtt5_topic_alias.h>
+#include <aws/mqtt/private/v5/rate_limiters.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_event_loop;
+struct aws_http_message;
+struct aws_http_proxy_options;
+struct aws_mqtt5_client_options_storage;
+struct aws_mqtt5_operation;
+struct aws_websocket_client_connection_options;
+
+/**
+ * The various states that the client can be in. A client has both a current state and a desired state.
+ * Desired state is only allowed to be one of {STOPPED, CONNECTED, TERMINATED}. The client transitions states
+ * based on either
+ * (1) changes in desired state, or
+ * (2) external events.
+ *
+ * Most states are interruptible (in the sense of a change in desired state causing an immediate change in state) but
+ * CONNECTING and CHANNEL_SHUTDOWN cannot be interrupted due to waiting for an asynchronous callback (that has no
+ * cancel) to complete.
+ */
+enum aws_mqtt5_client_state {
+
+ /*
+ * The client is not connected and not waiting for anything to happen.
+ *
+ * Next States:
+ * CONNECTING - if the user invokes Start() on the client
+ * TERMINATED - if the user releases the last ref count on the client
+ */
+ AWS_MCS_STOPPED,
+
+ /*
+ * The client is attempting to connect to a remote endpoint, and is waiting for channel setup to complete. This
+ * state is not interruptible by any means other than channel setup completion.
+ *
+ * Next States:
+ * MQTT_CONNECT - if the channel completes setup with no error and desired state is still CONNECTED
+ * CHANNEL_SHUTDOWN - if the channel completes setup with no error, but desired state is not CONNECTED
+ * PENDING_RECONNECT - if the channel fails to complete setup and desired state is still CONNECTED
+ * STOPPED - if the channel fails to complete setup and desired state is not CONNECTED
+ */
+ AWS_MCS_CONNECTING,
+
+ /*
+ * The client is sending a CONNECT packet and waiting on a CONNACK packet.
+ *
+ * Next States:
+ * CONNECTED - if a successful CONNACK is received and desired state is still CONNECTED
+ * CHANNEL_SHUTDOWN - On send/encode errors, read/decode errors, unsuccessful CONNACK, timeout to receive
+ * CONNACK, desired state is no longer CONNECTED
+ * PENDING_RECONNECT - unexpected channel shutdown completion and desired state still CONNECTED
+ * STOPPED - unexpected channel shutdown completion and desired state no longer CONNECTED
+ */
+ AWS_MCS_MQTT_CONNECT,
+
+ /*
+ * The client is ready to perform user-requested mqtt operations.
+ *
+ * Next States:
+ * CHANNEL_SHUTDOWN - On send/encode errors, read/decode errors, DISCONNECT packet received, desired state
+ * no longer CONNECTED, PINGRESP timeout
+ * PENDING_RECONNECT - unexpected channel shutdown completion and desired state still CONNECTED
+ * STOPPED - unexpected channel shutdown completion and desired state no longer CONNECTED
+ */
+ AWS_MCS_CONNECTED,
+
+ /*
+ * The client is attempting to shut down a connection cleanly by finishing the current operation and then
+ * transmitting an outbound DISCONNECT.
+ *
+ * Next States:
+ * CHANNEL_SHUTDOWN - on successful (or unsuccessful) send of the DISCONNECT
+ * PENDING_RECONNECT - unexpected channel shutdown completion and desired state still CONNECTED
+ * STOPPED - unexpected channel shutdown completion and desired state no longer CONNECTED
+ */
+ AWS_MCS_CLEAN_DISCONNECT,
+
+ /*
+ * The client is waiting for the io channel to completely shut down. This state is not interruptible.
+ *
+ * Next States:
+ * PENDING_RECONNECT - the io channel has shut down and desired state is still CONNECTED
+ * STOPPED - the io channel has shut down and desired state is not CONNECTED
+ */
+ AWS_MCS_CHANNEL_SHUTDOWN,
+
+ /*
+ * The client is waiting for the reconnect timer to expire before attempting to connect again.
+ *
+ * Next States:
+ * CONNECTING - the reconnect timer has expired and desired state is still CONNECTED
+ * STOPPED - desired state is no longer CONNECTED
+ */
+ AWS_MCS_PENDING_RECONNECT,
+
+ /*
+ * The client is performing final shutdown and release of all resources. This state is only realized for
+ * a non-observable instant of time (transition out of STOPPED).
+ */
+ AWS_MCS_TERMINATED,
+};
+
+/**
+ * Table of overridable external functions to allow mocking and monitoring of the client.
+ */
+struct aws_mqtt5_client_vtable {
+ /* aws_high_res_clock_get_ticks */
+ uint64_t (*get_current_time_fn)(void);
+
+ /* aws_channel_shutdown */
+ int (*channel_shutdown_fn)(struct aws_channel *channel, int error_code);
+
+ /* aws_websocket_client_connect */
+ int (*websocket_connect_fn)(const struct aws_websocket_client_connection_options *options);
+
+ /* aws_client_bootstrap_new_socket_channel */
+ int (*client_bootstrap_new_socket_channel_fn)(struct aws_socket_channel_bootstrap_options *options);
+
+ /* aws_http_proxy_new_socket_channel */
+ int (*http_proxy_new_socket_channel_fn)(
+ struct aws_socket_channel_bootstrap_options *channel_options,
+ const struct aws_http_proxy_options *proxy_options);
+
+ /* This doesn't replace anything, it's just for test verification of state changes */
+ void (*on_client_state_change_callback_fn)(
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_client_state old_state,
+ enum aws_mqtt5_client_state new_state,
+ void *vtable_user_data);
+
+ /* This doesn't replace anything, it's just for test verification of statistic changes */
+ void (*on_client_statistics_changed_callback_fn)(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation,
+ void *vtable_user_data);
+
+ /* aws_channel_acquire_message_from_pool */
+ struct aws_io_message *(*aws_channel_acquire_message_from_pool_fn)(
+ struct aws_channel *channel,
+ enum aws_io_message_type message_type,
+ size_t size_hint,
+ void *user_data);
+
+ /* aws_channel_slot_send_message */
+ int (*aws_channel_slot_send_message_fn)(
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message,
+ enum aws_channel_direction dir,
+ void *user_data);
+
+ void *vtable_user_data;
+};
+
+/*
+ * In order to make it easier to guarantee the lifecycle events are properly paired and emitted, we track
+ * a separate state (from aws_mqtt5_client_state) and emit lifecycle events based on it.
+ *
+ * For example, if our lifecycle event is state CONNECTING, then anything going wrong becomes a CONNECTION_FAILED event
+ * whereas if we were in CONNECTED, it must be a DISCONNECTED event. By setting the state to NONE after emitting
+ * a CONNECTION_FAILED or DISCONNECTED event, then emission spots further down the execution pipeline will not
+ * accidentally emit an additional event. This also allows us to emit immediately when an event happens, if
+ * appropriate, without having to persist additional event data (like packet views) until some singular point.
+ *
+ * For example:
+ *
+ * If I'm in CONNECTING and the channel shuts down, I want to emit a CONNECTION_FAILED event with the error code.
+ * If I'm in CONNECTING and I receive a failed CONNACK, I want to emit a CONNECTION_FAILED event immediately with
+ * the CONNACK view in it and then invoke channel shutdown (and channel shutdown completing later should not emit an
+ * event).
+ * If I'm in CONNECTED and the channel shuts down, I want to emit a DISCONNECTED event with the error code.
+ * If I'm in CONNECTED and get a DISCONNECT packet from the server, I want to emit a DISCONNECTED event with
+ * the DISCONNECT packet in it, invoke channel shutdown, and then I *don't* want to emit a DISCONNECTED event
+ * when the channel finishes shutting down.
+ */
+enum aws_mqtt5_lifecycle_state {
+ AWS_MQTT5_LS_NONE,
+ AWS_MQTT5_LS_CONNECTING,
+ AWS_MQTT5_LS_CONNECTED,
+};
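+
+/*
+ * Illustrative sketch of the emit-once pattern described above (not taken from the implementation); the
+ * s_emit_connection_failed/s_emit_disconnected helpers are hypothetical names:
+ *
+ *     static void s_on_channel_failure(struct aws_mqtt5_client *client, int error_code) {
+ *         switch (client->lifecycle_state) {
+ *             case AWS_MQTT5_LS_CONNECTING:
+ *                 s_emit_connection_failed(client, error_code);
+ *                 break;
+ *             case AWS_MQTT5_LS_CONNECTED:
+ *                 s_emit_disconnected(client, error_code);
+ *                 break;
+ *             case AWS_MQTT5_LS_NONE:
+ *             default:
+ *                 break; // an event was already emitted earlier in this pathway
+ *         }
+ *         client->lifecycle_state = AWS_MQTT5_LS_NONE; // later emission spots become no-ops
+ *     }
+ */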
+
+/*
+ * Operation-related state notes
+ *
+ * operation flow:
+ * (qos 0 publish, disconnect, connect)
+ * user (via cross thread task) ->
+ * queued_operations -> (on front of queue)
+ * current_operation -> (on completely encoded and passed to next handler)
+ * write_completion_operations -> (on socket write complete)
+ * release
+ *
+ * (qos 1+ publish, sub/unsub)
+ * user (via cross thread task) ->
+ * queued_operations -> (on front of queue)
+ * current_operation (allocate packet id if necessary) -> (on completely encoded and passed to next handler)
+ * unacked_operations && unacked_operations_table -> (on ack received)
+ * release
+ *
+ * QoS 1+ requires both a table and a list holding the same operations in order to support fast lookups by
+ * mqtt packet id and in-order re-queueing in the case of a disconnection (required by spec)
+ *
+ * On Qos 1 PUBLISH completely received (and final callback invoked):
+ * Add PUBACK at head of queued_operations
+ *
+ * On disconnect (on transition to PENDING_RECONNECT or STOPPED):
+ * If current_operation, move current_operation to head of queued_operations
+ * Fail all operations in the pending write completion list
+ * Fail, remove, and release operations in queued_operations where
+ * (1) They fail the offline queue policy OR
+ * (2) They are a PUBACK, PINGREQ, or DISCONNECT
+ * Fail, remove, and release unacked_operations if:
+ * (1) They fail the offline queue policy AND
+ * (2) operation is not Qos 1+ publish
+ *
+ * On reconnect (post CONNACK):
+ * if rejoined_session:
+ * Move-and-append all non-qos1+-publishes in unacked_operations to the front of queued_operations
+ * Move-and-append remaining operations (qos1+ publishes) to the front of queued_operations
+ * else:
+ * Fail, remove, and release unacked_operations that fail the offline queue policy
+ * Move and append unacked operations to front of queued_operations
+ *
+ * Clear unacked_operations_table
+ */
+struct aws_mqtt5_client_operational_state {
+
+ /* back pointer to the client */
+ struct aws_mqtt5_client *client;
+
+ /*
+ * One more than the most recently used packet id. This is the best starting point for a forward search through
+ * the id space for a free id.
+ */
+ aws_mqtt5_packet_id_t next_mqtt_packet_id;
+
+ struct aws_linked_list queued_operations;
+ struct aws_mqtt5_operation *current_operation;
+ struct aws_hash_table unacked_operations_table;
+ struct aws_linked_list unacked_operations;
+ struct aws_linked_list write_completion_operations;
+
+ /*
+ * Is there an io message in transit (to the socket) that has not invoked its write completion callback yet?
+ * The client implementation only allows one in-transit message at a time, and so if this is true, we don't
+ * send additional ones.
+ */
+ bool pending_write_completion;
+};
+
+/*
+ * State related to flow-control rules for the mqtt5 client
+ *
+ * Includes:
+ * (1) Mqtt5 ReceiveMaximum support
+ * (2) AWS IoT Core limit support:
+ * (a) Publish TPS rate limit
+ * (b) Total outbound throughput limit
+ */
+struct aws_mqtt5_client_flow_control_state {
+
+ /*
+ * Mechanically follows the mqtt5 suggested implementation:
+ *
+ * Starts at the server's receive maximum.
+ * 1. Decrement every time we send a QoS1+ publish
+ * 2. Increment every time we receive a PUBACK
+ *
+ * Qos1+ publishes (and all operations behind them in the queue) are blocked while this value is zero.
+ *
+ * Qos 2 support will require additional work here to match the spec.
+ */
+ uint32_t unacked_publish_token_count;
+
+ /*
+ * Optional throttle (extended validation) that prevents the client from exceeding IoT Core's default throughput
+ * limit
+ */
+ struct aws_rate_limiter_token_bucket throughput_throttle;
+
+ /*
+ * Optional throttle (extended validation) that prevents the client from exceeding IoT Core's default publish
+ * rate limit.
+ */
+ struct aws_rate_limiter_token_bucket publish_throttle;
+};
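+
+/*
+ * Minimal sketch of the ReceiveMaximum token flow described above; s_operation_is_qos1_plus_publish() is a
+ * hypothetical helper, not part of this header:
+ *
+ *     // on encoding an outbound QoS 1+ PUBLISH:
+ *     --client->flow_control_state.unacked_publish_token_count;
+ *
+ *     // on receiving a PUBACK:
+ *     ++client->flow_control_state.unacked_publish_token_count;
+ *
+ *     // before servicing the head of queued_operations:
+ *     if (s_operation_is_qos1_plus_publish(next_operation) &&
+ *         client->flow_control_state.unacked_publish_token_count == 0) {
+ *         return; // the queue is blocked until a PUBACK frees a token
+ *     }
+ */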
+
+/**
+ * Contains some simple statistics about the current state of the client's queue of operations
+ */
+struct aws_mqtt5_client_operation_statistics_impl {
+ /*
+ * total number of operations submitted to the client that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ struct aws_atomic_var incomplete_operation_count_atomic;
+
+ /*
+ * total packet size of operations submitted to the client that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ struct aws_atomic_var incomplete_operation_size_atomic;
+
+ /*
+ * total number of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ struct aws_atomic_var unacked_operation_count_atomic;
+
+ /*
+ * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ struct aws_atomic_var unacked_operation_size_atomic;
+};
+
+struct aws_mqtt5_client {
+
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ const struct aws_mqtt5_client_vtable *vtable;
+
+ /*
+ * Client configuration
+ */
+ const struct aws_mqtt5_client_options_storage *config;
+
+ /*
+ * The recurrent task that runs all client logic outside of external event callbacks. Bound to the client's
+ * event loop.
+ */
+ struct aws_task service_task;
+
+ /*
+ * Tracks when the client's service task is next scheduled to run. Zero if the task is not scheduled to run or
+ * we are in the middle of a service (so technically not scheduled either).
+ */
+ uint64_t next_service_task_run_time;
+
+ /*
+ * True if the client's service task is running. Used to skip service task reevaluation due to state changes
+ * while running the service task. Reevaluation will occur at the very end of the service.
+ */
+ bool in_service;
+
+ /*
+ * The final mqtt5 settings negotiated between defaults, CONNECT, and CONNACK. Only valid while in
+ * CONNECTED or CLEAN_DISCONNECT states.
+ */
+ struct aws_mqtt5_negotiated_settings negotiated_settings;
+
+ /*
+ * Event loop all the client's connections and any related tasks will be pinned to, ensuring serialization and
+ * concurrency safety.
+ */
+ struct aws_event_loop *loop;
+
+ /* Channel handler information */
+ struct aws_channel_handler handler;
+ struct aws_channel_slot *slot;
+
+ /*
+ * What state is the client working towards?
+ */
+ enum aws_mqtt5_client_state desired_state;
+
+ /*
+ * What is the client's current state?
+ */
+ enum aws_mqtt5_client_state current_state;
+
+ /*
+ * The client's lifecycle state. Used to correctly emit lifecycle events in spite of the complicated
+ * async execution pathways that are possible.
+ */
+ enum aws_mqtt5_lifecycle_state lifecycle_state;
+
+ /*
+ * The client's MQTT packet encoder
+ */
+ struct aws_mqtt5_encoder encoder;
+
+ /*
+ * The client's MQTT packet decoder
+ */
+ struct aws_mqtt5_decoder decoder;
+
+ /*
+ * Cache of inbound topic aliases
+ */
+ struct aws_mqtt5_inbound_topic_alias_resolver inbound_topic_alias_resolver;
+
+ /*
+ * Cache of outbound topic aliases
+ */
+ struct aws_mqtt5_outbound_topic_alias_resolver *outbound_topic_alias_resolver;
+
+ /*
+ * Temporary state-related data.
+ *
+ * clean_disconnect_error_code - the CLEAN_DISCONNECT state takes time to complete and we want to be able
+ * to pass an error code from a prior event to the channel shutdown. This holds the "override" error code
+ * that we'd like to shut down the channel with while CLEAN_DISCONNECT is processed.
+ *
+ * handshake exists on websocket-configured clients between the transform completion timepoint and the
+ * websocket setup callback.
+ */
+ int clean_disconnect_error_code;
+ struct aws_http_message *handshake;
+
+ /*
+ * Wraps all state related to pending and in-progress MQTT operations within the client.
+ */
+ struct aws_mqtt5_client_operational_state operational_state;
+
+ /* Statistics tracking operational state */
+ struct aws_mqtt5_client_operation_statistics_impl operation_statistics_impl;
+
+ /*
+ * Wraps all state related to outbound flow control.
+ */
+ struct aws_mqtt5_client_flow_control_state flow_control_state;
+
+ /*
+ * Manages notification listener chains for lifecycle events and incoming publishes
+ */
+ struct aws_mqtt5_callback_set_manager callback_manager;
+
+ /*
+ * When should the next PINGREQ be sent?
+ */
+ uint64_t next_ping_time;
+
+ /*
+ * When should we shut down the channel due to failure to receive a PINGRESP? Only non-zero when an outstanding
+ * PINGREQ has not been answered.
+ */
+ uint64_t next_ping_timeout_time;
+
+ /*
+ * When should the client next attempt to reconnect? Only used by PENDING_RECONNECT state.
+ */
+ uint64_t next_reconnect_time_ns;
+
+ /*
+ * How many consecutive reconnect failures have we experienced?
+ */
+ uint64_t reconnect_count;
+
+ /*
+ * How long should we wait before our next reconnect attempt?
+ */
+ uint64_t current_reconnect_delay_ms;
+
+ /*
+ * When should the client reset current_reconnect_delay_ms to the minimum value? Only relevant to the
+ * CONNECTED state.
+ */
+ uint64_t next_reconnect_delay_reset_time_ns;
+
+ /*
+ * When should we shut down the channel due to failure to receive a CONNACK? Only relevant during the MQTT_CONNECT
+ * state.
+ */
+ uint64_t next_mqtt_connect_packet_timeout_time;
+
+ /*
+ * Starts false and set to true as soon as a successful connection is established. If the session resumption
+ * behavior is AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS then this must be true before the client sends CONNECT packets
+ * with clean start set to false.
+ */
+ bool has_connected_successfully;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*
+ * A number of private APIs which are either set up for mocking parts of the client or testing subsystems within it by
+ * exposing what would normally be static functions internal to the implementation.
+ */
+
+/*
+ * Override the vtable used by the client; useful for mocking certain scenarios.
+ */
+AWS_MQTT_API void aws_mqtt5_client_set_vtable(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_client_vtable *vtable);
+
+/*
+ * Gets the default vtable used by the client. In order to mock something, we start with the default and then
+ * mutate it selectively to achieve the scenario we're interested in.
+ */
+AWS_MQTT_API const struct aws_mqtt5_client_vtable *aws_mqtt5_client_get_default_vtable(void);
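+
+/*
+ * A minimal mocking sketch, assuming the test keeps its vtable copy valid for as long as the client uses it;
+ * s_mock_get_ticks and s_fixture are hypothetical test names:
+ *
+ *     struct aws_mqtt5_client_vtable test_vtable = *aws_mqtt5_client_get_default_vtable();
+ *     test_vtable.get_current_time_fn = s_mock_get_ticks; // drive time from the test
+ *     test_vtable.vtable_user_data = &s_fixture;
+ *     aws_mqtt5_client_set_vtable(client, &test_vtable);
+ */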
+
+/*
+ * Sets the packet id, if necessary, on an operation based on the current pending acks table. The caller is
+ * responsible for adding the operation to the unacked table when the packet has been encoded into an io message.
+ *
+ * There is an argument that the operation should go into the table only on socket write completion, but that breaks
+ * packet id allocation unless an additional, independent table is added, which I'd prefer not to do presently. Also, socket
+ * write completion callbacks can be a bit delayed which could lead to a situation where the response from a local
+ * server could arrive before the write completion runs, which would be a disaster.
+ */
+AWS_MQTT_API int aws_mqtt5_operation_bind_packet_id(
+ struct aws_mqtt5_operation *operation,
+ struct aws_mqtt5_client_operational_state *client_operational_state);
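+
+/*
+ * Sketch of the forward id search implied by next_mqtt_packet_id (an assumption about the approach, not a copy of
+ * the implementation); it assumes unacked_operations_table is keyed by packet id:
+ *
+ *     aws_mqtt5_packet_id_t id = client_operational_state->next_mqtt_packet_id;
+ *     for (uint32_t i = 0; i < UINT16_MAX; ++i, ++id) {
+ *         if (id == 0) {
+ *             id = 1; // 0 is not a legal mqtt packet id
+ *         }
+ *         struct aws_hash_element *elem = NULL;
+ *         aws_hash_table_find(&client_operational_state->unacked_operations_table, &id, &elem);
+ *         if (elem == NULL) {
+ *             break; // id is unused; bind it and set next_mqtt_packet_id to id + 1
+ *         }
+ *     }
+ */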
+
+/*
+ * Initialization and cleanup of the client operational state. Exposed (privately) to enable tests to reuse the
+ * init/cleanup logic used by the client itself.
+ */
+AWS_MQTT_API int aws_mqtt5_client_operational_state_init(
+ struct aws_mqtt5_client_operational_state *client_operational_state,
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_client *client);
+
+AWS_MQTT_API void aws_mqtt5_client_operational_state_clean_up(
+ struct aws_mqtt5_client_operational_state *client_operational_state);
+
+/*
+ * Resets the client's operational state based on a disconnection (from above comment):
+ *
+ * If current_operation
+ * move current_operation to head of queued_operations
+ * Fail all operations in the pending write completion list
+ * Fail, remove, and release operations in queued_operations where they fail the offline queue policy
+ * Iterate unacked_operations:
+ * If qos1+ publish
+ * set dup flag
+ * else
+ * unset/release packet id
+ * Fail, remove, and release unacked_operations if:
+ * (1) They fail the offline queue policy AND
+ * (2) the operation is not Qos 1+ publish
+ */
+AWS_MQTT_API void aws_mqtt5_client_on_disconnection_update_operational_state(struct aws_mqtt5_client *client);
+
+/*
+ * Updates the client's operational state based on a successfully established connection event:
+ *
+ * if rejoined_session:
+ * Move-and-append all non-qos1+-publishes in unacked_operations to the front of queued_operations
+ * Move-and-append remaining operations (qos1+ publishes) to the front of queued_operations
+ * else:
+ * Fail, remove, and release unacked_operations that fail the offline queue policy
+ * Move and append unacked operations to front of queued_operations
+ */
+AWS_MQTT_API void aws_mqtt5_client_on_connection_update_operational_state(struct aws_mqtt5_client *client);
+
+/*
+ * Processes the pending operation queue based on the current state of the associated client
+ */
+AWS_MQTT_API int aws_mqtt5_client_service_operational_state(
+ struct aws_mqtt5_client_operational_state *client_operational_state);
+
+/*
+ * Updates the client's operational state based on the receipt of an ACK packet from the server. In general this
+ * means looking up the original operation in the pending ack table, completing it, removing it from both the
+ * pending ack table and list, and then destroying it.
+ */
+AWS_MQTT_API void aws_mqtt5_client_operational_state_handle_ack(
+ struct aws_mqtt5_client_operational_state *client_operational_state,
+ aws_mqtt5_packet_id_t packet_id,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet_view,
+ int error_code);
+
+/*
+ * Helper function that returns whether or not the current value of the negotiated settings can be used. Primarily
+ * a client state check (received CONNACK, not yet disconnected)
+ */
+AWS_MQTT_API bool aws_mqtt5_client_are_negotiated_settings_valid(const struct aws_mqtt5_client *client);
+
+/*
+ * Initializes the client's flow control state. This state governs the rates and delays between processing
+ * operations and sending packets.
+ */
+AWS_MQTT_API void aws_mqtt5_client_flow_control_state_init(struct aws_mqtt5_client *client);
+
+/*
+ * Resets the client's flow control state to a known baseline. Invoked right after entering the connected state.
+ */
+AWS_MQTT_API void aws_mqtt5_client_flow_control_state_reset(struct aws_mqtt5_client *client);
+
+/*
+ * Updates the client's flow control state based on the receipt of a PUBACK for a Qos1 publish.
+ */
+AWS_MQTT_API void aws_mqtt5_client_flow_control_state_on_puback(struct aws_mqtt5_client *client);
+
+/*
+ * Updates the client's flow control state based on successfully encoding an operation into a channel message.
+ */
+AWS_MQTT_API void aws_mqtt5_client_flow_control_state_on_outbound_operation(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation);
+
+/*
+ * Given the next operation in the queue, examines the flow control state to determine when is the earliest time
+ * it should be processed.
+ */
+AWS_MQTT_API uint64_t aws_mqtt5_client_flow_control_state_get_next_operation_service_time(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation,
+ uint64_t now);
+
+/*
+ * Updates the client's operation statistics based on a change in the state of an operation.
+ */
+AWS_MQTT_API void aws_mqtt5_client_statistics_change_operation_statistic_state(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation,
+ enum aws_mqtt5_operation_statistic_state_flags new_state_flags);
+
+/**
+ * Converts a client state type to a readable description.
+ *
+ * @param state client state
+ * @return short string describing the client state
+ */
+AWS_MQTT_API const char *aws_mqtt5_client_state_to_c_string(enum aws_mqtt5_client_state state);
+
+/*
+ * Temporary, private API to turn on total incoming packet logging at the byte level.
+ */
+AWS_MQTT_API void aws_mqtt5_client_enable_full_packet_logging(struct aws_mqtt5_client *client);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_CLIENT_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_decoder.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_decoder.h
new file mode 100644
index 0000000000..8d6aea92bb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_decoder.h
@@ -0,0 +1,264 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_MQTT_MQTT5_DECODER_H
+#define AWS_MQTT_MQTT5_DECODER_H
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/atomics.h>
+#include <aws/common/byte_buf.h>
+#include <aws/mqtt/private/v5/mqtt5_options_storage.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_mqtt5_client;
+struct aws_mqtt5_decoder;
+struct aws_mqtt5_inbound_topic_alias_resolver;
+
+/**
+ * Overall decoder state. We read the packet type and the remaining length, and then buffer the
+ * entire packet before decoding.
+ */
+enum aws_mqtt5_decoder_state {
+ AWS_MQTT5_DS_READ_PACKET_TYPE,
+ AWS_MQTT5_DS_READ_REMAINING_LENGTH,
+ AWS_MQTT5_DS_READ_PACKET,
+ AWS_MQTT5_DS_FATAL_ERROR,
+};
+
+/*
+ * Basic return value for a number of different decoding operations. Error is always fatal and implies the
+ * connection needs to be torn down.
+ */
+enum aws_mqtt5_decode_result_type {
+ AWS_MQTT5_DRT_MORE_DATA,
+ AWS_MQTT5_DRT_SUCCESS,
+ AWS_MQTT5_DRT_ERROR,
+};
+
+/*
+ * Callbacks the decoder should invoke. We don't invoke functions directly on the client because
+ * we want to test the decoder's correctness in isolation.
+ */
+typedef int(aws_mqtt5_on_packet_received_fn)(
+ enum aws_mqtt5_packet_type type,
+ void *packet_view,
+ void *decoder_callback_user_data);
+
+typedef int(aws_mqtt5_on_publish_payload_data_fn)(
+ struct aws_mqtt5_packet_publish_view *publish_view,
+ struct aws_byte_cursor payload,
+ void *decoder_callback_user_data);
+
+/**
+ * per-packet-type decoding function signature
+ */
+typedef int(aws_mqtt5_decoding_fn)(struct aws_mqtt5_decoder *decoder);
+
+/**
+ * table of decoding functions. Tests use an augmented version that includes decoders for packet types normally
+ * only decoded by an mqtt server.
+ */
+struct aws_mqtt5_decoder_function_table {
+ aws_mqtt5_decoding_fn *decoders_by_packet_type[16];
+};
+
+/**
+ * Basic decoder configuration.
+ */
+struct aws_mqtt5_decoder_options {
+ void *callback_user_data;
+ aws_mqtt5_on_packet_received_fn *on_packet_received;
+ const struct aws_mqtt5_decoder_function_table *decoder_table;
+};
+
+struct aws_mqtt5_decoder {
+ struct aws_allocator *allocator;
+ struct aws_mqtt5_decoder_options options;
+
+ enum aws_mqtt5_decoder_state state;
+
+ /*
+ * decode scratch space: packets may get fully buffered here before decode
+ * Exception:
+ * when the incoming io message buffer contains the entire packet, we decode directly from it instead
+ */
+ struct aws_byte_buf scratch_space;
+
+ /*
+ * packet type and flags
+ */
+ uint8_t packet_first_byte;
+
+ uint32_t remaining_length;
+
+ /*
+ * Packet decoders work from this cursor. It may point to scratch_space (for packets that were delivered
+ * in more than one fragment) or to an io message buffer that contains the entire packet.
+ */
+ struct aws_byte_cursor packet_cursor;
+
+ struct aws_mqtt5_inbound_topic_alias_resolver *topic_alias_resolver;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * One-time initialization for an mqtt5 decoder
+ *
+ * @param decoder decoder to initialize
+ * @param allocator allocator to use for memory allocation
+ * @param options configuration options
+ * @return success/failure
+ */
+AWS_MQTT_API int aws_mqtt5_decoder_init(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_decoder_options *options);
+
+/**
+ * Cleans up an mqtt5 decoder
+ *
+ * @param decoder decoder to clean up
+ */
+AWS_MQTT_API void aws_mqtt5_decoder_clean_up(struct aws_mqtt5_decoder *decoder);
+
+/**
+ * Resets the state of an mqtt5 decoder. Used whenever a new connection is established
+ *
+ * @param decoder decoder to reset state for
+ */
+AWS_MQTT_API void aws_mqtt5_decoder_reset(struct aws_mqtt5_decoder *decoder);
+
+/**
+ * Basic entry point for all incoming mqtt5 data once the basic connection has been established
+ *
+ * @param decoder decoder to decode data with
+ * @param data the data to decode
+ * @return success/failure - failure implies a need to shut down the connection
+ */
+AWS_MQTT_API int aws_mqtt5_decoder_on_data_received(struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor data);
+
+/**
+ * Sets the optional inbound alias resolver that the decoder should use during the lifetime of a connection
+ *
+ * @param decoder decoder to apply inbound topic alias resolution to
+ * @param resolver inbound topic alias resolver
+ */
+AWS_MQTT_API void aws_mqtt5_decoder_set_inbound_topic_alias_resolver(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver);
+
+/**
+ * Default decoding table; tests use an augmented version with decoders for packets that only the server needs to
+ * decode.
+ */
+AWS_MQTT_API extern const struct aws_mqtt5_decoder_function_table *g_aws_mqtt5_default_decoder_table;
+
+AWS_EXTERN_C_END
+
+/* Decode helpers */
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Decodes, if possible, a variable length integer from a cursor. If the decode is successful, the cursor is advanced
+ * past the variable length integer encoding. This can be used both for streaming and non-streaming decode operations.
+ *
+ * @param cursor data to decode from
+ * @param dest where to put a successfully decoded variable length integer
+ * @return the result of attempting the decode: {success, error, not enough data}. Does not set aws_last_error.
+ */
+AWS_MQTT_API enum aws_mqtt5_decode_result_type aws_mqtt5_decode_vli(struct aws_byte_cursor *cursor, uint32_t *dest);
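+
+/*
+ * Sketch of variable length integer decoding per the MQTT 5 encoding rules (7 data bits per byte, high bit set on
+ * continuation, at most 4 bytes); shown for illustration, not copied from the implementation:
+ *
+ *     uint32_t value = 0;
+ *     struct aws_byte_cursor temp = *cursor; // only commit the cursor advance on success
+ *     for (size_t i = 0; i < 4; ++i) {
+ *         uint8_t byte = 0;
+ *         if (!aws_byte_cursor_read_u8(&temp, &byte)) {
+ *             return AWS_MQTT5_DRT_MORE_DATA; // not an error; more data may arrive later
+ *         }
+ *         value |= ((uint32_t)(byte & 0x7F)) << (7 * i);
+ *         if ((byte & 0x80) == 0) {
+ *             *dest = value;
+ *             *cursor = temp;
+ *             return AWS_MQTT5_DRT_SUCCESS;
+ *         }
+ *     }
+ *     return AWS_MQTT5_DRT_ERROR; // a fifth byte would violate the protocol
+ */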
+
+/**
+ * Decodes an MQTT5 user property from a cursor
+ *
+ * @param packet_cursor data to decode from
+ * @param properties property set to add the decoded property to
+ * @return success/failure - failure implies connection termination
+ */
+AWS_MQTT_API int aws_mqtt5_decode_user_property(
+ struct aws_byte_cursor *packet_cursor,
+ struct aws_mqtt5_user_property_set *properties);
+
+AWS_EXTERN_C_END
+
+/* Decode helper macros operating on a cursor */
+
+/*
+ * u8 and u16 decode are a little different in order to support encoded values that are widened to larger storage.
+ * To make that safe, we decode to a local and then assign the local to the final spot. There should be no
+ * complaints as long as the implicit conversion is the same size or wider.
+ *
+ * Some u8 examples include qos (one byte encode -> int-based enum) and various reason codes
+ * Some u16 examples include cursor lengths decoded directly into a cursor's len field (u16 -> size_t)
+ */
+#define AWS_MQTT5_DECODE_U8(cursor_ptr, u8_ptr, error_label) \
+ { \
+ uint8_t decoded_value = 0; \
+ if (!aws_byte_cursor_read_u8((cursor_ptr), (&decoded_value))) { \
+ goto error_label; \
+ } \
+ *u8_ptr = decoded_value; \
+ }
+
+#define AWS_MQTT5_DECODE_U8_OPTIONAL(cursor_ptr, u8_ptr, u8_ptr_ptr, error_label) \
+ AWS_MQTT5_DECODE_U8(cursor_ptr, u8_ptr, error_label); \
+ *(u8_ptr_ptr) = (u8_ptr);
+
+#define AWS_MQTT5_DECODE_U16(cursor_ptr, u16_ptr, error_label) \
+ { \
+ uint16_t decoded_value = 0; \
+ if (!aws_byte_cursor_read_be16((cursor_ptr), (&decoded_value))) { \
+ goto error_label; \
+ } \
+ *u16_ptr = decoded_value; \
+ }
+
+/*
+ * In addition to decoding a length prefix, this also verifies that the length prefix does not exceed the source
+ * cursor length.
+ */
+#define AWS_MQTT5_DECODE_U16_PREFIX(cursor_ptr, u16_ptr, error_label) \
+ AWS_MQTT5_DECODE_U16((cursor_ptr), (u16_ptr), error_label); \
+ if (cursor_ptr->len < *(u16_ptr)) { \
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR); \
+ goto error_label; \
+ }
+
+#define AWS_MQTT5_DECODE_U16_OPTIONAL(cursor_ptr, u16_ptr, u16_ptr_ptr, error_label) \
+ AWS_MQTT5_DECODE_U16((cursor_ptr), u16_ptr, error_label); \
+ *(u16_ptr_ptr) = (u16_ptr);
+
+#define AWS_MQTT5_DECODE_U32(cursor_ptr, u32_ptr, error_label) \
+ if (!aws_byte_cursor_read_be32((cursor_ptr), (u32_ptr))) { \
+ goto error_label; \
+ }
+
+#define AWS_MQTT5_DECODE_U32_OPTIONAL(cursor_ptr, u32_ptr, u32_ptr_ptr, error_label) \
+ AWS_MQTT5_DECODE_U32((cursor_ptr), u32_ptr, error_label); \
+ *(u32_ptr_ptr) = (u32_ptr);
+
+#define AWS_MQTT5_DECODE_VLI(cursor_ptr, u32_ptr, error_label) \
+ if (AWS_MQTT5_DRT_SUCCESS != aws_mqtt5_decode_vli((cursor_ptr), (u32_ptr))) { \
+ goto error_label; \
+ }
+
+/* decodes both the length prefix and the following cursor field */
+#define AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(cursor_ptr, dest_cursor_ptr, error_label) \
+ { \
+ uint16_t prefix_length = 0; \
+ AWS_MQTT5_DECODE_U16_PREFIX((cursor_ptr), &prefix_length, error_label) \
+ \
+ *(dest_cursor_ptr) = aws_byte_cursor_advance((cursor_ptr), prefix_length); \
+ }
+
+#define AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL( \
+ cursor_ptr, dest_cursor_ptr, dest_cursor_ptr_ptr, error_label) \
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR((cursor_ptr), (dest_cursor_ptr), error_label) \
+ *(dest_cursor_ptr_ptr) = (dest_cursor_ptr);
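+
+/*
+ * Usage sketch for the decode macros above; s_decode_example and its field choices are hypothetical:
+ *
+ *     static int s_decode_example(struct aws_byte_cursor *packet_cursor) {
+ *         uint16_t keep_alive = 0;
+ *         struct aws_byte_cursor client_id;
+ *         AWS_MQTT5_DECODE_U16(packet_cursor, &keep_alive, error);
+ *         AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(packet_cursor, &client_id, error);
+ *         return AWS_OP_SUCCESS;
+ *
+ *     error:
+ *         return aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ *     }
+ */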
+
+#endif /* AWS_MQTT_MQTT5_DECODER_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_encoder.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_encoder.h
new file mode 100644
index 0000000000..80dfbc2058
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_encoder.h
@@ -0,0 +1,357 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_MQTT_MQTT5_ENCODER_H
+#define AWS_MQTT_MQTT5_ENCODER_H
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/array_list.h>
+#include <aws/common/byte_buf.h>
+
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_mqtt5_client;
+struct aws_mqtt5_encoder;
+struct aws_mqtt5_outbound_topic_alias_resolver;
+
+/**
+ * We encode packets by looking at all of the packet's values/properties and building a sequence of encoding steps.
+ * Each encoding step is a simple, primitive operation of which there are two types:
+ * (1) encode an integer in some fashion (fixed width or variable length)
+ * (2) encode a raw sequence of bytes (either a cursor or a stream)
+ *
+ * Once the encoding step sequence is constructed, we do the actual encoding by iterating the sequence, performing
+ * the steps. This is interruptible/resumable, so we can perform encodings that span multiple buffers easily.
+ */
+enum aws_mqtt5_encoding_step_type {
+ /* encode a single byte */
+ AWS_MQTT5_EST_U8,
+
+ /* encode a 16 bit unsigned integer in network order */
+ AWS_MQTT5_EST_U16,
+
+ /* encode a 32 bit unsigned integer in network order */
+ AWS_MQTT5_EST_U32,
+
+ /*
+ * encode a 32 bit unsigned integer using MQTT variable length encoding. It is assumed that the 32 bit value has
+ * already been checked against the maximum allowed value for variable length encoding.
+ */
+ AWS_MQTT5_EST_VLI,
+
+ /*
+ * encode an array of bytes as referenced by a cursor. Most of the time this step is paired with either a prefix
+ * specifying the number of bytes or a preceding variable length integer from which the data length can be
+ * computed.
+ */
+ AWS_MQTT5_EST_CURSOR,
+
+ /* encode a stream of bytes. The same context that applies to cursor encoding above also applies here. */
+ AWS_MQTT5_EST_STREAM,
+};
+
+/**
+ * Elemental unit of packet encoding.
+ */
+struct aws_mqtt5_encoding_step {
+ enum aws_mqtt5_encoding_step_type type;
+ union {
+ uint8_t value_u8;
+ uint16_t value_u16;
+ uint32_t value_u32;
+ struct aws_byte_cursor value_cursor;
+ struct aws_input_stream *value_stream;
+ } value;
+};
+
+/**
+ * signature of a function that takes a view assumed to be a specific packet type and appends the encoding
+ * steps necessary to encode that packet into the encoder
+ */
+typedef int(aws_mqtt5_encode_begin_packet_type_fn)(struct aws_mqtt5_encoder *encoder, const void *view);
+
+/**
+ * Per-packet-type table of encoding functions
+ */
+struct aws_mqtt5_encoder_function_table {
+ aws_mqtt5_encode_begin_packet_type_fn *encoders_by_packet_type[16];
+};
+
+/**
+ * Configuration options for an mqtt5 encoder. Everything is optional at this time.
+ */
+struct aws_mqtt5_encoder_options {
+ struct aws_mqtt5_client *client;
+ const struct aws_mqtt5_encoder_function_table *encoders;
+};
+
+/**
+ * An encoder is just a list of steps and a current location for the encoding process within that list.
+ */
+struct aws_mqtt5_encoder {
+ struct aws_mqtt5_encoder_options config;
+
+ struct aws_array_list encoding_steps;
+ size_t current_encoding_step_index;
+
+ struct aws_mqtt5_outbound_topic_alias_resolver *topic_alias_resolver;
+};
+
+/**
+ * Encoding proceeds until one of the following occurs:
+ * (1) a fatal error is reached
+ * (2) the steps are done
+ * (3) no room is left in the buffer
+ */
+enum aws_mqtt5_encoding_result {
+ /*
+ * A fatal error state was reached during encoding. This forces a connection shut down with no DISCONNECT.
+ * An error can arise from several sources:
+ * (1) Bug in the encoder (length calculations, step calculations)
+ * (2) Bug in the view validation logic that is assumed to have caught any illegal/forbidden situations like
+ * values-too-big, etc...
+ * (3) System error when reading from a stream that is more than just a memory buffer
+ *
+ * Regardless of the origin, the connection is in an unusable state once this happens.
+ *
+ * If the encode function returns this value, aws last error will have an error value in it
+ */
+ AWS_MQTT5_ER_ERROR,
+
+ /* All encoding steps in the encoder have been completed. The encoder is ready for a new packet. */
+ AWS_MQTT5_ER_FINISHED,
+
+ /*
+ * The buffer has been filled as closely to full as possible and there are still encoding steps remaining that
+ * have not been completed. It is technically possible to hit a permanent out-of-room state if the buffer size
+ * is less than 4. Don't do that.
+ */
+ AWS_MQTT5_ER_OUT_OF_ROOM,
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes an mqtt5 encoder
+ *
+ * @param encoder encoder to initialize
+ * @param allocator allocator to use for all memory allocation
+ * @param options encoder configuration options to use
+ * @return success/failure
+ */
+AWS_MQTT_API int aws_mqtt5_encoder_init(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_encoder_options *options);
+
+/**
+ * Cleans up an mqtt5 encoder
+ *
+ * @param encoder encoder to free up all resources for
+ */
+AWS_MQTT_API void aws_mqtt5_encoder_clean_up(struct aws_mqtt5_encoder *encoder);
+
+/**
+ * Resets the state of an mqtt5 encoder. Ok to call after a failed call to a packet _begin_packet() function. Not
+ * ok to call after a failed call to aws_mqtt5_encoder_encode_to_buffer()
+ *
+ * @param encoder encoder to reset
+ */
+AWS_MQTT_API void aws_mqtt5_encoder_reset(struct aws_mqtt5_encoder *encoder);
+
+/**
+ * Adds all of the primitive encoding steps necessary to encode an MQTT5 packet
+ *
+ * @param encoder encoder to add encoding steps to
+ * @param packet_type type of packet to encode
+ * @param packet_view view into the corresponding packet type
+ * @return success/failure
+ */
+AWS_MQTT_API int aws_mqtt5_encoder_append_packet_encoding(
+ struct aws_mqtt5_encoder *encoder,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet_view);
+
+/*
+ * We intend that the client implementation only submits one packet at a time to the encoder, corresponding to the
+ * current operation of the client. This is an important property to maintain to allow us to correlate socket
+ * completions with packets/operations sent. It's the client's responsibility though; the encoder is dumb.
+ *
+ * The client will greedily use as much of an iomsg's buffer as it can if there are multiple operations (packets)
+ * queued and there is sufficient room.
+ */
+
+/**
+ * Asks the encoder to encode as much as it possibly can into the supplied buffer.
+ *
+ * @param encoder encoder to do the encoding
+ * @param buffer where to encode into
+ * @return result of the encoding process. aws last error will be set appropriately.
+ */
+AWS_MQTT_API enum aws_mqtt5_encoding_result aws_mqtt5_encoder_encode_to_buffer(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_byte_buf *buffer);
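+
+/*
+ * Usage sketch of the encode flow. Assumptions: AWS_MQTT5_PT_PINGREQ comes from mqtt5_types.h, the PINGREQ encoder
+ * ignores its view, and `buffer` is a caller-provided aws_byte_buf; io message plumbing is elided:
+ *
+ *     if (aws_mqtt5_encoder_append_packet_encoding(&encoder, AWS_MQTT5_PT_PINGREQ, NULL)) {
+ *         return AWS_OP_ERR;
+ *     }
+ *
+ *     enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_OUT_OF_ROOM;
+ *     while (result == AWS_MQTT5_ER_OUT_OF_ROOM) {
+ *         result = aws_mqtt5_encoder_encode_to_buffer(&encoder, &buffer);
+ *         // hand the filled buffer downstream, acquire/reset a fresh buffer, and continue until FINISHED
+ *     }
+ *     return (result == AWS_MQTT5_ER_FINISHED) ? AWS_OP_SUCCESS : AWS_OP_ERR;
+ */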
+
+/**
+ * Sets the outbound alias resolver that the encoder should use during the lifetime of a connection
+ *
+ * @param encoder encoder to apply outbound topic alias resolution to
+ * @param resolver outbound topic alias resolver
+ */
+AWS_MQTT_API void aws_mqtt5_encoder_set_outbound_topic_alias_resolver(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver);
+
+/**
+ * Default encoder table. Tests copy it and augment with additional functions in order to do round-trip encode-decode
+ * tests for packets that are only encoded on the server.
+ */
+AWS_MQTT_API extern const struct aws_mqtt5_encoder_function_table *g_aws_mqtt5_encoder_default_function_table;
+
+AWS_EXTERN_C_END
+
+/******************************************************************************************************************
+ * Encoding helper functions and macros - placed in header so that test-only encoding has access
+ ******************************************************************************************************************/
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Utility function to calculate the encoded packet size of a given packet view. Used to validate operations
+ * against the server's maximum packet size.
+ *
+ * @param packet_type type of packet the view represents
+ * @param packet_view packet view
+ * @param packet_size output parameter, set if the size was successfully calculated
+ * @return success/failure
+ */
+AWS_MQTT_API int aws_mqtt5_packet_view_get_encoded_size(
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet_view,
+ size_t *packet_size);
+
+/**
+ * Encodes a variable length integer to a buffer. Assumes the buffer has been checked for sufficient room (this
+ * is not a streaming/resumable operation)
+ *
+ * @param buf buffer to encode to
+ * @param value value to encode
+ * @return success/failure
+ */
+AWS_MQTT_API int aws_mqtt5_encode_variable_length_integer(struct aws_byte_buf *buf, uint32_t value);
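+
+/*
+ * Sketch of the MQTT 5 variable length encoding (low 7 bits per byte, high bit marks continuation); illustrative
+ * only, and it assumes value has already been validated against AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER:
+ *
+ *     do {
+ *         uint8_t byte = (uint8_t)(value & 0x7F);
+ *         value >>= 7;
+ *         if (value != 0) {
+ *             byte |= 0x80;
+ *         }
+ *         if (!aws_byte_buf_write_u8(buf, byte)) {
+ *             return AWS_OP_ERR;
+ *         }
+ *     } while (value != 0);
+ *     return AWS_OP_SUCCESS;
+ */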
+
+/**
+ * Computes how many bytes are necessary to encode a value as a variable length integer
+ * @param value value to encode
+ * @param encode_size output parameter for the encoding size
+ * @return success/failure, where failure exclusively means the value is illegal (too large to encode)
+ */
+AWS_MQTT_API int aws_mqtt5_get_variable_length_encode_size(size_t value, size_t *encode_size);
+
+AWS_MQTT_API void aws_mqtt5_encoder_push_step_u8(struct aws_mqtt5_encoder *encoder, uint8_t value);
+
+AWS_MQTT_API void aws_mqtt5_encoder_push_step_u16(struct aws_mqtt5_encoder *encoder, uint16_t value);
+
+AWS_MQTT_API void aws_mqtt5_encoder_push_step_u32(struct aws_mqtt5_encoder *encoder, uint32_t value);
+
+AWS_MQTT_API int aws_mqtt5_encoder_push_step_vli(struct aws_mqtt5_encoder *encoder, uint32_t value);
+
+AWS_MQTT_API void aws_mqtt5_encoder_push_step_cursor(struct aws_mqtt5_encoder *encoder, struct aws_byte_cursor value);
+
+AWS_MQTT_API size_t aws_mqtt5_compute_user_property_encode_length(
+ const struct aws_mqtt5_user_property *properties,
+ size_t user_property_count);
+
+AWS_MQTT_API void aws_mqtt5_add_user_property_encoding_steps(
+ struct aws_mqtt5_encoder *encoder,
+ const struct aws_mqtt5_user_property *user_properties,
+ size_t user_property_count);
+
+AWS_EXTERN_C_END
+
+/* macros to simplify encoding step list construction */
+
+#define ADD_ENCODE_STEP_U8(encoder, value) aws_mqtt5_encoder_push_step_u8(encoder, (uint8_t)(value))
+#define ADD_ENCODE_STEP_U16(encoder, value) aws_mqtt5_encoder_push_step_u16(encoder, (uint16_t)(value))
+#define ADD_ENCODE_STEP_U32(encoder, value) aws_mqtt5_encoder_push_step_u32(encoder, (uint32_t)(value))
+#define ADD_ENCODE_STEP_CURSOR(encoder, cursor) aws_mqtt5_encoder_push_step_cursor(encoder, (cursor))
+
+#define ADD_ENCODE_STEP_VLI(encoder, value) \
+ if (aws_mqtt5_encoder_push_step_vli(encoder, (value))) { \
+ return AWS_OP_ERR; \
+ }
+
+#define ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, cursor) \
+ { \
+ aws_mqtt5_encoder_push_step_u16(encoder, (uint16_t)((cursor).len)); \
+ aws_mqtt5_encoder_push_step_cursor(encoder, (cursor)); \
+ }
+
+#define ADD_ENCODE_STEP_OPTIONAL_LENGTH_PREFIXED_CURSOR(encoder, cursor_ptr) \
+ if (cursor_ptr != NULL) { \
+ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, *cursor_ptr); \
+ }
+
+/* Property-oriented macros for encode steps. Properties have an additional prefix byte saying what their type is. */
+
+#define ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(encoder, property_value, value_ptr) \
+ if ((value_ptr) != NULL) { \
+ ADD_ENCODE_STEP_U8(encoder, property_value); \
+ ADD_ENCODE_STEP_U8(encoder, *(value_ptr)); \
+ }
+
+#define ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(encoder, property_value, value_ptr) \
+ if ((value_ptr) != NULL) { \
+ ADD_ENCODE_STEP_U8(encoder, property_value); \
+ ADD_ENCODE_STEP_U16(encoder, *(value_ptr)); \
+ }
+
+#define ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(encoder, property_value, value_ptr) \
+ if ((value_ptr) != NULL) { \
+ ADD_ENCODE_STEP_U8(encoder, property_value); \
+ ADD_ENCODE_STEP_U32(encoder, *(value_ptr)); \
+ }
+
+#define ADD_ENCODE_STEP_OPTIONAL_VLI_PROPERTY(encoder, property_value, value_ptr) \
+ if ((value_ptr) != NULL) { \
+ ADD_ENCODE_STEP_U8(encoder, property_value); \
+ ADD_ENCODE_STEP_VLI(encoder, *(value_ptr)); \
+ }
+
+#define ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, property_type, cursor_ptr) \
+ if ((cursor_ptr) != NULL) { \
+ ADD_ENCODE_STEP_U8(encoder, property_type); \
+ ADD_ENCODE_STEP_U16(encoder, (cursor_ptr)->len); \
+ ADD_ENCODE_STEP_CURSOR(encoder, *(cursor_ptr)); \
+ }
+
+/*
+ * Macros to simplify packet size calculations, which are significantly complicated by mqtt5's many optional
+ * properties.
+ */
+
+#define ADD_OPTIONAL_U8_PROPERTY_LENGTH(property_ptr, length) \
+ if ((property_ptr) != NULL) { \
+ (length) += 2; \
+ }
+
+#define ADD_OPTIONAL_U16_PROPERTY_LENGTH(property_ptr, length) \
+ if ((property_ptr) != NULL) { \
+ (length) += 3; \
+ }
+
+#define ADD_OPTIONAL_U32_PROPERTY_LENGTH(property_ptr, length) \
+ if ((property_ptr) != NULL) { \
+ (length) += 5; \
+ }
+
+#define ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(property_ptr, length) \
+ if ((property_ptr) != NULL) { \
+ (length) += 3 + ((property_ptr)->len); \
+ }
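+
+/*
+ * Length-calculation sketch using the macros above; `view` and its fields are hypothetical:
+ *
+ *     size_t property_length = 0;
+ *     ADD_OPTIONAL_U32_PROPERTY_LENGTH(view->session_expiry_interval_seconds, property_length); // +5 if present
+ *     ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(view->reason_string, property_length);                // +3 + len if present
+ *     property_length +=
+ *         aws_mqtt5_compute_user_property_encode_length(view->user_properties, view->user_property_count);
+ */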
+
+#endif /* AWS_MQTT_MQTT5_ENCODER_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_options_storage.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_options_storage.h
new file mode 100644
index 0000000000..dcc07d399d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_options_storage.h
@@ -0,0 +1,343 @@
+#ifndef AWS_MQTT_MQTT5_OPERATION_H
+#define AWS_MQTT_MQTT5_OPERATION_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/linked_list.h>
+#include <aws/common/logging.h>
+#include <aws/common/ref_count.h>
+#include <aws/http/proxy.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/mqtt/v5/mqtt5_client.h>
+#include <aws/mqtt/v5/mqtt5_packet_storage.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_client_bootstrap;
+struct aws_mqtt5_client;
+struct aws_mqtt5_client_options;
+struct aws_mqtt5_operation;
+struct aws_string;
+
+/* Basic vtable for all mqtt operations. Implementations are per-packet type */
+struct aws_mqtt5_operation_vtable {
+ void (*aws_mqtt5_operation_completion_fn)(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *completion_view);
+
+ void (
+ *aws_mqtt5_operation_set_packet_id_fn)(struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id);
+
+ aws_mqtt5_packet_id_t *(*aws_mqtt5_operation_get_packet_id_address_fn)(const struct aws_mqtt5_operation *operation);
+
+ int (*aws_mqtt5_operation_validate_vs_connection_settings_fn)(
+ const void *operation_packet_view,
+ const struct aws_mqtt5_client *client);
+};
+
+/* Flags that indicate the way in which an operation is currently affecting the statistics of the client */
+enum aws_mqtt5_operation_statistic_state_flags {
+ /* The operation is not affecting the client's statistics at all */
+ AWS_MQTT5_OSS_NONE = 0,
+
+ /* The operation is affecting the client's "incomplete operation" statistics */
+ AWS_MQTT5_OSS_INCOMPLETE = 1 << 0,
+
+ /* The operation is affecting the client's "unacked operation" statistics */
+ AWS_MQTT5_OSS_UNACKED = 1 << 1,
+};
+
+/**
+ * This is the base structure for all mqtt5 operations. It includes the type, a ref count, timeout timepoint,
+ * and list management.
+ */
+struct aws_mqtt5_operation {
+ const struct aws_mqtt5_operation_vtable *vtable;
+ struct aws_ref_count ref_count;
+ uint64_t ack_timeout_timepoint_ns;
+ struct aws_linked_list_node node;
+
+ enum aws_mqtt5_packet_type packet_type;
+ const void *packet_view;
+
+ /* How this operation is currently affecting the statistics of the client */
+ enum aws_mqtt5_operation_statistic_state_flags statistic_state_flags;
+
+ /* Size of the MQTT packet this operation represents */
+ size_t packet_size;
+
+ void *impl;
+};
+
+struct aws_mqtt5_operation_connect {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_connect_storage options_storage;
+};
+
+struct aws_mqtt5_operation_publish {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_publish_storage options_storage;
+
+ struct aws_mqtt5_publish_completion_options completion_options;
+};
+
+struct aws_mqtt5_operation_puback {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_puback_storage options_storage;
+};
+
+struct aws_mqtt5_operation_disconnect {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_disconnect_storage options_storage;
+
+ struct aws_mqtt5_disconnect_completion_options external_completion_options;
+ struct aws_mqtt5_disconnect_completion_options internal_completion_options;
+};
+
+struct aws_mqtt5_operation_subscribe {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_subscribe_storage options_storage;
+
+ struct aws_mqtt5_subscribe_completion_options completion_options;
+};
+
+struct aws_mqtt5_operation_unsubscribe {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_unsubscribe_storage options_storage;
+
+ struct aws_mqtt5_unsubscribe_completion_options completion_options;
+};
+
+struct aws_mqtt5_operation_pingreq {
+ struct aws_mqtt5_operation base;
+ struct aws_allocator *allocator;
+};
+
+struct aws_mqtt5_client_options_storage {
+ struct aws_allocator *allocator;
+
+ struct aws_string *host_name;
+ uint16_t port;
+ struct aws_client_bootstrap *bootstrap;
+ struct aws_socket_options socket_options;
+
+ struct aws_tls_connection_options tls_options;
+ struct aws_tls_connection_options *tls_options_ptr;
+
+ struct aws_http_proxy_options http_proxy_options;
+ struct aws_http_proxy_config *http_proxy_config;
+
+ aws_mqtt5_transform_websocket_handshake_fn *websocket_handshake_transform;
+ void *websocket_handshake_transform_user_data;
+
+ aws_mqtt5_publish_received_fn *publish_received_handler;
+ void *publish_received_handler_user_data;
+
+ enum aws_mqtt5_client_session_behavior_type session_behavior;
+ enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_and_flow_control_options;
+ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior;
+
+ enum aws_exponential_backoff_jitter_mode retry_jitter_mode;
+ uint64_t min_reconnect_delay_ms;
+ uint64_t max_reconnect_delay_ms;
+ uint64_t min_connected_time_to_reset_reconnect_delay_ms;
+
+ uint64_t ack_timeout_seconds;
+
+ uint32_t ping_timeout_ms;
+ uint32_t connack_timeout_ms;
+
+ struct aws_mqtt5_client_topic_alias_options topic_aliasing_options;
+
+ struct aws_mqtt5_packet_connect_storage connect;
+
+ aws_mqtt5_client_connection_event_callback_fn *lifecycle_event_handler;
+ void *lifecycle_event_handler_user_data;
+
+ aws_mqtt5_client_termination_completion_fn *client_termination_handler;
+ void *client_termination_handler_user_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Operation base */
+
+AWS_MQTT_API struct aws_mqtt5_operation *aws_mqtt5_operation_acquire(struct aws_mqtt5_operation *operation);
+
+AWS_MQTT_API struct aws_mqtt5_operation *aws_mqtt5_operation_release(struct aws_mqtt5_operation *operation);
+
+AWS_MQTT_API void aws_mqtt5_operation_complete(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *associated_view);
+
+AWS_MQTT_API void aws_mqtt5_operation_set_packet_id(
+ struct aws_mqtt5_operation *operation,
+ aws_mqtt5_packet_id_t packet_id);
+
+AWS_MQTT_API aws_mqtt5_packet_id_t aws_mqtt5_operation_get_packet_id(const struct aws_mqtt5_operation *operation);
+
+AWS_MQTT_API aws_mqtt5_packet_id_t *aws_mqtt5_operation_get_packet_id_address(
+ const struct aws_mqtt5_operation *operation);
+
+AWS_MQTT_API int aws_mqtt5_operation_validate_vs_connection_settings(
+ const struct aws_mqtt5_operation *operation,
+ const struct aws_mqtt5_client *client);
+
+/* Connect */
+
+AWS_MQTT_API struct aws_mqtt5_operation_connect *aws_mqtt5_operation_connect_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_connect_view *connect_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_connect_view_validate(const struct aws_mqtt5_packet_connect_view *connect_view);
+
+AWS_MQTT_API void aws_mqtt5_packet_connect_view_log(
+ const struct aws_mqtt5_packet_connect_view *connect_view,
+ enum aws_log_level level);
+
+/* Connack */
+
+AWS_MQTT_API void aws_mqtt5_packet_connack_view_log(
+ const struct aws_mqtt5_packet_connack_view *connack_view,
+ enum aws_log_level level);
+
+/* Disconnect */
+
+AWS_MQTT_API struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_options,
+ const struct aws_mqtt5_disconnect_completion_options *external_completion_options,
+ const struct aws_mqtt5_disconnect_completion_options *internal_completion_options);
+
+AWS_MQTT_API struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_acquire(
+ struct aws_mqtt5_operation_disconnect *disconnect_op);
+
+AWS_MQTT_API struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_release(
+ struct aws_mqtt5_operation_disconnect *disconnect_op);
+
+AWS_MQTT_API int aws_mqtt5_packet_disconnect_view_validate(
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view);
+
+AWS_MQTT_API void aws_mqtt5_packet_disconnect_view_log(
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view,
+ enum aws_log_level level);
+
+/* Publish */
+
+AWS_MQTT_API struct aws_mqtt5_operation_publish *aws_mqtt5_operation_publish_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_publish_view *publish_options,
+ const struct aws_mqtt5_publish_completion_options *completion_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_publish_view_validate(const struct aws_mqtt5_packet_publish_view *publish_view);
+
+AWS_MQTT_API int aws_mqtt5_packet_publish_view_validate_vs_iot_core(
+ const struct aws_mqtt5_packet_publish_view *publish_view);
+
+AWS_MQTT_API void aws_mqtt5_packet_publish_view_log(
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ enum aws_log_level level);
+
+/* Puback */
+
+AWS_MQTT_API struct aws_mqtt5_operation_puback *aws_mqtt5_operation_puback_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_puback_view *puback_options);
+
+AWS_MQTT_API void aws_mqtt5_packet_puback_view_log(
+ const struct aws_mqtt5_packet_puback_view *puback_view,
+ enum aws_log_level level);
+
+/* Subscribe */
+
+AWS_MQTT_API struct aws_mqtt5_operation_subscribe *aws_mqtt5_operation_subscribe_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_options,
+ const struct aws_mqtt5_subscribe_completion_options *completion_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_subscribe_view_validate(const struct aws_mqtt5_packet_subscribe_view *subscribe_view);
+
+AWS_MQTT_API int aws_mqtt5_packet_subscribe_view_validate_vs_iot_core(
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_view);
+
+AWS_MQTT_API void aws_mqtt5_packet_subscribe_view_log(
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_view,
+ enum aws_log_level level);
+
+/* Suback */
+
+AWS_MQTT_API void aws_mqtt5_packet_suback_view_log(
+ const struct aws_mqtt5_packet_suback_view *suback_view,
+ enum aws_log_level level);
+
+/* Unsubscribe */
+
+AWS_MQTT_API struct aws_mqtt5_operation_unsubscribe *aws_mqtt5_operation_unsubscribe_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options,
+ const struct aws_mqtt5_unsubscribe_completion_options *completion_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_view_validate(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view);
+
+AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_view_validate_vs_iot_core(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view);
+
+AWS_MQTT_API void aws_mqtt5_packet_unsubscribe_view_log(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view,
+ enum aws_log_level level);
+
+/* Unsuback */
+
+AWS_MQTT_API void aws_mqtt5_packet_unsuback_view_log(
+ const struct aws_mqtt5_packet_unsuback_view *unsuback_view,
+ enum aws_log_level level);
+
+/* PINGREQ */
+
+AWS_MQTT_API struct aws_mqtt5_operation_pingreq *aws_mqtt5_operation_pingreq_new(struct aws_allocator *allocator);
+
+/* client */
+
+AWS_MQTT_API
+struct aws_mqtt5_client_options_storage *aws_mqtt5_client_options_storage_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client_options *options);
+
+AWS_MQTT_API
+void aws_mqtt5_client_options_storage_destroy(struct aws_mqtt5_client_options_storage *options_storage);
+
+AWS_MQTT_API int aws_mqtt5_client_options_validate(const struct aws_mqtt5_client_options *client_options);
+
+AWS_MQTT_API void aws_mqtt5_client_options_storage_log(
+ const struct aws_mqtt5_client_options_storage *options_storage,
+ enum aws_log_level level);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_OPERATION_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_topic_alias.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_topic_alias.h
new file mode 100644
index 0000000000..8c044d4efe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_topic_alias.h
@@ -0,0 +1,66 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_MQTT_MQTT5_TOPIC_ALIAS_H
+#define AWS_MQTT_MQTT5_TOPIC_ALIAS_H
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/array_list.h>
+#include <aws/mqtt/v5/mqtt5_client.h>
+
+/* outbound resolvers are polymorphic; implementations are completely internal */
+struct aws_mqtt5_outbound_topic_alias_resolver;
+
+/* there are only two possibilities for inbound resolution: on or off */
+struct aws_mqtt5_inbound_topic_alias_resolver {
+ struct aws_allocator *allocator;
+
+ struct aws_array_list topic_aliases;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_init(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_inbound_topic_alias_resolver_clean_up(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver);
+
+AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_reset(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ uint16_t cache_size);
+
+AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ uint16_t alias,
+ struct aws_byte_cursor *topic_out);
+
+AWS_MQTT_API int aws_mqtt5_inbound_topic_alias_resolver_register_alias(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ uint16_t alias,
+ struct aws_byte_cursor topic);
+
+AWS_MQTT_API struct aws_mqtt5_outbound_topic_alias_resolver *aws_mqtt5_outbound_topic_alias_resolver_new(
+ struct aws_allocator *allocator,
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_alias_behavior);
+
+AWS_MQTT_API void aws_mqtt5_outbound_topic_alias_resolver_destroy(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver);
+
+AWS_MQTT_API int aws_mqtt5_outbound_topic_alias_resolver_reset(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ uint16_t topic_alias_maximum);
+
+AWS_MQTT_API int aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ uint16_t *topic_alias_out,
+ struct aws_byte_cursor *topic_out);
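+
+/*
+ * Inbound resolution usage sketch; `resolver` and `topic` are hypothetical locals and error handling is elided:
+ *
+ *     // PUBLISH arrived with topic alias 5 and a non-empty topic: remember the pairing
+ *     aws_mqtt5_inbound_topic_alias_resolver_register_alias(&resolver, 5, topic);
+ *
+ *     // later PUBLISH arrived with alias 5 and an empty topic: recover the full topic
+ *     struct aws_byte_cursor resolved_topic;
+ *     if (aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(&resolver, 5, &resolved_topic)) {
+ *         // unknown alias - treat as a protocol error and tear down the connection
+ *     }
+ */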
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_TOPIC_ALIAS_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_utils.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_utils.h
new file mode 100644
index 0000000000..be4c8ba2cf
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/mqtt5_utils.h
@@ -0,0 +1,363 @@
+#ifndef AWS_MQTT_MQTT5_UTILS_H
+#define AWS_MQTT_MQTT5_UTILS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/mqtt/v5/mqtt5_client.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_byte_buf;
+struct aws_mqtt5_negotiated_settings;
+
+#define AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER 268435455
+#define AWS_MQTT5_MAXIMUM_PACKET_SIZE (5 + AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER)
+#define AWS_MQTT5_RECEIVE_MAXIMUM 65535
+#define AWS_MQTT5_PINGREQ_ENCODED_SIZE 2
+
+/* property type codes */
+#define AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR ((uint8_t)1)
+#define AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL ((uint8_t)2)
+#define AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE ((uint8_t)3)
+#define AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC ((uint8_t)8)
+#define AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA ((uint8_t)9)
+#define AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER ((uint8_t)11)
+#define AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL ((uint8_t)17)
+#define AWS_MQTT5_PROPERTY_TYPE_ASSIGNED_CLIENT_IDENTIFIER ((uint8_t)18)
+#define AWS_MQTT5_PROPERTY_TYPE_SERVER_KEEP_ALIVE ((uint8_t)19)
+#define AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD ((uint8_t)21)
+#define AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA ((uint8_t)22)
+#define AWS_MQTT5_PROPERTY_TYPE_REQUEST_PROBLEM_INFORMATION ((uint8_t)23)
+#define AWS_MQTT5_PROPERTY_TYPE_WILL_DELAY_INTERVAL ((uint8_t)24)
+#define AWS_MQTT5_PROPERTY_TYPE_REQUEST_RESPONSE_INFORMATION ((uint8_t)25)
+#define AWS_MQTT5_PROPERTY_TYPE_RESPONSE_INFORMATION ((uint8_t)26)
+#define AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE ((uint8_t)28)
+#define AWS_MQTT5_PROPERTY_TYPE_REASON_STRING ((uint8_t)31)
+#define AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM ((uint8_t)33)
+#define AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM ((uint8_t)34)
+#define AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS ((uint8_t)35)
+#define AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_QOS ((uint8_t)36)
+#define AWS_MQTT5_PROPERTY_TYPE_RETAIN_AVAILABLE ((uint8_t)37)
+#define AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY ((uint8_t)38)
+#define AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE ((uint8_t)39)
+#define AWS_MQTT5_PROPERTY_TYPE_WILDCARD_SUBSCRIPTIONS_AVAILABLE ((uint8_t)40)
+#define AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIERS_AVAILABLE ((uint8_t)41)
+#define AWS_MQTT5_PROPERTY_TYPE_SHARED_SUBSCRIPTIONS_AVAILABLE ((uint8_t)42)
+
+/* decode/encode bit masks and positions */
+#define AWS_MQTT5_CONNECT_FLAGS_WILL_BIT (1U << 2)
+#define AWS_MQTT5_CONNECT_FLAGS_CLEAN_START_BIT (1U << 1)
+#define AWS_MQTT5_CONNECT_FLAGS_USER_NAME_BIT (1U << 7)
+#define AWS_MQTT5_CONNECT_FLAGS_PASSWORD_BIT (1U << 6)
+#define AWS_MQTT5_CONNECT_FLAGS_WILL_RETAIN_BIT (1U << 5)
+
+#define AWS_MQTT5_CONNECT_FLAGS_WILL_QOS_BIT_POSITION 3
+#define AWS_MQTT5_CONNECT_FLAGS_WILL_QOS_BIT_MASK 0x03
+
+#define AWS_MQTT5_SUBSCRIBE_FLAGS_NO_LOCAL (1U << 2)
+#define AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_AS_PUBLISHED (1U << 3)
+
+#define AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_HANDLING_TYPE_BIT_POSITION 4
+#define AWS_MQTT5_SUBSCRIBE_FLAGS_RETAIN_HANDLING_TYPE_BIT_MASK 0x03
+#define AWS_MQTT5_SUBSCRIBE_FLAGS_QOS_BIT_POSITION 0
+#define AWS_MQTT5_SUBSCRIBE_FLAGS_QOS_BIT_MASK 0x03
+
+/* Static AWS IoT Core Limit/Quota Values */
+#define AWS_IOT_CORE_MAXIMUM_CLIENT_ID_LENGTH 128
+#define AWS_IOT_CORE_MAXIMUM_TOPIC_LENGTH 256
+#define AWS_IOT_CORE_MAXIMUM_TOPIC_SEGMENTS 8
+#define AWS_IOT_CORE_MAXIMUM_SUSBCRIPTIONS_PER_SUBSCRIBE 8
+
+/* Dynamic IoT Core Limits */
+#define AWS_IOT_CORE_PUBLISH_PER_SECOND_LIMIT 100
+#define AWS_IOT_CORE_THROUGHPUT_LIMIT (512 * 1024)
+
+/* Client configuration defaults when parameter left zero */
+#define AWS_MQTT5_DEFAULT_SOCKET_CONNECT_TIMEOUT_MS 10000
+#define AWS_MQTT5_CLIENT_DEFAULT_MIN_RECONNECT_DELAY_MS 1000
+#define AWS_MQTT5_CLIENT_DEFAULT_MAX_RECONNECT_DELAY_MS 120000
+#define AWS_MQTT5_CLIENT_DEFAULT_MIN_CONNECTED_TIME_TO_RESET_RECONNECT_DELAY_MS 30000
+#define AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS 30000
+#define AWS_MQTT5_CLIENT_DEFAULT_CONNACK_TIMEOUT_MS 20000
+#define AWS_MQTT5_CLIENT_DEFAULT_OPERATION_TIMEOUNT_SECONDS 60
+#define AWS_MQTT5_CLIENT_DEFAULT_INBOUND_TOPIC_ALIAS_CACHE_SIZE 25
+#define AWS_MQTT5_CLIENT_DEFAULT_OUTBOUND_TOPIC_ALIAS_CACHE_SIZE 25
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * CONNECT packet MQTT5 prefix which includes "MQTT" encoded as a utf-8 string followed by the protocol number (5)
+ *
+ * {0x00, 0x04, "MQTT", 0x05}
+ */
+AWS_MQTT_API extern struct aws_byte_cursor g_aws_mqtt5_connect_protocol_cursor;
+
+/**
+ * Validates a utf-8 string against the mqtt5 spec's requirements
+ *
+ * @param text text to validate
+ * @return AWS_OP_SUCCESS if the text is valid, otherwise AWS_OP_ERR
+ */
+AWS_MQTT_API int aws_mqtt5_validate_utf8_text(struct aws_byte_cursor text);
+
+/**
+ * Simple helper function to compute the first byte of an MQTT packet encoding as a function of the 4-bit flags
+ * and the packet type.
+ *
+ * @param packet_type type of MQTT packet
+ * @param flags 4-bit wide flags, specific to each packet type, 0-valued for most
+ * @return the expected/required first byte of a packet of that type with flags set
+ */
+AWS_MQTT_API uint8_t aws_mqtt5_compute_fixed_header_byte1(enum aws_mqtt5_packet_type packet_type, uint8_t flags);
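+
+/*
+ * For reference, the MQTT fixed header packs the packet type into the high nibble of the first byte and the
+ * packet-type-specific flags into the low nibble, i.e. roughly (packet_type << 4) | (flags & 0x0F); for most
+ * packet types the flags nibble is zero.
+ */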
+
+AWS_MQTT_API void aws_mqtt5_negotiated_settings_log(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ enum aws_log_level level);
+
+/**
+ * Assigns and stores a client id for use on CONNECT
+ *
+ * @param negotiated_settings settings to apply client id to
+ * @param client_id client id to set
+ */
+AWS_MQTT_API int aws_mqtt5_negotiated_settings_apply_client_id(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_byte_cursor *client_id);
+
+/**
+ * Resets negotiated_settings to defaults reconciled with client set properties.
+ * Called on init of mqtt5 Client and just prior to a CONNECT.
+ *
+ * @param negotiated_settings struct containing settings to be set
+ * @param packet_connect_view Read-only snapshot of a CONNECT packet
+ * @return void
+ */
+AWS_MQTT_API void aws_mqtt5_negotiated_settings_reset(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_mqtt5_packet_connect_view *packet_connect_view);
+
+/**
+ * Checks properties received from the server CONNACK and reconciles them with negotiated_settings
+ *
+ * @param negotiated_settings struct containing settings to be set
+ * @param connack_data Read-only snapshot of a CONNACK packet
+ * @return void
+ */
+AWS_MQTT_API void aws_mqtt5_negotiated_settings_apply_connack(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_mqtt5_packet_connack_view *connack_data);
+
+/**
+ * Converts a disconnect reason code into the Reason Code Name, as it appears in the mqtt5 spec.
+ *
+ * @param reason_code a disconnect reason code
+ * @return name associated with the reason code
+ */
+AWS_MQTT_API const char *aws_mqtt5_disconnect_reason_code_to_c_string(
+ enum aws_mqtt5_disconnect_reason_code reason_code,
+ bool *is_valid);
+
+/**
+ * Converts a connect reason code into the Reason Code Name, as it appears in the mqtt5 spec.
+ *
+ * @param reason_code a connect reason code
+ * @return name associated with the reason code
+ */
+AWS_MQTT_API const char *aws_mqtt5_connect_reason_code_to_c_string(enum aws_mqtt5_connect_reason_code reason_code);
+
+/**
+ * Converts a publish reason code into the Reason Code Name, as it appears in the mqtt5 spec.
+ *
+ * @param reason_code a publish reason code
+ * @return name associated with the reason code
+ */
+AWS_MQTT_API const char *aws_mqtt5_puback_reason_code_to_c_string(enum aws_mqtt5_puback_reason_code reason_code);
+
+/**
+ * Converts a subscribe reason code into the Reason Code Name, as it appears in the mqtt5 spec.
+ *
+ * @param reason_code a subscribe reason code
+ * @return name associated with the reason code
+ */
+AWS_MQTT_API const char *aws_mqtt5_suback_reason_code_to_c_string(enum aws_mqtt5_suback_reason_code reason_code);
+
+/**
+ * Converts an unsubscribe reason code into the Reason Code Name, as it appears in the mqtt5 spec.
+ *
+ * @param reason_code an unsubscribe reason code
+ * @return name associated with the reason code
+ */
+AWS_MQTT_API const char *aws_mqtt5_unsuback_reason_code_to_c_string(enum aws_mqtt5_unsuback_reason_code reason_code);
+
+/**
+ * Converts a session behavior type value to a readable description.
+ *
+ * @param session_behavior type of session behavior
+ * @return short string describing the session behavior
+ */
+AWS_MQTT_API const char *aws_mqtt5_client_session_behavior_type_to_c_string(
+ enum aws_mqtt5_client_session_behavior_type session_behavior);
+
+/**
+ * Converts a session behavior type value to a final non-default value.
+ *
+ * @param session_behavior type of session behavior
+ * @return session behavior value where default has been mapped to its intended meaning
+ */
+AWS_MQTT_API enum aws_mqtt5_client_session_behavior_type aws_mqtt5_client_session_behavior_type_to_non_default(
+ enum aws_mqtt5_client_session_behavior_type session_behavior);
+
+/**
+ * Converts an outbound topic aliasing behavior type value to a readable description.
+ *
+ * @param outbound_aliasing_behavior type of outbound topic aliasing behavior
+ * @return short string describing the outbound topic aliasing behavior
+ */
+AWS_MQTT_API const char *aws_mqtt5_outbound_topic_alias_behavior_type_to_c_string(
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior);
+
+/**
+ * Converts an outbound topic aliasing behavior type value to a final non-default value.
+ *
+ * @param outbound_aliasing_behavior type of outbound topic aliasing behavior
+ * @return outbound topic aliasing value where default has been mapped to its intended meaning
+ */
+AWS_MQTT_API enum aws_mqtt5_client_outbound_topic_alias_behavior_type
+ aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default(
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior);
+
+/**
+ * Converts an inbound topic aliasing behavior type value to a readable description.
+ *
+ * @param inbound_aliasing_behavior type of inbound topic aliasing behavior
+ * @return short string describing the inbound topic aliasing behavior
+ */
+AWS_MQTT_API const char *aws_mqtt5_inbound_topic_alias_behavior_type_to_c_string(
+ enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior);
+
+/**
+ * Converts an inbound topic aliasing behavior type value to a final non-default value.
+ *
+ * @param inbound_aliasing_behavior type of inbound topic aliasing behavior
+ * @return inbound topic aliasing value where default has been mapped to its intended meaning
+ */
+AWS_MQTT_API enum aws_mqtt5_client_inbound_topic_alias_behavior_type
+ aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default(
+ enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior);
+
+/**
+ * Converts an extended validation and flow control options value to a readable description.
+ *
+ * @param extended_validation_behavior type of extended validation and flow control
+ * @return short string describing the extended validation and flow control behavior
+ */
+AWS_MQTT_API const char *aws_mqtt5_extended_validation_and_flow_control_options_to_c_string(
+ enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_behavior);
+
+/**
+ * Converts an offline queue behavior type value to a readable description.
+ *
+ * @param offline_queue_behavior type of offline queue behavior
+ * @return short string describing the offline queue behavior
+ */
+AWS_MQTT_API const char *aws_mqtt5_client_operation_queue_behavior_type_to_c_string(
+ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior);
+
+/**
+ * Converts an offline queue behavior type value to a final non-default value.
+ *
+ * @param offline_queue_behavior type of offline queue behavior
+ * @return offline queue behavior value where default has been mapped to its intended meaning
+ */
+AWS_MQTT_API enum aws_mqtt5_client_operation_queue_behavior_type
+ aws_mqtt5_client_operation_queue_behavior_type_to_non_default(
+ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior);
+
+/**
+ * Converts a lifecycle event type value to a readable description.
+ *
+ * @param lifecycle_event type of lifecycle event
+ * @return short string describing the lifecycle event type
+ */
+AWS_MQTT_API const char *aws_mqtt5_client_lifecycle_event_type_to_c_string(
+ enum aws_mqtt5_client_lifecycle_event_type lifecycle_event);
+
+/**
+ * Converts a payload format indicator value to a readable description.
+ *
+ * @param format_indicator type of payload format indicator
+ * @return short string describing the payload format indicator
+ */
+AWS_MQTT_API const char *aws_mqtt5_payload_format_indicator_to_c_string(
+ enum aws_mqtt5_payload_format_indicator format_indicator);
+
+/**
+ * Converts a retain handling type value to a readable description.
+ *
+ * @param retain_handling_type type of retain handling
+ * @return short string describing the retain handling type
+ */
+AWS_MQTT_API const char *aws_mqtt5_retain_handling_type_to_c_string(
+ enum aws_mqtt5_retain_handling_type retain_handling_type);
+
+/**
+ * Converts a packet type value to a readable description.
+ *
+ * @param packet_type type of packet
+ * @return short string describing the packet type
+ */
+AWS_MQTT_API const char *aws_mqtt5_packet_type_to_c_string(enum aws_mqtt5_packet_type packet_type);
+
+/**
+ * Computes a uniformly-distributed random number in the specified range. Not intended for cryptographic purposes.
+ *
+ * @param from one end of the range to sample from
+ * @param to other end of the range to sample from
+ * @return a random number from the supplied range, with roughly a uniform distribution
+ */
+AWS_MQTT_API uint64_t aws_mqtt5_client_random_in_range(uint64_t from, uint64_t to);
+
+/**
+ * Utility function to skip the "$aws/rules/<rule-name>/" prefix of a topic. Technically this works for topic
+ * filters too.
+ *
+ * @param topic_cursor topic to get the non-rules suffix for
+ * @return remaining part of the topic after the leading AWS IoT Rules prefix has been skipped, if present
+ */
+AWS_MQTT_API struct aws_byte_cursor aws_mqtt5_topic_skip_aws_iot_rules_prefix(struct aws_byte_cursor topic_cursor);
+
+/**
+ * Computes the number of topic segments in a topic or topic filter
+ * @param topic_cursor topic or topic filter
+ * @return number of topic segments in the topic or topic filter
+ */
+AWS_MQTT_API size_t aws_mqtt5_topic_get_segment_count(struct aws_byte_cursor topic_cursor);
+
+/**
+ * Checks a topic filter for validity against AWS IoT Core rules
+ * @param topic_filter_cursor topic filter to check
+ * @return true if valid, false otherwise
+ */
+AWS_MQTT_API bool aws_mqtt_is_valid_topic_filter_for_iot_core(struct aws_byte_cursor topic_filter_cursor);
+
+/**
+ * Checks a topic for validity against AWS IoT Core rules
+ * @param topic_cursor topic to check
+ * @return true if valid, false otherwise
+ */
+AWS_MQTT_API bool aws_mqtt_is_valid_topic_for_iot_core(struct aws_byte_cursor topic_cursor);
+
+/**
+ * Checks if a topic filter matches a shared subscription according to the mqtt5 spec
+ * @param topic_cursor topic to check
+ * @return true if this matches the definition of a shared subscription, false otherwise
+ */
+AWS_MQTT_API bool aws_mqtt_is_topic_filter_shared_subscription(struct aws_byte_cursor topic_cursor);
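+
+/*
+ * Illustrative sketch tying the topic helpers above together (the literal topic is hypothetical and
+ * aws_byte_cursor_from_c_str() is assumed to be available from aws-c-common):
+ *
+ *   struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("$aws/rules/my_rule/device/42/telemetry");
+ *   struct aws_byte_cursor suffix = aws_mqtt5_topic_skip_aws_iot_rules_prefix(topic); // "device/42/telemetry"
+ *   size_t segments = aws_mqtt5_topic_get_segment_count(suffix);                      // 3
+ *   bool usable = aws_mqtt_is_valid_topic_for_iot_core(topic);
+ */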
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_UTILS_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/rate_limiters.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/rate_limiters.h
new file mode 100644
index 0000000000..23cb18803f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/private/v5/rate_limiters.h
@@ -0,0 +1,110 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_RATE_LIMITERS_H
+#define AWS_RATE_LIMITERS_H
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/io/io.h>
+
+struct aws_rate_limiter_token_bucket_options {
+ /* Clock function override. If left null, the high resolution clock will be used */
+ aws_io_clock_fn *clock_fn;
+
+ /* How many tokens regenerate per second? */
+ uint64_t tokens_per_second;
+
+ /* Initial amount of tokens the limiter will start with */
+ uint64_t initial_token_count;
+
+ /*
+ * Maximum amount of tokens the limiter can hold. Regenerated tokens that exceed this maximum are
+ * discarded
+ */
+ uint64_t maximum_token_count;
+};
+
+/**
+ * A token-bucket based rate limiter.
+ *
+ * Has an unusually complex implementation due to implementer-desired constraints:
+ *
+ * (1) Model regeneration as an integral rate per second. This is for ease-of-use. A regeneration interval would
+ * be a much simpler implementation, but not as intuitive (or accurate for non-integral rates).
+ * (2) Integer math only. Not comfortable falling back on doubles and not having a good understanding of the
+ * accuracy issues, over time, that doing so would create.
+ * (3) Minimize as much as possible the dangers of multiplication saturation and integer division round-down.
+ * (4) No integer division round-off "error" accumulation allowed. Arguments could be made that it might be small
+ * enough to never make a difference but I'd rather not even have the argument at all.
+ * (5) A perfectly accurate how-long-must-I-wait query. Not just a safe over-estimate.
+ */
+struct aws_rate_limiter_token_bucket {
+ uint64_t last_service_time;
+ uint64_t current_token_count;
+
+ uint64_t fractional_nanos;
+ uint64_t fractional_nano_tokens;
+
+ struct aws_rate_limiter_token_bucket_options config;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes a token-bucket-based rate limiter
+ *
+ * @param limiter rate limiter to initialize
+ * @param options configuration values for the token bucket rate limiter
+ * @return AWS_OP_SUCCESS/AWS_OP_ERR
+ */
+AWS_MQTT_API int aws_rate_limiter_token_bucket_init(
+ struct aws_rate_limiter_token_bucket *limiter,
+ const struct aws_rate_limiter_token_bucket_options *options);
+
+/**
+ * Resets a token-bucket-based rate limiter
+ *
+ * @param limiter rate limiter to reset
+ */
+AWS_MQTT_API void aws_rate_limiter_token_bucket_reset(struct aws_rate_limiter_token_bucket *limiter);
+
+/**
+ * Queries if the token bucket has a number of tokens currently available
+ *
+ * @param limiter token bucket rate limiter to query, non-const because token count is lazily updated
+ * @param token_count how many tokens to check for
+ * @return true if that many tokens are available, false otherwise
+ */
+AWS_MQTT_API bool aws_rate_limiter_token_bucket_can_take_tokens(
+ struct aws_rate_limiter_token_bucket *limiter,
+ uint64_t token_count);
+
+/**
+ * Takes a number of tokens from the token bucket rate limiter
+ *
+ * @param limiter token bucket rate limiter to take from
+ * @param token_count how many tokens to take
+ * @return AWS_OP_SUCCESS if there were that many tokens available, AWS_OP_ERR otherwise
+ */
+AWS_MQTT_API int aws_rate_limiter_token_bucket_take_tokens(
+ struct aws_rate_limiter_token_bucket *limiter,
+ uint64_t token_count);
+
+/**
+ * Queries a token-bucket-based rate limiter for how long, in nanoseconds, until a specified amount of tokens will
+ * be available.
+ *
+ * @param limiter token-bucket-based rate limiter to query
+ * @param token_count how many tokens need to be available
+ * @return how long the caller must wait, in nanoseconds, before that many tokens are available
+ */
+AWS_MQTT_API uint64_t aws_rate_limiter_token_bucket_compute_wait_for_tokens(
+ struct aws_rate_limiter_token_bucket *limiter,
+ uint64_t token_count);
+
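+/*
+ * Illustrative usage sketch (not part of this header): a hypothetical throughput limiter regenerating
+ * 512 KB of byte-tokens per second, in the spirit of the AWS IoT Core throughput flow control mentioned
+ * elsewhere in this library. Error handling is elided and `bytes_to_send` is an assumed local.
+ *
+ *   struct aws_rate_limiter_token_bucket limiter;
+ *   struct aws_rate_limiter_token_bucket_options options = {
+ *       .tokens_per_second = 512 * 1024,
+ *       .initial_token_count = 512 * 1024,
+ *       .maximum_token_count = 512 * 1024,
+ *   };
+ *   aws_rate_limiter_token_bucket_init(&limiter, &options);
+ *
+ *   if (aws_rate_limiter_token_bucket_can_take_tokens(&limiter, bytes_to_send)) {
+ *       aws_rate_limiter_token_bucket_take_tokens(&limiter, bytes_to_send);
+ *   } else {
+ *       uint64_t wait_ns = aws_rate_limiter_token_bucket_compute_wait_for_tokens(&limiter, bytes_to_send);
+ *       // reschedule the send at least wait_ns nanoseconds in the future
+ *   }
+ */
+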
+AWS_EXTERN_C_END
+
+#endif /* AWS_RATE_LIMITERS_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_client.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_client.h
new file mode 100644
index 0000000000..e99338cee2
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_client.h
@@ -0,0 +1,809 @@
+#ifndef AWS_MQTT_MQTT5_CLIENT_H
+#define AWS_MQTT_MQTT5_CLIENT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * DEVELOPER PREVIEW DISCLAIMER
+ *
+ * MQTT5 support is currently in **developer preview**. We encourage feedback at all times, but feedback during the
+ * preview window is especially valuable in shaping the final product. During the preview period we may make
+ * backwards-incompatible changes to the public API, but in general, this is something we will try our best to avoid.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/io/retry_strategy.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_allocator;
+struct aws_client_bootstrap;
+struct aws_http_message;
+struct aws_input_stream;
+struct aws_mqtt5_client;
+struct aws_mqtt5_client_lifecycle_event;
+struct aws_tls_connection_options;
+struct aws_socket_options;
+
+/* public client-related enums */
+
+/**
+ * Controls how the mqtt client should behave with respect to mqtt sessions.
+ */
+enum aws_mqtt5_client_session_behavior_type {
+ /**
+ * Maps to AWS_MQTT5_CSBT_CLEAN
+ */
+ AWS_MQTT5_CSBT_DEFAULT,
+
+ /**
+ * Always join a new, clean session
+ */
+ AWS_MQTT5_CSBT_CLEAN,
+
+ /**
+ * Always attempt to rejoin an existing session after an initial connection success.
+ */
+ AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS,
+
+ /**
+ * Always attempt to rejoin an existing session. Since the client does not support durable session persistence,
+ * this option is not guaranteed to be spec compliant because any unacknowledged qos1 publishes (which are
+ * part of the client session state) will not be present on the initial connection. Until we support
+ * durable session resumption, this option is technically spec-breaking, but useful.
+ */
+ AWS_MQTT5_CSBT_REJOIN_ALWAYS,
+};
+
+/**
+ * Outbound topic aliasing behavior is controlled by this type.
+ *
+ * Topic alias behavior is described in https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901113
+ *
+ * If the server allows topic aliasing, this setting controls how topic aliases are used on PUBLISH packets sent
+ * from the client to the server.
+ *
+ * If topic aliasing is not supported by the server, this setting has no effect and any attempts to directly
+ * manipulate the topic alias id in outbound publishes will be ignored.
+ */
+enum aws_mqtt5_client_outbound_topic_alias_behavior_type {
+ /**
+     * Maps to AWS_MQTT5_COTABT_DISABLED. This keeps the client from being broken (by default) if the broker
+ * topic aliasing implementation has a problem.
+ */
+ AWS_MQTT5_COTABT_DEFAULT,
+
+ /**
+     * Outbound aliasing is the user's responsibility. The client will cache and use
+ * previously-established aliases if they fall within the negotiated limits of the connection.
+ *
+ * The user must still always submit a full topic in their publishes because disconnections disrupt
+ * topic alias mappings unpredictably. The client will properly use the alias when the current connection
+ * has seen the alias binding already.
+ */
+ AWS_MQTT5_COTABT_USER,
+
+ /**
+     * The client fails any user-specified topic aliasing and manages the outbound alias set as an LRU cache.
+ */
+ AWS_MQTT5_COTABT_LRU,
+
+ /**
+ * Completely disable outbound topic aliasing. Attempting to set a topic alias on a PUBLISH results in
+ * an error.
+ */
+ AWS_MQTT5_COTABT_DISABLED
+};
+
+/**
+ * Inbound topic aliasing behavior is controlled by this type.
+ *
+ * Topic alias behavior is described in https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901113
+ *
+ * This setting controls whether or not the client will send a positive topic alias maximum to the server
+ * in its CONNECT packets.
+ *
+ * If topic aliasing is not supported by the server, this setting has no net effect.
+ */
+enum aws_mqtt5_client_inbound_topic_alias_behavior_type {
+ /**
+ * Maps to AWS_MQTT5_CITABT_DISABLED
+ */
+ AWS_MQTT5_CITABT_DEFAULT,
+
+ /**
+ * Allow the server to send PUBLISH packets to the client that use topic aliasing
+ */
+ AWS_MQTT5_CITABT_ENABLED,
+
+ /**
+ * Forbid the server from sending PUBLISH packets to the client that use topic aliasing
+ */
+ AWS_MQTT5_CITABT_DISABLED
+};
+
+/**
+ * Configuration struct for all client topic aliasing behavior. If this is left null, then all default options
+ * (as if zeroed) will be used.
+ */
+struct aws_mqtt5_client_topic_alias_options {
+
+ /**
+ * Controls what kind of outbound topic aliasing behavior the client should attempt to use.
+ */
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_topic_alias_behavior;
+
+ /**
+ * If outbound topic aliasing is set to LRU, this controls the maximum size of the cache. If outbound topic
+ * aliasing is set to LRU and this is zero, a sensible default is used (25). If outbound topic aliasing is not
+ * set to LRU, then this setting has no effect.
+ *
+ * The final size of the cache is determined by the minimum of this setting and the value of the
+ * topic_alias_maximum property of the received CONNACK. If the received CONNACK does not have an explicit
+ * positive value for that field, outbound topic aliasing is disabled for the duration of that connection.
+ */
+ uint16_t outbound_alias_cache_max_size;
+
+ /**
+ * Controls what kind of inbound topic aliasing behavior the client should use.
+ *
+ * Even if inbound topic aliasing is enabled, it is up to the server to choose whether or not to use it.
+ */
+ enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_topic_alias_behavior;
+
+ /**
+ * If inbound topic aliasing is enabled, this will control the size of the inbound alias cache. If inbound
+ * aliases are enabled and this is zero, then a sensible default will be used (25). If inbound aliases are
+ * disabled, this setting has no effect.
+ *
+ * Behaviorally, this value overrides anything present in the topic_alias_maximum field of
+     * the CONNECT packet options. We intentionally don't bind that field to managed clients to reduce the
+     * potential for conflicting configuration.
+     */
+ uint16_t inbound_alias_cache_size;
+};
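+
+/*
+ * Illustrative configuration sketch (field values are arbitrary examples, not recommendations): LRU outbound
+ * aliasing with a 10-entry cache and inbound aliasing enabled with the default cache size.
+ *
+ *   struct aws_mqtt5_client_topic_alias_options topic_alias_options = {
+ *       .outbound_topic_alias_behavior = AWS_MQTT5_COTABT_LRU,
+ *       .outbound_alias_cache_max_size = 10,
+ *       .inbound_topic_alias_behavior = AWS_MQTT5_CITABT_ENABLED,
+ *       .inbound_alias_cache_size = 0, // 0 means the default of 25 is used
+ *   };
+ */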
+
+/**
+ * Extended validation and flow control options
+ *
+ * Potentially a point of expansion in the future. We could add custom controls letting people override
+ * the AWS IoT Core limits based on their account properties. We could, with IoT Core support, add dynamic
+ * limit recognition via user properties as well.
+ */
+enum aws_mqtt5_extended_validation_and_flow_control_options {
+ /**
+ * Do not do any additional validation or flow control outside of the MQTT5 spec
+ */
+ AWS_MQTT5_EVAFCO_NONE,
+
+ /**
+ * Apply additional client-side validation and operational flow control that respects the
+ * default AWS IoT Core limits.
+ *
+ * Currently applies the following additional validation:
+ * (1) No more than 8 subscriptions per SUBSCRIBE packet
+ * (2) Topics and topic filters have a maximum of 7 slashes (8 segments), not counting any AWS rules prefix
+ * (3) Topics must be <= 256 bytes in length
+ * (4) Client id must be <= 128 bytes in length
+ *
+ * Also applies the following flow control:
+ * (1) Outbound throughput throttled to 512KB/s
+ * (2) Outbound publish TPS throttled to 100
+ */
+ AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS,
+};
+
+/**
+ * Controls how disconnects affect the queued and in-progress operations submitted to the client. Also controls
+ * how operations are handled while the client is not connected. In particular, if the client is not connected,
+ * then any operation that would be failed on disconnect (according to these rules) will be rejected.
+ */
+enum aws_mqtt5_client_operation_queue_behavior_type {
+
+ /*
+ * Maps to AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT
+ */
+ AWS_MQTT5_COQBT_DEFAULT,
+
+ /*
+ * Requeues QoS 1+ publishes on disconnect; unacked publishes go to the front, unprocessed publishes stay
+ * in place. All other operations (QoS 0 publishes, subscribe, unsubscribe) are failed.
+ */
+ AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT,
+
+ /*
+     * QoS 0 publishes that are not complete at the time of disconnection are failed. Unacked QoS 1+ publishes are
+ * requeued at the head of the line for immediate retransmission on a session resumption. All other operations
+ * are requeued in original order behind any retransmissions.
+ */
+ AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT,
+
+ /*
+ * All operations that are not complete at the time of disconnection are failed, except those operations that
+ * the mqtt 5 spec requires to be retransmitted (unacked qos1+ publishes).
+ */
+ AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT,
+};
+
+/**
+ * Type of a client lifecycle event
+ */
+enum aws_mqtt5_client_lifecycle_event_type {
+ /**
+ * Emitted when the client begins an attempt to connect to the remote endpoint.
+ *
+ * Mandatory event fields: client, user_data
+ */
+ AWS_MQTT5_CLET_ATTEMPTING_CONNECT,
+
+ /**
+ * Emitted after the client connects to the remote endpoint and receives a successful CONNACK.
+ * Every ATTEMPTING_CONNECT will be followed by exactly one CONNECTION_SUCCESS or one CONNECTION_FAILURE.
+ *
+ * Mandatory event fields: client, user_data, connack_data, settings
+ */
+ AWS_MQTT5_CLET_CONNECTION_SUCCESS,
+
+ /**
+ * Emitted at any point during the connection process when it has conclusively failed.
+ * Every ATTEMPTING_CONNECT will be followed by exactly one CONNECTION_SUCCESS or one CONNECTION_FAILURE.
+ *
+ * Mandatory event fields: client, user_data, error_code
+ * Conditional event fields: connack_data
+ */
+ AWS_MQTT5_CLET_CONNECTION_FAILURE,
+
+ /**
+ * Lifecycle event containing information about a disconnect. Every CONNECTION_SUCCESS will eventually be
+ * followed by one and only one DISCONNECTION.
+ *
+ * Mandatory event fields: client, user_data, error_code
+ * Conditional event fields: disconnect_data
+ */
+ AWS_MQTT5_CLET_DISCONNECTION,
+
+ /**
+ * Lifecycle event notifying the user that the client has entered the STOPPED state. Entering this state will
+ * cause the client to wipe all MQTT session state.
+ *
+ * Mandatory event fields: client, user_data
+ */
+ AWS_MQTT5_CLET_STOPPED,
+};
+
+/* client-related callback function signatures */
+
+/**
+ * Signature of the continuation function to be called after user-code transforms a websocket handshake request
+ */
+typedef void(aws_mqtt5_transform_websocket_handshake_complete_fn)(
+ struct aws_http_message *request,
+ int error_code,
+ void *complete_ctx);
+
+/**
+ * Signature of the websocket handshake request transformation function. After transformation, the completion
+ * function must be invoked to send the request.
+ */
+typedef void(aws_mqtt5_transform_websocket_handshake_fn)(
+ struct aws_http_message *request,
+ void *user_data,
+ aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn,
+ void *complete_ctx);
+
+/**
+ * Callback signature for mqtt5 client lifecycle events.
+ */
+typedef void(aws_mqtt5_client_connection_event_callback_fn)(const struct aws_mqtt5_client_lifecycle_event *event);
+
+/**
+ * Signature of callback to invoke on Publish success/failure.
+ */
+typedef void(aws_mqtt5_publish_completion_fn)(
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet,
+ int error_code,
+ void *complete_ctx);
+
+/**
+ * Signature of callback to invoke on Subscribe success/failure.
+ */
+typedef void(aws_mqtt5_subscribe_completion_fn)(
+ const struct aws_mqtt5_packet_suback_view *suback,
+ int error_code,
+ void *complete_ctx);
+
+/**
+ * Signature of callback to invoke on Unsubscribe success/failure.
+ */
+typedef void(aws_mqtt5_unsubscribe_completion_fn)(
+ const struct aws_mqtt5_packet_unsuback_view *unsuback,
+ int error_code,
+ void *complete_ctx);
+
+/**
+ * Signature of callback to invoke on Publish received
+ */
+typedef void(aws_mqtt5_publish_received_fn)(const struct aws_mqtt5_packet_publish_view *publish, void *user_data);
+
+/**
+ * Signature of a listener publish received callback that returns an indicator whether or not the publish
+ * was handled by the listener.
+ */
+typedef bool(
+ aws_mqtt5_listener_publish_received_fn)(const struct aws_mqtt5_packet_publish_view *publish, void *user_data);
+
+/**
+ * Signature of callback to invoke when a DISCONNECT is fully written to the socket (or fails to be)
+ */
+typedef void(aws_mqtt5_disconnect_completion_fn)(int error_code, void *complete_ctx);
+
+/**
+ * Signature of callback invoked when a client has completely destroyed itself
+ */
+typedef void(aws_mqtt5_client_termination_completion_fn)(void *complete_ctx);
+
+/* operation completion options structures */
+
+/**
+ * Completion callback options for the Publish operation
+ */
+struct aws_mqtt5_publish_completion_options {
+ aws_mqtt5_publish_completion_fn *completion_callback;
+ void *completion_user_data;
+};
+
+/**
+ * Completion callback options for the Subscribe operation
+ */
+struct aws_mqtt5_subscribe_completion_options {
+ aws_mqtt5_subscribe_completion_fn *completion_callback;
+ void *completion_user_data;
+};
+
+/**
+ * Completion callback options for the Unsubscribe operation
+ */
+struct aws_mqtt5_unsubscribe_completion_options {
+ aws_mqtt5_unsubscribe_completion_fn *completion_callback;
+ void *completion_user_data;
+};
+
+/**
+ * Public completion callback options for a DISCONNECT operation
+ */
+struct aws_mqtt5_disconnect_completion_options {
+ aws_mqtt5_disconnect_completion_fn *completion_callback;
+ void *completion_user_data;
+};
+
+/**
+ * Mqtt behavior settings that are dynamically negotiated as part of the CONNECT/CONNACK exchange.
+ */
+struct aws_mqtt5_negotiated_settings {
+ /**
+ * The maximum QoS used between the server and client.
+ */
+ enum aws_mqtt5_qos maximum_qos;
+
+ /**
+ * the amount of time in seconds the server will retain the session after a disconnect.
+ */
+ uint32_t session_expiry_interval;
+
+ /**
+ * the number of QoS 1 and QoS2 publications the server is willing to process concurrently.
+ */
+ uint16_t receive_maximum_from_server;
+
+ /**
+ * the maximum packet size the server is willing to accept.
+ */
+ uint32_t maximum_packet_size_to_server;
+
+ /**
+ * the highest value that the server will accept as a Topic Alias sent by the client.
+ */
+ uint16_t topic_alias_maximum_to_server;
+
+ /**
+ * the highest value that the client will accept as a Topic Alias sent by the server.
+ */
+ uint16_t topic_alias_maximum_to_client;
+
+ /**
+ * the amount of time in seconds before the server will disconnect the client for inactivity.
+ */
+ uint16_t server_keep_alive;
+
+ /**
+ * whether the server supports retained messages.
+ */
+ bool retain_available;
+
+ /**
+ * whether the server supports wildcard subscriptions.
+ */
+ bool wildcard_subscriptions_available;
+
+ /**
+ * whether the server supports subscription identifiers
+ */
+ bool subscription_identifiers_available;
+
+ /**
+ * whether the server supports shared subscriptions
+ */
+ bool shared_subscriptions_available;
+
+ /**
+ * whether the client has rejoined an existing session.
+ */
+ bool rejoined_session;
+
+ struct aws_byte_buf client_id_storage;
+};
+
+/**
+ * Contains some simple statistics about the current state of the client's queue of operations
+ */
+struct aws_mqtt5_client_operation_statistics {
+ /*
+ * total number of operations submitted to the client that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ uint64_t incomplete_operation_count;
+
+ /*
+ * total packet size of operations submitted to the client that have not yet been completed. Unacked operations
+ * are a subset of this.
+ */
+ uint64_t incomplete_operation_size;
+
+ /*
+ * total number of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ uint64_t unacked_operation_count;
+
+ /*
+ * total packet size of operations that have been sent to the server and are waiting for a corresponding ACK before
+ * they can be completed.
+ */
+ uint64_t unacked_operation_size;
+};
+
+/**
+ * Details about a client lifecycle event.
+ */
+struct aws_mqtt5_client_lifecycle_event {
+
+ /**
+ * Type of event this is.
+ */
+ enum aws_mqtt5_client_lifecycle_event_type event_type;
+
+ /**
+ * Client this event corresponds to. Necessary (can't be replaced with user data) because the client
+ * doesn't exist at the time the event callback user data is configured.
+ */
+ struct aws_mqtt5_client *client;
+
+ /**
+ * Aws-c-* error code associated with the event
+ */
+ int error_code;
+
+ /**
+ * User data associated with the client's lifecycle event handler. Set with client configuration.
+ */
+ void *user_data;
+
+ /**
+ * If this event was caused by receiving a CONNACK, this will be a view of that packet.
+ */
+ const struct aws_mqtt5_packet_connack_view *connack_data;
+
+ /**
+ * If this is a successful connection establishment, this will contain the negotiated mqtt5 behavioral settings
+ */
+ const struct aws_mqtt5_negotiated_settings *settings;
+
+ /**
+ * If this event was caused by receiving a DISCONNECT, this will be a view of that packet.
+ */
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_data;
+};
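+
+/*
+ * Illustrative handler sketch (s_on_lifecycle_event is a hypothetical user function, not part of this
+ * header) showing how the fields above are typically inspected. Only the fields listed as mandatory for a
+ * given event type should be assumed to be valid.
+ *
+ *   static void s_on_lifecycle_event(const struct aws_mqtt5_client_lifecycle_event *event) {
+ *       switch (event->event_type) {
+ *           case AWS_MQTT5_CLET_CONNECTION_SUCCESS:
+ *               // event->settings and event->connack_data are available here
+ *               break;
+ *           case AWS_MQTT5_CLET_CONNECTION_FAILURE:
+ *           case AWS_MQTT5_CLET_DISCONNECTION:
+ *               // event->error_code describes the failure; connack_data/disconnect_data may be NULL
+ *               break;
+ *           default:
+ *               break;
+ *       }
+ *   }
+ */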
+
+/**
+ * Basic mqtt5 client configuration struct.
+ *
+ * Contains desired connection properties.
+ * Configuration that represents properties of the mqtt5 CONNECT packet goes in the connect view (connect_options).
+ */
+struct aws_mqtt5_client_options {
+
+ /**
+ * Host to establish mqtt connections to
+ */
+ struct aws_byte_cursor host_name;
+
+ /**
+ * Port to establish mqtt connections to
+ */
+ uint16_t port;
+
+ /**
+ * Client bootstrap to use whenever this client establishes a connection
+ */
+ struct aws_client_bootstrap *bootstrap;
+
+ /**
+ * Socket options to use whenever this client establishes a connection
+ */
+ const struct aws_socket_options *socket_options;
+
+ /**
+ * (Optional) Tls options to use whenever this client establishes a connection
+ */
+ const struct aws_tls_connection_options *tls_options;
+
+ /**
+ * (Optional) Http proxy options to use whenever this client establishes a connection
+ */
+ const struct aws_http_proxy_options *http_proxy_options;
+
+ /**
+ * (Optional) Websocket handshake transformation function and user data. Websockets are used if the
+ * transformation function is non-null.
+ */
+ aws_mqtt5_transform_websocket_handshake_fn *websocket_handshake_transform;
+ void *websocket_handshake_transform_user_data;
+
+ /**
+ * All CONNECT-related options, includes the will configuration, if desired
+ */
+ const struct aws_mqtt5_packet_connect_view *connect_options;
+
+ /**
+ * Controls session rejoin behavior
+ */
+ enum aws_mqtt5_client_session_behavior_type session_behavior;
+
+ /**
+ * Controls if any additional AWS-specific validation or flow control should be performed by the client.
+ */
+ enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_and_flow_control_options;
+
+ /**
+ * Controls how the client treats queued/in-progress operations when the connection drops for any reason.
+ */
+ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior;
+
+ /**
+ * Controls the exponential backoff behavior when the client is waiting to reconnect.
+ *
+ * See: https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/
+ */
+ enum aws_exponential_backoff_jitter_mode retry_jitter_mode;
+
+ /**
+ * Minimum amount of time in ms to wait before attempting to reconnect. If this is zero, a default of 1000 ms will
+ * be used.
+ */
+ uint64_t min_reconnect_delay_ms;
+
+ /**
+ * Maximum amount of time in ms to wait before attempting to reconnect. If this is zero, a default of 120000 ms
+ * will be used.
+ */
+ uint64_t max_reconnect_delay_ms;
+
+ /**
+ * Amount of time that must elapse with a good connection before the reconnect delay is reset to the minimum. If
+     * this is zero, a default of 30000 ms will be used.
+ */
+ uint64_t min_connected_time_to_reset_reconnect_delay_ms;
+
+ /**
+ * Time interval to wait after sending a PINGREQ for a PINGRESP to arrive. If one does not arrive, the connection
+ * will be shut down. If this is zero, a default of 30000 ms will be used.
+ */
+ uint32_t ping_timeout_ms;
+
+ /**
+ * Time interval to wait after sending a CONNECT request for a CONNACK to arrive. If one does not arrive, the
+     * connection will be shut down. If this is zero, a default of 20000 ms will be used.
+ */
+ uint32_t connack_timeout_ms;
+
+ /**
+ * Time interval to wait for an ack after sending a SUBSCRIBE, UNSUBSCRIBE, or PUBLISH with QoS 1+ before
+ * failing the packet, notifying the client of failure, and removing it. If this is zero, a default of 60 seconds
+ * will be used.
+ */
+ uint32_t ack_timeout_seconds;
+
+ /**
+ * Controls how the client uses mqtt5 topic aliasing. If NULL, zero-based defaults will be used.
+ */
+ struct aws_mqtt5_client_topic_alias_options *topic_aliasing_options;
+
+ /**
+ * Callback for received publish packets
+ */
+ aws_mqtt5_publish_received_fn *publish_received_handler;
+ void *publish_received_handler_user_data;
+
+ /**
+ * Callback and user data for all client lifecycle events.
+     * Lifecycle events include:
+     *    ConnectionSuccess
+     *    ConnectionFailure
+ * Disconnect
+ * (client) Stopped
+ *
+ * Disconnect lifecycle events are 1-1 with -- strictly after -- ConnectionSuccess events.
+ */
+ aws_mqtt5_client_connection_event_callback_fn *lifecycle_event_handler;
+ void *lifecycle_event_handler_user_data;
+
+ /**
+ * Callback for when the client has completely destroyed itself
+ */
+ aws_mqtt5_client_termination_completion_fn *client_termination_handler;
+ void *client_termination_handler_user_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new mqtt5 client using the supplied configuration
+ *
+ * @param allocator allocator to use with all memory operations related to this client's creation and operation
+ * @param options mqtt5 client configuration
+ * @return a new mqtt5 client or NULL
+ */
+AWS_MQTT_API
+struct aws_mqtt5_client *aws_mqtt5_client_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client_options *options);
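+
+/*
+ * Minimal creation sketch (illustrative only): `allocator`, `bootstrap`, `socket_options` and `connect_view`
+ * are assumed to have been set up elsewhere, `s_on_lifecycle_event` is a hypothetical handler, and
+ * aws_byte_cursor_from_c_str() is assumed to be available from aws-c-common.
+ *
+ *   struct aws_mqtt5_client_options options = {
+ *       .host_name = aws_byte_cursor_from_c_str("broker.example.com"),
+ *       .port = 8883,
+ *       .bootstrap = bootstrap,
+ *       .socket_options = &socket_options,
+ *       .connect_options = &connect_view,
+ *       .lifecycle_event_handler = s_on_lifecycle_event,
+ *   };
+ *   struct aws_mqtt5_client *client = aws_mqtt5_client_new(allocator, &options);
+ *   if (client != NULL) {
+ *       aws_mqtt5_client_start(client);
+ *   }
+ */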
+
+/**
+ * Acquires a reference to an mqtt5 client
+ *
+ * @param client client to acquire a reference to. May be NULL.
+ * @return what was passed in as the client (a client or NULL)
+ */
+AWS_MQTT_API
+struct aws_mqtt5_client *aws_mqtt5_client_acquire(struct aws_mqtt5_client *client);
+
+/**
+ * Release a reference to an mqtt5 client. When the client ref count drops to zero, the client will automatically
+ * trigger a stop and once the stop completes, the client will delete itself.
+ *
+ * @param client client to release a reference to. May be NULL.
+ * @return NULL
+ */
+AWS_MQTT_API
+struct aws_mqtt5_client *aws_mqtt5_client_release(struct aws_mqtt5_client *client);
+
+/**
+ * Asynchronously notifies the mqtt5 client that it should attempt to connect to the configured endpoint.
+ * The client will attempt to stay connected using the properties of the reconnect-related parameters
+ * in the mqtt5 client configuration.
+ *
+ * @param client mqtt5 client to start
+ * @return success/failure in the synchronous logic that kicks off the start process
+ */
+AWS_MQTT_API
+int aws_mqtt5_client_start(struct aws_mqtt5_client *client);
+
+/**
+ * Asynchronously notifies the mqtt5 client that it should transition to the stopped state. When the client
+ * reaches the stopped state, all session state is erased.
+ *
+ * @param client mqtt5 client to stop
+ * @param disconnect_options (optional) properties of a DISCONNECT packet to send as part of the shutdown process
+ * @param completion_options (optional) completion callback options for the DISCONNECT packet, if one is sent
+ * @return success/failure in the synchronous logic that kicks off the stop process
+ */
+AWS_MQTT_API
+int aws_mqtt5_client_stop(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_options,
+ const struct aws_mqtt5_disconnect_completion_options *completion_options);
+
+/**
+ * Queues a Publish operation in an mqtt5 client
+ *
+ * @param client mqtt5 client to queue a Publish for
+ * @param publish_options configuration options for the Publish operation
+ * @param completion_options completion callback configuration. Successful QoS 0 publishes invoke the callback when
+ * the data has been written to the socket. Successful QoS1+ publishes invoke the callback when the corresponding ack
+ * is received. Unsuccessful publishes invoke the callback at the point in time a failure condition is reached.
+ * @return success/failure in the synchronous logic that kicks off the publish operation
+ */
+AWS_MQTT_API
+int aws_mqtt5_client_publish(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_publish_view *publish_options,
+ const struct aws_mqtt5_publish_completion_options *completion_options);
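+
+/*
+ * Illustrative publish sketch: `client` is assumed to come from aws_mqtt5_client_new, `publish_view` is an
+ * aws_mqtt5_packet_publish_view configured elsewhere (its fields live in mqtt5_types.h), and
+ * s_on_publish_complete is a hypothetical aws_mqtt5_publish_completion_fn.
+ *
+ *   struct aws_mqtt5_publish_completion_options completion_options = {
+ *       .completion_callback = s_on_publish_complete,
+ *       .completion_user_data = NULL,
+ *   };
+ *   if (aws_mqtt5_client_publish(client, &publish_view, &completion_options) != AWS_OP_SUCCESS) {
+ *       // synchronous submission failure; inspect aws_last_error()
+ *   }
+ */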
+
+/**
+ * Queues a Subscribe operation in an mqtt5 client
+ *
+ * @param client mqtt5 client to queue a Subscribe for
+ * @param subscribe_options configuration options for the Subscribe operation
+ * @param completion_options Completion callback configuration. Invoked when the corresponding SUBACK is received or
+ * a failure condition is reached. An error code implies complete failure of the subscribe, while a success code
+ * implies the user must still check all of the SUBACK's reason codes for per-subscription feedback.
+ * @return success/failure in the synchronous logic that kicks off the Subscribe operation
+ */
+AWS_MQTT_API
+int aws_mqtt5_client_subscribe(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_options,
+ const struct aws_mqtt5_subscribe_completion_options *completion_options);
+
+/**
+ * Queues an Unsubscribe operation in an mqtt5 client
+ *
+ * @param client mqtt5 client to queue an Unsubscribe for
+ * @param unsubscribe_options configuration options for the Unsubscribe operation
+ * @param completion_options Completion callback configuration. Invoked when the corresponding UNSUBACK is received or
+ * a failure condition is reached. An error code implies complete failure of the unsubscribe, while a success code
+ * implies the user must still check all of the UNSUBACK's reason codes for per-topic-filter feedback.
+ * @return success/failure in the synchronous logic that kicks off the Unsubscribe operation
+ */
+AWS_MQTT_API
+int aws_mqtt5_client_unsubscribe(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options,
+ const struct aws_mqtt5_unsubscribe_completion_options *completion_options);
+
+/**
+ * Queries the client's internal statistics for incomplete operations.
+ * @param client client to get statistics for
+ * @param stats set of incomplete operation statistics
+ */
+AWS_MQTT_API
+void aws_mqtt5_client_get_stats(struct aws_mqtt5_client *client, struct aws_mqtt5_client_operation_statistics *stats);
+
+/* Misc related type APIs */
+
+/**
+ * Initializes the Client ID byte buf in negotiated settings
+ *
+ * @param allocator allocator to use for memory allocation
+ * @param negotiated_settings settings to apply client id to
+ * @param client_id client id to set
+ */
+AWS_MQTT_API int aws_mqtt5_negotiated_settings_init(
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_byte_cursor *client_id);
+
+/**
+ * Makes an owning copy of a negotiated settings structure
+ *
+ * @param source settings to copy from
+ * @param dest settings to copy into. Must be in a zeroed or initialized state because clean_up is
+ * called on it as the first step of the copy process.
+ * @return success/failure
+ */
+AWS_MQTT_API int aws_mqtt5_negotiated_settings_copy(
+ const struct aws_mqtt5_negotiated_settings *source,
+ struct aws_mqtt5_negotiated_settings *dest);
+
+/**
+ * Clean up owned memory in negotiated_settings
+ *
+ * @param negotiated_settings settings to clean up
+ */
+AWS_MQTT_API void aws_mqtt5_negotiated_settings_clean_up(struct aws_mqtt5_negotiated_settings *negotiated_settings);
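+
+/*
+ * Illustrative pairing sketch (`allocator` is an assumed local): negotiated settings own their client id
+ * storage, so every successful init must eventually be matched by a clean_up.
+ *
+ *   struct aws_mqtt5_negotiated_settings settings;
+ *   struct aws_byte_cursor client_id = aws_byte_cursor_from_c_str("my-client-id");
+ *   if (aws_mqtt5_negotiated_settings_init(allocator, &settings, &client_id) == AWS_OP_SUCCESS) {
+ *       // ... use settings ...
+ *       aws_mqtt5_negotiated_settings_clean_up(&settings);
+ *   }
+ */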
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_CLIENT_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_listener.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_listener.h
new file mode 100644
index 0000000000..8d0498cebd
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_listener.h
@@ -0,0 +1,85 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_MQTT_MQTT5_LISTENER_H
+#define AWS_MQTT_MQTT5_LISTENER_H
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/mqtt/v5/mqtt5_client.h>
+
+/*
+ * Callback signature for when an mqtt5 listener has completely destroyed itself.
+ */
+typedef void(aws_mqtt5_listener_termination_completion_fn)(void *complete_ctx);
+
+/**
+ * A record that tracks MQTT5 client callbacks which can be dynamically injected via a listener.
+ */
+struct aws_mqtt5_callback_set {
+ aws_mqtt5_listener_publish_received_fn *listener_publish_received_handler;
+ void *listener_publish_received_handler_user_data;
+
+ aws_mqtt5_client_connection_event_callback_fn *lifecycle_event_handler;
+ void *lifecycle_event_handler_user_data;
+};
+
+/**
+ * Configuration options for MQTT5 listener objects.
+ */
+struct aws_mqtt5_listener_config {
+
+ /**
+ * MQTT5 client to listen to events on
+ */
+ struct aws_mqtt5_client *client;
+
+ /**
+ * Callbacks to invoke when events occur on the MQTT5 client
+ */
+ struct aws_mqtt5_callback_set listener_callbacks;
+
+ /**
+ * Listener destruction is asynchronous and thus requires a termination callback and associated user data
+ * to notify the user that the listener has been fully destroyed and no further events will be received.
+ */
+ aws_mqtt5_listener_termination_completion_fn *termination_callback;
+ void *termination_callback_user_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Creates a new MQTT5 listener object. For as long as the listener lives, incoming publishes and lifecycle events
+ * will be forwarded to the callbacks configured on the listener.
+ *
+ * @param allocator allocator to use
+ * @param config listener configuration
+ * @return a new aws_mqtt5_listener object
+ */
+AWS_MQTT_API struct aws_mqtt5_listener *aws_mqtt5_listener_new(
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_listener_config *config);
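+
+/*
+ * Illustrative sketch (handler names and `my_state` are hypothetical): attaching a listener so that a
+ * subsystem can observe publishes and lifecycle events without owning the client's primary callbacks.
+ *
+ *   struct aws_mqtt5_listener_config config = {
+ *       .client = client,
+ *       .listener_callbacks = {
+ *           .listener_publish_received_handler = s_filtered_publish_received,
+ *           .listener_publish_received_handler_user_data = my_state,
+ *       },
+ *       .termination_callback = s_listener_terminated,
+ *       .termination_callback_user_data = my_state,
+ *   };
+ *   struct aws_mqtt5_listener *listener = aws_mqtt5_listener_new(allocator, &config);
+ */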
+
+/**
+ * Adds a reference to an mqtt5 listener.
+ *
+ * @param listener listener to add a reference to
+ * @return the listener object
+ */
+AWS_MQTT_API struct aws_mqtt5_listener *aws_mqtt5_listener_acquire(struct aws_mqtt5_listener *listener);
+
+/**
+ * Removes a reference to an mqtt5 listener. When the reference count drops to zero, the listener's asynchronous
+ * destruction will be started.
+ *
+ * @param listener listener to remove a reference from
+ * @return NULL
+ */
+AWS_MQTT_API struct aws_mqtt5_listener *aws_mqtt5_listener_release(struct aws_mqtt5_listener *listener);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_LISTENER_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_packet_storage.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_packet_storage.h
new file mode 100644
index 0000000000..9a7028f459
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_packet_storage.h
@@ -0,0 +1,336 @@
+#ifndef AWS_MQTT_MQTT5_PACKET_STORAGE_H
+#define AWS_MQTT_MQTT5_PACKET_STORAGE_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * DEVELOPER PREVIEW DISCLAIMER
+ *
+ * MQTT5 support is currently in **developer preview**. We encourage feedback at all times, but feedback during the
+ * preview window is especially valuable in shaping the final product. During the preview period we may make
+ * backwards-incompatible changes to the public API, but in general, this is something we will try our best to avoid.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+struct aws_mqtt5_user_property_set {
+ struct aws_array_list properties;
+};
+
+struct aws_mqtt5_packet_connect_storage {
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_connect_view storage_view;
+
+ struct aws_byte_cursor username;
+
+ struct aws_byte_cursor password;
+
+ uint32_t session_expiry_interval_seconds;
+
+ uint8_t request_response_information;
+
+ uint8_t request_problem_information;
+
+ uint16_t receive_maximum;
+
+ uint16_t topic_alias_maximum;
+
+ uint32_t maximum_packet_size_bytes;
+
+ struct aws_mqtt5_packet_publish_storage *will;
+
+ uint32_t will_delay_interval_seconds;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_byte_cursor authentication_method;
+
+ struct aws_byte_cursor authentication_data;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_connack_storage {
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_connack_view storage_view;
+
+ uint32_t session_expiry_interval;
+
+ uint16_t receive_maximum;
+
+ enum aws_mqtt5_qos maximum_qos;
+
+ bool retain_available;
+
+ uint32_t maximum_packet_size;
+
+ struct aws_byte_cursor assigned_client_identifier;
+
+ uint16_t topic_alias_maximum;
+
+ struct aws_byte_cursor reason_string;
+
+ bool wildcard_subscriptions_available;
+
+ bool subscription_identifiers_available;
+
+ bool shared_subscriptions_available;
+
+ uint16_t server_keep_alive;
+
+ struct aws_byte_cursor response_information;
+
+ struct aws_byte_cursor server_reference;
+
+ struct aws_byte_cursor authentication_method;
+
+ struct aws_byte_cursor authentication_data;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_suback_storage {
+
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_suback_view storage_view;
+
+ struct aws_byte_cursor reason_string;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_array_list reason_codes;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_unsuback_storage {
+
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_packet_unsuback_view storage_view;
+
+ struct aws_byte_cursor reason_string;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_array_list reason_codes;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_publish_storage {
+ struct aws_mqtt5_packet_publish_view storage_view;
+
+ enum aws_mqtt5_payload_format_indicator payload_format;
+
+ uint32_t message_expiry_interval_seconds;
+
+ uint16_t topic_alias;
+
+ struct aws_byte_cursor response_topic;
+
+ struct aws_byte_cursor correlation_data;
+
+ struct aws_byte_cursor content_type;
+
+ struct aws_mqtt5_user_property_set user_properties;
+ struct aws_array_list subscription_identifiers;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_puback_storage {
+ struct aws_mqtt5_packet_puback_view storage_view;
+
+ struct aws_byte_cursor reason_string;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_disconnect_storage {
+ struct aws_mqtt5_packet_disconnect_view storage_view;
+
+ uint32_t session_expiry_interval_seconds;
+
+ struct aws_byte_cursor reason_string;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_byte_cursor server_reference;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_subscribe_storage {
+ struct aws_mqtt5_packet_subscribe_view storage_view;
+
+ uint32_t subscription_identifier;
+
+ struct aws_array_list subscriptions;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_byte_buf storage;
+};
+
+struct aws_mqtt5_packet_unsubscribe_storage {
+ struct aws_mqtt5_packet_unsubscribe_view storage_view;
+
+ struct aws_array_list topic_filters;
+
+ struct aws_mqtt5_user_property_set user_properties;
+
+ struct aws_byte_buf storage;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* User properties */
+
+AWS_MQTT_API int aws_mqtt5_user_property_set_init_with_storage(
+ struct aws_mqtt5_user_property_set *property_set,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *storage_buffer,
+ size_t property_count,
+ const struct aws_mqtt5_user_property *properties);
+
+AWS_MQTT_API void aws_mqtt5_user_property_set_clean_up(struct aws_mqtt5_user_property_set *property_set);
+
+AWS_MQTT_API size_t aws_mqtt5_user_property_set_size(const struct aws_mqtt5_user_property_set *property_set);
+
+/* Connect */
+
+AWS_MQTT_API int aws_mqtt5_packet_connect_storage_init(
+ struct aws_mqtt5_packet_connect_storage *connect_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_connect_view *connect_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_connect_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_connect_storage *connect_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_connect_storage_clean_up(struct aws_mqtt5_packet_connect_storage *connect_storage);
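+
+/*
+ * Illustrative usage sketch (`allocator` and `connect_view` are assumed locals): the packet storage types
+ * follow an init/clean_up pairing in which the storage takes an owning copy of the corresponding view.
+ *
+ *   struct aws_mqtt5_packet_connect_storage connect_storage;
+ *   if (aws_mqtt5_packet_connect_storage_init(&connect_storage, allocator, &connect_view) == AWS_OP_SUCCESS) {
+ *       // connect_storage.storage_view is now an owning snapshot of connect_view
+ *       aws_mqtt5_packet_connect_storage_clean_up(&connect_storage);
+ *   }
+ */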
+
+/* Connack */
+
+AWS_MQTT_API int aws_mqtt5_packet_connack_storage_init(
+ struct aws_mqtt5_packet_connack_storage *connack_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_connack_view *connack_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_connack_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_connack_storage *connack_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_connack_storage_clean_up(struct aws_mqtt5_packet_connack_storage *connack_storage);
+
+/* Disconnect */
+
+AWS_MQTT_API int aws_mqtt5_packet_disconnect_storage_init(
+ struct aws_mqtt5_packet_disconnect_storage *disconnect_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_disconnect_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_disconnect_storage *disconnect_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_disconnect_storage_clean_up(
+ struct aws_mqtt5_packet_disconnect_storage *disconnect_storage);
+
+/* Publish */
+
+AWS_MQTT_API int aws_mqtt5_packet_publish_storage_init(
+ struct aws_mqtt5_packet_publish_storage *publish_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_publish_view *publish_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_publish_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_publish_storage *publish_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_publish_storage_clean_up(struct aws_mqtt5_packet_publish_storage *publish_storage);
+
+/* Puback */
+
+AWS_MQTT_API int aws_mqtt5_packet_puback_storage_init(
+ struct aws_mqtt5_packet_puback_storage *puback_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_puback_view *puback_view);
+
+AWS_MQTT_API int aws_mqtt5_packet_puback_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_puback_storage *puback_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_puback_storage_clean_up(struct aws_mqtt5_packet_puback_storage *puback_storage);
+
+/* Subscribe */
+
+AWS_MQTT_API int aws_mqtt5_packet_subscribe_storage_init(
+ struct aws_mqtt5_packet_subscribe_storage *subscribe_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_subscribe_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_subscribe_storage *subscribe_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_subscribe_storage_clean_up(
+ struct aws_mqtt5_packet_subscribe_storage *subscribe_storage);
+
+/* Suback */
+
+AWS_MQTT_API int aws_mqtt5_packet_suback_storage_init(
+ struct aws_mqtt5_packet_suback_storage *suback_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_suback_view *suback_view);
+
+AWS_MQTT_API int aws_mqtt5_packet_suback_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_suback_storage *suback_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_suback_storage_clean_up(struct aws_mqtt5_packet_suback_storage *suback_storage);
+
+/* Unsubscribe */
+
+AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_storage_init(
+ struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options);
+
+AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_unsubscribe_storage_clean_up(
+ struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage);
+
+/* Unsuback */
+
+AWS_MQTT_API int aws_mqtt5_packet_unsuback_storage_init(
+ struct aws_mqtt5_packet_unsuback_storage *unsuback_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_unsuback_view *unsuback_view);
+
+AWS_MQTT_API int aws_mqtt5_packet_unsuback_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_unsuback_storage *unsuback_storage,
+ struct aws_allocator *allocator);
+
+AWS_MQTT_API void aws_mqtt5_packet_unsuback_storage_clean_up(
+ struct aws_mqtt5_packet_unsuback_storage *unsuback_storage);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_MQTT_MQTT5_PACKET_STORAGE_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_types.h b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_types.h
new file mode 100644
index 0000000000..f8db39516b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/include/aws/mqtt/v5/mqtt5_types.h
@@ -0,0 +1,486 @@
+#ifndef AWS_MQTT_MQTT5_TYPES_H
+#define AWS_MQTT_MQTT5_TYPES_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * DEVELOPER PREVIEW DISCLAIMER
+ *
+ * MQTT5 support is currently in **developer preview**. We encourage feedback at all times, but feedback during the
+ * preview window is especially valuable in shaping the final product. During the preview period we may make
+ * backwards-incompatible changes to the public API, but in general, this is something we will try our best to avoid.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/common/array_list.h>
+#include <aws/common/byte_buf.h>
+
+/**
+ * Some artificial (not specified by the MQTT spec) limits that we place on input packets (publish, subscribe,
+ * unsubscribe), which let us safely do the various packet size calculations with a bare minimum of checked
+ * arithmetic.
+ *
+ * We don't foresee any use case that would need more than this, but the limits can be relaxed to some degree.
+ *
+ * TODO: Add some static assert calculations that verify that we can't possibly overflow against the maximum value
+ * of a variable length integer for relevant packet size encodings that are absolute worst-case against these limits.
+ */
+#define AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES 1024
+#define AWS_MQTT5_CLIENT_MAXIMUM_SUBSCRIPTIONS_PER_SUBSCRIBE 1024
+#define AWS_MQTT5_CLIENT_MAXIMUM_TOPIC_FILTERS_PER_UNSUBSCRIBE 1024
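+
+/*
+ * Editorial sketch of the static assert mentioned in the TODO above (illustrative only, not upstream code).
+ * Each user property encodes as a 1-byte property identifier plus two length-prefixed UTF-8 strings of at most
+ * 65535 bytes each, so its worst-case wire size is 1 + 2 * (2 + 65535) bytes. Even at the maximum property
+ * count this stays well below the 268,435,455-byte ceiling of an MQTT variable length integer:
+ *
+ *     #define AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER 268435455
+ *     AWS_STATIC_ASSERT(
+ *         (uint64_t)AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES * (1 + 2 * (2 + 65535)) <
+ *         AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER);
+ */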
+
+/**
+ * Over-the-wire packet id as defined in the mqtt spec. Allocated at the point in time when the packet is
+ * next to go down the channel and is about to be encoded into an io message buffer.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901026
+ */
+typedef uint16_t aws_mqtt5_packet_id_t;
+
+/**
+ * MQTT Message delivery quality of service.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901234
+ */
+enum aws_mqtt5_qos {
+
+ /** https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901235 */
+ AWS_MQTT5_QOS_AT_MOST_ONCE = 0x0,
+
+ /** https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901236 */
+ AWS_MQTT5_QOS_AT_LEAST_ONCE = 0x1,
+
+ /** https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901237 */
+ AWS_MQTT5_QOS_EXACTLY_ONCE = 0x2,
+};
+
+/**
+ * Server return code for CONNECT attempts.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901079
+ */
+enum aws_mqtt5_connect_reason_code {
+ AWS_MQTT5_CRC_SUCCESS = 0,
+ AWS_MQTT5_CRC_UNSPECIFIED_ERROR = 128,
+ AWS_MQTT5_CRC_MALFORMED_PACKET = 129,
+ AWS_MQTT5_CRC_PROTOCOL_ERROR = 130,
+ AWS_MQTT5_CRC_IMPLEMENTATION_SPECIFIC_ERROR = 131,
+ AWS_MQTT5_CRC_UNSUPPORTED_PROTOCOL_VERSION = 132,
+ AWS_MQTT5_CRC_CLIENT_IDENTIFIER_NOT_VALID = 133,
+ AWS_MQTT5_CRC_BAD_USERNAME_OR_PASSWORD = 134,
+ AWS_MQTT5_CRC_NOT_AUTHORIZED = 135,
+ AWS_MQTT5_CRC_SERVER_UNAVAILABLE = 136,
+ AWS_MQTT5_CRC_SERVER_BUSY = 137,
+ AWS_MQTT5_CRC_BANNED = 138,
+ AWS_MQTT5_CRC_BAD_AUTHENTICATION_METHOD = 140,
+ AWS_MQTT5_CRC_TOPIC_NAME_INVALID = 144,
+ AWS_MQTT5_CRC_PACKET_TOO_LARGE = 149,
+ AWS_MQTT5_CRC_QUOTA_EXCEEDED = 151,
+ AWS_MQTT5_CRC_PAYLOAD_FORMAT_INVALID = 153,
+ AWS_MQTT5_CRC_RETAIN_NOT_SUPPORTED = 154,
+ AWS_MQTT5_CRC_QOS_NOT_SUPPORTED = 155,
+ AWS_MQTT5_CRC_USE_ANOTHER_SERVER = 156,
+ AWS_MQTT5_CRC_SERVER_MOVED = 157,
+ AWS_MQTT5_CRC_CONNECTION_RATE_EXCEEDED = 159,
+};
+
+/**
+ * Reason code inside DISCONNECT packets.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208
+ */
+enum aws_mqtt5_disconnect_reason_code {
+ AWS_MQTT5_DRC_NORMAL_DISCONNECTION = 0,
+ AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE = 4,
+ AWS_MQTT5_DRC_UNSPECIFIED_ERROR = 128,
+ AWS_MQTT5_DRC_MALFORMED_PACKET = 129,
+ AWS_MQTT5_DRC_PROTOCOL_ERROR = 130,
+ AWS_MQTT5_DRC_IMPLEMENTATION_SPECIFIC_ERROR = 131,
+ AWS_MQTT5_DRC_NOT_AUTHORIZED = 135,
+ AWS_MQTT5_DRC_SERVER_BUSY = 137,
+ AWS_MQTT5_DRC_SERVER_SHUTTING_DOWN = 139,
+ AWS_MQTT5_DRC_KEEP_ALIVE_TIMEOUT = 141,
+ AWS_MQTT5_DRC_SESSION_TAKEN_OVER = 142,
+ AWS_MQTT5_DRC_TOPIC_FILTER_INVALID = 143,
+ AWS_MQTT5_DRC_TOPIC_NAME_INVALID = 144,
+ AWS_MQTT5_DRC_RECEIVE_MAXIMUM_EXCEEDED = 147,
+ AWS_MQTT5_DRC_TOPIC_ALIAS_INVALID = 148,
+ AWS_MQTT5_DRC_PACKET_TOO_LARGE = 149,
+ AWS_MQTT5_DRC_MESSAGE_RATE_TOO_HIGH = 150,
+ AWS_MQTT5_DRC_QUOTA_EXCEEDED = 151,
+ AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION = 152,
+ AWS_MQTT5_DRC_PAYLOAD_FORMAT_INVALID = 153,
+ AWS_MQTT5_DRC_RETAIN_NOT_SUPPORTED = 154,
+ AWS_MQTT5_DRC_QOS_NOT_SUPPORTED = 155,
+ AWS_MQTT5_DRC_USE_ANOTHER_SERVER = 156,
+ AWS_MQTT5_DRC_SERVER_MOVED = 157,
+ AWS_MQTT5_DRC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED = 158,
+ AWS_MQTT5_DRC_CONNECTION_RATE_EXCEEDED = 159,
+ AWS_MQTT5_DRC_MAXIMUM_CONNECT_TIME = 160,
+ AWS_MQTT5_DRC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED = 161,
+ AWS_MQTT5_DRC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = 162,
+};
+
+/**
+ * Reason code inside PUBACK packets.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124
+ */
+enum aws_mqtt5_puback_reason_code {
+ AWS_MQTT5_PARC_SUCCESS = 0,
+ AWS_MQTT5_PARC_NO_MATCHING_SUBSCRIBERS = 16,
+ AWS_MQTT5_PARC_UNSPECIFIED_ERROR = 128,
+ AWS_MQTT5_PARC_IMPLEMENTATION_SPECIFIC_ERROR = 131,
+ AWS_MQTT5_PARC_NOT_AUTHORIZED = 135,
+ AWS_MQTT5_PARC_TOPIC_NAME_INVALID = 144,
+ AWS_MQTT5_PARC_PACKET_IDENTIFIER_IN_USE = 145,
+ AWS_MQTT5_PARC_QUOTA_EXCEEDED = 151,
+ AWS_MQTT5_PARC_PAYLOAD_FORMAT_INVALID = 153,
+};
+
+/**
+ * Reason code inside SUBACK packet payloads.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901178
+ */
+enum aws_mqtt5_suback_reason_code {
+ AWS_MQTT5_SARC_GRANTED_QOS_0 = 0,
+ AWS_MQTT5_SARC_GRANTED_QOS_1 = 1,
+ AWS_MQTT5_SARC_GRANTED_QOS_2 = 2,
+ AWS_MQTT5_SARC_UNSPECIFIED_ERROR = 128,
+ AWS_MQTT5_SARC_IMPLEMENTATION_SPECIFIC_ERROR = 131,
+ AWS_MQTT5_SARC_NOT_AUTHORIZED = 135,
+ AWS_MQTT5_SARC_TOPIC_FILTER_INVALID = 143,
+ AWS_MQTT5_SARC_PACKET_IDENTIFIER_IN_USE = 145,
+ AWS_MQTT5_SARC_QUOTA_EXCEEDED = 151,
+ AWS_MQTT5_SARC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED = 158,
+ AWS_MQTT5_SARC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED = 161,
+ AWS_MQTT5_SARC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED = 162,
+};
+
+/**
+ * Reason code inside UNSUBACK packet payloads.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901194
+ */
+enum aws_mqtt5_unsuback_reason_code {
+ AWS_MQTT5_UARC_SUCCESS = 0,
+ AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED = 17,
+ AWS_MQTT5_UARC_UNSPECIFIED_ERROR = 128,
+ AWS_MQTT5_UARC_IMPLEMENTATION_SPECIFIC_ERROR = 131,
+ AWS_MQTT5_UARC_NOT_AUTHORIZED = 135,
+ AWS_MQTT5_UARC_TOPIC_FILTER_INVALID = 143,
+ AWS_MQTT5_UARC_PACKET_IDENTIFIER_IN_USE = 145,
+};
+
+/**
+ * Type of mqtt packet.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901022
+ */
+enum aws_mqtt5_packet_type {
+ /* internal indicator that the associated packet is null */
+ AWS_MQTT5_PT_NONE = -1,
+ AWS_MQTT5_PT_RESERVED = 0,
+ AWS_MQTT5_PT_CONNECT = 1,
+ AWS_MQTT5_PT_CONNACK = 2,
+ AWS_MQTT5_PT_PUBLISH = 3,
+ AWS_MQTT5_PT_PUBACK = 4,
+ AWS_MQTT5_PT_PUBREC = 5,
+ AWS_MQTT5_PT_PUBREL = 6,
+ AWS_MQTT5_PT_PUBCOMP = 7,
+ AWS_MQTT5_PT_SUBSCRIBE = 8,
+ AWS_MQTT5_PT_SUBACK = 9,
+ AWS_MQTT5_PT_UNSUBSCRIBE = 10,
+ AWS_MQTT5_PT_UNSUBACK = 11,
+ AWS_MQTT5_PT_PINGREQ = 12,
+ AWS_MQTT5_PT_PINGRESP = 13,
+ AWS_MQTT5_PT_DISCONNECT = 14,
+ AWS_MQTT5_PT_AUTH = 15,
+};
+
+/**
+ * Non-persistent representation of an mqtt5 user property.
+ */
+struct aws_mqtt5_user_property {
+ struct aws_byte_cursor name;
+ struct aws_byte_cursor value;
+};
+
+/**
+ * Optional property describing a message's payload format.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901063
+ */
+enum aws_mqtt5_payload_format_indicator {
+ AWS_MQTT5_PFI_BYTES = 0,
+ AWS_MQTT5_PFI_UTF8 = 1,
+};
+
+/**
+ * Configures how retained messages should be handled when subscribing with a topic filter that matches topics with
+ * associated retained messages.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169
+ */
+enum aws_mqtt5_retain_handling_type {
+
+ /**
+ * Server should send all retained messages on topics that match the subscription's filter.
+ */
+ AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE = 0x00,
+
+ /**
+ * Server should send all retained messages on topics that match the subscription's filter, where this is the
+ * first (relative to connection) subscription filter that matches the topic with a retained message.
+ */
+ AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE_IF_NEW = 0x01,
+
+ /**
+ * Subscribe must not trigger any retained message publishes from the server.
+ */
+ AWS_MQTT5_RHT_DONT_SEND = 0x02,
+};
+
+/**
+ * Configures a single subscription within a Subscribe operation
+ */
+struct aws_mqtt5_subscription_view {
+ /**
+ * Topic filter to subscribe to
+ */
+ struct aws_byte_cursor topic_filter;
+
+ /**
+ * Maximum QoS that the subscriber will accept messages for. The negotiated QoS may be different.
+ */
+ enum aws_mqtt5_qos qos;
+
+ /**
+ * Should the server not send publishes to a client when that client was the one who sent the publish?
+ */
+ bool no_local;
+
+ /**
+ * Should messages sent due to this subscription keep the retain flag preserved on the message?
+ */
+ bool retain_as_published;
+
+ /**
+ * Should retained messages on matching topics be sent in reaction to this subscription?
+ */
+ enum aws_mqtt5_retain_handling_type retain_handling_type;
+};
+
+/**
+ * Read-only snapshot of a DISCONNECT packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901205
+ */
+struct aws_mqtt5_packet_disconnect_view {
+ enum aws_mqtt5_disconnect_reason_code reason_code;
+ const uint32_t *session_expiry_interval_seconds;
+ const struct aws_byte_cursor *reason_string;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+
+ const struct aws_byte_cursor *server_reference;
+};
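+
+/*
+ * Illustrative sketch (editorial, not upstream code) of building a DISCONNECT view with optional fields and
+ * snapshotting it with the storage API declared in mqtt5_packet_storage.h:
+ *
+ *     uint32_t session_expiry = 0;
+ *     struct aws_byte_cursor reason = aws_byte_cursor_from_c_str("client shutting down");
+ *
+ *     struct aws_mqtt5_packet_disconnect_view disconnect_view = {
+ *         .reason_code = AWS_MQTT5_DRC_NORMAL_DISCONNECTION,
+ *         .session_expiry_interval_seconds = &session_expiry,
+ *         .reason_string = &reason,
+ *     };
+ *
+ *     struct aws_mqtt5_packet_disconnect_storage disconnect_storage;
+ *     if (aws_mqtt5_packet_disconnect_storage_init(&disconnect_storage, allocator, &disconnect_view) ==
+ *         AWS_OP_SUCCESS) {
+ *         aws_mqtt5_packet_disconnect_storage_clean_up(&disconnect_storage);
+ *     }
+ */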
+
+/**
+ * Read-only snapshot of a SUBSCRIBE packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901161
+ */
+struct aws_mqtt5_packet_subscribe_view {
+ aws_mqtt5_packet_id_t packet_id;
+
+ size_t subscription_count;
+ const struct aws_mqtt5_subscription_view *subscriptions;
+
+ const uint32_t *subscription_identifier;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+};
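+
+/*
+ * Illustrative sketch (editorial, not upstream code) of describing a SUBSCRIBE with two subscriptions. The
+ * packet_id field is left at zero; it is assigned when the packet is encoded.
+ *
+ *     struct aws_mqtt5_subscription_view subscriptions[] = {
+ *         {
+ *             .topic_filter = aws_byte_cursor_from_c_str("sensors/+/temperature"),
+ *             .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE,
+ *             .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE,
+ *         },
+ *         {
+ *             .topic_filter = aws_byte_cursor_from_c_str("alerts/#"),
+ *             .qos = AWS_MQTT5_QOS_AT_MOST_ONCE,
+ *             .no_local = true,
+ *             .retain_handling_type = AWS_MQTT5_RHT_DONT_SEND,
+ *         },
+ *     };
+ *
+ *     struct aws_mqtt5_packet_subscribe_view subscribe_view = {
+ *         .subscription_count = AWS_ARRAY_SIZE(subscriptions),
+ *         .subscriptions = subscriptions,
+ *     };
+ */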
+
+/**
+ * Read-only snapshot of an UNSUBSCRIBE packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901179
+ */
+struct aws_mqtt5_packet_unsubscribe_view {
+ aws_mqtt5_packet_id_t packet_id;
+
+ size_t topic_filter_count;
+ const struct aws_byte_cursor *topic_filters;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+};
+
+/**
+ * Read-only snapshot of a PUBLISH packet. Used both in configuration of a publish operation and callback
+ * data in message receipt.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901100
+ */
+struct aws_mqtt5_packet_publish_view {
+ struct aws_byte_cursor payload;
+
+ /* packet_id is only set for QoS 1 and QoS 2 */
+ aws_mqtt5_packet_id_t packet_id;
+
+ enum aws_mqtt5_qos qos;
+
+ /*
+ * Used to set the duplicate flag on QoS 1+ re-delivery attempts.
+ * Set to false on all first attempts or QoS 0. Set to true on any re-delivery.
+ */
+ bool duplicate;
+ bool retain;
+ struct aws_byte_cursor topic;
+ const enum aws_mqtt5_payload_format_indicator *payload_format;
+ const uint32_t *message_expiry_interval_seconds;
+ const uint16_t *topic_alias;
+ const struct aws_byte_cursor *response_topic;
+ const struct aws_byte_cursor *correlation_data;
+
+ /* These are ignored when building publish operations */
+ size_t subscription_identifier_count;
+ const uint32_t *subscription_identifiers;
+
+ const struct aws_byte_cursor *content_type;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+};
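+
+/*
+ * Illustrative sketch (editorial, not upstream code) of a QoS 1 PUBLISH view; optional properties are set by
+ * pointing at local variables, and packet_id is assigned later during encoding:
+ *
+ *     enum aws_mqtt5_payload_format_indicator format = AWS_MQTT5_PFI_UTF8;
+ *     uint32_t message_expiry_seconds = 60;
+ *
+ *     struct aws_mqtt5_packet_publish_view publish_view = {
+ *         .payload = aws_byte_cursor_from_c_str("{\"temperature\": 21.5}"),
+ *         .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE,
+ *         .topic = aws_byte_cursor_from_c_str("sensors/device-1/temperature"),
+ *         .payload_format = &format,
+ *         .message_expiry_interval_seconds = &message_expiry_seconds,
+ *     };
+ */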
+
+/**
+ * Read-only snapshot of a CONNECT packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901033
+ */
+struct aws_mqtt5_packet_connect_view {
+ uint16_t keep_alive_interval_seconds;
+
+ struct aws_byte_cursor client_id;
+
+ const struct aws_byte_cursor *username;
+ const struct aws_byte_cursor *password;
+
+ bool clean_start;
+
+ const uint32_t *session_expiry_interval_seconds;
+
+ const uint8_t *request_response_information;
+ const uint8_t *request_problem_information;
+ const uint16_t *receive_maximum;
+ const uint16_t *topic_alias_maximum;
+ const uint32_t *maximum_packet_size_bytes;
+
+ const uint32_t *will_delay_interval_seconds;
+ const struct aws_mqtt5_packet_publish_view *will;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+
+ /* Do not bind these. We don't support AUTH packets yet. For decode/encode testing purposes only. */
+ const struct aws_byte_cursor *authentication_method;
+ const struct aws_byte_cursor *authentication_data;
+};
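+
+/*
+ * Illustrative sketch (editorial, not upstream code) of a minimal CONNECT view requesting a persistent session
+ * for one hour:
+ *
+ *     uint32_t session_expiry_seconds = 3600;
+ *
+ *     struct aws_mqtt5_packet_connect_view connect_view = {
+ *         .keep_alive_interval_seconds = 1200,
+ *         .client_id = aws_byte_cursor_from_c_str("example-client-id"),
+ *         .clean_start = false,
+ *         .session_expiry_interval_seconds = &session_expiry_seconds,
+ *     };
+ */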
+
+/**
+ * Read-only snapshot of a CONNACK packet.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901074
+ */
+struct aws_mqtt5_packet_connack_view {
+ bool session_present;
+ enum aws_mqtt5_connect_reason_code reason_code;
+
+ const uint32_t *session_expiry_interval;
+ const uint16_t *receive_maximum;
+ const enum aws_mqtt5_qos *maximum_qos;
+ const bool *retain_available;
+ const uint32_t *maximum_packet_size;
+ const struct aws_byte_cursor *assigned_client_identifier;
+ const uint16_t *topic_alias_maximum;
+ const struct aws_byte_cursor *reason_string;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+
+ const bool *wildcard_subscriptions_available;
+ const bool *subscription_identifiers_available;
+ const bool *shared_subscriptions_available;
+
+ const uint16_t *server_keep_alive;
+ const struct aws_byte_cursor *response_information;
+ const struct aws_byte_cursor *server_reference;
+ const struct aws_byte_cursor *authentication_method;
+ const struct aws_byte_cursor *authentication_data;
+};
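+
+/*
+ * Editorial note (not part of the upstream sources): optional CONNACK properties are exposed as pointers that
+ * are NULL when the server omitted them. A hypothetical consumer checks before dereferencing:
+ *
+ *     static void s_example_on_connack(const struct aws_mqtt5_packet_connack_view *connack_view) {
+ *         uint16_t keep_alive = 1200;
+ *         if (connack_view->server_keep_alive != NULL) {
+ *             keep_alive = *connack_view->server_keep_alive;
+ *         }
+ *         (void)keep_alive;
+ *     }
+ */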
+
+/**
+ * Read-only snapshot of a PUBACK packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901121
+ */
+struct aws_mqtt5_packet_puback_view {
+ aws_mqtt5_packet_id_t packet_id;
+
+ enum aws_mqtt5_puback_reason_code reason_code;
+ const struct aws_byte_cursor *reason_string;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+};
+
+/**
+ * Read-only snapshot of a SUBACK packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901171
+ */
+struct aws_mqtt5_packet_suback_view {
+ aws_mqtt5_packet_id_t packet_id;
+
+ const struct aws_byte_cursor *reason_string;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+
+ size_t reason_code_count;
+ const enum aws_mqtt5_suback_reason_code *reason_codes;
+};
+
+/**
+ * Read-only snapshot of an UNSUBACK packet
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901187
+ */
+struct aws_mqtt5_packet_unsuback_view {
+ aws_mqtt5_packet_id_t packet_id;
+
+ const struct aws_byte_cursor *reason_string;
+
+ size_t user_property_count;
+ const struct aws_mqtt5_user_property *user_properties;
+
+ size_t reason_code_count;
+ const enum aws_mqtt5_unsuback_reason_code *reason_codes;
+};
+
+#endif /* AWS_MQTT_MQTT5_TYPES_H */
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/client.c b/contrib/restricted/aws/aws-c-mqtt/source/client.c
new file mode 100644
index 0000000000..7aea13b727
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/client.c
@@ -0,0 +1,3232 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/mqtt/client.h>
+
+#include <aws/mqtt/private/client_impl.h>
+#include <aws/mqtt/private/mqtt_client_test_helper.h>
+#include <aws/mqtt/private/packets.h>
+#include <aws/mqtt/private/shared_constants.h>
+#include <aws/mqtt/private/topic_tree.h>
+
+#include <aws/http/proxy.h>
+
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/socket.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/task_scheduler.h>
+
+#include <inttypes.h>
+
+#ifdef AWS_MQTT_WITH_WEBSOCKETS
+# include <aws/http/request_response.h>
+# include <aws/http/websocket.h>
+#endif
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204)
+#endif
+
+/* 3 seconds */
+static const uint64_t s_default_ping_timeout_ns = 3000000000;
+
+/* 20 minutes - This is the default (and max) for AWS IoT as of 2020.02.18 */
+static const uint16_t s_default_keep_alive_sec = 1200;
+
+static int s_mqtt_client_connect(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_complete_fn *on_connection_complete,
+ void *userdata);
+/*******************************************************************************
+ * Helper functions
+ ******************************************************************************/
+
+void mqtt_connection_lock_synced_data(struct aws_mqtt_client_connection *connection) {
+ int err = aws_mutex_lock(&connection->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+void mqtt_connection_unlock_synced_data(struct aws_mqtt_client_connection *connection) {
+ ASSERT_SYNCED_DATA_LOCK_HELD(connection);
+
+ int err = aws_mutex_unlock(&connection->synced_data.lock);
+ AWS_ASSERT(!err);
+ (void)err;
+}
+
+static void s_aws_mqtt_schedule_reconnect_task(struct aws_mqtt_client_connection *connection) {
+ uint64_t next_attempt_ns = 0;
+ aws_high_res_clock_get_ticks(&next_attempt_ns);
+ next_attempt_ns += aws_timestamp_convert(
+ connection->reconnect_timeouts.current_sec, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
+ aws_event_loop_schedule_task_future(connection->loop, &connection->reconnect_task->task, next_attempt_ns);
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Scheduling reconnect, for %" PRIu64 " on event-loop %p",
+ (void *)connection,
+ next_attempt_ns,
+ (void *)connection->loop);
+}
+
+static void s_aws_mqtt_client_destroy(struct aws_mqtt_client *client) {
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "client=%p: Cleaning up MQTT client", (void *)client);
+ aws_client_bootstrap_release(client->bootstrap);
+
+ aws_mem_release(client->allocator, client);
+}
+
+void mqtt_connection_set_state(
+ struct aws_mqtt_client_connection *connection,
+ enum aws_mqtt_client_connection_state state) {
+ ASSERT_SYNCED_DATA_LOCK_HELD(connection);
+ if (connection->synced_data.state == state) {
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: MQTT connection already in state %d", (void *)connection, state);
+ return;
+ }
+ connection->synced_data.state = state;
+}
+
+struct request_timeout_wrapper;
+
+/* used for timeout task */
+struct request_timeout_task_arg {
+ uint16_t packet_id;
+ struct aws_mqtt_client_connection *connection;
+ struct request_timeout_wrapper *task_arg_wrapper;
+};
+
+/*
+ * We want the timeout task to be able to destroy the forward reference from the operation's task arg structure
+ * to the timeout task. But the operation task arg structures don't have any data structure in common. So to allow
+ * the timeout to refer back to a zero-able forward pointer, we wrap a pointer to the timeout task and embed it
+ * in every operation's task arg that needs to create a timeout.
+ */
+struct request_timeout_wrapper {
+ struct request_timeout_task_arg *timeout_task_arg;
+};
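+
+/*
+ * Editorial sketch of the intended usage pattern (hypothetical operation code, not upstream): an operation's
+ * task arg embeds the wrapper, and when the operation completes first it severs the link in both directions so
+ * the timeout task becomes a no-op:
+ *
+ *     struct example_operation_task_arg {
+ *         struct request_timeout_wrapper timeout_wrapper;
+ *     };
+ *
+ *     static void s_example_operation_completed(struct example_operation_task_arg *op_arg) {
+ *         if (op_arg->timeout_wrapper.timeout_task_arg != NULL) {
+ *             op_arg->timeout_wrapper.timeout_task_arg->task_arg_wrapper = NULL;
+ *             op_arg->timeout_wrapper.timeout_task_arg = NULL;
+ *         }
+ *     }
+ */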
+
+static void s_request_timeout(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+ (void)channel_task;
+ struct request_timeout_task_arg *timeout_task_arg = arg;
+ struct aws_mqtt_client_connection *connection = timeout_task_arg->connection;
+
+ if (status == AWS_TASK_STATUS_RUN_READY) {
+ if (timeout_task_arg->task_arg_wrapper != NULL) {
+ mqtt_request_complete(connection, AWS_ERROR_MQTT_TIMEOUT, timeout_task_arg->packet_id);
+ }
+ }
+
+ /*
+ * Whether cancelled or run, if we have a back pointer to the operation's task arg, we must zero it out
+ * so that when it completes it does not try to cancel us, because we will already be freed.
+ *
+ * If we don't have a back pointer to the operation's task arg, that means it already ran and completed.
+ */
+ if (timeout_task_arg->task_arg_wrapper != NULL) {
+ timeout_task_arg->task_arg_wrapper->timeout_task_arg = NULL;
+ timeout_task_arg->task_arg_wrapper = NULL;
+ }
+
+ aws_mem_release(connection->allocator, timeout_task_arg);
+}
+
+static struct request_timeout_task_arg *s_schedule_timeout_task(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id) {
+ /* schedule a timeout task to run in case the server never acknowledges that it received the request */
+ struct aws_channel_task *request_timeout_task = NULL;
+ struct request_timeout_task_arg *timeout_task_arg = NULL;
+ if (!aws_mem_acquire_many(
+ connection->allocator,
+ 2,
+ &timeout_task_arg,
+ sizeof(struct request_timeout_task_arg),
+ &request_timeout_task,
+ sizeof(struct aws_channel_task))) {
+ return NULL;
+ }
+ aws_channel_task_init(request_timeout_task, s_request_timeout, timeout_task_arg, "mqtt_request_timeout");
+ AWS_ZERO_STRUCT(*timeout_task_arg);
+ timeout_task_arg->connection = connection;
+ timeout_task_arg->packet_id = packet_id;
+ uint64_t timestamp = 0;
+ if (aws_channel_current_clock_time(connection->slot->channel, &timestamp)) {
+ aws_mem_release(connection->allocator, timeout_task_arg);
+ return NULL;
+ }
+ timestamp = aws_add_u64_saturating(timestamp, connection->operation_timeout_ns);
+ aws_channel_schedule_task_future(connection->slot->channel, request_timeout_task, timestamp);
+ return timeout_task_arg;
+}
+
+static void s_init_statistics(struct aws_mqtt_connection_operation_statistics_impl *stats) {
+ aws_atomic_store_int(&stats->incomplete_operation_count_atomic, 0);
+ aws_atomic_store_int(&stats->incomplete_operation_size_atomic, 0);
+ aws_atomic_store_int(&stats->unacked_operation_count_atomic, 0);
+ aws_atomic_store_int(&stats->unacked_operation_size_atomic, 0);
+}
+
+/*******************************************************************************
+ * Client Init
+ ******************************************************************************/
+struct aws_mqtt_client *aws_mqtt_client_new(struct aws_allocator *allocator, struct aws_client_bootstrap *bootstrap) {
+
+ aws_mqtt_fatal_assert_library_initialized();
+
+ struct aws_mqtt_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_client));
+ if (client == NULL) {
+ return NULL;
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "client=%p: Initalizing MQTT client", (void *)client);
+
+ client->allocator = allocator;
+ client->bootstrap = aws_client_bootstrap_acquire(bootstrap);
+ aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_aws_mqtt_client_destroy);
+
+ return client;
+}
+
+struct aws_mqtt_client *aws_mqtt_client_acquire(struct aws_mqtt_client *client) {
+ if (client != NULL) {
+ aws_ref_count_acquire(&client->ref_count);
+ }
+
+ return client;
+}
+
+void aws_mqtt_client_release(struct aws_mqtt_client *client) {
+ if (client != NULL) {
+ aws_ref_count_release(&client->ref_count);
+ }
+}
+
+#define AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS 10
+
+/* At this point, the channel for the MQTT connection has completed its shutdown */
+static void s_mqtt_client_shutdown(
+ struct aws_client_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+ (void)channel;
+
+ struct aws_mqtt_client_connection *connection = user_data;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: Channel has been shutdown with error code %d", (void *)connection, error_code);
+
+ enum aws_mqtt_client_connection_state prev_state;
+ struct aws_linked_list cancelling_requests;
+ aws_linked_list_init(&cancelling_requests);
+ bool disconnected_state = false;
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ /*
+ * On a channel that represents a valid connection (successful connack received),
+ * channel_successful_connack_timestamp_ns will be the time the connack was received. Otherwise it will be
+ * zero.
+ *
+ * Use that fact to determine whether or not we should reset the current reconnect backoff delay.
+ *
+ * We reset the reconnect backoff if either of:
+ * 1) the user called disconnect()
+ * 2) a successful connection had lasted longer than our minimum reset time (10s at the moment)
+ */
+ uint64_t now = 0;
+ aws_high_res_clock_get_ticks(&now);
+ uint64_t time_diff = now - connection->reconnect_timeouts.channel_successful_connack_timestamp_ns;
+
+ bool was_user_disconnect = connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING;
+ bool was_sufficiently_long_connection =
+ (connection->reconnect_timeouts.channel_successful_connack_timestamp_ns != 0) &&
+ (time_diff >=
+ aws_timestamp_convert(
+ AWS_RESET_RECONNECT_BACKOFF_DELAY_SECONDS, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL));
+
+ if (was_user_disconnect || was_sufficiently_long_connection) {
+ connection->reconnect_timeouts.current_sec = connection->reconnect_timeouts.min_sec;
+ }
+ connection->reconnect_timeouts.channel_successful_connack_timestamp_ns = 0;
+
+ /* Move all the ongoing requests to the pending requests list, because the responses they are waiting for will
+ * never arrive. Sad. But we will retry. */
+ if (connection->clean_session) {
+ /* For a clean session, the Session lasts as long as the Network Connection. Thus, discard the previous
+ * session */
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Discard ongoing requests and pending requests when a clean session connection lost.",
+ (void *)connection);
+ aws_linked_list_move_all_back(&cancelling_requests, &connection->thread_data.ongoing_requests_list);
+ aws_linked_list_move_all_back(&cancelling_requests, &connection->synced_data.pending_requests_list);
+ } else {
+ aws_linked_list_move_all_back(
+ &connection->synced_data.pending_requests_list, &connection->thread_data.ongoing_requests_list);
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: All subscribe/unsubscribe and publish QoS>0 have been move to pending list",
+ (void *)connection);
+ }
+ prev_state = connection->synced_data.state;
+ switch (connection->synced_data.state) {
+ case AWS_MQTT_CLIENT_STATE_CONNECTED:
+ /* unexpected hangup from broker, try to reconnect */
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_RECONNECTING);
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: connection was unexpected interrupted, switch state to RECONNECTING.",
+ (void *)connection);
+ break;
+ case AWS_MQTT_CLIENT_STATE_DISCONNECTING:
+ /* disconnect requested by user */
+ /* Successfully shut down; if clean session is set, ongoing and pending requests will be cleared */
+ disconnected_state = true;
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: disconnect finished, switch state to DISCONNECTED.",
+ (void *)connection);
+ break;
+ case AWS_MQTT_CLIENT_STATE_CONNECTING:
+ /* failed to connect */
+ disconnected_state = true;
+ break;
+ case AWS_MQTT_CLIENT_STATE_RECONNECTING:
+ /* reconnect failed, schedule the next attempt later, no need to change the state. */
+ break;
+ default:
+ /* AWS_MQTT_CLIENT_STATE_DISCONNECTED */
+ break;
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: current state is %d", (void *)connection, (int)connection->synced_data.state);
+ /* Always clear slot, as that's what's been shutdown */
+ if (connection->slot) {
+ aws_channel_slot_remove(connection->slot);
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: slot is removed successfully", (void *)connection);
+ connection->slot = NULL;
+ }
+
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (!aws_linked_list_empty(&cancelling_requests)) {
+ struct aws_linked_list_node *current = aws_linked_list_front(&cancelling_requests);
+ const struct aws_linked_list_node *end = aws_linked_list_end(&cancelling_requests);
+ while (current != end) {
+ struct aws_mqtt_request *request = AWS_CONTAINER_OF(current, struct aws_mqtt_request, list_node);
+ if (request->on_complete) {
+ request->on_complete(
+ connection,
+ request->packet_id,
+ AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION,
+ request->on_complete_ud);
+ }
+ current = current->next;
+ }
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ while (!aws_linked_list_empty(&cancelling_requests)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancelling_requests);
+ struct aws_mqtt_request *request = AWS_CONTAINER_OF(node, struct aws_mqtt_request, list_node);
+ aws_hash_table_remove(
+ &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL);
+ aws_memory_pool_release(&connection->synced_data.requests_pool, request);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ }
+
+ /* If there's no error code and this wasn't user-requested, set the error code to something useful */
+ if (error_code == AWS_ERROR_SUCCESS) {
+ if (prev_state != AWS_MQTT_CLIENT_STATE_DISCONNECTING && prev_state != AWS_MQTT_CLIENT_STATE_DISCONNECTED) {
+ error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP;
+ }
+ }
+ switch (prev_state) {
+ case AWS_MQTT_CLIENT_STATE_RECONNECTING: {
+ /* If reconnect attempt failed, schedule the next attempt */
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Reconnect failed, retrying", (void *)connection);
+ s_aws_mqtt_schedule_reconnect_task(connection);
+ break;
+ }
+ case AWS_MQTT_CLIENT_STATE_CONNECTED: {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Connection interrupted, calling callback and attempting reconnect",
+ (void *)connection);
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_interrupted, error_code);
+
+ /* In case user called disconnect from the on_interrupted callback */
+ bool stop_reconnect;
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ stop_reconnect = connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING;
+ if (stop_reconnect) {
+ disconnected_state = true;
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: disconnect finished, switch state to DISCONNECTED.",
+ (void *)connection);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (!stop_reconnect) {
+ s_aws_mqtt_schedule_reconnect_task(connection);
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ if (disconnected_state) {
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTED);
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ switch (prev_state) {
+ case AWS_MQTT_CLIENT_STATE_CONNECTED:
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Caller requested disconnect from on_interrupted callback, aborting reconnect",
+ (void *)connection);
+ MQTT_CLIENT_CALL_CALLBACK(connection, on_disconnect);
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_closed, NULL);
+ break;
+ case AWS_MQTT_CLIENT_STATE_DISCONNECTING:
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Disconnect completed, clearing request queue and calling callback",
+ (void *)connection);
+ MQTT_CLIENT_CALL_CALLBACK(connection, on_disconnect);
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_closed, NULL);
+ break;
+ case AWS_MQTT_CLIENT_STATE_CONNECTING:
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Initial connection attempt failed, calling callback",
+ (void *)connection);
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_connection_complete, error_code, 0, false);
+ break;
+ default:
+ break;
+ }
+ /* The connection can die now. Release the refcount */
+ aws_mqtt_client_connection_release(connection);
+ }
+}
+
+/*******************************************************************************
+ * Connection New
+ ******************************************************************************/
+/* The assumption here is that a connection always outlives its channels, and the channel this task was scheduled on
+ * always outlives this task, so all we need to do is check the connection state. If we are in a state that waits
+ * for a CONNACK, kill it off. In the case that the connection died between scheduling this task and it being executed
+ * the status will always be CANCELED because this task will be canceled when the owning channel goes away. */
+static void s_connack_received_timeout(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+ struct aws_mqtt_client_connection *connection = arg;
+
+ if (status == AWS_TASK_STATUS_RUN_READY) {
+ bool time_out = false;
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ time_out =
+ (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_CONNECTING ||
+ connection->synced_data.state == AWS_MQTT_CLIENT_STATE_RECONNECTING);
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ if (time_out) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: mqtt CONNACK response timeout detected", (void *)connection);
+ aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_TIMEOUT);
+ }
+ }
+
+ aws_mem_release(connection->allocator, channel_task);
+}
+
+/**
+ * Callback invoked once the channel has been initialized. Sets up the channel handler and sends out the CONNECT
+ * packet. The on_connack callback is called when the CONNACK packet is received from the server.
+ */
+static void s_mqtt_client_init(
+ struct aws_client_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+ struct aws_io_message *message = NULL;
+
+ /* Setup callback contract is: if error_code is non-zero then channel is NULL. */
+ AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL));
+
+ struct aws_mqtt_client_connection *connection = user_data;
+
+ if (error_code != AWS_OP_SUCCESS) {
+ /* client shutdown already handles this case, so just call that. */
+ s_mqtt_client_shutdown(bootstrap, error_code, channel, user_data);
+ return;
+ }
+
+ AWS_FATAL_ASSERT(aws_channel_get_event_loop(channel) == connection->loop);
+
+ /* If the user requested a disconnect before the channel was set up, skip installing the slot and sending CONNECT. */
+ bool failed_create_slot = false;
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING) {
+ /* This only happens when the user requests a disconnect while reconnecting; we don't need to fire any
+ * callback. The on_disconnect callback will be invoked once the channel finishes shutting down. */
+ mqtt_connection_unlock_synced_data(connection);
+ aws_channel_shutdown(channel, AWS_ERROR_SUCCESS);
+ return;
+ }
+ /* Create the slot */
+ connection->slot = aws_channel_slot_new(channel);
+ if (!connection->slot) {
+ failed_create_slot = true;
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ /* install the slot and handler */
+ if (failed_create_slot) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to create new slot, something has gone horribly wrong, error %d (%s).",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto handle_error;
+ }
+
+ if (aws_channel_slot_insert_end(channel, connection->slot)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to insert slot into channel %p, error %d (%s).",
+ (void *)connection,
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto handle_error;
+ }
+
+ if (aws_channel_slot_set_handler(connection->slot, &connection->handler)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to set MQTT handler into slot on channel %p, error %d (%s).",
+ (void *)connection,
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT, "id=%p: Connection successfully opened, sending CONNECT packet", (void *)connection);
+
+ struct aws_channel_task *connack_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_channel_task));
+ if (!connack_task) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to allocate timeout task.", (void *)connection);
+ goto handle_error;
+ }
+
+ aws_channel_task_init(connack_task, s_connack_received_timeout, connection, "mqtt_connack_timeout");
+
+ uint64_t now = 0;
+ if (aws_channel_current_clock_time(channel, &now)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "static: Failed to setting MQTT handler into slot on channel %p, error %d (%s).",
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto handle_error;
+ }
+ now += connection->ping_timeout_ns;
+ aws_channel_schedule_task_future(channel, connack_task, now);
+
+ struct aws_byte_cursor client_id_cursor = aws_byte_cursor_from_buf(&connection->client_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: MQTT Connection initializing CONNECT packet for client-id '" PRInSTR "'",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(client_id_cursor));
+
+ /* Send the connect packet */
+ struct aws_mqtt_packet_connect connect;
+ aws_mqtt_packet_connect_init(
+ &connect, client_id_cursor, connection->clean_session, connection->keep_alive_time_secs);
+
+ if (connection->will.topic.buffer) {
+ /* Add will if present */
+
+ struct aws_byte_cursor topic_cur = aws_byte_cursor_from_buf(&connection->will.topic);
+ struct aws_byte_cursor payload_cur = aws_byte_cursor_from_buf(&connection->will.payload);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Adding will to connection on " PRInSTR " with payload " PRInSTR,
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(topic_cur),
+ AWS_BYTE_CURSOR_PRI(payload_cur));
+ aws_mqtt_packet_connect_add_will(
+ &connect, topic_cur, connection->will.qos, connection->will.retain, payload_cur);
+ }
+
+ if (connection->username) {
+ struct aws_byte_cursor username_cur = aws_byte_cursor_from_string(connection->username);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Adding username " PRInSTR " to connection",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(username_cur));
+
+ struct aws_byte_cursor password_cur = {
+ .ptr = NULL,
+ .len = 0,
+ };
+
+ if (connection->password) {
+ password_cur = aws_byte_cursor_from_string(connection->password);
+ }
+
+ aws_mqtt_packet_connect_add_credentials(&connect, username_cur, password_cur);
+ }
+
+ message = mqtt_get_message_for_packet(connection, &connect.fixed_header);
+ if (!message) {
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to get message from pool", (void *)connection);
+ goto handle_error;
+ }
+
+ if (aws_mqtt_packet_connect_encode(&message->message_data, &connect)) {
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to encode CONNECT packet", (void *)connection);
+ goto handle_error;
+ }
+
+ if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to send encoded CONNECT packet upstream", (void *)connection);
+ goto handle_error;
+ }
+
+ return;
+
+handle_error:
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_connection_complete, aws_last_error(), 0, false);
+ aws_channel_shutdown(channel, aws_last_error());
+
+ if (message) {
+ aws_mem_release(message->allocator, message);
+ }
+}
+
+static void s_attempt_reconnect(struct aws_task *task, void *userdata, enum aws_task_status status) {
+
+ (void)task;
+
+ struct aws_mqtt_reconnect_task *reconnect = userdata;
+ struct aws_mqtt_client_connection *connection = aws_atomic_load_ptr(&reconnect->connection_ptr);
+
+ if (status == AWS_TASK_STATUS_RUN_READY && connection) {
+ /* If the task is not cancelled and a connection has not succeeded, attempt reconnect */
+
+ mqtt_connection_lock_synced_data(connection);
+
+ /* Check before multiplying to avoid potential overflow */
+ if (connection->reconnect_timeouts.current_sec > connection->reconnect_timeouts.max_sec / 2) {
+ connection->reconnect_timeouts.current_sec = connection->reconnect_timeouts.max_sec;
+ } else {
+ connection->reconnect_timeouts.current_sec *= 2;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Attempting reconnect, if it fails next attempt will be in %" PRIu64 " seconds",
+ (void *)connection,
+ connection->reconnect_timeouts.current_sec);
+
+ mqtt_connection_unlock_synced_data(connection);
+
+ if (s_mqtt_client_connect(
+ connection, connection->on_connection_complete, connection->on_connection_complete_ud)) {
+ /* If reconnect attempt failed, schedule the next attempt */
+ s_aws_mqtt_schedule_reconnect_task(connection);
+ } else {
+ /* Ideally, it would be nice to move this inside the lock, but I'm unsure of the correctness */
+ connection->reconnect_task->task.timestamp = 0;
+ }
+ } else {
+ aws_mem_release(reconnect->allocator, reconnect);
+ }
+}
+
+void aws_create_reconnect_task(struct aws_mqtt_client_connection *connection) {
+ if (connection->reconnect_task == NULL) {
+ connection->reconnect_task = aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_mqtt_reconnect_task));
+ AWS_FATAL_ASSERT(connection->reconnect_task != NULL);
+
+ aws_atomic_init_ptr(&connection->reconnect_task->connection_ptr, connection);
+ connection->reconnect_task->allocator = connection->allocator;
+ aws_task_init(
+ &connection->reconnect_task->task, s_attempt_reconnect, connection->reconnect_task, "mqtt_reconnect");
+ }
+}
+
+static uint64_t s_hash_uint16_t(const void *item) {
+ return *(uint16_t *)item;
+}
+
+static bool s_uint16_t_eq(const void *a, const void *b) {
+ return *(uint16_t *)a == *(uint16_t *)b;
+}
+
+static void s_mqtt_client_connection_destroy_final(struct aws_mqtt_client_connection *connection) {
+ AWS_PRECONDITION(!connection || connection->allocator);
+ if (!connection) {
+ return;
+ }
+
+ /* If the slot is not NULL, the connection is still connected; this function should never be called in that
+ * state. */
+ AWS_ASSERT(!connection->slot);
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Destroying connection", (void *)connection);
+
+ /* If the reconnect_task hasn't been freed yet, free it */
+ if (connection->reconnect_task) {
+ aws_mem_release(connection->reconnect_task->allocator, connection->reconnect_task);
+ }
+ aws_string_destroy(connection->host_name);
+
+ /* Clear the credentials */
+ if (connection->username) {
+ aws_string_destroy_secure(connection->username);
+ }
+ if (connection->password) {
+ aws_string_destroy_secure(connection->password);
+ }
+
+ /* Clean up the will */
+ aws_byte_buf_clean_up(&connection->will.topic);
+ aws_byte_buf_clean_up(&connection->will.payload);
+
+ /* Clear the client_id */
+ aws_byte_buf_clean_up(&connection->client_id);
+
+ /* Free all of the active subscriptions */
+ aws_mqtt_topic_tree_clean_up(&connection->thread_data.subscriptions);
+
+ aws_hash_table_clean_up(&connection->synced_data.outstanding_requests_table);
+ /* clean up the pending_requests if it's not empty */
+ while (!aws_linked_list_empty(&connection->synced_data.pending_requests_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&connection->synced_data.pending_requests_list);
+ struct aws_mqtt_request *request = AWS_CONTAINER_OF(node, struct aws_mqtt_request, list_node);
+ /* Fire the callback and clean up the memory, as the connection gets destroyed. */
+ if (request->on_complete) {
+ request->on_complete(
+ connection, request->packet_id, AWS_ERROR_MQTT_CONNECTION_DESTROYED, request->on_complete_ud);
+ }
+ aws_memory_pool_release(&connection->synced_data.requests_pool, request);
+ }
+ aws_memory_pool_clean_up(&connection->synced_data.requests_pool);
+
+ aws_mutex_clean_up(&connection->synced_data.lock);
+
+ aws_tls_connection_options_clean_up(&connection->tls_options);
+
+ /* Clean up the websocket proxy options */
+ if (connection->http_proxy_config) {
+ aws_http_proxy_config_destroy(connection->http_proxy_config);
+ connection->http_proxy_config = NULL;
+ }
+
+ aws_mqtt_client_release(connection->client);
+
+ /* Frees all allocated memory */
+ aws_mem_release(connection->allocator, connection);
+}
+
+static void s_on_final_disconnect(struct aws_mqtt_client_connection *connection, void *userdata) {
+ (void)userdata;
+
+ s_mqtt_client_connection_destroy_final(connection);
+}
+
+static void s_mqtt_client_connection_start_destroy(struct aws_mqtt_client_connection *connection) {
+ bool call_destroy_final = false;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Last refcount on connection has been released, start destroying the connection.",
+ (void *)connection);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTED) {
+ /*
+ * We don't call the on_disconnect callback until we've transitioned to the DISCONNECTED state. So it's
+ * safe to change it now while we hold the lock since we know we're not DISCONNECTED yet.
+ */
+ connection->on_disconnect = s_on_final_disconnect;
+
+ if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTING) {
+ mqtt_disconnect_impl(connection, AWS_ERROR_SUCCESS);
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: final refcount has been released, switch state to DISCONNECTING.",
+ (void *)connection);
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTING);
+ }
+ } else {
+ call_destroy_final = true;
+ }
+
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (call_destroy_final) {
+ s_mqtt_client_connection_destroy_final(connection);
+ }
+}
+
+struct aws_mqtt_client_connection *aws_mqtt_client_connection_new(struct aws_mqtt_client *client) {
+ AWS_PRECONDITION(client);
+
+ struct aws_mqtt_client_connection *connection =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt_client_connection));
+ if (!connection) {
+ return NULL;
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Creating new connection", (void *)connection);
+
+ /* Initialize the client */
+ connection->allocator = client->allocator;
+ aws_ref_count_init(
+ &connection->ref_count, connection, (aws_simple_completion_callback *)s_mqtt_client_connection_start_destroy);
+ connection->client = aws_mqtt_client_acquire(client);
+ AWS_ZERO_STRUCT(connection->synced_data);
+ connection->synced_data.state = AWS_MQTT_CLIENT_STATE_DISCONNECTED;
+ connection->reconnect_timeouts.min_sec = 1;
+ connection->reconnect_timeouts.current_sec = 1;
+ connection->reconnect_timeouts.max_sec = 128;
+ aws_linked_list_init(&connection->synced_data.pending_requests_list);
+ aws_linked_list_init(&connection->thread_data.ongoing_requests_list);
+ s_init_statistics(&connection->operation_statistics_impl);
+
+ if (aws_mutex_init(&connection->synced_data.lock)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to initialize mutex, error %d (%s)",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto failed_init_mutex;
+ }
+
+ if (aws_mqtt_topic_tree_init(&connection->thread_data.subscriptions, connection->allocator)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to initialize subscriptions topic_tree, error %d (%s)",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto failed_init_subscriptions;
+ }
+
+ if (aws_memory_pool_init(
+ &connection->synced_data.requests_pool, connection->allocator, 32, sizeof(struct aws_mqtt_request))) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to initialize request pool, error %d (%s)",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto failed_init_requests_pool;
+ }
+
+ if (aws_hash_table_init(
+ &connection->synced_data.outstanding_requests_table,
+ connection->allocator,
+ sizeof(struct aws_mqtt_request *),
+ s_hash_uint16_t,
+ s_uint16_t_eq,
+ NULL,
+ NULL)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to initialize outstanding requests table, error %d (%s)",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto failed_init_outstanding_requests_table;
+ }
+
+ connection->loop = aws_event_loop_group_get_next_loop(client->bootstrap->event_loop_group);
+
+ /* Initialize the handler */
+ connection->handler.alloc = connection->allocator;
+ connection->handler.vtable = aws_mqtt_get_client_channel_vtable();
+ connection->handler.impl = connection;
+
+ return connection;
+
+failed_init_outstanding_requests_table:
+ aws_memory_pool_clean_up(&connection->synced_data.requests_pool);
+
+failed_init_requests_pool:
+ aws_mqtt_topic_tree_clean_up(&connection->thread_data.subscriptions);
+
+failed_init_subscriptions:
+ aws_mutex_clean_up(&connection->synced_data.lock);
+
+failed_init_mutex:
+ aws_mem_release(client->allocator, connection);
+
+ return NULL;
+}
+
+struct aws_mqtt_client_connection *aws_mqtt_client_connection_acquire(struct aws_mqtt_client_connection *connection) {
+ if (connection != NULL) {
+ aws_ref_count_acquire(&connection->ref_count);
+ }
+
+ return connection;
+}
+
+void aws_mqtt_client_connection_release(struct aws_mqtt_client_connection *connection) {
+ if (connection != NULL) {
+ aws_ref_count_release(&connection->ref_count);
+ }
+}
+
+/*******************************************************************************
+ * Connection Configuration
+ ******************************************************************************/
+
+/* To configure the connection, ensure the state is DISCONNECTED or CONNECTED */
+static int s_check_connection_state_for_configuration(struct aws_mqtt_client_connection *connection) {
+ int result = AWS_OP_SUCCESS;
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTED &&
+ connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Connection is currently pending connect/disconnect. Unable to make configuration changes until "
+ "pending operation completes.",
+ (void *)connection);
+ result = AWS_OP_ERR;
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ return result;
+}
+
+int aws_mqtt_client_connection_set_will(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ const struct aws_byte_cursor *payload) {
+
+ AWS_PRECONDITION(connection);
+ AWS_PRECONDITION(topic);
+ if (s_check_connection_state_for_configuration(connection)) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ int result = AWS_OP_ERR;
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Setting last will with topic \"" PRInSTR "\"",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(*topic));
+
+ if (!aws_mqtt_is_valid_topic(topic)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Will topic is invalid", (void *)connection);
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ }
+
+ struct aws_byte_buf local_topic_buf;
+ struct aws_byte_buf local_payload_buf;
+ AWS_ZERO_STRUCT(local_topic_buf);
+ AWS_ZERO_STRUCT(local_payload_buf);
+ struct aws_byte_buf topic_buf = aws_byte_buf_from_array(topic->ptr, topic->len);
+ if (aws_byte_buf_init_copy(&local_topic_buf, connection->allocator, &topic_buf)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy will topic", (void *)connection);
+ goto cleanup;
+ }
+
+ connection->will.qos = qos;
+ connection->will.retain = retain;
+
+ struct aws_byte_buf payload_buf = aws_byte_buf_from_array(payload->ptr, payload->len);
+ if (aws_byte_buf_init_copy(&local_payload_buf, connection->allocator, &payload_buf)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy will body", (void *)connection);
+ goto cleanup;
+ }
+
+ if (connection->will.topic.len) {
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Will has been set before, resetting it.", (void *)connection);
+ }
+ /* Succeed. */
+ result = AWS_OP_SUCCESS;
+
+ /* swap the local buffer with connection */
+ struct aws_byte_buf temp = local_topic_buf;
+ local_topic_buf = connection->will.topic;
+ connection->will.topic = temp;
+ temp = local_payload_buf;
+ local_payload_buf = connection->will.payload;
+ connection->will.payload = temp;
+
+cleanup:
+ aws_byte_buf_clean_up(&local_topic_buf);
+ aws_byte_buf_clean_up(&local_payload_buf);
+
+ return result;
+}
+
+int aws_mqtt_client_connection_set_login(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *username,
+ const struct aws_byte_cursor *password) {
+
+ AWS_PRECONDITION(connection);
+ AWS_PRECONDITION(username);
+ if (s_check_connection_state_for_configuration(connection)) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ int result = AWS_OP_ERR;
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting username and password", (void *)connection);
+
+ struct aws_string *username_string = NULL;
+ struct aws_string *password_string = NULL;
+
+ username_string = aws_string_new_from_array(connection->allocator, username->ptr, username->len);
+ if (!username_string) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy username", (void *)connection);
+ goto cleanup;
+ }
+
+ if (password) {
+ password_string = aws_string_new_from_array(connection->allocator, password->ptr, password->len);
+ if (!password_string) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy password", (void *)connection);
+ goto cleanup;
+ }
+ }
+
+ if (connection->username) {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: Login information has been set before, resetting it.", (void *)connection);
+ }
+    /* Success. */
+ result = AWS_OP_SUCCESS;
+
+    /* Swap the local strings with the connection's username and password. */
+ struct aws_string *temp = username_string;
+ username_string = connection->username;
+ connection->username = temp;
+ temp = password_string;
+ password_string = connection->password;
+ connection->password = temp;
+
+cleanup:
+ aws_string_destroy_secure(username_string);
+ aws_string_destroy_secure(password_string);
+
+ return result;
+}
+
+int aws_mqtt_client_connection_set_reconnect_timeout(
+ struct aws_mqtt_client_connection *connection,
+ uint64_t min_timeout,
+ uint64_t max_timeout) {
+
+ AWS_PRECONDITION(connection);
+ if (s_check_connection_state_for_configuration(connection)) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Setting reconnect timeouts min: %" PRIu64 " max: %" PRIu64,
+ (void *)connection,
+ min_timeout,
+ max_timeout);
+ connection->reconnect_timeouts.min_sec = min_timeout;
+ connection->reconnect_timeouts.max_sec = max_timeout;
+ connection->reconnect_timeouts.current_sec = min_timeout;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_client_connection_set_connection_interruption_handlers(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_interrupted_fn *on_interrupted,
+ void *on_interrupted_ud,
+ aws_mqtt_client_on_connection_resumed_fn *on_resumed,
+ void *on_resumed_ud) {
+
+ AWS_PRECONDITION(connection);
+ if (s_check_connection_state_for_configuration(connection)) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: Setting connection interrupted and resumed handlers", (void *)connection);
+
+ connection->on_interrupted = on_interrupted;
+ connection->on_interrupted_ud = on_interrupted_ud;
+ connection->on_resumed = on_resumed;
+ connection->on_resumed_ud = on_resumed_ud;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_client_connection_set_connection_closed_handler(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_closed_fn *on_closed,
+ void *on_closed_ud) {
+
+ AWS_PRECONDITION(connection);
+ if (s_check_connection_state_for_configuration(connection)) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting connection closed handler", (void *)connection);
+
+ connection->on_closed = on_closed;
+ connection->on_closed_ud = on_closed_ud;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_client_connection_set_on_any_publish_handler(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_publish_received_fn *on_any_publish,
+ void *on_any_publish_ud) {
+
+ AWS_PRECONDITION(connection);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_CONNECTED) {
+ mqtt_connection_unlock_synced_data(connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Connection is connected, publishes may arrive anytime. Unable to set publish handler until "
+ "offline.",
+ (void *)connection);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting on_any_publish handler", (void *)connection);
+
+ connection->on_any_publish = on_any_publish;
+ connection->on_any_publish_ud = on_any_publish_ud;
+
+ return AWS_OP_SUCCESS;
+}
+
+/*******************************************************************************
+ * Websockets
+ ******************************************************************************/
+#ifdef AWS_MQTT_WITH_WEBSOCKETS
+
+int aws_mqtt_client_connection_use_websockets(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_transform_websocket_handshake_fn *transformer,
+ void *transformer_ud,
+ aws_mqtt_validate_websocket_handshake_fn *validator,
+ void *validator_ud) {
+
+ connection->websocket.handshake_transformer = transformer;
+ connection->websocket.handshake_transformer_ud = transformer_ud;
+ connection->websocket.handshake_validator = validator;
+ connection->websocket.handshake_validator_ud = validator_ud;
+ connection->websocket.enabled = true;
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Using websockets", (void *)connection);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_client_connection_set_http_proxy_options(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_http_proxy_options *proxy_options) {
+
+    /* If there are existing proxy options, nuke them */
+ if (connection->http_proxy_config) {
+ aws_http_proxy_config_destroy(connection->http_proxy_config);
+ connection->http_proxy_config = NULL;
+ }
+
+ connection->http_proxy_config =
+ aws_http_proxy_config_new_tunneling_from_proxy_options(connection->allocator, proxy_options);
+
+ return connection->http_proxy_config != NULL ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
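+/* Websocket shutdown callback: forwards the shutdown into the shared MQTT channel-shutdown path, then releases the
+ * websocket. */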
+static void s_on_websocket_shutdown(struct aws_websocket *websocket, int error_code, void *user_data) {
+ struct aws_mqtt_client_connection *connection = user_data;
+
+ struct aws_channel *channel = connection->slot ? connection->slot->channel : NULL;
+
+ s_mqtt_client_shutdown(connection->client->bootstrap, error_code, channel, connection);
+
+ if (websocket) {
+ aws_websocket_release(websocket);
+ }
+}
+
+static void s_on_websocket_setup(const struct aws_websocket_on_connection_setup_data *setup, void *user_data) {
+
+ /* Setup callback contract is: if error_code is non-zero then websocket is NULL. */
+ AWS_FATAL_ASSERT((setup->error_code != 0) == (setup->websocket == NULL));
+
+ struct aws_mqtt_client_connection *connection = user_data;
+ struct aws_channel *channel = NULL;
+
+ if (connection->websocket.handshake_request) {
+ aws_http_message_release(connection->websocket.handshake_request);
+ connection->websocket.handshake_request = NULL;
+ }
+
+ if (setup->websocket) {
+ channel = aws_websocket_get_channel(setup->websocket);
+ AWS_FATAL_ASSERT(channel);
+ AWS_FATAL_ASSERT(aws_channel_get_event_loop(channel) == connection->loop);
+
+ /* Websocket must be "converted" before the MQTT handler can be installed next to it. */
+ if (aws_websocket_convert_to_midchannel_handler(setup->websocket)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed converting websocket, error %d (%s)",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ aws_channel_shutdown(channel, aws_last_error());
+ return;
+ }
+
+ /* If validation callback is set, let the user accept/reject the handshake */
+ if (connection->websocket.handshake_validator) {
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Validating websocket handshake response.", (void *)connection);
+
+ if (connection->websocket.handshake_validator(
+ connection,
+ setup->handshake_response_header_array,
+ setup->num_handshake_response_headers,
+ connection->websocket.handshake_validator_ud)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failure reported by websocket handshake validator callback, error %d (%s)",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ aws_channel_shutdown(channel, aws_last_error());
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: Done validating websocket handshake response.", (void *)connection);
+ }
+ }
+
+ /* Call into the channel-setup callback, the rest of the logic is the same. */
+ s_mqtt_client_init(connection->client->bootstrap, setup->error_code, channel, connection);
+}
+
+static aws_mqtt_transform_websocket_handshake_complete_fn s_websocket_handshake_transform_complete; /* fwd declare */
+
+static int s_websocket_connect(struct aws_mqtt_client_connection *connection) {
+ AWS_ASSERT(connection->websocket.enabled);
+
+ /* Build websocket handshake request */
+ connection->websocket.handshake_request = aws_http_message_new_websocket_handshake_request(
+ connection->allocator, *g_websocket_handshake_default_path, aws_byte_cursor_from_string(connection->host_name));
+
+ if (!connection->websocket.handshake_request) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to generate websocket handshake request", (void *)connection);
+ goto error;
+ }
+
+ if (aws_http_message_add_header(
+ connection->websocket.handshake_request, *g_websocket_handshake_default_protocol_header)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to generate websocket handshake request", (void *)connection);
+ goto error;
+ }
+
+ /* If user registered a transform callback, call it and wait for transform_complete() to be called.
+ * If no callback registered, call the transform_complete() function ourselves. */
+ if (connection->websocket.handshake_transformer) {
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Transforming websocket handshake request.", (void *)connection);
+
+ connection->websocket.handshake_transformer(
+ connection->websocket.handshake_request,
+ connection->websocket.handshake_transformer_ud,
+ s_websocket_handshake_transform_complete,
+ connection);
+
+ } else {
+ s_websocket_handshake_transform_complete(
+ connection->websocket.handshake_request, AWS_ERROR_SUCCESS, connection);
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_http_message_release(connection->websocket.handshake_request);
+ connection->websocket.handshake_request = NULL;
+ return AWS_OP_ERR;
+}
+
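+/* Invoked once the handshake request is final (either directly, or by the user's transform callback); kicks off the
+ * actual websocket connection attempt. */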
+static void s_websocket_handshake_transform_complete(
+ struct aws_http_message *handshake_request,
+ int error_code,
+ void *complete_ctx) {
+
+ struct aws_mqtt_client_connection *connection = complete_ctx;
+
+ if (error_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failure reported by websocket handshake transform callback.",
+ (void *)connection);
+
+ goto error;
+ }
+
+ if (connection->websocket.handshake_transformer) {
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Done transforming websocket handshake request.", (void *)connection);
+ }
+
+ /* Call websocket connect() */
+ struct aws_websocket_client_connection_options websocket_options = {
+ .allocator = connection->allocator,
+ .bootstrap = connection->client->bootstrap,
+ .socket_options = &connection->socket_options,
+ .tls_options = connection->tls_options.ctx ? &connection->tls_options : NULL,
+ .host = aws_byte_cursor_from_string(connection->host_name),
+ .port = connection->port,
+ .handshake_request = handshake_request,
+ .initial_window_size = 0, /* Prevent websocket data from arriving before the MQTT handler is installed */
+ .user_data = connection,
+ .on_connection_setup = s_on_websocket_setup,
+ .on_connection_shutdown = s_on_websocket_shutdown,
+ .requested_event_loop = connection->loop,
+ };
+
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+ if (connection->http_proxy_config != NULL) {
+ aws_http_proxy_options_init_from_config(&proxy_options, connection->http_proxy_config);
+ websocket_options.proxy_options = &proxy_options;
+ }
+
+ if (aws_websocket_client_connect(&websocket_options)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to initiate websocket connection.", (void *)connection);
+ error_code = aws_last_error();
+ goto error;
+ }
+
+ /* Success */
+ return;
+
+error:;
+ /* Proceed to next step, telling it that we failed. */
+ struct aws_websocket_on_connection_setup_data websocket_setup = {.error_code = error_code};
+ s_on_websocket_setup(&websocket_setup, connection);
+}
+
+#else /* AWS_MQTT_WITH_WEBSOCKETS */
+int aws_mqtt_client_connection_use_websockets(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_transform_websocket_handshake_fn *transformer,
+ void *transformer_ud,
+ aws_mqtt_validate_websocket_handshake_fn *validator,
+ void *validator_ud) {
+
+ (void)connection;
+ (void)transformer;
+ (void)transformer_ud;
+ (void)validator;
+ (void)validator_ud;
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Cannot use websockets unless library is built with MQTT_WITH_WEBSOCKETS option.",
+ (void *)connection);
+
+ return aws_raise_error(AWS_ERROR_MQTT_BUILT_WITHOUT_WEBSOCKETS);
+}
+
+int aws_mqtt_client_connection_set_websocket_proxy_options(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_http_proxy_options *proxy_options) {
+
+ (void)connection;
+ (void)proxy_options;
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Cannot use websockets unless library is built with MQTT_WITH_WEBSOCKETS option.",
+ (void *)connection);
+
+ return aws_raise_error(AWS_ERROR_MQTT_BUILT_WITHOUT_WEBSOCKETS);
+}
+#endif /* AWS_MQTT_WITH_WEBSOCKETS */
+
+/*******************************************************************************
+ * Connect
+ ******************************************************************************/
+
+int aws_mqtt_client_connection_connect(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_mqtt_connection_options *connection_options) {
+
+    /* TODO: Do we need to support resuming the connection if the user connects to the same endpoint with the same
+     * connection object and clean_session is false?
+     * If not, the broker will resume the session in that case while we pretend we are making a new connection, which
+     * may cause some confusing behavior. This is basically what we have now. NOTE: The topic_tree is living with the
+     * connection right now, which is really confusing.
+     * If yes, an edge case would be: the user disconnects from a connection with clean_session set to false, then
+     * connects to another endpoint with the same connection object. We would probably need to clear all the state
+     * from the previous connection and create a new "connection". The problem is what happens if the user then
+     * finishes the second connection and reconnects to the first endpoint; there is no way for us to resume that
+     * session in this case. */
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Opening connection", (void *)connection);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_DISCONNECTED) {
+ mqtt_connection_unlock_synced_data(connection);
+ return aws_raise_error(AWS_ERROR_MQTT_ALREADY_CONNECTED);
+ }
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_CONNECTING);
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT, "id=%p: Begin connecting process, switch state to CONNECTING.", (void *)connection);
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (connection->host_name) {
+ aws_string_destroy(connection->host_name);
+ }
+
+ connection->host_name = aws_string_new_from_array(
+ connection->allocator, connection_options->host_name.ptr, connection_options->host_name.len);
+ connection->port = connection_options->port;
+ connection->socket_options = *connection_options->socket_options;
+ connection->clean_session = connection_options->clean_session;
+ connection->keep_alive_time_secs = connection_options->keep_alive_time_secs;
+ connection->connection_count = 0;
+
+ if (!connection->keep_alive_time_secs) {
+ connection->keep_alive_time_secs = s_default_keep_alive_sec;
+ }
+ if (!connection_options->protocol_operation_timeout_ms) {
+ connection->operation_timeout_ns = UINT64_MAX;
+ } else {
+ connection->operation_timeout_ns = aws_timestamp_convert(
+ (uint64_t)connection_options->protocol_operation_timeout_ms,
+ AWS_TIMESTAMP_MILLIS,
+ AWS_TIMESTAMP_NANOS,
+ NULL);
+ }
+
+ if (!connection_options->ping_timeout_ms) {
+ connection->ping_timeout_ns = s_default_ping_timeout_ns;
+ } else {
+ connection->ping_timeout_ns = aws_timestamp_convert(
+ (uint64_t)connection_options->ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+ }
+
+ /* Keep alive time should always be greater than the timeouts. */
+ if (AWS_UNLIKELY(connection->keep_alive_time_secs * (uint64_t)AWS_TIMESTAMP_NANOS <= connection->ping_timeout_ns)) {
+ AWS_LOGF_FATAL(
+            AWS_LS_MQTT_CLIENT,
+            "id=%p: Illegal configuration: connection keep-alive %" PRIu64
+            " ns must be greater than the ping timeout %" PRIu64 " ns.",
+ (void *)connection,
+ (uint64_t)connection->keep_alive_time_secs * (uint64_t)AWS_TIMESTAMP_NANOS,
+ connection->ping_timeout_ns);
+ AWS_FATAL_ASSERT(
+ connection->keep_alive_time_secs * (uint64_t)AWS_TIMESTAMP_NANOS > connection->ping_timeout_ns);
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: using ping timeout of %" PRIu64 " ns",
+ (void *)connection,
+ connection->ping_timeout_ns);
+
+    /* Copy the TLS options; if the user didn't set a server name, use our copy of the host name. */
+ if (connection_options->tls_options) {
+ connection->use_tls = true;
+ if (aws_tls_connection_options_copy(&connection->tls_options, connection_options->tls_options)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy TLS Connection Options into connection", (void *)connection);
+ return AWS_OP_ERR;
+ }
+
+ if (!connection_options->tls_options->server_name) {
+ struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(connection->host_name);
+ if (aws_tls_connection_options_set_server_name(
+ &connection->tls_options, connection->allocator, &host_name_cur)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "id=%p: Failed to set TLS Connection Options server name", (void *)connection);
+ goto error;
+ }
+ }
+
+ } else {
+ AWS_ZERO_STRUCT(connection->tls_options);
+ }
+
+ /* Clean up old client_id */
+ if (connection->client_id.buffer) {
+ aws_byte_buf_clean_up(&connection->client_id);
+ }
+
+    /* Copy the client_id provided in the connection options */
+ struct aws_byte_buf client_id_buf =
+ aws_byte_buf_from_array(connection_options->client_id.ptr, connection_options->client_id.len);
+ if (aws_byte_buf_init_copy(&connection->client_id, connection->allocator, &client_id_buf)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Failed to copy client_id into connection", (void *)connection);
+ goto error;
+ }
+
+ struct aws_linked_list cancelling_requests;
+ aws_linked_list_init(&cancelling_requests);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ if (connection->clean_session) {
+ AWS_LOGF_TRACE(
+                AWS_LS_MQTT_CLIENT,
+                "id=%p: Clean session connection requested; all previous requests will be failed",
+ (void *)connection);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_requests_list, &cancelling_requests);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (!aws_linked_list_empty(&cancelling_requests)) {
+
+ struct aws_linked_list_node *current = aws_linked_list_front(&cancelling_requests);
+ const struct aws_linked_list_node *end = aws_linked_list_end(&cancelling_requests);
+        /* Invoke the completion callbacks for all requests from the previous session */
+ while (current != end) {
+ struct aws_mqtt_request *request = AWS_CONTAINER_OF(current, struct aws_mqtt_request, list_node);
+ AWS_LOGF_TRACE(
+                AWS_LS_MQTT_CLIENT,
+                "id=%p: Establishing a new clean session connection, discarding previous request %" PRIu16,
+ (void *)connection,
+ request->packet_id);
+ if (request->on_complete) {
+ request->on_complete(
+ connection,
+ request->packet_id,
+ AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION,
+ request->on_complete_ud);
+ }
+ current = current->next;
+ }
+        /* Free the resources */
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ while (!aws_linked_list_empty(&cancelling_requests)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_front(&cancelling_requests);
+ struct aws_mqtt_request *request = AWS_CONTAINER_OF(node, struct aws_mqtt_request, list_node);
+ aws_hash_table_remove(
+ &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL);
+ aws_memory_pool_release(&connection->synced_data.requests_pool, request);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ }
+
+    /* Begin the connecting process; acquire the connection to keep it alive until we disconnect */
+ aws_mqtt_client_connection_acquire(connection);
+
+ if (s_mqtt_client_connect(connection, connection_options->on_connection_complete, connection_options->user_data)) {
+ /*
+ * An error calling s_mqtt_client_connect should (must) be mutually exclusive with s_mqtt_client_shutdown().
+ * So it should be safe and correct to call release now to undo the pinning we did a few lines above.
+ */
+ aws_mqtt_client_connection_release(connection);
+
+        /* client_id has already been updated, but it will get cleaned up when the connection gets cleaned up,
+         * so we don't need to worry about it here. */
+ if (connection->clean_session) {
+ AWS_LOGF_WARN(
+                AWS_LS_MQTT_CLIENT, "id=%p: The previous session has been cleaned up and lost!", (void *)connection);
+ }
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+ aws_tls_connection_options_clean_up(&connection->tls_options);
+ AWS_ZERO_STRUCT(connection->tls_options);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTED);
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ return AWS_OP_ERR;
+}
+
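+/* Kicks off the transport-level connection: the websocket path when enabled, otherwise a plain (optionally proxied)
+ * socket channel. */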
+static int s_mqtt_client_connect(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_complete_fn *on_connection_complete,
+ void *userdata) {
+ connection->on_connection_complete = on_connection_complete;
+ connection->on_connection_complete_ud = userdata;
+
+ int result = 0;
+#ifdef AWS_MQTT_WITH_WEBSOCKETS
+ if (connection->websocket.enabled) {
+ result = s_websocket_connect(connection);
+ } else
+#endif /* AWS_MQTT_WITH_WEBSOCKETS */
+ {
+ struct aws_socket_channel_bootstrap_options channel_options;
+ AWS_ZERO_STRUCT(channel_options);
+ channel_options.bootstrap = connection->client->bootstrap;
+ channel_options.host_name = aws_string_c_str(connection->host_name);
+ channel_options.port = connection->port;
+ channel_options.socket_options = &connection->socket_options;
+ channel_options.tls_options = connection->use_tls ? &connection->tls_options : NULL;
+ channel_options.setup_callback = &s_mqtt_client_init;
+ channel_options.shutdown_callback = &s_mqtt_client_shutdown;
+ channel_options.user_data = connection;
+ channel_options.requested_event_loop = connection->loop;
+
+ if (connection->http_proxy_config == NULL) {
+ result = aws_client_bootstrap_new_socket_channel(&channel_options);
+ } else {
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+
+ aws_http_proxy_options_init_from_config(&proxy_options, connection->http_proxy_config);
+ result = aws_http_proxy_new_socket_channel(&channel_options, &proxy_options);
+ }
+ }
+
+ if (result) {
+ /* Connection attempt failed */
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to begin connection routine, error %d (%s).",
+ (void *)connection,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*******************************************************************************
+ * Reconnect DEPRECATED
+ ******************************************************************************/
+
+int aws_mqtt_client_connection_reconnect(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_connection_complete_fn *on_connection_complete,
+ void *userdata) {
+ (void)connection;
+ (void)on_connection_complete;
+ (void)userdata;
+
+ /* DEPRECATED, connection will reconnect automatically now. */
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "aws_mqtt_client_connection_reconnect has been DEPRECATED.");
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+}
+
+/*******************************************************************************
+ * Disconnect
+ ******************************************************************************/
+
+int aws_mqtt_client_connection_disconnect(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_client_on_disconnect_fn *on_disconnect,
+ void *userdata) {
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: user called disconnect.", (void *)connection);
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED &&
+ connection->synced_data.state != AWS_MQTT_CLIENT_STATE_RECONNECTING) {
+ mqtt_connection_unlock_synced_data(connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "id=%p: Connection is not open, and may not be closed", (void *)connection);
+ aws_raise_error(AWS_ERROR_MQTT_NOT_CONNECTED);
+ return AWS_OP_ERR;
+ }
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_DISCONNECTING);
+ AWS_LOGF_DEBUG(
+            AWS_LS_MQTT_CLIENT,
+            "id=%p: User requested disconnect, switching state to DISCONNECTING.",
+ (void *)connection);
+ connection->on_disconnect = on_disconnect;
+ connection->on_disconnect_ud = userdata;
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Closing connection", (void *)connection);
+
+ mqtt_disconnect_impl(connection, AWS_OP_SUCCESS);
+
+ return AWS_OP_SUCCESS;
+}
+
+/*******************************************************************************
+ * Subscribe
+ ******************************************************************************/
+
+static void s_on_publish_client_wrapper(
+ const struct aws_byte_cursor *topic,
+ const struct aws_byte_cursor *payload,
+ bool dup,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ void *userdata) {
+
+ struct subscribe_task_topic *task_topic = userdata;
+
+ /* Call out to the user callback */
+ if (task_topic->request.on_publish) {
+ task_topic->request.on_publish(
+ task_topic->connection, topic, payload, dup, qos, retain, task_topic->request.on_publish_ud);
+ }
+}
+
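+/* Each holder of a subscribe_task_topic (e.g. the topic tree and in-flight subscribe tasks) releases its reference
+ * here; the final release runs s_task_topic_clean_up below. */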
+static void s_task_topic_release(void *userdata) {
+ struct subscribe_task_topic *task_topic = userdata;
+ if (task_topic != NULL) {
+ aws_ref_count_release(&task_topic->ref_count);
+ }
+}
+
+static void s_task_topic_clean_up(void *userdata) {
+
+ struct subscribe_task_topic *task_topic = userdata;
+
+ if (task_topic->request.on_cleanup) {
+ task_topic->request.on_cleanup(task_topic->request.on_publish_ud);
+ }
+ aws_string_destroy(task_topic->filter);
+ aws_mem_release(task_topic->connection->allocator, task_topic);
+}
+
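+/* Send callback for subscribe requests: lazily initializes the SUBSCRIBE packet, stages topic-tree inserts in a
+ * transaction, encodes and sends the packet, and commits the transaction (rolling it back on error paths). */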
+static enum aws_mqtt_client_request_state s_subscribe_send(uint16_t packet_id, bool is_first_attempt, void *userdata) {
+
+ (void)is_first_attempt;
+
+ struct subscribe_task_arg *task_arg = userdata;
+ bool initing_packet = task_arg->subscribe.fixed_header.packet_type == 0;
+ struct aws_io_message *message = NULL;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Attempting send of subscribe %" PRIu16 " (%s)",
+ (void *)task_arg->connection,
+ packet_id,
+ is_first_attempt ? "first attempt" : "resend");
+
+ if (initing_packet) {
+ /* Init the subscribe packet */
+ if (aws_mqtt_packet_subscribe_init(&task_arg->subscribe, task_arg->connection->allocator, packet_id)) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+ }
+
+ const size_t num_topics = aws_array_list_length(&task_arg->topics);
+ if (num_topics <= 0) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, num_topics * aws_mqtt_topic_tree_action_size);
+ struct aws_array_list transaction;
+ aws_array_list_init_static(&transaction, transaction_buf, num_topics, aws_mqtt_topic_tree_action_size);
+
+ for (size_t i = 0; i < num_topics; ++i) {
+
+ struct subscribe_task_topic *topic = NULL;
+ aws_array_list_get_at(&task_arg->topics, &topic, i);
+ AWS_ASSUME(topic); /* We know we're within bounds */
+
+ if (initing_packet) {
+ if (aws_mqtt_packet_subscribe_add_topic(&task_arg->subscribe, topic->request.topic, topic->request.qos)) {
+ goto handle_error;
+ }
+ }
+
+ if (!task_arg->tree_updated) {
+ if (aws_mqtt_topic_tree_transaction_insert(
+ &task_arg->connection->thread_data.subscriptions,
+ &transaction,
+ topic->filter,
+ topic->request.qos,
+ s_on_publish_client_wrapper,
+ s_task_topic_release,
+ topic)) {
+
+ goto handle_error;
+ }
+            /* If the insert succeeded, acquire a reference on behalf of the topic tree */
+ aws_ref_count_acquire(&topic->ref_count);
+ }
+ }
+
+ message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->subscribe.fixed_header);
+ if (!message) {
+
+ goto handle_error;
+ }
+
+ if (aws_mqtt_packet_subscribe_encode(&message->message_data, &task_arg->subscribe)) {
+
+ goto handle_error;
+ }
+
+ /* This is not necessarily a fatal error; if the subscribe fails, it'll just retry. Still need to clean up though.
+ */
+ if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ aws_mem_release(message->allocator, message);
+ }
+
+ if (!task_arg->tree_updated) {
+ aws_mqtt_topic_tree_transaction_commit(&task_arg->connection->thread_data.subscriptions, &transaction);
+ task_arg->tree_updated = true;
+ }
+
+ aws_array_list_clean_up(&transaction);
+ return AWS_MQTT_CLIENT_REQUEST_ONGOING;
+
+handle_error:
+
+ if (message) {
+ aws_mem_release(message->allocator, message);
+ }
+ if (!task_arg->tree_updated) {
+ aws_mqtt_topic_tree_transaction_roll_back(&task_arg->connection->thread_data.subscriptions, &transaction);
+ }
+
+ aws_array_list_clean_up(&transaction);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+}
+
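+/* Completion callback for multi-topic subscribes: invokes the user's suback callback, then releases the per-topic
+ * references and frees the task argument. */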
+static void s_subscribe_complete(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata) {
+
+ struct subscribe_task_arg *task_arg = userdata;
+
+ struct subscribe_task_topic *topic = NULL;
+ aws_array_list_get_at(&task_arg->topics, &topic, 0);
+ AWS_ASSUME(topic);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Subscribe %" PRIu16 " completed with error_code %d",
+ (void *)connection,
+ packet_id,
+ error_code);
+
+ size_t list_len = aws_array_list_length(&task_arg->topics);
+ if (task_arg->on_suback.multi) {
+ /* create a list of aws_mqtt_topic_subscription pointers from topics for the callback */
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, cb_list_buf, list_len * sizeof(void *));
+ struct aws_array_list cb_list;
+ aws_array_list_init_static(&cb_list, cb_list_buf, list_len, sizeof(void *));
+ int err = 0;
+ for (size_t i = 0; i < list_len; i++) {
+ err |= aws_array_list_get_at(&task_arg->topics, &topic, i);
+ struct aws_mqtt_topic_subscription *subscription = &topic->request;
+ err |= aws_array_list_push_back(&cb_list, &subscription);
+ }
+ AWS_ASSUME(!err);
+ task_arg->on_suback.multi(connection, packet_id, &cb_list, error_code, task_arg->on_suback_ud);
+ aws_array_list_clean_up(&cb_list);
+ } else if (task_arg->on_suback.single) {
+ task_arg->on_suback.single(
+ connection, packet_id, &topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud);
+ }
+ for (size_t i = 0; i < list_len; i++) {
+ aws_array_list_get_at(&task_arg->topics, &topic, i);
+ s_task_topic_release(topic);
+ }
+ aws_array_list_clean_up(&task_arg->topics);
+ aws_mqtt_packet_subscribe_clean_up(&task_arg->subscribe);
+ aws_mem_release(task_arg->connection->allocator, task_arg);
+}
+
+uint16_t aws_mqtt_client_connection_subscribe_multiple(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_array_list *topic_filters,
+ aws_mqtt_suback_multi_fn *on_suback,
+ void *on_suback_ud) {
+
+ AWS_PRECONDITION(connection);
+
+ struct subscribe_task_arg *task_arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_arg));
+ if (!task_arg) {
+ return 0;
+ }
+
+ task_arg->connection = connection;
+ task_arg->on_suback.multi = on_suback;
+ task_arg->on_suback_ud = on_suback_ud;
+
+ const size_t num_topics = aws_array_list_length(topic_filters);
+
+ if (aws_array_list_init_dynamic(&task_arg->topics, connection->allocator, num_topics, sizeof(void *))) {
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting multi-topic subscribe", (void *)connection);
+
+ /* Calculate the size of the subscribe packet
+ * The fixed header is 2 bytes and the packet ID is 2 bytes.
+     * Note: The size of the topic filter(s) is calculated in the loop below */
+ uint64_t subscribe_packet_size = 4;
+
+ for (size_t i = 0; i < num_topics; ++i) {
+
+ struct aws_mqtt_topic_subscription *request = NULL;
+ aws_array_list_get_at_ptr(topic_filters, (void **)&request, i);
+
+ if (!aws_mqtt_is_valid_topic_filter(&request->topic)) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ goto handle_error;
+ }
+
+ struct subscribe_task_topic *task_topic =
+ aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_topic));
+ if (!task_topic) {
+ goto handle_error;
+ }
+ aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up);
+
+ task_topic->connection = connection;
+ task_topic->request = *request;
+
+ task_topic->filter = aws_string_new_from_array(
+ connection->allocator, task_topic->request.topic.ptr, task_topic->request.topic.len);
+ if (!task_topic->filter) {
+ aws_mem_release(connection->allocator, task_topic);
+ goto handle_error;
+ }
+
+ /* Update request topic cursor to refer to owned string */
+ task_topic->request.topic = aws_byte_cursor_from_string(task_topic->filter);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Adding topic \"" PRInSTR "\"",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(task_topic->request.topic));
+
+ /* Push into the list */
+ aws_array_list_push_back(&task_arg->topics, &task_topic);
+
+        /* Each topic filter adds 3 bytes (1 for QoS, 2 for length MSB/LSB) plus the size of the topic filter. */
+ subscribe_packet_size += 3 + task_topic->request.topic.len;
+ }
+
+ uint16_t packet_id = mqtt_create_request(
+ task_arg->connection,
+ &s_subscribe_send,
+ task_arg,
+ &s_subscribe_complete,
+ task_arg,
+ false, /* noRetry */
+ subscribe_packet_size);
+
+ if (packet_id == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to kick off multi-topic subscribe, with error %s",
+ (void *)connection,
+ aws_error_debug_str(aws_last_error()));
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Sending multi-topic subscribe %" PRIu16, (void *)connection, packet_id);
+ return packet_id;
+
+handle_error:
+
+ if (task_arg) {
+
+ if (task_arg->topics.data) {
+
+ const size_t num_added_topics = aws_array_list_length(&task_arg->topics);
+ for (size_t i = 0; i < num_added_topics; ++i) {
+
+ struct subscribe_task_topic *task_topic = NULL;
+ aws_array_list_get_at(&task_arg->topics, (void **)&task_topic, i);
+ AWS_ASSUME(task_topic);
+
+ aws_string_destroy(task_topic->filter);
+ aws_mem_release(connection->allocator, task_topic);
+ }
+
+ aws_array_list_clean_up(&task_arg->topics);
+ }
+
+ aws_mem_release(connection->allocator, task_arg);
+ }
+ return 0;
+}
+
+/*******************************************************************************
+ * Subscribe Single
+ ******************************************************************************/
+
+static void s_subscribe_single_complete(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata) {
+
+ struct subscribe_task_arg *task_arg = userdata;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Subscribe %" PRIu16 " completed with error code %d",
+ (void *)connection,
+ packet_id,
+ error_code);
+
+ AWS_ASSERT(aws_array_list_length(&task_arg->topics) == 1);
+ struct subscribe_task_topic *topic = NULL;
+ aws_array_list_get_at(&task_arg->topics, &topic, 0);
+ AWS_ASSUME(topic); /* There needs to be exactly 1 topic in this list */
+ if (task_arg->on_suback.single) {
+ AWS_ASSUME(aws_string_is_valid(topic->filter));
+ aws_mqtt_suback_fn *suback = task_arg->on_suback.single;
+ suback(connection, packet_id, &topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud);
+ }
+ s_task_topic_release(topic);
+ aws_array_list_clean_up(&task_arg->topics);
+ aws_mqtt_packet_subscribe_clean_up(&task_arg->subscribe);
+ aws_mem_release(task_arg->connection->allocator, task_arg);
+}
+
+uint16_t aws_mqtt_client_connection_subscribe(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_client_publish_received_fn *on_publish,
+ void *on_publish_ud,
+ aws_mqtt_userdata_cleanup_fn *on_ud_cleanup,
+ aws_mqtt_suback_fn *on_suback,
+ void *on_suback_ud) {
+
+ AWS_PRECONDITION(connection);
+
+ if (!aws_mqtt_is_valid_topic_filter(topic_filter)) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ return 0;
+ }
+
+ /* Because we know we're only going to have 1 topic, we can cheat and allocate the array_list in the same block as
+ * the task argument. */
+ void *task_topic_storage = NULL;
+ struct subscribe_task_topic *task_topic = NULL;
+ struct subscribe_task_arg *task_arg = aws_mem_acquire_many(
+ connection->allocator,
+ 2,
+ &task_arg,
+ sizeof(struct subscribe_task_arg),
+ &task_topic_storage,
+ sizeof(struct subscribe_task_topic *));
+
+ if (!task_arg) {
+ goto handle_error;
+ }
+ AWS_ZERO_STRUCT(*task_arg);
+
+ task_arg->connection = connection;
+ task_arg->on_suback.single = on_suback;
+ task_arg->on_suback_ud = on_suback_ud;
+
+    /* The static list holds a single subscribe_task_topic pointer */
+ aws_array_list_init_static(&task_arg->topics, task_topic_storage, 1, sizeof(void *));
+
+ /* Allocate the topic and push into the list */
+ task_topic = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_topic));
+ if (!task_topic) {
+ goto handle_error;
+ }
+ aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up);
+ aws_array_list_push_back(&task_arg->topics, &task_topic);
+
+ task_topic->filter = aws_string_new_from_array(connection->allocator, topic_filter->ptr, topic_filter->len);
+ if (!task_topic->filter) {
+ goto handle_error;
+ }
+
+ task_topic->connection = connection;
+ task_topic->request.topic = aws_byte_cursor_from_string(task_topic->filter);
+ task_topic->request.qos = qos;
+ task_topic->request.on_publish = on_publish;
+ task_topic->request.on_cleanup = on_ud_cleanup;
+ task_topic->request.on_publish_ud = on_publish_ud;
+
+    /* Calculate the size of the (single) subscribe packet:
+     * 2 bytes for the fixed header, 2 bytes for the packet ID,
+     * and 3 bytes for the topic filter (1 for QoS, 2 for length MSB/LSB) plus the size of the filter itself. */
+ uint64_t subscribe_packet_size = 7 + topic_filter->len;
+
+ uint16_t packet_id = mqtt_create_request(
+ task_arg->connection,
+ &s_subscribe_send,
+ task_arg,
+ &s_subscribe_single_complete,
+ task_arg,
+ false, /* noRetry */
+ subscribe_packet_size);
+
+ if (packet_id == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to start subscribe on topic " PRInSTR " with error %s",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(task_topic->request.topic),
+ aws_error_debug_str(aws_last_error()));
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Starting subscribe %" PRIu16 " on topic " PRInSTR,
+ (void *)connection,
+ packet_id,
+ AWS_BYTE_CURSOR_PRI(task_topic->request.topic));
+
+ return packet_id;
+
+handle_error:
+
+ if (task_topic) {
+ if (task_topic->filter) {
+ aws_string_destroy(task_topic->filter);
+ }
+ aws_mem_release(connection->allocator, task_topic);
+ }
+
+ if (task_arg) {
+ aws_mem_release(connection->allocator, task_arg);
+ }
+
+ return 0;
+}
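+/* Illustrative usage (not part of this file): a QoS 1 subscription with a per-topic publish handler, e.g.
+ *
+ *   struct aws_byte_cursor filter = aws_byte_cursor_from_c_str("sensors/+/temperature");
+ *   aws_mqtt_client_connection_subscribe(
+ *       connection, &filter, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish, publish_ud, NULL, s_on_suback, suback_ud);
+ *
+ * The filter string and the s_on_publish/s_on_suback callbacks above are made up for illustration only. */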
+
+/*******************************************************************************
+ * Subscribe Local
+ ******************************************************************************/
+
+/* The lifetime of this struct is from subscribe -> suback */
+struct subscribe_local_task_arg {
+
+ struct aws_mqtt_client_connection *connection;
+
+ struct subscribe_task_topic *task_topic;
+
+ aws_mqtt_suback_fn *on_suback;
+ void *on_suback_ud;
+};
+
+static enum aws_mqtt_client_request_state s_subscribe_local_send(
+ uint16_t packet_id,
+ bool is_first_attempt,
+ void *userdata) {
+
+ (void)is_first_attempt;
+
+ struct subscribe_local_task_arg *task_arg = userdata;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Attempting save of local subscribe %" PRIu16 " (%s)",
+ (void *)task_arg->connection,
+ packet_id,
+ is_first_attempt ? "first attempt" : "redo");
+
+ struct subscribe_task_topic *topic = task_arg->task_topic;
+ if (aws_mqtt_topic_tree_insert(
+ &task_arg->connection->thread_data.subscriptions,
+ topic->filter,
+ topic->request.qos,
+ s_on_publish_client_wrapper,
+ s_task_topic_release,
+ topic)) {
+
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+ aws_ref_count_acquire(&topic->ref_count);
+
+ return AWS_MQTT_CLIENT_REQUEST_COMPLETE;
+}
+
+static void s_subscribe_local_complete(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata) {
+
+ struct subscribe_local_task_arg *task_arg = userdata;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Local subscribe %" PRIu16 " completed with error code %d",
+ (void *)connection,
+ packet_id,
+ error_code);
+
+ struct subscribe_task_topic *topic = task_arg->task_topic;
+ if (task_arg->on_suback) {
+ aws_mqtt_suback_fn *suback = task_arg->on_suback;
+ suback(connection, packet_id, &topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud);
+ }
+ s_task_topic_release(topic);
+
+ aws_mem_release(task_arg->connection->allocator, task_arg);
+}
+
+uint16_t aws_mqtt_client_connection_subscribe_local(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ aws_mqtt_client_publish_received_fn *on_publish,
+ void *on_publish_ud,
+ aws_mqtt_userdata_cleanup_fn *on_ud_cleanup,
+ aws_mqtt_suback_fn *on_suback,
+ void *on_suback_ud) {
+
+ AWS_PRECONDITION(connection);
+
+ if (!aws_mqtt_is_valid_topic_filter(topic_filter)) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ return 0;
+ }
+
+ struct subscribe_task_topic *task_topic = NULL;
+
+ struct subscribe_local_task_arg *task_arg =
+ aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_local_task_arg));
+
+ if (!task_arg) {
+ goto handle_error;
+ }
+ AWS_ZERO_STRUCT(*task_arg);
+
+ task_arg->connection = connection;
+ task_arg->on_suback = on_suback;
+ task_arg->on_suback_ud = on_suback_ud;
+ task_topic = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_topic));
+ if (!task_topic) {
+ goto handle_error;
+ }
+ aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up);
+ task_arg->task_topic = task_topic;
+
+ task_topic->filter = aws_string_new_from_array(connection->allocator, topic_filter->ptr, topic_filter->len);
+ if (!task_topic->filter) {
+ goto handle_error;
+ }
+
+ task_topic->connection = connection;
+ task_topic->is_local = true;
+ task_topic->request.topic = aws_byte_cursor_from_string(task_topic->filter);
+ task_topic->request.on_publish = on_publish;
+ task_topic->request.on_cleanup = on_ud_cleanup;
+ task_topic->request.on_publish_ud = on_publish_ud;
+
+    /* Calculate the size of the (local) subscribe packet:
+     * 2 bytes for the fixed header, 2 bytes for the packet ID,
+     * and 3 bytes for the topic filter (1 for QoS, 2 for length MSB/LSB) plus the size of the filter itself. */
+ uint64_t subscribe_packet_size = 7 + topic_filter->len;
+
+ uint16_t packet_id = mqtt_create_request(
+ task_arg->connection,
+ s_subscribe_local_send,
+ task_arg,
+ &s_subscribe_local_complete,
+ task_arg,
+ false, /* noRetry */
+ subscribe_packet_size);
+
+ if (packet_id == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to start local subscribe on topic " PRInSTR " with error %s",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(task_topic->request.topic),
+ aws_error_debug_str(aws_last_error()));
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Starting local subscribe %" PRIu16 " on topic " PRInSTR,
+ (void *)connection,
+ packet_id,
+ AWS_BYTE_CURSOR_PRI(task_topic->request.topic));
+ return packet_id;
+
+handle_error:
+
+ if (task_topic) {
+ if (task_topic->filter) {
+ aws_string_destroy(task_topic->filter);
+ }
+ aws_mem_release(connection->allocator, task_topic);
+ }
+
+ if (task_arg) {
+ aws_mem_release(connection->allocator, task_arg);
+ }
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Resubscribe
+ ******************************************************************************/
+
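+/* Topic-tree iterator used on reconnect: wraps each existing subscription in a subscribe_task_topic so it can be
+ * re-sent in the resubscribe packet. */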
+static bool s_reconnect_resub_iterator(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data) {
+ struct subscribe_task_arg *task_arg = user_data;
+
+ struct subscribe_task_topic *task_topic =
+ aws_mem_calloc(task_arg->connection->allocator, 1, sizeof(struct subscribe_task_topic));
+ struct aws_mqtt_topic_subscription sub;
+ AWS_ZERO_STRUCT(sub);
+ sub.topic = *topic;
+ sub.qos = qos;
+ task_topic->request = sub;
+ task_topic->connection = task_arg->connection;
+
+ aws_array_list_push_back(&task_arg->topics, &task_topic);
+ aws_ref_count_init(&task_topic->ref_count, task_topic, (aws_simple_completion_callback *)s_task_topic_clean_up);
+ return true;
+}
+
+static bool s_reconnect_resub_operation_statistics_iterator(
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ void *user_data) {
+ (void)qos;
+ uint64_t *packet_size = user_data;
+ /* Always 3 bytes (1 for QoS, 2 for length MSB and LSB respectively) */
+ *packet_size += 3;
+ /* The size of the topic filter */
+ *packet_size += topic->len;
+ return true;
+}
+
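+/* Send callback for resubscribe: rebuilds a SUBSCRIBE packet from the topics currently in the topic tree, or
+ * completes immediately if there is nothing to resubscribe to. */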
+static enum aws_mqtt_client_request_state s_resubscribe_send(
+ uint16_t packet_id,
+ bool is_first_attempt,
+ void *userdata) {
+
+ struct subscribe_task_arg *task_arg = userdata;
+ bool initing_packet = task_arg->subscribe.fixed_header.packet_type == 0;
+ struct aws_io_message *message = NULL;
+
+ const size_t sub_count = aws_mqtt_topic_tree_get_sub_count(&task_arg->connection->thread_data.subscriptions);
+ /* Init the topics list even if there are no topics because the s_resubscribe_complete callback will always run. */
+ if (aws_array_list_init_dynamic(&task_arg->topics, task_arg->connection->allocator, sub_count, sizeof(void *))) {
+ goto handle_error;
+ }
+ if (sub_count == 0) {
+ AWS_LOGF_TRACE(
+            AWS_LS_MQTT_CLIENT,
+            "id=%p: Not subscribed to any topics. Resubscribe is unnecessary; no packet will be sent.",
+ (void *)task_arg->connection);
+ return AWS_MQTT_CLIENT_REQUEST_COMPLETE;
+ }
+ aws_mqtt_topic_tree_iterate(&task_arg->connection->thread_data.subscriptions, s_reconnect_resub_iterator, task_arg);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Attempting send of resubscribe %" PRIu16 " (%s)",
+ (void *)task_arg->connection,
+ packet_id,
+ is_first_attempt ? "first attempt" : "resend");
+
+ if (initing_packet) {
+ /* Init the subscribe packet */
+ if (aws_mqtt_packet_subscribe_init(&task_arg->subscribe, task_arg->connection->allocator, packet_id)) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ const size_t num_topics = aws_array_list_length(&task_arg->topics);
+ if (num_topics <= 0) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ for (size_t i = 0; i < num_topics; ++i) {
+
+ struct subscribe_task_topic *topic = NULL;
+ aws_array_list_get_at(&task_arg->topics, &topic, i);
+ AWS_ASSUME(topic); /* We know we're within bounds */
+
+ if (aws_mqtt_packet_subscribe_add_topic(&task_arg->subscribe, topic->request.topic, topic->request.qos)) {
+ goto handle_error;
+ }
+ }
+ }
+
+ message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->subscribe.fixed_header);
+ if (!message) {
+
+ goto handle_error;
+ }
+
+ if (aws_mqtt_packet_subscribe_encode(&message->message_data, &task_arg->subscribe)) {
+
+ goto handle_error;
+ }
+
+ /* This is not necessarily a fatal error; if the send fails, it'll just retry. Still need to clean up though. */
+ if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ aws_mem_release(message->allocator, message);
+ }
+
+ return AWS_MQTT_CLIENT_REQUEST_ONGOING;
+
+handle_error:
+
+ if (message) {
+ aws_mem_release(message->allocator, message);
+ }
+
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+}
+
+static void s_resubscribe_complete(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata) {
+
+ struct subscribe_task_arg *task_arg = userdata;
+
+ const size_t list_len = aws_array_list_length(&task_arg->topics);
+ if (list_len <= 0) {
+ goto clean_up;
+ }
+
+ struct subscribe_task_topic *topic = NULL;
+ aws_array_list_get_at(&task_arg->topics, &topic, 0);
+ AWS_ASSUME(topic);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Subscribe %" PRIu16 " completed with error_code %d",
+ (void *)connection,
+ packet_id,
+ error_code);
+
+ if (task_arg->on_suback.multi) {
+ /* create a list of aws_mqtt_topic_subscription pointers from topics for the callback */
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, cb_list_buf, list_len * sizeof(void *));
+ struct aws_array_list cb_list;
+ aws_array_list_init_static(&cb_list, cb_list_buf, list_len, sizeof(void *));
+ int err = 0;
+ for (size_t i = 0; i < list_len; i++) {
+ err |= aws_array_list_get_at(&task_arg->topics, &topic, i);
+ struct aws_mqtt_topic_subscription *subscription = &topic->request;
+ err |= aws_array_list_push_back(&cb_list, &subscription);
+ }
+ AWS_ASSUME(!err);
+ task_arg->on_suback.multi(connection, packet_id, &cb_list, error_code, task_arg->on_suback_ud);
+ aws_array_list_clean_up(&cb_list);
+ } else if (task_arg->on_suback.single) {
+ task_arg->on_suback.single(
+ connection, packet_id, &topic->request.topic, topic->request.qos, error_code, task_arg->on_suback_ud);
+ }
+
+clean_up:
+
+    /* We need to clean up the subscribe_task_topics since they are not inserted into the topic tree by resubscribe;
+     * we take ownership and clean them up here. */
+ for (size_t i = 0; i < list_len; i++) {
+ aws_array_list_get_at(&task_arg->topics, &topic, i);
+ s_task_topic_release(topic);
+ }
+ aws_array_list_clean_up(&task_arg->topics);
+ aws_mqtt_packet_subscribe_clean_up(&task_arg->subscribe);
+ aws_mem_release(task_arg->connection->allocator, task_arg);
+}
+
+uint16_t aws_mqtt_resubscribe_existing_topics(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_suback_multi_fn *on_suback,
+ void *on_suback_ud) {
+
+ struct subscribe_task_arg *task_arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct subscribe_task_arg));
+ if (!task_arg) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "id=%p: failed to allocate storage for resubscribe arguments", (void *)connection);
+ return 0;
+ }
+
+ AWS_ZERO_STRUCT(*task_arg);
+ task_arg->connection = connection;
+ task_arg->on_suback.multi = on_suback;
+ task_arg->on_suback_ud = on_suback_ud;
+
+ /* Calculate the size of the packet.
+ * The fixed header is 2 bytes and the packet ID is 2 bytes
+ * plus the size of each topic in the topic tree */
+ uint64_t resubscribe_packet_size = 4;
+ /* Get the length of each subscription we are going to resubscribe with */
+ aws_mqtt_topic_tree_iterate(
+ &connection->thread_data.subscriptions,
+ s_reconnect_resub_operation_statistics_iterator,
+ &resubscribe_packet_size);
+
+ uint16_t packet_id = mqtt_create_request(
+ task_arg->connection,
+ &s_resubscribe_send,
+ task_arg,
+ &s_resubscribe_complete,
+ task_arg,
+ false, /* noRetry */
+ resubscribe_packet_size);
+
+ if (packet_id == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to send multi-topic resubscribe with error %s",
+ (void *)connection,
+ aws_error_name(aws_last_error()));
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT, "id=%p: Sending multi-topic resubscribe %" PRIu16, (void *)connection, packet_id);
+
+ return packet_id;
+
+handle_error:
+
+ aws_mem_release(connection->allocator, task_arg);
+
+ return 0;
+}
+
+/*******************************************************************************
+ * Unsubscribe
+ ******************************************************************************/
+
+struct unsubscribe_task_arg {
+ struct aws_mqtt_client_connection *connection;
+ struct aws_string *filter_string;
+ struct aws_byte_cursor filter;
+ bool is_local;
+ /* Packet to populate */
+ struct aws_mqtt_packet_unsubscribe unsubscribe;
+
+    /* True if the transaction was committed to the topic tree; false means it must be retried */
+ bool tree_updated;
+
+ aws_mqtt_op_complete_fn *on_unsuback;
+ void *on_unsuback_ud;
+
+ struct request_timeout_wrapper timeout_wrapper;
+};
+
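+/* Send callback for unsubscribe: removes the filter from the topic tree in a transaction; for non-local
+ * subscriptions it also encodes and sends an UNSUBSCRIBE packet and arms a timeout task. */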
+static enum aws_mqtt_client_request_state s_unsubscribe_send(
+ uint16_t packet_id,
+ bool is_first_attempt,
+ void *userdata) {
+
+ (void)is_first_attempt;
+
+ struct unsubscribe_task_arg *task_arg = userdata;
+ struct aws_io_message *message = NULL;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Attempting send of unsubscribe %" PRIu16 " %s",
+ (void *)task_arg->connection,
+ packet_id,
+ is_first_attempt ? "first attempt" : "resend");
+
+ static const size_t num_topics = 1;
+
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, num_topics * aws_mqtt_topic_tree_action_size);
+ struct aws_array_list transaction;
+ aws_array_list_init_static(&transaction, transaction_buf, num_topics, aws_mqtt_topic_tree_action_size);
+
+ if (!task_arg->tree_updated) {
+
+ struct subscribe_task_topic *topic;
+ if (aws_mqtt_topic_tree_transaction_remove(
+ &task_arg->connection->thread_data.subscriptions, &transaction, &task_arg->filter, (void **)&topic)) {
+ goto handle_error;
+ }
+
+ task_arg->is_local = topic ? topic->is_local : false;
+ }
+
+ if (!task_arg->is_local) {
+ if (task_arg->unsubscribe.fixed_header.packet_type == 0) {
+ /* If unsubscribe packet is uninitialized, init it */
+ if (aws_mqtt_packet_unsubscribe_init(&task_arg->unsubscribe, task_arg->connection->allocator, packet_id)) {
+ goto handle_error;
+ }
+ if (aws_mqtt_packet_unsubscribe_add_topic(&task_arg->unsubscribe, task_arg->filter)) {
+ goto handle_error;
+ }
+ }
+
+ message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->unsubscribe.fixed_header);
+ if (!message) {
+ goto handle_error;
+ }
+
+ if (aws_mqtt_packet_unsubscribe_encode(&message->message_data, &task_arg->unsubscribe)) {
+ goto handle_error;
+ }
+
+ if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ goto handle_error;
+ }
+
+        /* TODO: timing should start when the message is written to the socket, i.e. when
+         * aws_io_message->on_completion is invoked, but there are bugs in the websocket handler (and maybe also the
+         * h1 handler?) where we don't properly fire the on_completion callbacks. */
+ struct request_timeout_task_arg *timeout_task_arg = s_schedule_timeout_task(task_arg->connection, packet_id);
+ if (!timeout_task_arg) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ /*
+ * Set up mutual references between the operation task args and the timeout task args. Whoever runs first
+ * "wins", does its logic, and then breaks the connection between the two.
+ */
+ task_arg->timeout_wrapper.timeout_task_arg = timeout_task_arg;
+ timeout_task_arg->task_arg_wrapper = &task_arg->timeout_wrapper;
+ }
+
+ if (!task_arg->tree_updated) {
+ aws_mqtt_topic_tree_transaction_commit(&task_arg->connection->thread_data.subscriptions, &transaction);
+ task_arg->tree_updated = true;
+ }
+
+ aws_array_list_clean_up(&transaction);
+    /* If the subscription was local-only, no UNSUBSCRIBE was sent, so don't wait for an UNSUBACK to come back. */
+ return task_arg->is_local ? AWS_MQTT_CLIENT_REQUEST_COMPLETE : AWS_MQTT_CLIENT_REQUEST_ONGOING;
+
+handle_error:
+
+ if (message) {
+ aws_mem_release(message->allocator, message);
+ }
+ if (!task_arg->tree_updated) {
+ aws_mqtt_topic_tree_transaction_roll_back(&task_arg->connection->thread_data.subscriptions, &transaction);
+ }
+
+ aws_array_list_clean_up(&transaction);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+}
+
+static void s_unsubscribe_complete(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata) {
+
+ struct unsubscribe_task_arg *task_arg = userdata;
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Unsubscribe %" PRIu16 " complete", (void *)connection, packet_id);
+
+ /*
+ * If we have a forward pointer to a timeout task, then that means the timeout task has not run yet. So we should
+ * follow it and zero out the back pointer to us, because we're going away now. The timeout task will run later
+ * and be harmless (even vs. future operations with the same packet id) because it only cancels if it has a back
+ * pointer.
+ */
+ if (task_arg->timeout_wrapper.timeout_task_arg) {
+ task_arg->timeout_wrapper.timeout_task_arg->task_arg_wrapper = NULL;
+ task_arg->timeout_wrapper.timeout_task_arg = NULL;
+ }
+
+ if (task_arg->on_unsuback) {
+ task_arg->on_unsuback(connection, packet_id, error_code, task_arg->on_unsuback_ud);
+ }
+
+ aws_string_destroy(task_arg->filter_string);
+ aws_mqtt_packet_unsubscribe_clean_up(&task_arg->unsubscribe);
+ aws_mem_release(task_arg->connection->allocator, task_arg);
+}
+
+uint16_t aws_mqtt_client_connection_unsubscribe(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic_filter,
+ aws_mqtt_op_complete_fn *on_unsuback,
+ void *on_unsuback_ud) {
+
+ AWS_PRECONDITION(connection);
+
+ if (!aws_mqtt_is_valid_topic_filter(topic_filter)) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ return 0;
+ }
+
+ struct unsubscribe_task_arg *task_arg =
+ aws_mem_calloc(connection->allocator, 1, sizeof(struct unsubscribe_task_arg));
+ if (!task_arg) {
+ return 0;
+ }
+
+ task_arg->connection = connection;
+ task_arg->filter_string = aws_string_new_from_array(connection->allocator, topic_filter->ptr, topic_filter->len);
+ task_arg->filter = aws_byte_cursor_from_string(task_arg->filter_string);
+ task_arg->on_unsuback = on_unsuback;
+ task_arg->on_unsuback_ud = on_unsuback_ud;
+
+ /* Calculate the size of the unsubscribe packet.
+     * The fixed header is 2 bytes and the packet ID is 2 bytes,
+     * plus the size of the topic filter. */
+ uint64_t unsubscribe_packet_size = 4 + task_arg->filter.len;
+
+ uint16_t packet_id = mqtt_create_request(
+ connection,
+ &s_unsubscribe_send,
+ task_arg,
+ s_unsubscribe_complete,
+ task_arg,
+ false, /* noRetry */
+ unsubscribe_packet_size);
+ if (packet_id == 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed to start unsubscribe, with error %s",
+ (void *)connection,
+ aws_error_debug_str(aws_last_error()));
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting unsubscribe %" PRIu16, (void *)connection, packet_id);
+
+ return packet_id;
+
+handle_error:
+
+ aws_string_destroy(task_arg->filter_string);
+ aws_mem_release(connection->allocator, task_arg);
+
+ return 0;
+}
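+
+/*
+ * Illustrative usage sketch for aws_mqtt_client_connection_unsubscribe above (hypothetical caller code:
+ * `connection` is assumed to be an established connection, s_my_on_unsuback and my_userdata are caller-provided):
+ *
+ *     struct aws_byte_cursor filter = aws_byte_cursor_from_c_str("sensors/#");
+ *     uint16_t packet_id = aws_mqtt_client_connection_unsubscribe(connection, &filter, s_my_on_unsuback, my_userdata);
+ *     if (packet_id == 0) {
+ *         // unsubscribe could not be started; inspect aws_last_error()
+ *     }
+ */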
+
+/*******************************************************************************
+ * Publish
+ ******************************************************************************/
+
+struct publish_task_arg {
+ struct aws_mqtt_client_connection *connection;
+ struct aws_string *topic_string;
+ struct aws_byte_cursor topic;
+ enum aws_mqtt_qos qos;
+ bool retain;
+ struct aws_byte_cursor payload;
+ struct aws_byte_buf payload_buf;
+
+ /* Packet to populate */
+ struct aws_mqtt_packet_publish publish;
+
+ aws_mqtt_op_complete_fn *on_complete;
+ void *userdata;
+
+ struct request_timeout_wrapper timeout_wrapper;
+};
+
+/* should only be called by tests */
+static int s_get_stuff_from_outstanding_requests_table(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *result_buf,
+ struct aws_string **result_string) {
+
+ int err = AWS_OP_SUCCESS;
+
+ aws_mutex_lock(&connection->synced_data.lock);
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&connection->synced_data.outstanding_requests_table, &packet_id, &elem);
+ if (elem) {
+ struct aws_mqtt_request *request = elem->value;
+ struct publish_task_arg *pub = (struct publish_task_arg *)request->send_request_ud;
+ if (result_buf != NULL) {
+ if (aws_byte_buf_init_copy(result_buf, allocator, &pub->payload_buf)) {
+ err = AWS_OP_ERR;
+ }
+ } else if (result_string != NULL) {
+ *result_string = aws_string_new_from_string(allocator, pub->topic_string);
+ if (*result_string == NULL) {
+ err = AWS_OP_ERR;
+ }
+ }
+ } else {
+ /* So lovely that this error is defined, but hashtable never actually raises it */
+ err = aws_raise_error(AWS_ERROR_HASHTBL_ITEM_NOT_FOUND);
+ }
+ aws_mutex_unlock(&connection->synced_data.lock);
+
+ return err;
+}
+
+/* should only be called by tests */
+int aws_mqtt_client_get_payload_for_outstanding_publish_packet(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *result) {
+
+ AWS_ZERO_STRUCT(*result);
+ return s_get_stuff_from_outstanding_requests_table(connection, packet_id, allocator, result, NULL);
+}
+
+/* should only be called by tests */
+int aws_mqtt_client_get_topic_for_outstanding_publish_packet(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ struct aws_allocator *allocator,
+ struct aws_string **result) {
+
+ *result = NULL;
+ return s_get_stuff_from_outstanding_requests_table(connection, packet_id, allocator, NULL, result);
+}
+
+static enum aws_mqtt_client_request_state s_publish_send(uint16_t packet_id, bool is_first_attempt, void *userdata) {
+ struct publish_task_arg *task_arg = userdata;
+ struct aws_mqtt_client_connection *connection = task_arg->connection;
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Attempting send of publish %" PRIu16 " %s",
+ (void *)task_arg->connection,
+ packet_id,
+ is_first_attempt ? "first attempt" : "resend");
+
+ bool is_qos_0 = task_arg->qos == AWS_MQTT_QOS_AT_MOST_ONCE;
+ if (is_qos_0) {
+ packet_id = 0;
+ }
+
+ if (is_first_attempt) {
+ if (aws_mqtt_packet_publish_init(
+ &task_arg->publish,
+ task_arg->retain,
+ task_arg->qos,
+ !is_first_attempt,
+ task_arg->topic,
+ packet_id,
+ task_arg->payload)) {
+
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+ }
+
+ struct aws_io_message *message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->publish.fixed_header);
+ if (!message) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ /* Encode the headers, and everything but the payload */
+ if (aws_mqtt_packet_publish_encode_headers(&message->message_data, &task_arg->publish)) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ struct aws_byte_cursor payload_cur = task_arg->payload;
+ {
+ write_payload_chunk:
+ (void)NULL;
+
+ const size_t left_in_message = message->message_data.capacity - message->message_data.len;
+ const size_t to_write = payload_cur.len < left_in_message ? payload_cur.len : left_in_message;
+
+ if (to_write) {
+ /* Write this chunk */
+ struct aws_byte_cursor to_write_cur = aws_byte_cursor_advance(&payload_cur, to_write);
+ AWS_ASSERT(to_write_cur.ptr); /* to_write is guaranteed to be inside the bounds of payload_cur */
+ if (!aws_byte_buf_write_from_whole_cursor(&message->message_data, to_write_cur)) {
+
+ aws_mem_release(message->allocator, message);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+ }
+
+ if (aws_channel_slot_send_message(task_arg->connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ aws_mem_release(message->allocator, message);
+            /* For QoS 0, tell the user the message has not been sent; otherwise the message will be resent once the
+             * connection is back. */
+ return is_qos_0 ? AWS_MQTT_CLIENT_REQUEST_ERROR : AWS_MQTT_CLIENT_REQUEST_ONGOING;
+ }
+
+ /* If there's still payload left, get a new message and start again. */
+ if (payload_cur.len) {
+            message = mqtt_get_message_for_packet(task_arg->connection, &task_arg->publish.fixed_header);
+            if (!message) {
+                /* Could not acquire another io message for the remaining payload */
+                return AWS_MQTT_CLIENT_REQUEST_ERROR;
+            }
+            goto write_payload_chunk;
+        }
+ }
+ if (!is_qos_0 && connection->operation_timeout_ns != UINT64_MAX) {
+        /* TODO: timing should start when the message is written into the socket, i.e. when
+         * aws_io_message->on_completion is invoked, but there are bugs in the websocket handler (and maybe also the
+         * h1 handler?) where the on_completion callbacks are not fired properly. */
+ struct request_timeout_task_arg *timeout_task_arg = s_schedule_timeout_task(connection, packet_id);
+ if (!timeout_task_arg) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ /*
+ * Set up mutual references between the operation task args and the timeout task args. Whoever runs first
+ * "wins", does its logic, and then breaks the connection between the two.
+ */
+ task_arg->timeout_wrapper.timeout_task_arg = timeout_task_arg;
+ timeout_task_arg->task_arg_wrapper = &task_arg->timeout_wrapper;
+ }
+
+ /* If QoS == 0, there will be no ack, so consider the request done now. */
+ return is_qos_0 ? AWS_MQTT_CLIENT_REQUEST_COMPLETE : AWS_MQTT_CLIENT_REQUEST_ONGOING;
+}
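+
+/*
+ * Illustrative note on the chunking loop in s_publish_send above (the sizes are assumptions for the example,
+ * not values fixed by this library): if each io message acquired from the channel's pool can hold 16 KiB and
+ * the payload is 40 KiB, the loop sends three messages carrying roughly 16 KiB, 16 KiB, and 8 KiB of payload,
+ * re-acquiring a fresh message after each send until payload_cur is exhausted. The first message also carries
+ * the encoded fixed header and topic, so it holds slightly less payload than the later ones.
+ */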
+
+static void s_publish_complete(
+ struct aws_mqtt_client_connection *connection,
+ uint16_t packet_id,
+ int error_code,
+ void *userdata) {
+ struct publish_task_arg *task_arg = userdata;
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Publish %" PRIu16 " complete", (void *)connection, packet_id);
+
+ if (task_arg->on_complete) {
+ task_arg->on_complete(connection, packet_id, error_code, task_arg->userdata);
+ }
+
+ /*
+ * If we have a forward pointer to a timeout task, then that means the timeout task has not run yet. So we should
+ * follow it and zero out the back pointer to us, because we're going away now. The timeout task will run later
+ * and be harmless (even vs. future operations with the same packet id) because it only cancels if it has a back
+ * pointer.
+ */
+ if (task_arg->timeout_wrapper.timeout_task_arg != NULL) {
+ task_arg->timeout_wrapper.timeout_task_arg->task_arg_wrapper = NULL;
+ task_arg->timeout_wrapper.timeout_task_arg = NULL;
+ }
+
+ aws_byte_buf_clean_up(&task_arg->payload_buf);
+ aws_string_destroy(task_arg->topic_string);
+ aws_mem_release(connection->allocator, task_arg);
+}
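+
+/*
+ * Sketch of the timeout-wrapper handshake used by s_publish_send / s_publish_complete above (and by the
+ * subscribe and unsubscribe paths): after the timeout task is scheduled,
+ *
+ *     task_arg->timeout_wrapper.timeout_task_arg  ---->  timeout_task_arg
+ *     timeout_task_arg->task_arg_wrapper          ---->  &task_arg->timeout_wrapper
+ *
+ * Whichever side runs first (operation completion or timeout) NULLs out the pointer on the other side, so
+ * the one that runs later sees a NULL pointer and becomes a harmless no-op.
+ */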
+
+uint16_t aws_mqtt_client_connection_publish(
+ struct aws_mqtt_client_connection *connection,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ const struct aws_byte_cursor *payload,
+ aws_mqtt_op_complete_fn *on_complete,
+ void *userdata) {
+
+ AWS_PRECONDITION(connection);
+
+ if (!aws_mqtt_is_valid_topic(topic)) {
+ aws_raise_error(AWS_ERROR_MQTT_INVALID_TOPIC);
+ return 0;
+ }
+
+ struct publish_task_arg *arg = aws_mem_calloc(connection->allocator, 1, sizeof(struct publish_task_arg));
+ if (!arg) {
+ return 0;
+ }
+
+ arg->connection = connection;
+ arg->topic_string = aws_string_new_from_array(connection->allocator, topic->ptr, topic->len);
+ arg->topic = aws_byte_cursor_from_string(arg->topic_string);
+ arg->qos = qos;
+ arg->retain = retain;
+ if (aws_byte_buf_init_copy_from_cursor(&arg->payload_buf, connection->allocator, *payload)) {
+ goto handle_error;
+ }
+ arg->payload = aws_byte_cursor_from_buf(&arg->payload_buf);
+ arg->on_complete = on_complete;
+ arg->userdata = userdata;
+
+ /* Calculate the size of the publish packet.
+ * The fixed header size is 2 bytes, the packet ID is 2 bytes,
+ * plus the size of both the topic name and payload */
+ uint64_t publish_packet_size = 4 + arg->topic.len + arg->payload.len;
+
+    /* QoS 0 publishes are neither retried nor queued offline, so they are created with noRetry = true */
+    bool no_retry = qos == AWS_MQTT_QOS_AT_MOST_ONCE;
+    uint16_t packet_id =
+        mqtt_create_request(connection, &s_publish_send, arg, &s_publish_complete, arg, no_retry, publish_packet_size);
+
+ if (packet_id == 0) {
+ /* bummer, we failed to make a new request */
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Failed starting publish to topic " PRInSTR ",error %d (%s)",
+ (void *)connection,
+ AWS_BYTE_CURSOR_PRI(*topic),
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto handle_error;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Starting publish %" PRIu16 " to topic " PRInSTR,
+ (void *)connection,
+ packet_id,
+ AWS_BYTE_CURSOR_PRI(*topic));
+ return packet_id;
+
+handle_error:
+
+ /* we know arg is valid, topic_string may or may not be valid */
+ if (arg->topic_string) {
+ aws_string_destroy(arg->topic_string);
+ }
+
+ aws_byte_buf_clean_up(&arg->payload_buf);
+
+ aws_mem_release(connection->allocator, arg);
+
+ return 0;
+}
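+
+/*
+ * Illustrative usage sketch for aws_mqtt_client_connection_publish above (hypothetical caller code:
+ * `connection` is assumed to be an established connection, s_my_on_complete and my_userdata are
+ * caller-provided; the `false` argument is the retain flag):
+ *
+ *     struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("sensors/temperature");
+ *     struct aws_byte_cursor payload = aws_byte_cursor_from_c_str("21.5");
+ *     uint16_t packet_id = aws_mqtt_client_connection_publish(
+ *         connection, &topic, AWS_MQTT_QOS_AT_LEAST_ONCE, false, &payload, s_my_on_complete, my_userdata);
+ *     if (packet_id == 0) {
+ *         // publish could not be started; inspect aws_last_error()
+ *     }
+ */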
+
+/*******************************************************************************
+ * Ping
+ ******************************************************************************/
+
+static void s_pingresp_received_timeout(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+ struct aws_mqtt_client_connection *connection = arg;
+
+ if (status == AWS_TASK_STATUS_RUN_READY) {
+ /* Check that a pingresp has been received since pingreq was sent */
+ if (connection->thread_data.waiting_on_ping_response) {
+ connection->thread_data.waiting_on_ping_response = false;
+ /* It's been too long since the last ping, close the connection */
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: ping timeout detected", (void *)connection);
+ aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_TIMEOUT);
+ }
+ }
+
+ aws_mem_release(connection->allocator, channel_task);
+}
+
+static enum aws_mqtt_client_request_state s_pingreq_send(uint16_t packet_id, bool is_first_attempt, void *userdata) {
+ (void)packet_id;
+ (void)is_first_attempt;
+ AWS_PRECONDITION(is_first_attempt);
+
+ struct aws_mqtt_client_connection *connection = userdata;
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: pingreq send", (void *)connection);
+ struct aws_mqtt_packet_connection pingreq;
+ aws_mqtt_packet_pingreq_init(&pingreq);
+
+ struct aws_io_message *message = mqtt_get_message_for_packet(connection, &pingreq.fixed_header);
+ if (!message) {
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ if (aws_mqtt_packet_connection_encode(&message->message_data, &pingreq)) {
+ aws_mem_release(message->allocator, message);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+ if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ aws_mem_release(message->allocator, message);
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+ }
+
+    /* Note that we are now waiting on a PINGRESP for the PINGREQ we just sent */
+ connection->thread_data.waiting_on_ping_response = true;
+
+ struct aws_channel_task *ping_timeout_task =
+ aws_mem_calloc(connection->allocator, 1, sizeof(struct aws_channel_task));
+ if (!ping_timeout_task) {
+ /* allocation failed, no log, just return error. */
+ goto error;
+ }
+ aws_channel_task_init(ping_timeout_task, s_pingresp_received_timeout, connection, "mqtt_pingresp_timeout");
+ uint64_t now = 0;
+ if (aws_channel_current_clock_time(connection->slot->channel, &now)) {
+ goto error;
+ }
+ now += connection->ping_timeout_ns;
+ aws_channel_schedule_task_future(connection->slot->channel, ping_timeout_task, now);
+ return AWS_MQTT_CLIENT_REQUEST_COMPLETE;
+
+error:
+ return AWS_MQTT_CLIENT_REQUEST_ERROR;
+}
+
+int aws_mqtt_client_connection_ping(struct aws_mqtt_client_connection *connection) {
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting ping", (void *)connection);
+
+ uint16_t packet_id =
+ mqtt_create_request(connection, &s_pingreq_send, connection, NULL, NULL, true, /* noRetry */ 0);
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_CLIENT, "id=%p: Starting ping with packet id %" PRIu16, (void *)connection, packet_id);
+
+ return (packet_id > 0) ? AWS_OP_SUCCESS : AWS_OP_ERR;
+}
+
+/*******************************************************************************
+ * Operation Statistics
+ ******************************************************************************/
+
+void aws_mqtt_connection_statistics_change_operation_statistic_state(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_mqtt_request *request,
+ enum aws_mqtt_operation_statistic_state_flags new_state_flags) {
+
+ // Error checking
+ if (!connection) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "Invalid MQTT311 connection used when trying to change operation statistic state");
+ return;
+ }
+ if (!request) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "Invalid MQTT311 request used when trying to change operation statistic state");
+ return;
+ }
+
+ uint64_t packet_size = request->packet_size;
+ /**
+     * If the packet size is zero, skip it: we only track packets whose size we have intentionally
+     * calculated, and those sizes are always non-zero (zero-size packets are ACKs, pings, etc.).
+     */
+    if (packet_size == 0) {
+ return;
+ }
+
+ enum aws_mqtt_operation_statistic_state_flags old_state_flags = request->statistic_state_flags;
+ if (new_state_flags == old_state_flags) {
+ return;
+ }
+
+ struct aws_mqtt_connection_operation_statistics_impl *stats = &connection->operation_statistics_impl;
+ if ((old_state_flags & AWS_MQTT_OSS_INCOMPLETE) != (new_state_flags & AWS_MQTT_OSS_INCOMPLETE)) {
+ if ((new_state_flags & AWS_MQTT_OSS_INCOMPLETE) != 0) {
+ aws_atomic_fetch_add(&stats->incomplete_operation_count_atomic, 1);
+ aws_atomic_fetch_add(&stats->incomplete_operation_size_atomic, (size_t)packet_size);
+ } else {
+ aws_atomic_fetch_sub(&stats->incomplete_operation_count_atomic, 1);
+ aws_atomic_fetch_sub(&stats->incomplete_operation_size_atomic, (size_t)packet_size);
+ }
+ }
+
+ if ((old_state_flags & AWS_MQTT_OSS_UNACKED) != (new_state_flags & AWS_MQTT_OSS_UNACKED)) {
+ if ((new_state_flags & AWS_MQTT_OSS_UNACKED) != 0) {
+ aws_atomic_fetch_add(&stats->unacked_operation_count_atomic, 1);
+ aws_atomic_fetch_add(&stats->unacked_operation_size_atomic, (size_t)packet_size);
+ } else {
+ aws_atomic_fetch_sub(&stats->unacked_operation_count_atomic, 1);
+ aws_atomic_fetch_sub(&stats->unacked_operation_size_atomic, (size_t)packet_size);
+ }
+ }
+ request->statistic_state_flags = new_state_flags;
+
+ // If the callback is defined, then call it
+ if (connection && connection->on_any_operation_statistics && connection->on_any_operation_statistics_ud) {
+ (*connection->on_any_operation_statistics)(connection, connection->on_any_operation_statistics_ud);
+ }
+}
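+
+/*
+ * Worked example of the state transitions above (illustrative): take a PUBLISH request with packet_size = 100.
+ *   1. On creation it is marked AWS_MQTT_OSS_INCOMPLETE           -> incomplete count += 1, incomplete size += 100
+ *   2. When it is sent and awaits its ack it is marked
+ *      AWS_MQTT_OSS_INCOMPLETE | AWS_MQTT_OSS_UNACKED             -> unacked count += 1, unacked size += 100
+ *   3. When the PUBACK arrives it is marked AWS_MQTT_OSS_NONE     -> both counts and sizes are decremented again
+ * Only the bits that actually change between old and new state flags touch the corresponding counters.
+ */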
+
+int aws_mqtt_client_connection_get_stats(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_mqtt_connection_operation_statistics *stats) {
+ // Error checking
+ if (!connection) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "Invalid MQTT311 connection used when trying to get operation statistics");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ if (!stats) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Invalid MQTT311 connection statistics struct used when trying to get operation statistics",
+ (void *)connection);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ stats->incomplete_operation_count =
+ (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.incomplete_operation_count_atomic);
+ stats->incomplete_operation_size =
+ (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.incomplete_operation_size_atomic);
+ stats->unacked_operation_count =
+ (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.unacked_operation_count_atomic);
+ stats->unacked_operation_size =
+ (uint64_t)aws_atomic_load_int(&connection->operation_statistics_impl.unacked_operation_size_atomic);
+
+ return AWS_OP_SUCCESS;
+}
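+
+/*
+ * Illustrative usage sketch (hypothetical caller code; `connection` is assumed to be an established connection
+ * and the printf is only for demonstration):
+ *
+ *     struct aws_mqtt_connection_operation_statistics stats;
+ *     if (aws_mqtt_client_connection_get_stats(connection, &stats) == AWS_OP_SUCCESS) {
+ *         printf("unacked ops: %" PRIu64 ", unacked bytes: %" PRIu64 "\n",
+ *             stats.unacked_operation_count, stats.unacked_operation_size);
+ *     }
+ */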
+
+int aws_mqtt_client_connection_set_on_operation_statistics_handler(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_on_operation_statistics_fn *on_operation_statistics,
+ void *on_operation_statistics_ud) {
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Setting on_operation_statistics handler", (void *)connection);
+
+ connection->on_any_operation_statistics = on_operation_statistics;
+ connection->on_any_operation_statistics_ud = on_operation_statistics_ud;
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c b/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
new file mode 100644
index 0000000000..f9c01cbd45
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/client_channel_handler.c
@@ -0,0 +1,1061 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/client_impl.h>
+
+#include <aws/mqtt/private/packets.h>
+#include <aws/mqtt/private/topic_tree.h>
+
+#include <aws/io/logging.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/math.h>
+#include <aws/common/task_scheduler.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4204)
+#endif
+
+/*******************************************************************************
+ * Packet State Machine
+ ******************************************************************************/
+
+typedef int(packet_handler_fn)(struct aws_mqtt_client_connection *connection, struct aws_byte_cursor message_cursor);
+
+static int s_packet_handler_default(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+ (void)connection;
+ (void)message_cursor;
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT_CLIENT, "id=%p: Unhandled packet type received", (void *)connection);
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_PACKET_TYPE);
+}
+
+static void s_on_time_to_ping(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status);
+static void s_schedule_ping(struct aws_mqtt_client_connection *connection) {
+ aws_channel_task_init(&connection->ping_task, s_on_time_to_ping, connection, "mqtt_ping");
+
+ uint64_t now = 0;
+ aws_channel_current_clock_time(connection->slot->channel, &now);
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: Scheduling PING. current timestamp is %" PRIu64, (void *)connection, now);
+
+ uint64_t schedule_time =
+ now + aws_timestamp_convert(connection->keep_alive_time_secs, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: The next ping will be run at timestamp %" PRIu64,
+ (void *)connection,
+ schedule_time);
+ aws_channel_schedule_task_future(connection->slot->channel, &connection->ping_task, schedule_time);
+}
+
+static void s_on_time_to_ping(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+ (void)channel_task;
+
+ if (status == AWS_TASK_STATUS_RUN_READY) {
+ struct aws_mqtt_client_connection *connection = arg;
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Sending PING", (void *)connection);
+ aws_mqtt_client_connection_ping(connection);
+ s_schedule_ping(connection);
+ }
+}
+static int s_packet_handler_connack(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: CONNACK received", (void *)connection);
+
+ struct aws_mqtt_packet_connack connack;
+ if (aws_mqtt_packet_connack_decode(&message_cursor, &connack)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT, "id=%p: error %d parsing CONNACK packet", (void *)connection, aws_last_error());
+
+ return AWS_OP_ERR;
+ }
+ bool was_reconnecting;
+ struct aws_linked_list requests;
+ aws_linked_list_init(&requests);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ /* User requested disconnect, don't do anything */
+ if (connection->synced_data.state >= AWS_MQTT_CLIENT_STATE_DISCONNECTING) {
+ mqtt_connection_unlock_synced_data(connection);
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: User has requested disconnect, dropping connection", (void *)connection);
+ return AWS_OP_SUCCESS;
+ }
+
+ was_reconnecting = connection->synced_data.state == AWS_MQTT_CLIENT_STATE_RECONNECTING;
+ if (connack.connect_return_code == AWS_MQTT_CONNECT_ACCEPTED) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: connection was accepted, switch state from %d to CONNECTED.",
+ (void *)connection,
+ (int)connection->synced_data.state);
+ /* Don't change the state if it's not ACCEPTED by broker */
+ mqtt_connection_set_state(connection, AWS_MQTT_CLIENT_STATE_CONNECTED);
+ aws_linked_list_swap_contents(&connection->synced_data.pending_requests_list, &requests);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ connection->connection_count++;
+
+ uint64_t now = 0;
+ aws_high_res_clock_get_ticks(&now);
+
+ if (connack.connect_return_code == AWS_MQTT_CONNECT_ACCEPTED) {
+
+ /*
+ * This was a successful MQTT connection establishment, record the time so that channel shutdown
+ * can make a good decision about reconnect backoff reset.
+ */
+ connection->reconnect_timeouts.channel_successful_connack_timestamp_ns = now;
+
+ /* If successfully connected, schedule all pending tasks */
+ AWS_LOGF_TRACE(
+            AWS_LS_MQTT_CLIENT, "id=%p: connection was accepted, processing offline requests.", (void *)connection);
+
+ if (!aws_linked_list_empty(&requests)) {
+
+ struct aws_linked_list_node *current = aws_linked_list_front(&requests);
+ const struct aws_linked_list_node *end = aws_linked_list_end(&requests);
+
+ do {
+ struct aws_mqtt_request *request = AWS_CONTAINER_OF(current, struct aws_mqtt_request, list_node);
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: processing offline request %" PRIu16,
+ (void *)connection,
+ request->packet_id);
+ aws_channel_schedule_task_now(connection->slot->channel, &request->outgoing_task);
+ current = current->next;
+ } while (current != end);
+ }
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: invalid connect return code %d, disconnecting",
+ (void *)connection,
+ connack.connect_return_code);
+ /* If error code returned, disconnect, on_completed will be invoked from shutdown process */
+ aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_PROTOCOL_ERROR);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ /* It is possible for a connection to complete, and a hangup to occur before the
+ * CONNECT/CONNACK cycle completes. In that case, we must deliver on_connection_complete
+ * on the first successful CONNACK or user code will never think it's connected */
+ if (was_reconnecting && connection->connection_count > 1) {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: connection is a resumed connection, invoking on_resumed callback",
+ (void *)connection);
+
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_resumed, connack.connect_return_code, connack.session_present);
+ } else {
+
+ aws_create_reconnect_task(connection);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: connection is a new connection, invoking on_connection_complete callback",
+ (void *)connection);
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(
+ connection, on_connection_complete, AWS_OP_SUCCESS, connack.connect_return_code, connack.session_present);
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: connection callback completed", (void *)connection);
+
+ s_schedule_ping(connection);
+ return AWS_OP_SUCCESS;
+}
+
+static int s_packet_handler_publish(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+
+ /* TODO: need to handle the QoS 2 message to avoid processing the message a second time */
+ struct aws_mqtt_packet_publish publish;
+ if (aws_mqtt_packet_publish_decode(&message_cursor, &publish)) {
+ return AWS_OP_ERR;
+ }
+
+ aws_mqtt_topic_tree_publish(&connection->thread_data.subscriptions, &publish);
+
+ bool dup = aws_mqtt_packet_publish_get_dup(&publish);
+ enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(&publish);
+ bool retain = aws_mqtt_packet_publish_get_retain(&publish);
+
+ MQTT_CLIENT_CALL_CALLBACK_ARGS(connection, on_any_publish, &publish.topic_name, &publish.payload, dup, qos, retain);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: publish received with msg id=%" PRIu16 " dup=%d qos=%d retain=%d payload-size=%zu topic=" PRInSTR,
+ (void *)connection,
+ publish.packet_identifier,
+ dup,
+ qos,
+ retain,
+ publish.payload.len,
+ AWS_BYTE_CURSOR_PRI(publish.topic_name));
+ struct aws_mqtt_packet_ack puback;
+ AWS_ZERO_STRUCT(puback);
+
+ /* Switch on QoS flags (bits 1 & 2) */
+ switch (qos) {
+ case AWS_MQTT_QOS_AT_MOST_ONCE:
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: received publish QOS is 0, not sending puback", (void *)connection);
+ /* No more communication necessary */
+ break;
+ case AWS_MQTT_QOS_AT_LEAST_ONCE:
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: received publish QOS is 1, sending puback", (void *)connection);
+ aws_mqtt_packet_puback_init(&puback, publish.packet_identifier);
+ break;
+ case AWS_MQTT_QOS_EXACTLY_ONCE:
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: received publish QOS is 2, sending pubrec", (void *)connection);
+ aws_mqtt_packet_pubrec_init(&puback, publish.packet_identifier);
+ break;
+ default:
+ /* Impossible to hit this branch. QoS value is checked when decoding */
+ AWS_FATAL_ASSERT(0);
+ break;
+ }
+
+ if (puback.packet_identifier) {
+ struct aws_io_message *message = mqtt_get_message_for_packet(connection, &puback.fixed_header);
+ if (!message) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mqtt_packet_ack_encode(&message->message_data, &puback)) {
+ aws_mem_release(message->allocator, message);
+ return AWS_OP_ERR;
+ }
+
+ if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ aws_mem_release(message->allocator, message);
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_packet_handler_ack(struct aws_mqtt_client_connection *connection, struct aws_byte_cursor message_cursor) {
+ struct aws_mqtt_packet_ack ack;
+ if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: received ack for message id %" PRIu16, (void *)connection, ack.packet_identifier);
+
+ mqtt_request_complete(connection, AWS_ERROR_SUCCESS, ack.packet_identifier);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_packet_handler_suback(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+ struct aws_mqtt_packet_suback suback;
+ if (aws_mqtt_packet_suback_init(&suback, connection->allocator, 0 /* fake packet_id */)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mqtt_packet_suback_decode(&message_cursor, &suback)) {
+ goto error;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: received suback for message id %" PRIu16,
+ (void *)connection,
+ suback.packet_identifier);
+
+ struct aws_mqtt_request *request = NULL;
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&connection->synced_data.outstanding_requests_table, &suback.packet_identifier, &elem);
+ if (elem != NULL) {
+ request = elem->value;
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (request == NULL) {
+ /* no corresponding request found */
+ goto done;
+ }
+
+ struct subscribe_task_arg *task_arg = request->on_complete_ud;
+ size_t request_topics_len = aws_array_list_length(&task_arg->topics);
+ size_t suback_return_code_len = aws_array_list_length(&suback.return_codes);
+ if (request_topics_len != suback_return_code_len) {
+ goto error;
+ }
+ size_t num_filters = aws_array_list_length(&suback.return_codes);
+ for (size_t i = 0; i < num_filters; ++i) {
+
+ uint8_t return_code = 0;
+ struct subscribe_task_topic *topic = NULL;
+ aws_array_list_get_at(&suback.return_codes, (void *)&return_code, i);
+ aws_array_list_get_at(&task_arg->topics, &topic, i);
+ topic->request.qos = return_code;
+ }
+
+done:
+ mqtt_request_complete(connection, AWS_ERROR_SUCCESS, suback.packet_identifier);
+ aws_mqtt_packet_suback_clean_up(&suback);
+ return AWS_OP_SUCCESS;
+error:
+ aws_mqtt_packet_suback_clean_up(&suback);
+ return AWS_OP_ERR;
+}
+
+static int s_packet_handler_pubrec(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+
+ struct aws_mqtt_packet_ack ack;
+ if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) {
+ return AWS_OP_ERR;
+ }
+
+ /* TODO: When sending PUBLISH with QoS 2, we should be storing the data until this packet is received, at which
+ * point we may discard it. */
+
+ /* Send PUBREL */
+ aws_mqtt_packet_pubrel_init(&ack, ack.packet_identifier);
+ struct aws_io_message *message = mqtt_get_message_for_packet(connection, &ack.fixed_header);
+ if (!message) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mqtt_packet_ack_encode(&message->message_data, &ack)) {
+ goto on_error;
+ }
+
+ if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ if (message) {
+ aws_mem_release(message->allocator, message);
+ }
+
+ return AWS_OP_ERR;
+}
+
+static int s_packet_handler_pubrel(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+
+ struct aws_mqtt_packet_ack ack;
+ if (aws_mqtt_packet_ack_decode(&message_cursor, &ack)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Send PUBCOMP */
+ aws_mqtt_packet_pubcomp_init(&ack, ack.packet_identifier);
+ struct aws_io_message *message = mqtt_get_message_for_packet(connection, &ack.fixed_header);
+ if (!message) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mqtt_packet_ack_encode(&message->message_data, &ack)) {
+ goto on_error;
+ }
+
+ if (aws_channel_slot_send_message(connection->slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ if (message) {
+ aws_mem_release(message->allocator, message);
+ }
+
+ return AWS_OP_ERR;
+}
+
+static int s_packet_handler_pingresp(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_byte_cursor message_cursor) {
+
+ (void)message_cursor;
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: PINGRESP received", (void *)connection);
+
+ connection->thread_data.waiting_on_ping_response = false;
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Bake up a big ol' function table just like Gramma used to make */
+static packet_handler_fn *s_packet_handlers[] = {
+ [AWS_MQTT_PACKET_CONNECT] = &s_packet_handler_default,
+ [AWS_MQTT_PACKET_CONNACK] = &s_packet_handler_connack,
+ [AWS_MQTT_PACKET_PUBLISH] = &s_packet_handler_publish,
+ [AWS_MQTT_PACKET_PUBACK] = &s_packet_handler_ack,
+ [AWS_MQTT_PACKET_PUBREC] = &s_packet_handler_pubrec,
+ [AWS_MQTT_PACKET_PUBREL] = &s_packet_handler_pubrel,
+ [AWS_MQTT_PACKET_PUBCOMP] = &s_packet_handler_ack,
+ [AWS_MQTT_PACKET_SUBSCRIBE] = &s_packet_handler_default,
+ [AWS_MQTT_PACKET_SUBACK] = &s_packet_handler_suback,
+ [AWS_MQTT_PACKET_UNSUBSCRIBE] = &s_packet_handler_default,
+ [AWS_MQTT_PACKET_UNSUBACK] = &s_packet_handler_ack,
+ [AWS_MQTT_PACKET_PINGREQ] = &s_packet_handler_default,
+ [AWS_MQTT_PACKET_PINGRESP] = &s_packet_handler_pingresp,
+ [AWS_MQTT_PACKET_DISCONNECT] = &s_packet_handler_default,
+};
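+
+/*
+ * Illustrative dispatch example: an incoming packet whose first byte is 0x40 has packet type
+ * 0x40 >> 4 == 4 == AWS_MQTT_PACKET_PUBACK, so s_process_mqtt_packet below routes it to s_packet_handler_ack.
+ */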
+
+/*******************************************************************************
+ * Channel Handler
+ ******************************************************************************/
+
+static int s_process_mqtt_packet(
+ struct aws_mqtt_client_connection *connection,
+ enum aws_mqtt_packet_type packet_type,
+ struct aws_byte_cursor packet) {
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ /* [MQTT-3.2.0-1] The first packet sent from the Server to the Client MUST be a CONNACK Packet */
+ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_CONNECTING &&
+ packet_type != AWS_MQTT_PACKET_CONNACK) {
+ mqtt_connection_unlock_synced_data(connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: First message received from the server was not a CONNACK. Terminating connection.",
+ (void *)connection);
+ aws_channel_shutdown(connection->slot->channel, AWS_ERROR_MQTT_PROTOCOL_ERROR);
+ return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (AWS_UNLIKELY(packet_type > AWS_MQTT_PACKET_DISCONNECT || packet_type < AWS_MQTT_PACKET_CONNECT)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Invalid packet type received %d. Terminating connection.",
+ (void *)connection,
+ packet_type);
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_PACKET_TYPE);
+ }
+
+ /* Handle the packet */
+ return s_packet_handlers[packet_type](connection, packet);
+}
+
+/**
+ * Handles incoming messages from the server.
+ */
+static int s_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ struct aws_mqtt_client_connection *connection = handler->impl;
+
+ if (message->message_type != AWS_IO_MESSAGE_APPLICATION_DATA || message->message_data.len < 1) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+        "id=%p: processing read message of size %zu",
+ (void *)connection,
+ message->message_data.len);
+
+ /* This cursor will be updated as we read through the message. */
+ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data);
+
+ /* If there's pending packet left over from last time, attempt to complete it. */
+ if (connection->thread_data.pending_packet.len) {
+ int result = AWS_OP_SUCCESS;
+
+ /* This determines how much to read from the message (min(expected_remaining, message.len)) */
+ size_t to_read = connection->thread_data.pending_packet.capacity - connection->thread_data.pending_packet.len;
+ /* This will be set to false if this message still won't complete the packet object. */
+ bool packet_complete = true;
+ if (to_read > message_cursor.len) {
+ to_read = message_cursor.len;
+ packet_complete = false;
+ }
+
+ /* Write the chunk to the buffer.
+ * This will either complete the packet, or be the entirety of message if more data is required. */
+ struct aws_byte_cursor chunk = aws_byte_cursor_advance(&message_cursor, to_read);
+ AWS_ASSERT(chunk.ptr); /* Guaranteed to be in bounds */
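+        /* write_from_whole_cursor returns a bool; subtracting 1 maps success (true) to 0 and failure (false) to -1 */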
+ result = (int)aws_byte_buf_write_from_whole_cursor(&connection->thread_data.pending_packet, chunk) - 1;
+ if (result) {
+ goto handle_error;
+ }
+
+ /* If the packet is still incomplete, don't do anything with the data. */
+ if (!packet_complete) {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: partial message is still incomplete, waiting on another read.",
+ (void *)connection);
+
+ goto cleanup;
+ }
+
+ /* Handle the completed pending packet */
+ struct aws_byte_cursor packet_data = aws_byte_cursor_from_buf(&connection->thread_data.pending_packet);
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: full mqtt packet re-assembled, dispatching.", (void *)connection);
+ result = s_process_mqtt_packet(connection, aws_mqtt_get_packet_type(packet_data.ptr), packet_data);
+
+ handle_error:
+ /* Clean up the pending packet */
+ aws_byte_buf_clean_up(&connection->thread_data.pending_packet);
+ AWS_ZERO_STRUCT(connection->thread_data.pending_packet);
+
+ if (result) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ while (message_cursor.len) {
+
+ /* Temp byte cursor so we can decode the header without advancing message_cursor. */
+ struct aws_byte_cursor header_decode = message_cursor;
+
+ struct aws_mqtt_fixed_header packet_header;
+ AWS_ZERO_STRUCT(packet_header);
+ int result = aws_mqtt_fixed_header_decode(&header_decode, &packet_header);
+
+ /* Calculate how much data was read. */
+ const size_t fixed_header_size = message_cursor.len - header_decode.len;
+
+ if (result) {
+ if (aws_last_error() == AWS_ERROR_SHORT_BUFFER) {
+ /* Message data too short, store data and come back later. */
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT, "id=%p: message is incomplete, waiting on another read.", (void *)connection);
+ if (aws_byte_buf_init(
+ &connection->thread_data.pending_packet,
+ connection->allocator,
+ fixed_header_size + packet_header.remaining_length)) {
+
+ return AWS_OP_ERR;
+ }
+
+ /* Write the partial packet. */
+ if (!aws_byte_buf_write_from_whole_cursor(&connection->thread_data.pending_packet, message_cursor)) {
+ aws_byte_buf_clean_up(&connection->thread_data.pending_packet);
+ return AWS_OP_ERR;
+ }
+
+ aws_reset_error();
+ goto cleanup;
+ } else {
+ return AWS_OP_ERR;
+ }
+ }
+
+ struct aws_byte_cursor packet_data =
+ aws_byte_cursor_advance(&message_cursor, fixed_header_size + packet_header.remaining_length);
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: full mqtt packet read, dispatching.", (void *)connection);
+ s_process_mqtt_packet(connection, packet_header.packet_type, packet_data);
+ }
+
+cleanup:
+ /* Do cleanup */
+ aws_channel_slot_increment_read_window(slot, message->message_data.len);
+ aws_mem_release(message->allocator, message);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately) {
+
+ struct aws_mqtt_client_connection *connection = handler->impl;
+
+ if (dir == AWS_CHANNEL_DIR_WRITE) {
+ /* On closing write direction, send out disconnect packet before closing connection. */
+
+ if (!free_scarce_resources_immediately) {
+
+ if (error_code == AWS_OP_SUCCESS) {
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: sending disconnect message as part of graceful shutdown.",
+ (void *)connection);
+ /* On clean shutdown, send the disconnect message */
+ struct aws_mqtt_packet_connection disconnect;
+ aws_mqtt_packet_disconnect_init(&disconnect);
+
+ struct aws_io_message *message = mqtt_get_message_for_packet(connection, &disconnect.fixed_header);
+ if (!message) {
+ goto done;
+ }
+
+ if (aws_mqtt_packet_connection_encode(&message->message_data, &disconnect)) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: failed to encode courteous disconnect io message",
+ (void *)connection);
+ aws_mem_release(message->allocator, message);
+ goto done;
+ }
+
+ if (aws_channel_slot_send_message(slot, message, AWS_CHANNEL_DIR_WRITE)) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: failed to send courteous disconnect io message",
+ (void *)connection);
+ aws_mem_release(message->allocator, message);
+ goto done;
+ }
+ }
+ }
+ }
+
+done:
+ return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately);
+}
+
+static size_t s_initial_window_size(struct aws_channel_handler *handler) {
+
+ (void)handler;
+
+ return SIZE_MAX;
+}
+
+static void s_destroy(struct aws_channel_handler *handler) {
+
+ struct aws_mqtt_client_connection *connection = handler->impl;
+ (void)connection;
+}
+
+static size_t s_message_overhead(struct aws_channel_handler *handler) {
+ (void)handler;
+ return 0;
+}
+
+struct aws_channel_handler_vtable *aws_mqtt_get_client_channel_vtable(void) {
+
+ static struct aws_channel_handler_vtable s_vtable = {
+ .process_read_message = &s_process_read_message,
+ .process_write_message = NULL,
+ .increment_read_window = NULL,
+ .shutdown = &s_shutdown,
+ .initial_window_size = &s_initial_window_size,
+ .message_overhead = &s_message_overhead,
+ .destroy = &s_destroy,
+ };
+
+ return &s_vtable;
+}
+
+/*******************************************************************************
+ * Helpers
+ ******************************************************************************/
+
+struct aws_io_message *mqtt_get_message_for_packet(
+ struct aws_mqtt_client_connection *connection,
+ struct aws_mqtt_fixed_header *header) {
+
+ const size_t required_length = 3 + header->remaining_length;
+
+ struct aws_io_message *message = aws_channel_acquire_message_from_pool(
+ connection->slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, required_length);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Acquiring memory from pool of required_length %zu",
+ (void *)connection,
+ required_length);
+
+ return message;
+}
+
+/*******************************************************************************
+ * Requests
+ ******************************************************************************/
+
+/* Send the request */
+static void s_request_outgoing_task(struct aws_channel_task *task, void *arg, enum aws_task_status status) {
+
+ struct aws_mqtt_request *request = arg;
+ struct aws_mqtt_client_connection *connection = request->connection;
+
+ if (status == AWS_TASK_STATUS_CANCELED) {
+        /* Connection lost before the request was ever sent; check whether the request needs to be retried */
+ if (request->retryable) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+                "static: task id %p was canceled due to the channel shutting down. Request for packet id "
+                "%" PRIu16 " will be retried",
+ (void *)task,
+ request->packet_id);
+
+ /* put it into the offline queue. */
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ /* Set the status as incomplete */
+ aws_mqtt_connection_statistics_change_operation_statistic_state(
+ connection, request, AWS_MQTT_OSS_INCOMPLETE);
+
+ aws_linked_list_push_back(&connection->synced_data.pending_requests_list, &request->list_node);
+
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+                "static: task id %p was canceled due to the channel shutting down. Request for packet id "
+                "%" PRIu16 " will NOT be retried and will be cancelled",
+ (void *)task,
+ request->packet_id);
+
+            /* Fire the callback and clean up the memory, as the connection is being destroyed. */
+ if (request->on_complete) {
+ request->on_complete(
+ connection, request->packet_id, AWS_ERROR_MQTT_NOT_CONNECTED, request->on_complete_ud);
+ }
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ /* Cancel the request in the operation statistics */
+ aws_mqtt_connection_statistics_change_operation_statistic_state(connection, request, AWS_MQTT_OSS_NONE);
+
+ aws_hash_table_remove(
+ &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL);
+ aws_memory_pool_release(&connection->synced_data.requests_pool, request);
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ }
+ return;
+ }
+
+ /* Send the request */
+ enum aws_mqtt_client_request_state state =
+ request->send_request(request->packet_id, !request->initiated, request->send_request_ud);
+ request->initiated = true;
+ int error_code = AWS_ERROR_SUCCESS;
+ switch (state) {
+ case AWS_MQTT_CLIENT_REQUEST_ERROR:
+ error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: sending request %" PRIu16 " failed with error %d.",
+ (void *)request->connection,
+ request->packet_id,
+ error_code);
+ /* fall-thru */
+
+ case AWS_MQTT_CLIENT_REQUEST_COMPLETE:
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: sending request %" PRIu16 " complete, invoking on_complete callback.",
+ (void *)request->connection,
+ request->packet_id);
+
+ /* If the send_request function reports the request is complete,
+ * remove from the hash table and call the callback. */
+ if (request->on_complete) {
+ request->on_complete(connection, request->packet_id, error_code, request->on_complete_ud);
+ }
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ /* Set the request as complete in the operation statistics */
+ aws_mqtt_connection_statistics_change_operation_statistic_state(
+ request->connection, request, AWS_MQTT_OSS_NONE);
+
+ aws_hash_table_remove(
+ &connection->synced_data.outstanding_requests_table, &request->packet_id, NULL, NULL);
+ aws_memory_pool_release(&connection->synced_data.requests_pool, request);
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+ break;
+
+ case AWS_MQTT_CLIENT_REQUEST_ONGOING:
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: request %" PRIu16 " sent, but waiting on an acknowledgement from peer.",
+ (void *)request->connection,
+ request->packet_id);
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+
+ /* Set the request as incomplete and un-acked in the operation statistics */
+ aws_mqtt_connection_statistics_change_operation_statistic_state(
+ request->connection, request, AWS_MQTT_OSS_INCOMPLETE | AWS_MQTT_OSS_UNACKED);
+
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ /* Put the request into the ongoing list */
+ aws_linked_list_push_back(&connection->thread_data.ongoing_requests_list, &request->list_node);
+ break;
+ }
+}
+
+uint16_t mqtt_create_request(
+ struct aws_mqtt_client_connection *connection,
+ aws_mqtt_send_request_fn *send_request,
+ void *send_request_ud,
+ aws_mqtt_op_complete_fn *on_complete,
+ void *on_complete_ud,
+ bool noRetry,
+ uint64_t packet_size) {
+
+ AWS_ASSERT(connection);
+ AWS_ASSERT(send_request);
+ struct aws_mqtt_request *next_request = NULL;
+ bool should_schedule_task = false;
+ struct aws_channel *channel = NULL;
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING) {
+ mqtt_connection_unlock_synced_data(connection);
+            /* User requested disconnect; ensure no new requests are made until the channel finishes shutting
+             * down. */
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Disconnect requested, stop creating any new request until disconnect process finishes.",
+ (void *)connection);
+ aws_raise_error(AWS_ERROR_MQTT_CONNECTION_DISCONNECTING);
+ return 0;
+ }
+
+ if (noRetry && connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED) {
+ mqtt_connection_unlock_synced_data(connection);
+            /* QoS 0 publishes and PINGREQs are not queued offline. Fail the call. */
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Not currently connected. No offline queueing for QoS 0 publish or pingreq.",
+ (void *)connection);
+ aws_raise_error(AWS_ERROR_MQTT_NOT_CONNECTED);
+ return 0;
+ }
+ /**
+ * Find a free packet ID.
+ * QoS 0 PUBLISH packets don't actually need an ID on the wire,
+ * but we assign them internally anyway just so everything has a unique ID.
+ *
+ * Yes, this is an O(N) search.
+ * We remember the last ID we assigned, so it's O(1) in the common case.
+ * But it's theoretically possible to reach O(N) where N is just above 64000
+ * if the user is letting a ton of un-ack'd messages queue up
+ */
+ uint16_t search_start = connection->synced_data.packet_id;
+ struct aws_hash_element *elem = NULL;
+ while (true) {
+ /* Increment ID, watch out for overflow, ID cannot be 0 */
+ if (connection->synced_data.packet_id == UINT16_MAX) {
+ connection->synced_data.packet_id = 1;
+ } else {
+ connection->synced_data.packet_id++;
+ }
+
+ /* Is there already an outstanding request using this ID? */
+ aws_hash_table_find(
+ &connection->synced_data.outstanding_requests_table, &connection->synced_data.packet_id, &elem);
+
+ if (elem == NULL) {
+ /* Found a free ID! Break out of loop */
+ break;
+ } else if (connection->synced_data.packet_id == search_start) {
+ /* Every ID is taken */
+ mqtt_connection_unlock_synced_data(connection);
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Queue is full. No more packet IDs are available at this time.",
+ (void *)connection);
+ aws_raise_error(AWS_ERROR_MQTT_QUEUE_FULL);
+ return 0;
+ }
+ }
+
+ next_request = aws_memory_pool_acquire(&connection->synced_data.requests_pool);
+ if (!next_request) {
+ mqtt_connection_unlock_synced_data(connection);
+ return 0;
+ }
+ memset(next_request, 0, sizeof(struct aws_mqtt_request));
+
+ next_request->packet_id = connection->synced_data.packet_id;
+
+ if (aws_hash_table_put(
+ &connection->synced_data.outstanding_requests_table, &next_request->packet_id, next_request, NULL)) {
+ /* failed to put the next request into the table */
+ aws_memory_pool_release(&connection->synced_data.requests_pool, next_request);
+ mqtt_connection_unlock_synced_data(connection);
+ return 0;
+ }
+ /* Store the request by packet_id */
+ next_request->allocator = connection->allocator;
+ next_request->connection = connection;
+ next_request->initiated = false;
+ next_request->retryable = !noRetry;
+ next_request->send_request = send_request;
+ next_request->send_request_ud = send_request_ud;
+ next_request->on_complete = on_complete;
+ next_request->on_complete_ud = on_complete_ud;
+ next_request->packet_size = packet_size;
+ aws_channel_task_init(
+ &next_request->outgoing_task, s_request_outgoing_task, next_request, "mqtt_outgoing_request_task");
+ if (connection->synced_data.state != AWS_MQTT_CLIENT_STATE_CONNECTED) {
+ aws_linked_list_push_back(&connection->synced_data.pending_requests_list, &next_request->list_node);
+ } else {
+ AWS_ASSERT(connection->slot);
+ AWS_ASSERT(connection->slot->channel);
+ should_schedule_task = true;
+ channel = connection->slot->channel;
+ /* keep the channel alive until the task is scheduled */
+ aws_channel_acquire_hold(channel);
+ }
+
+ if (next_request && next_request->packet_size > 0) {
+ /* Set the status as incomplete */
+ aws_mqtt_connection_statistics_change_operation_statistic_state(
+ next_request->connection, next_request, AWS_MQTT_OSS_INCOMPLETE);
+ }
+
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (should_schedule_task) {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: Currently not in the event-loop thread, scheduling a task to send message id %" PRIu16 ".",
+ (void *)connection,
+ next_request->packet_id);
+ aws_channel_schedule_task_now(channel, &next_request->outgoing_task);
+        /* release the hold we acquired on the channel under the lock */
+ aws_channel_release_hold(channel);
+ }
+
+ return next_request->packet_id;
+}
+
+void mqtt_request_complete(struct aws_mqtt_client_connection *connection, int error_code, uint16_t packet_id) {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: message id %" PRIu16 " completed with error code %d, removing from outstanding requests list.",
+ (void *)connection,
+ packet_id,
+ error_code);
+
+ bool found_request = false;
+ aws_mqtt_op_complete_fn *on_complete = NULL;
+ void *on_complete_ud = NULL;
+
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&connection->synced_data.outstanding_requests_table, &packet_id, &elem);
+ if (elem != NULL) {
+ found_request = true;
+
+ struct aws_mqtt_request *request = elem->value;
+ on_complete = request->on_complete;
+ on_complete_ud = request->on_complete_ud;
+
+ /* Set the status as complete */
+ aws_mqtt_connection_statistics_change_operation_statistic_state(
+ request->connection, request, AWS_MQTT_OSS_NONE);
+
+ /* clean up request resources */
+ aws_hash_table_remove_element(&connection->synced_data.outstanding_requests_table, elem);
+ /* remove the request from the list, which is thread_data.ongoing_requests_list */
+ aws_linked_list_remove(&request->list_node);
+ aws_memory_pool_release(&connection->synced_data.requests_pool, request);
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (!found_request) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p: received completion for message id %" PRIu16
+ " but no outstanding request exists. Assuming this is an ack of a resend when the first request has "
+ "already completed.",
+ (void *)connection,
+ packet_id);
+ return;
+ }
+
+ /* Invoke the complete callback. */
+ if (on_complete) {
+ on_complete(connection, packet_id, error_code, on_complete_ud);
+ }
+}
+
+struct mqtt_shutdown_task {
+ int error_code;
+ struct aws_channel_task task;
+};
+
+static void s_mqtt_disconnect_task(struct aws_channel_task *channel_task, void *arg, enum aws_task_status status) {
+
+ (void)status;
+
+ struct mqtt_shutdown_task *task = AWS_CONTAINER_OF(channel_task, struct mqtt_shutdown_task, task);
+ struct aws_mqtt_client_connection *connection = arg;
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_CLIENT, "id=%p: Doing disconnect", (void *)connection);
+ { /* BEGIN CRITICAL SECTION */
+ mqtt_connection_lock_synced_data(connection);
+ /* If there is an outstanding reconnect task, cancel it */
+ if (connection->synced_data.state == AWS_MQTT_CLIENT_STATE_DISCONNECTING && connection->reconnect_task) {
+ aws_atomic_store_ptr(&connection->reconnect_task->connection_ptr, NULL);
+ /* If the reconnect_task isn't scheduled, free it */
+ if (connection->reconnect_task && !connection->reconnect_task->task.timestamp) {
+ aws_mem_release(connection->reconnect_task->allocator, connection->reconnect_task);
+ }
+ connection->reconnect_task = NULL;
+ }
+ mqtt_connection_unlock_synced_data(connection);
+ } /* END CRITICAL SECTION */
+
+ if (connection->slot && connection->slot->channel) {
+ aws_channel_shutdown(connection->slot->channel, task->error_code);
+ }
+
+ aws_mem_release(connection->allocator, task);
+}
+
+void mqtt_disconnect_impl(struct aws_mqtt_client_connection *connection, int error_code) {
+ if (connection->slot) {
+ struct mqtt_shutdown_task *shutdown_task =
+ aws_mem_calloc(connection->allocator, 1, sizeof(struct mqtt_shutdown_task));
+ shutdown_task->error_code = error_code;
+ aws_channel_task_init(&shutdown_task->task, s_mqtt_disconnect_task, connection, "mqtt_disconnect");
+ aws_channel_schedule_task_now(connection->slot->channel, &shutdown_task->task);
+ }
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c b/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
new file mode 100644
index 0000000000..22372f4194
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/fixed_header.c
@@ -0,0 +1,144 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/fixed_header.h>
+
+/**
+ * Implements encoding & decoding of the remaining_length field across 1-4 bytes [MQTT-2.2.3].
+ *
+ * Any number less than or equal to 127 (the 7-bit max) is written into a single byte; any larger number is written
+ * across multiple bytes, using the most significant bit (128) of each byte as a continuation flag.
+ */
+static int s_encode_remaining_length(struct aws_byte_buf *buf, size_t remaining_length) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(remaining_length < UINT32_MAX);
+
+ do {
+ uint8_t encoded_byte = remaining_length % 128;
+ remaining_length /= 128;
+ if (remaining_length) {
+ encoded_byte |= 128;
+ }
+ if (!aws_byte_buf_write_u8(buf, encoded_byte)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ } while (remaining_length);
+
+ return AWS_OP_SUCCESS;
+}
+static int s_decode_remaining_length(struct aws_byte_cursor *cur, size_t *remaining_length_out) {
+
+ AWS_PRECONDITION(cur);
+
+ /* Read remaining_length */
+ size_t multiplier = 1;
+ size_t remaining_length = 0;
+ while (true) {
+ uint8_t encoded_byte;
+ if (!aws_byte_cursor_read_u8(cur, &encoded_byte)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ remaining_length += (encoded_byte & 127) * multiplier;
+ multiplier *= 128;
+
+ if (!(encoded_byte & 128)) {
+ break;
+ }
+ if (multiplier > 128 * 128 * 128) {
+ /* If high order bit is set on last byte, value is malformed */
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH);
+ }
+ }
+
+ *remaining_length_out = remaining_length;
+ return AWS_OP_SUCCESS;
+}
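+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It round-trips one value through the two helpers above: 321 (= 2 * 128 + 65)
+ * encodes as the two bytes 0xC1 0x02, and decoding those bytes yields 321 again.
+ */
+static int s_remaining_length_roundtrip_example(void) {
+    uint8_t storage[4];
+    struct aws_byte_buf buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
+
+    if (s_encode_remaining_length(&buf, 321)) {
+        return AWS_OP_ERR;
+    }
+    AWS_ASSERT(buf.len == 2 && buf.buffer[0] == 0xC1 && buf.buffer[1] == 0x02);
+
+    struct aws_byte_cursor cur = aws_byte_cursor_from_buf(&buf);
+    size_t decoded = 0;
+    if (s_decode_remaining_length(&cur, &decoded)) {
+        return AWS_OP_ERR;
+    }
+    AWS_ASSERT(decoded == 321);
+    return AWS_OP_SUCCESS;
+}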
+
+enum aws_mqtt_packet_type aws_mqtt_get_packet_type(const uint8_t *buffer) {
+ return *buffer >> 4;
+}
+
+bool aws_mqtt_packet_has_flags(const struct aws_mqtt_fixed_header *header) {
+
+ /* Parse attributes based on packet type */
+ switch (header->packet_type) {
+ case AWS_MQTT_PACKET_SUBSCRIBE:
+ case AWS_MQTT_PACKET_UNSUBSCRIBE:
+ case AWS_MQTT_PACKET_PUBLISH:
+ case AWS_MQTT_PACKET_PUBREL:
+ return true;
+
+ case AWS_MQTT_PACKET_CONNECT:
+ case AWS_MQTT_PACKET_CONNACK:
+ case AWS_MQTT_PACKET_PUBACK:
+ case AWS_MQTT_PACKET_PUBREC:
+ case AWS_MQTT_PACKET_PUBCOMP:
+ case AWS_MQTT_PACKET_SUBACK:
+ case AWS_MQTT_PACKET_UNSUBACK:
+ case AWS_MQTT_PACKET_PINGREQ:
+ case AWS_MQTT_PACKET_PINGRESP:
+ case AWS_MQTT_PACKET_DISCONNECT:
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+int aws_mqtt_fixed_header_encode(struct aws_byte_buf *buf, const struct aws_mqtt_fixed_header *header) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(header);
+
+ /* Check that flags are 0 if they must not be present */
+ if (!aws_mqtt_packet_has_flags(header) && header->flags != 0) {
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS);
+ }
+
+ /* Write packet type and flags */
+ uint8_t byte_1 = (uint8_t)((header->packet_type << 4) | (header->flags & 0xF));
+ if (!aws_byte_buf_write_u8(buf, byte_1)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write remaining length */
+ if (s_encode_remaining_length(buf, header->remaining_length)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_fixed_header_decode(struct aws_byte_cursor *cur, struct aws_mqtt_fixed_header *header) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(header);
+
+ /* Read packet type and flags */
+ uint8_t byte_1 = 0;
+ if (!aws_byte_cursor_read_u8(cur, &byte_1)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ header->packet_type = aws_mqtt_get_packet_type(&byte_1);
+ header->flags = byte_1 & 0xF;
+
+ /* Read remaining length */
+ if (s_decode_remaining_length(cur, &header->remaining_length)) {
+ return AWS_OP_ERR;
+ }
+ if (cur->len < header->remaining_length) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Check that flags are 0 if they must not be present */
+ if (!aws_mqtt_packet_has_flags(header) && header->flags != 0) {
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS);
+ }
+
+ return AWS_OP_SUCCESS;
+}
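+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It round-trips a PINGREQ fixed header through the encoder and decoder above;
+ * the wire form is exactly two bytes: 0xC0 (packet type 12, no flags) and 0x00
+ * (remaining length).
+ */
+static int s_fixed_header_roundtrip_example(void) {
+    struct aws_mqtt_fixed_header header = {
+        .packet_type = AWS_MQTT_PACKET_PINGREQ,
+        .flags = 0,
+        .remaining_length = 0,
+    };
+
+    uint8_t storage[2];
+    struct aws_byte_buf buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
+    if (aws_mqtt_fixed_header_encode(&buf, &header)) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_byte_cursor cur = aws_byte_cursor_from_buf(&buf);
+    struct aws_mqtt_fixed_header decoded;
+    AWS_ZERO_STRUCT(decoded);
+    return aws_mqtt_fixed_header_decode(&cur, &decoded); /* decoded.packet_type == AWS_MQTT_PACKET_PINGREQ */
+}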
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c b/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
new file mode 100644
index 0000000000..fd39a82747
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/mqtt.c
@@ -0,0 +1,282 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/mqtt.h>
+
+#include <aws/io/logging.h>
+
+#ifdef AWS_MQTT_WITH_WEBSOCKETS
+# include <aws/http/http.h>
+#endif
+
+/*******************************************************************************
+ * Topic Validation
+ ******************************************************************************/
+
+static bool s_is_valid_topic(const struct aws_byte_cursor *topic, bool is_filter) {
+
+    /* [MQTT-4.7.3-1] Check existence and length */
+ if (!topic->ptr || !topic->len) {
+ return false;
+ }
+
+ /* [MQTT-4.7.3-2] Check for the null character */
+ if (memchr(topic->ptr, 0, topic->len)) {
+ return false;
+ }
+
+ /* [MQTT-4.7.3-3] Topic must not be too long */
+ if (topic->len > 65535) {
+ return false;
+ }
+
+ bool saw_hash = false;
+
+ struct aws_byte_cursor topic_part;
+ AWS_ZERO_STRUCT(topic_part);
+ while (aws_byte_cursor_next_split(topic, '/', &topic_part)) {
+
+ if (saw_hash) {
+ /* [MQTT-4.7.1-2] If last part was a '#' and there's still another part, it's an invalid topic */
+ return false;
+ }
+
+ if (topic_part.len == 0) {
+ /* 0 length parts are fine */
+ continue;
+ }
+
+ /* Check single level wildcard */
+ if (memchr(topic_part.ptr, '+', topic_part.len)) {
+ if (!is_filter) {
+ /* [MQTT-4.7.1-3] + only allowed on filters */
+ return false;
+ }
+ if (topic_part.len > 1) {
+ /* topic part must be 1 character long */
+ return false;
+ }
+ }
+
+ /* Check multi level wildcard */
+ if (memchr(topic_part.ptr, '#', topic_part.len)) {
+ if (!is_filter) {
+ /* [MQTT-4.7.1-2] # only allowed on filters */
+ return false;
+ }
+ if (topic_part.len > 1) {
+ /* topic part must be 1 character long */
+ return false;
+ }
+ saw_hash = true;
+ }
+ }
+
+ return true;
+}
+
+bool aws_mqtt_is_valid_topic(const struct aws_byte_cursor *topic) {
+
+ return s_is_valid_topic(topic, false);
+}
+bool aws_mqtt_is_valid_topic_filter(const struct aws_byte_cursor *topic_filter) {
+
+ return s_is_valid_topic(topic_filter, true);
+}
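+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It exercises the validation rules above: wildcards are rejected in plain topic
+ * names but accepted in filters, and '#' may only appear as the final level.
+ * The topic strings are arbitrary. Returns true when all four checks hold.
+ */
+static bool s_topic_validation_example(void) {
+    struct aws_byte_cursor plain = aws_byte_cursor_from_c_str("sensors/room1/temp");
+    struct aws_byte_cursor filter = aws_byte_cursor_from_c_str("sensors/+/temp");
+    struct aws_byte_cursor bad_filter = aws_byte_cursor_from_c_str("sensors/#/temp");
+
+    return aws_mqtt_is_valid_topic(&plain)                  /* ordinary topic name: valid */
+           && !aws_mqtt_is_valid_topic(&filter)             /* '+' is rejected outside filters [MQTT-4.7.1-3] */
+           && aws_mqtt_is_valid_topic_filter(&filter)       /* single-level wildcard filter: valid */
+           && !aws_mqtt_is_valid_topic_filter(&bad_filter); /* '#' must be the last level [MQTT-4.7.1-2] */
+}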
+
+/*******************************************************************************
+ * Library Init
+ ******************************************************************************/
+
+#define AWS_DEFINE_ERROR_INFO_MQTT(C, ES) AWS_DEFINE_ERROR_INFO(C, ES, "libaws-c-mqtt")
+/* clang-format off */
+ static struct aws_error_info s_errors[] = {
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_INVALID_RESERVED_BITS,
+ "Bits marked as reserved in the MQTT spec were incorrectly set."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_BUFFER_TOO_BIG,
+ "[MQTT-1.5.3] Encoded UTF-8 buffers may be no bigger than 65535 bytes."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH,
+ "[MQTT-2.2.3] Encoded remaining length field is malformed."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME,
+ "[MQTT-3.1.2-1] Protocol name specified is unsupported."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_LEVEL,
+ "[MQTT-3.1.2-2] Protocol level specified is unsupported."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_INVALID_CREDENTIALS,
+ "[MQTT-3.1.2-21] Connect packet may not include password when no username is present."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_INVALID_QOS,
+ "Both bits in a QoS field must not be set."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_INVALID_PACKET_TYPE,
+ "Packet type in packet fixed header is invalid."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_INVALID_TOPIC,
+ "Topic or filter is invalid."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_TIMEOUT,
+ "Time limit between request and response has been exceeded."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_PROTOCOL_ERROR,
+ "Protocol error occurred."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_NOT_CONNECTED,
+ "The requested operation is invalid as the connection is not open."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_ALREADY_CONNECTED,
+ "The requested operation is invalid as the connection is already open."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_BUILT_WITHOUT_WEBSOCKETS,
+ "Library built without MQTT_WITH_WEBSOCKETS option."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_UNEXPECTED_HANGUP,
+ "The connection was closed unexpectedly."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_CONNECTION_SHUTDOWN,
+ "MQTT operation interrupted by connection shutdown."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_CONNECTION_DESTROYED,
+ "Connection has started destroying process, all uncompleted requests will fail."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_CONNECTION_DISCONNECTING,
+ "Connection is disconnecting, it's not safe to do this operation until the connection finishes shutdown."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION,
+ "Old requests from the previous session are cancelled, and offline request will not be accept."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT_QUEUE_FULL,
+ "MQTT request queue is full."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION,
+ "Invalid mqtt5 client options value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION,
+ "Invalid mqtt5 connect packet options value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION,
+ "Invalid mqtt5 disconnect packet options value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION,
+ "Invalid mqtt5 publish packet options value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION,
+ "Invalid mqtt5 subscribe packet options value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION,
+ "Invalid mqtt5 unsubscribe packet options value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION,
+ "Invalid mqtt5 user property value."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_PACKET_VALIDATION,
+ "General mqtt5 packet validation error"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_ENCODE_FAILURE,
+ "Error occurred while encoding an outgoing mqtt5 packet"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR,
+ "Mqtt5 decoder received an invalid packet that broke mqtt5 protocol rules"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED,
+ "Remote endpoint rejected the CONNECT attempt by returning an unsuccessful CONNACK"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_CONNACK_TIMEOUT,
+ "Remote endpoint did not respond to a CONNECT request before timeout exceeded"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT,
+ "Remote endpoint did not respond to a PINGREQ before timeout exceeded"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_USER_REQUESTED_STOP,
+ "Mqtt5 client connection interrupted by user request."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_DISCONNECT_RECEIVED,
+ "Mqtt5 client connection interrupted by server DISCONNECT."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_CLIENT_TERMINATED,
+ "Mqtt5 client terminated by user request."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY,
+ "Mqtt5 operation failed due to a disconnection event in conjunction with the client's offline queue retention policy."),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE,
+ "Unsupported packet type for encode size calculation"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE,
+ "Error while processing mqtt5 operational state"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS,
+ "Incoming publish contained an invalid (too large or unknown) topic alias"),
+ AWS_DEFINE_ERROR_INFO_MQTT(
+ AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS,
+ "Outgoing publish contained an invalid (too large or unknown) topic alias"),
+ };
+/* clang-format on */
+#undef AWS_DEFINE_ERROR_INFO_MQTT
+
+static struct aws_error_info_list s_error_list = {
+ .error_list = s_errors,
+ .count = AWS_ARRAY_SIZE(s_errors),
+};
+
+/* clang-format off */
+ static struct aws_log_subject_info s_logging_subjects[] = {
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT_GENERAL, "mqtt", "Misc MQTT logging"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT_CLIENT, "mqtt-client", "MQTT client and connections"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT_TOPIC_TREE, "mqtt-topic-tree", "MQTT subscription tree"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_GENERAL, "mqtt5-general", "Misc MQTT5 logging"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_CLIENT, "mqtt5-client", "MQTT5 client and connections"),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_MQTT5_CANARY, "mqtt5-canary", "MQTT5 canary logging"),
+ };
+/* clang-format on */
+
+static struct aws_log_subject_info_list s_logging_subjects_list = {
+ .subject_list = s_logging_subjects,
+ .count = AWS_ARRAY_SIZE(s_logging_subjects),
+};
+
+static bool s_mqtt_library_initialized = false;
+
+void aws_mqtt_library_init(struct aws_allocator *allocator) {
+
+ (void)allocator;
+
+ if (!s_mqtt_library_initialized) {
+ s_mqtt_library_initialized = true;
+ aws_io_library_init(allocator);
+#ifdef AWS_MQTT_WITH_WEBSOCKETS
+ aws_http_library_init(allocator);
+#endif
+ aws_register_error_info(&s_error_list);
+ aws_register_log_subject_info_list(&s_logging_subjects_list);
+ }
+}
+
+void aws_mqtt_library_clean_up(void) {
+ if (s_mqtt_library_initialized) {
+ s_mqtt_library_initialized = false;
+ aws_thread_join_all_managed();
+ aws_unregister_error_info(&s_error_list);
+ aws_unregister_log_subject_info_list(&s_logging_subjects_list);
+#ifdef AWS_MQTT_WITH_WEBSOCKETS
+ aws_http_library_clean_up();
+#endif
+ aws_io_library_clean_up();
+ }
+}
+
+void aws_mqtt_fatal_assert_library_initialized(void) {
+ if (!s_mqtt_library_initialized) {
+ AWS_LOGF_FATAL(
+ AWS_LS_MQTT_GENERAL,
+ "aws_mqtt_library_init() must be called before using any functionality in aws-c-mqtt.");
+
+ AWS_FATAL_ASSERT(s_mqtt_library_initialized);
+ }
+}
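+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * Typical embedding of the lifecycle functions above: initialize once before
+ * creating any client or connection, and tear down once after they are gone.
+ */
+static void s_library_lifecycle_example(void) {
+    struct aws_allocator *allocator = aws_default_allocator();
+
+    aws_mqtt_library_init(allocator);            /* registers error codes and log subjects */
+    aws_mqtt_fatal_assert_library_initialized(); /* no-op once init has run */
+
+    /* ... create clients, make connections, publish, subscribe ... */
+
+    aws_mqtt_library_clean_up(); /* joins managed threads and unregisters everything */
+}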
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/packets.c b/contrib/restricted/aws/aws-c-mqtt/source/packets.c
new file mode 100644
index 0000000000..7af2af2673
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/packets.c
@@ -0,0 +1,1142 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/packets.h>
+
+enum { S_PROTOCOL_LEVEL = 4 };
+enum { S_BIT_1_FLAGS = 0x2 };
+
+static struct aws_byte_cursor s_protocol_name = {
+ .ptr = (uint8_t *)"MQTT",
+ .len = 4,
+};
+
+static size_t s_sizeof_encoded_buffer(struct aws_byte_cursor *buf) {
+ return sizeof(uint16_t) + buf->len;
+}
+
+static int s_encode_buffer(struct aws_byte_buf *buf, const struct aws_byte_cursor cur) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&cur));
+
+ /* Make sure the buffer isn't too big */
+ if (cur.len > UINT16_MAX) {
+ return aws_raise_error(AWS_ERROR_MQTT_BUFFER_TOO_BIG);
+ }
+
+ /* Write the length */
+ if (!aws_byte_buf_write_be16(buf, (uint16_t)cur.len)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write the data */
+ if (!aws_byte_buf_write(buf, cur.ptr, cur.len)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_decode_buffer(struct aws_byte_cursor *cur, struct aws_byte_cursor *buf) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(buf);
+
+ /* Read the length */
+ uint16_t len;
+ if (!aws_byte_cursor_read_be16(cur, &len)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Store the data */
+ *buf = aws_byte_cursor_advance(cur, len);
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Ack without payload */
+
+static void s_ack_init(struct aws_mqtt_packet_ack *packet, enum aws_mqtt_packet_type type, uint16_t packet_identifier) {
+
+ AWS_PRECONDITION(packet);
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = type;
+ packet->fixed_header.remaining_length = sizeof(uint16_t);
+
+ packet->packet_identifier = packet_identifier;
+}
+
+int aws_mqtt_packet_ack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_ack *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Write packet identifier */
+ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_ack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_ack *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Validate flags */
+ if (packet->fixed_header.flags != (aws_mqtt_packet_has_flags(&packet->fixed_header) ? S_BIT_1_FLAGS : 0U)) {
+
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS);
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Read packet identifier */
+ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Connect */
+
+int aws_mqtt_packet_connect_init(
+ struct aws_mqtt_packet_connect *packet,
+ struct aws_byte_cursor client_identifier,
+ bool clean_session,
+ uint16_t keep_alive) {
+
+ AWS_PRECONDITION(packet);
+ AWS_PRECONDITION(client_identifier.len > 0);
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = AWS_MQTT_PACKET_CONNECT;
+ /* [MQTT-3.1.1] */
+ packet->fixed_header.remaining_length = 10 + s_sizeof_encoded_buffer(&client_identifier);
+
+ packet->client_identifier = client_identifier;
+ packet->clean_session = clean_session;
+ packet->keep_alive_timeout = keep_alive;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connect_add_credentials(
+ struct aws_mqtt_packet_connect *packet,
+ struct aws_byte_cursor username,
+ struct aws_byte_cursor password) {
+
+ AWS_PRECONDITION(packet);
+ AWS_PRECONDITION(username.len > 0);
+
+ if (!packet->has_username) {
+        /* If there was no username before, add the size of the length field */
+ packet->fixed_header.remaining_length += 2;
+ }
+
+ /* Add change in size to remaining_length */
+ packet->fixed_header.remaining_length += username.len - packet->username.len;
+ packet->has_username = true;
+
+ packet->username = username;
+
+ if (password.len > 0) {
+
+ if (!packet->has_password) {
+            /* If there was no password before, add the size of the length field */
+ packet->fixed_header.remaining_length += 2;
+ }
+
+ /* Add change in size to remaining_length */
+ packet->fixed_header.remaining_length += password.len - packet->password.len;
+ packet->has_password = true;
+
+ packet->password = password;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connect_add_will(
+ struct aws_mqtt_packet_connect *packet,
+ struct aws_byte_cursor topic,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ struct aws_byte_cursor payload) {
+
+ packet->has_will = true;
+ packet->will_topic = topic;
+ packet->will_qos = qos;
+ packet->will_retain = retain;
+ packet->will_message = payload;
+
+ packet->fixed_header.remaining_length += s_sizeof_encoded_buffer(&topic) + s_sizeof_encoded_buffer(&payload);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connect_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connect *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /* Do validation */
+ if (packet->has_password && !packet->has_username) {
+
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_CREDENTIALS);
+ }
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Write protocol name */
+ if (s_encode_buffer(buf, s_protocol_name)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write protocol level */
+ if (!aws_byte_buf_write_u8(buf, S_PROTOCOL_LEVEL)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write connect flags [MQTT-3.1.2.3] */
+ uint8_t connect_flags = (uint8_t)(
+ packet->clean_session << 1 | packet->has_will << 2 | packet->will_qos << 3 | packet->will_retain << 5 |
+ packet->has_password << 6 | packet->has_username << 7);
+
+ if (!aws_byte_buf_write_u8(buf, connect_flags)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write keep alive */
+ if (!aws_byte_buf_write_be16(buf, packet->keep_alive_timeout)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /*************************************************************************/
+ /* Payload */
+
+ /* Client identifier is required, write it */
+ if (s_encode_buffer(buf, packet->client_identifier)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Write will */
+ if (packet->has_will) {
+ if (s_encode_buffer(buf, packet->will_topic)) {
+ return AWS_OP_ERR;
+ }
+ if (s_encode_buffer(buf, packet->will_message)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* Write username */
+ if (packet->has_username) {
+ if (s_encode_buffer(buf, packet->username)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* Write password */
+ if (packet->has_password) {
+ if (s_encode_buffer(buf, packet->password)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connect_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connect *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Check protocol name */
+ struct aws_byte_cursor protocol_name = {
+ .ptr = NULL,
+ .len = 0,
+ };
+ if (s_decode_buffer(cur, &protocol_name)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ AWS_ASSERT(protocol_name.ptr && protocol_name.len);
+ if (protocol_name.len != s_protocol_name.len) {
+ return aws_raise_error(AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME);
+ }
+ if (memcmp(protocol_name.ptr, s_protocol_name.ptr, s_protocol_name.len) != 0) {
+ return aws_raise_error(AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_NAME);
+ }
+
+ /* Check protocol level */
+ struct aws_byte_cursor protocol_level = aws_byte_cursor_advance(cur, 1);
+ if (protocol_level.len == 0) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ if (*protocol_level.ptr != S_PROTOCOL_LEVEL) {
+ return aws_raise_error(AWS_ERROR_MQTT_UNSUPPORTED_PROTOCOL_LEVEL);
+ }
+
+ /* Read connect flags [MQTT-3.1.2.3] */
+ uint8_t connect_flags = 0;
+ if (!aws_byte_cursor_read_u8(cur, &connect_flags)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ packet->clean_session = (connect_flags >> 1) & 0x1;
+ packet->has_will = (connect_flags >> 2) & 0x1;
+ packet->will_qos = (connect_flags >> 3) & 0x3;
+ packet->will_retain = (connect_flags >> 5) & 0x1;
+ packet->has_password = (connect_flags >> 6) & 0x1;
+ packet->has_username = (connect_flags >> 7) & 0x1;
+
+ /* Read keep alive */
+ if (!aws_byte_cursor_read_be16(cur, &packet->keep_alive_timeout)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /*************************************************************************/
+ /* Payload */
+
+    /* Client identifier is required, read it */
+ if (s_decode_buffer(cur, &packet->client_identifier)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Read will */
+ if (packet->has_will) {
+ if (s_decode_buffer(cur, &packet->will_topic)) {
+ return AWS_OP_ERR;
+ }
+ if (s_decode_buffer(cur, &packet->will_message)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* Read username */
+ if (packet->has_username) {
+ if (s_decode_buffer(cur, &packet->username)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* Read password */
+ if (packet->has_password) {
+ if (s_decode_buffer(cur, &packet->password)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ /* Do validation */
+ if (packet->has_password && !packet->has_username) {
+
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_CREDENTIALS);
+ }
+
+ return AWS_OP_SUCCESS;
+}
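+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It builds a minimal CONNECT packet (clean session, 60 second keep-alive, a
+ * username but no password) and encodes it into a stack buffer; the identifiers
+ * "example-client" and "user" are arbitrary placeholders.
+ */
+static int s_connect_encode_example(void) {
+    struct aws_mqtt_packet_connect connect;
+    if (aws_mqtt_packet_connect_init(
+            &connect, aws_byte_cursor_from_c_str("example-client"), true /*clean_session*/, 60 /*keep_alive*/)) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_byte_cursor no_password;
+    AWS_ZERO_STRUCT(no_password);
+    if (aws_mqtt_packet_connect_add_credentials(&connect, aws_byte_cursor_from_c_str("user"), no_password)) {
+        return AWS_OP_ERR;
+    }
+
+    uint8_t storage[64];
+    struct aws_byte_buf buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
+    return aws_mqtt_packet_connect_encode(&buf, &connect); /* buf now holds the wire bytes */
+}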
+
+/*****************************************************************************/
+/* Connack */
+
+int aws_mqtt_packet_connack_init(
+ struct aws_mqtt_packet_connack *packet,
+ bool session_present,
+ enum aws_mqtt_connect_return_code return_code) {
+
+ AWS_PRECONDITION(packet);
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = AWS_MQTT_PACKET_CONNACK;
+ packet->fixed_header.remaining_length = 1 + sizeof(packet->connect_return_code);
+
+ packet->session_present = session_present;
+ packet->connect_return_code = (uint8_t)return_code;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connack_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connack *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+    /* Write connack flags */
+ uint8_t connack_flags = packet->session_present & 0x1;
+ if (!aws_byte_buf_write_u8(buf, connack_flags)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+    /* Write return code */
+ if (!aws_byte_buf_write_u8(buf, packet->connect_return_code)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connack_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connack *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Read connack flags */
+ uint8_t connack_flags = 0;
+ if (!aws_byte_cursor_read_u8(cur, &connack_flags)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ packet->session_present = connack_flags & 0x1;
+
+ /* Read return code */
+ if (!aws_byte_cursor_read_u8(cur, &packet->connect_return_code)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Publish */
+
+int aws_mqtt_packet_publish_init(
+ struct aws_mqtt_packet_publish *packet,
+ bool retain,
+ enum aws_mqtt_qos qos,
+ bool dup,
+ struct aws_byte_cursor topic_name,
+ uint16_t packet_identifier,
+ struct aws_byte_cursor payload) {
+
+ AWS_PRECONDITION(packet);
+ AWS_FATAL_PRECONDITION(topic_name.len > 0); /* [MQTT-4.7.3-1] */
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = AWS_MQTT_PACKET_PUBLISH;
+ packet->fixed_header.remaining_length = s_sizeof_encoded_buffer(&topic_name) + payload.len;
+
+ if (qos > 0) {
+ packet->fixed_header.remaining_length += sizeof(packet->packet_identifier);
+ }
+
+ /* [MQTT-2.2.2] */
+ uint8_t publish_flags = (uint8_t)((retain & 0x1) | (qos & 0x3) << 1 | (dup & 0x1) << 3);
+ packet->fixed_header.flags = publish_flags;
+
+ packet->topic_name = topic_name;
+ packet->packet_identifier = packet_identifier;
+ packet->payload = payload;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_publish_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet) {
+
+ if (aws_mqtt_packet_publish_encode_headers(buf, packet)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Payload */
+
+ if (!aws_byte_buf_write(buf, packet->payload.ptr, packet->payload.len)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_publish_encode_headers(struct aws_byte_buf *buf, const struct aws_mqtt_packet_publish *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Write topic name */
+ if (s_encode_buffer(buf, packet->topic_name)) {
+ return AWS_OP_ERR;
+ }
+
+ enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(packet);
+ if (qos > 0) {
+ /* Write packet identifier */
+ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_publish_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_publish *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Read topic name */
+ if (s_decode_buffer(cur, &packet->topic_name)) {
+ return AWS_OP_ERR;
+ }
+
+ size_t payload_size = packet->fixed_header.remaining_length - s_sizeof_encoded_buffer(&packet->topic_name);
+
+ /* Read QoS */
+ enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(packet);
+ if (qos > 2) {
+ return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR);
+ }
+
+ /* Read packet identifier */
+ if (qos > 0) {
+ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ payload_size -= sizeof(packet->packet_identifier);
+ } else {
+ packet->packet_identifier = 0;
+ }
+
+ /*************************************************************************/
+ /* Payload */
+ packet->payload = aws_byte_cursor_advance(cur, payload_size);
+ if (packet->payload.len != payload_size) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+bool aws_mqtt_packet_publish_get_dup(const struct aws_mqtt_packet_publish *packet) {
+ return packet->fixed_header.flags & (1 << 3); /* bit 3 */
+}
+
+enum aws_mqtt_qos aws_mqtt_packet_publish_get_qos(const struct aws_mqtt_packet_publish *packet) {
+ return (packet->fixed_header.flags >> 1) & 0x3; /* bits 2,1 */
+}
+
+bool aws_mqtt_packet_publish_get_retain(const struct aws_mqtt_packet_publish *packet) {
+ return packet->fixed_header.flags & 0x1; /* bit 0 */
+}
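+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It shows how the [MQTT-2.2.2] flag bits round-trip through the init call and
+ * the getters above: RETAIN lands in bit 0, QoS in bits 1-2, DUP in bit 3, so
+ * the flags byte here is 0x03. The topic, packet id and payload are arbitrary.
+ */
+static bool s_publish_flags_example(void) {
+    struct aws_mqtt_packet_publish publish;
+    if (aws_mqtt_packet_publish_init(
+            &publish,
+            true /*retain*/,
+            AWS_MQTT_QOS_AT_LEAST_ONCE,
+            false /*dup*/,
+            aws_byte_cursor_from_c_str("sensors/room1/temp"),
+            42 /*packet_identifier*/,
+            aws_byte_cursor_from_c_str("21.5"))) {
+        return false;
+    }
+
+    return aws_mqtt_packet_publish_get_retain(&publish) &&
+           aws_mqtt_packet_publish_get_qos(&publish) == AWS_MQTT_QOS_AT_LEAST_ONCE &&
+           !aws_mqtt_packet_publish_get_dup(&publish);
+}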
+
+/*****************************************************************************/
+/* Puback */
+
+int aws_mqtt_packet_puback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) {
+
+ s_ack_init(packet, AWS_MQTT_PACKET_PUBACK, packet_identifier);
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Pubrec */
+
+int aws_mqtt_packet_pubrec_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) {
+
+ s_ack_init(packet, AWS_MQTT_PACKET_PUBREC, packet_identifier);
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Pubrel */
+
+int aws_mqtt_packet_pubrel_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) {
+
+ s_ack_init(packet, AWS_MQTT_PACKET_PUBREL, packet_identifier);
+ packet->fixed_header.flags = S_BIT_1_FLAGS;
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Pubcomp */
+
+int aws_mqtt_packet_pubcomp_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) {
+
+ s_ack_init(packet, AWS_MQTT_PACKET_PUBCOMP, packet_identifier);
+
+ return AWS_OP_SUCCESS;
+}
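+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It round-trips a PUBACK through the shared ack encoder/decoder defined earlier
+ * in this file; the wire form is four bytes: 0x40, 0x02, then the big-endian
+ * packet identifier.
+ */
+static int s_puback_roundtrip_example(void) {
+    struct aws_mqtt_packet_ack puback;
+    if (aws_mqtt_packet_puback_init(&puback, 42 /*packet_identifier*/)) {
+        return AWS_OP_ERR;
+    }
+
+    uint8_t storage[4];
+    struct aws_byte_buf buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
+    if (aws_mqtt_packet_ack_encode(&buf, &puback)) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_byte_cursor cur = aws_byte_cursor_from_buf(&buf);
+    struct aws_mqtt_packet_ack decoded;
+    AWS_ZERO_STRUCT(decoded);
+    return aws_mqtt_packet_ack_decode(&cur, &decoded); /* decoded.packet_identifier == 42 */
+}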
+
+/*****************************************************************************/
+/* Subscribe */
+
+int aws_mqtt_packet_subscribe_init(
+ struct aws_mqtt_packet_subscribe *packet,
+ struct aws_allocator *allocator,
+ uint16_t packet_identifier) {
+
+ AWS_PRECONDITION(packet);
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = AWS_MQTT_PACKET_SUBSCRIBE;
+ packet->fixed_header.flags = S_BIT_1_FLAGS;
+ packet->fixed_header.remaining_length = sizeof(uint16_t);
+
+ packet->packet_identifier = packet_identifier;
+
+ if (aws_array_list_init_dynamic(&packet->topic_filters, allocator, 1, sizeof(struct aws_mqtt_subscription))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt_packet_subscribe_clean_up(struct aws_mqtt_packet_subscribe *packet) {
+
+ AWS_PRECONDITION(packet);
+
+ aws_array_list_clean_up(&packet->topic_filters);
+
+ AWS_ZERO_STRUCT(*packet);
+}
+
+int aws_mqtt_packet_subscribe_add_topic(
+ struct aws_mqtt_packet_subscribe *packet,
+ struct aws_byte_cursor topic_filter,
+ enum aws_mqtt_qos qos) {
+
+ AWS_PRECONDITION(packet);
+
+ /* Add to the array list */
+ struct aws_mqtt_subscription subscription;
+ subscription.topic_filter = topic_filter;
+ subscription.qos = qos;
+ if (aws_array_list_push_back(&packet->topic_filters, &subscription)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Add to the remaining length */
+ packet->fixed_header.remaining_length += s_sizeof_encoded_buffer(&topic_filter) + 1;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_subscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_subscribe *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Write packet identifier */
+ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write topic filters */
+ const size_t num_filters = aws_array_list_length(&packet->topic_filters);
+ for (size_t i = 0; i < num_filters; ++i) {
+
+ struct aws_mqtt_subscription *subscription;
+ if (aws_array_list_get_at_ptr(&packet->topic_filters, (void **)&subscription, i)) {
+
+ return AWS_OP_ERR;
+ }
+ s_encode_buffer(buf, subscription->topic_filter);
+
+ uint8_t eos_byte = subscription->qos & 0x3;
+ if (!aws_byte_buf_write_u8(buf, eos_byte)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_subscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_subscribe *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ if (packet->fixed_header.remaining_length < sizeof(uint16_t)) {
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH);
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Read packet identifier */
+ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Read topic filters */
+ size_t remaining_length = packet->fixed_header.remaining_length - sizeof(uint16_t);
+ while (remaining_length) {
+
+ struct aws_mqtt_subscription subscription = {
+ .topic_filter = {.ptr = NULL, .len = 0},
+ .qos = 0,
+ };
+ if (s_decode_buffer(cur, &subscription.topic_filter)) {
+ return AWS_OP_ERR;
+ }
+
+ uint8_t eos_byte = 0;
+ if (!aws_byte_cursor_read_u8(cur, &eos_byte)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ if ((eos_byte >> 2) != 0) {
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS);
+ }
+ if (eos_byte == 0x3) {
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_QOS);
+ }
+ subscription.qos = eos_byte & 0x3;
+
+ aws_array_list_push_back(&packet->topic_filters, &subscription);
+
+ remaining_length -= s_sizeof_encoded_buffer(&subscription.topic_filter) + 1;
+ }
+
+ return AWS_OP_SUCCESS;
+}
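+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * It assembles a SUBSCRIBE packet with a single topic filter and encodes it into
+ * a stack buffer; the filter string and packet identifier are arbitrary.
+ */
+static int s_subscribe_encode_example(struct aws_allocator *allocator) {
+    struct aws_mqtt_packet_subscribe subscribe;
+    if (aws_mqtt_packet_subscribe_init(&subscribe, allocator, 7 /*packet_identifier*/)) {
+        return AWS_OP_ERR;
+    }
+
+    uint8_t storage[64];
+    struct aws_byte_buf buf = aws_byte_buf_from_empty_array(storage, sizeof(storage));
+
+    int result = aws_mqtt_packet_subscribe_add_topic(
+        &subscribe, aws_byte_cursor_from_c_str("sensors/+/temp"), AWS_MQTT_QOS_AT_LEAST_ONCE);
+    if (result == AWS_OP_SUCCESS) {
+        result = aws_mqtt_packet_subscribe_encode(&buf, &subscribe);
+    }
+
+    aws_mqtt_packet_subscribe_clean_up(&subscribe);
+    return result;
+}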
+
+/*****************************************************************************/
+/* Suback */
+
+int aws_mqtt_packet_suback_init(
+ struct aws_mqtt_packet_suback *packet,
+ struct aws_allocator *allocator,
+ uint16_t packet_identifier) {
+
+ AWS_PRECONDITION(packet);
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = AWS_MQTT_PACKET_SUBACK;
+ packet->fixed_header.remaining_length = sizeof(uint16_t);
+
+ packet->packet_identifier = packet_identifier;
+
+ if (aws_array_list_init_dynamic(&packet->return_codes, allocator, 1, sizeof(uint8_t))) {
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt_packet_suback_clean_up(struct aws_mqtt_packet_suback *packet) {
+
+ AWS_PRECONDITION(packet);
+
+ aws_array_list_clean_up(&packet->return_codes);
+
+ AWS_ZERO_STRUCT(*packet);
+}
+
+static bool s_return_code_check(uint8_t return_code) {
+ if (return_code != AWS_MQTT_QOS_FAILURE && return_code != AWS_MQTT_QOS_AT_MOST_ONCE &&
+ return_code != AWS_MQTT_QOS_AT_LEAST_ONCE && return_code != AWS_MQTT_QOS_EXACTLY_ONCE) {
+ return false;
+ }
+ return true;
+}
+
+int aws_mqtt_packet_suback_add_return_code(struct aws_mqtt_packet_suback *packet, uint8_t return_code) {
+
+ AWS_PRECONDITION(packet);
+ if (!(s_return_code_check(return_code))) {
+ return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR);
+ }
+
+ /* Add to the array list */
+ if (aws_array_list_push_back(&packet->return_codes, &return_code)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Add to the remaining length, each return code takes one byte */
+ packet->fixed_header.remaining_length += 1;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_suback_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_suback *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Write packet identifier */
+ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /*************************************************************************/
+ /* Payload */
+
+    /* Write return codes */
+ const size_t num_filters = aws_array_list_length(&packet->return_codes);
+ for (size_t i = 0; i < num_filters; ++i) {
+
+ uint8_t return_code = 0;
+ if (aws_array_list_get_at(&packet->return_codes, (void *)&return_code, i)) {
+ return AWS_OP_ERR;
+ }
+ if (!aws_byte_buf_write_u8(buf, return_code)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ }
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_suback_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_suback *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Validate flags */
+ if (packet->fixed_header.flags != (aws_mqtt_packet_has_flags(&packet->fixed_header) ? S_BIT_1_FLAGS : 0U)) {
+
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_RESERVED_BITS);
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Read packet identifier */
+ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /*************************************************************************/
+ /* Payload */
+
+ /* Read return codes */
+ size_t remaining_length = packet->fixed_header.remaining_length - sizeof(uint16_t);
+ while (remaining_length) {
+
+ uint8_t return_code = 0;
+ if (!aws_byte_cursor_read_u8(cur, &return_code)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ if (!(s_return_code_check(return_code))) {
+ return aws_raise_error(AWS_ERROR_MQTT_PROTOCOL_ERROR);
+ }
+
+ aws_array_list_push_back(&packet->return_codes, &return_code);
+
+ remaining_length -= 1;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Unsubscribe */
+
+int aws_mqtt_packet_unsubscribe_init(
+ struct aws_mqtt_packet_unsubscribe *packet,
+ struct aws_allocator *allocator,
+ uint16_t packet_identifier) {
+
+ AWS_PRECONDITION(packet);
+ AWS_PRECONDITION(allocator);
+
+ AWS_ZERO_STRUCT(*packet);
+
+ packet->fixed_header.packet_type = AWS_MQTT_PACKET_UNSUBSCRIBE;
+ packet->fixed_header.flags = S_BIT_1_FLAGS;
+ packet->fixed_header.remaining_length = sizeof(uint16_t);
+
+ packet->packet_identifier = packet_identifier;
+
+ if (aws_array_list_init_dynamic(&packet->topic_filters, allocator, 1, sizeof(struct aws_byte_cursor))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt_packet_unsubscribe_clean_up(struct aws_mqtt_packet_unsubscribe *packet) {
+
+ AWS_PRECONDITION(packet);
+
+ aws_array_list_clean_up(&packet->topic_filters);
+
+ AWS_ZERO_STRUCT(*packet);
+}
+
+int aws_mqtt_packet_unsubscribe_add_topic(
+ struct aws_mqtt_packet_unsubscribe *packet,
+ struct aws_byte_cursor topic_filter) {
+
+ AWS_PRECONDITION(packet);
+
+ /* Add to the array list */
+ if (aws_array_list_push_back(&packet->topic_filters, &topic_filter)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Add to the remaining length */
+ packet->fixed_header.remaining_length += s_sizeof_encoded_buffer(&topic_filter);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_unsubscribe_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_unsubscribe *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Write packet identifier */
+ if (!aws_byte_buf_write_be16(buf, packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Write topic filters */
+ const size_t num_filters = aws_array_list_length(&packet->topic_filters);
+ for (size_t i = 0; i < num_filters; ++i) {
+
+ struct aws_byte_cursor topic_filter = {.ptr = NULL, .len = 0};
+ if (aws_array_list_get_at(&packet->topic_filters, (void *)&topic_filter, i)) {
+
+ return AWS_OP_ERR;
+ }
+ s_encode_buffer(buf, topic_filter);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_unsubscribe_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_unsubscribe *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ if (packet->fixed_header.remaining_length < sizeof(uint16_t)) {
+ return aws_raise_error(AWS_ERROR_MQTT_INVALID_REMAINING_LENGTH);
+ }
+
+ /*************************************************************************/
+ /* Variable Header */
+
+ /* Read packet identifier */
+ if (!aws_byte_cursor_read_be16(cur, &packet->packet_identifier)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+
+ /* Read topic filters */
+ size_t remaining_length = packet->fixed_header.remaining_length - sizeof(uint16_t);
+ while (remaining_length) {
+
+ struct aws_byte_cursor topic_filter;
+ AWS_ZERO_STRUCT(topic_filter);
+ if (s_decode_buffer(cur, &topic_filter)) {
+ return AWS_OP_ERR;
+ }
+
+ aws_array_list_push_back(&packet->topic_filters, &topic_filter);
+
+ remaining_length -= s_sizeof_encoded_buffer(&topic_filter);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Unsuback */
+
+int aws_mqtt_packet_unsuback_init(struct aws_mqtt_packet_ack *packet, uint16_t packet_identifier) {
+
+ s_ack_init(packet, AWS_MQTT_PACKET_UNSUBACK, packet_identifier);
+
+ return AWS_OP_SUCCESS;
+}
+
+/*****************************************************************************/
+/* Ping request/response */
+
+static void s_connection_init(struct aws_mqtt_packet_connection *packet, enum aws_mqtt_packet_type type) {
+
+ AWS_PRECONDITION(packet);
+
+ AWS_ZERO_STRUCT(*packet);
+ packet->fixed_header.packet_type = type;
+}
+
+int aws_mqtt_packet_pingreq_init(struct aws_mqtt_packet_connection *packet) {
+
+ s_connection_init(packet, AWS_MQTT_PACKET_PINGREQ);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_pingresp_init(struct aws_mqtt_packet_connection *packet) {
+
+ s_connection_init(packet, AWS_MQTT_PACKET_PINGRESP);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_disconnect_init(struct aws_mqtt_packet_connection *packet) {
+
+ s_connection_init(packet, AWS_MQTT_PACKET_DISCONNECT);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connection_encode(struct aws_byte_buf *buf, const struct aws_mqtt_packet_connection *packet) {
+
+ AWS_PRECONDITION(buf);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_encode(buf, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_packet_connection_decode(struct aws_byte_cursor *cur, struct aws_mqtt_packet_connection *packet) {
+
+ AWS_PRECONDITION(cur);
+ AWS_PRECONDITION(packet);
+
+ /*************************************************************************/
+ /* Fixed Header */
+
+ if (aws_mqtt_fixed_header_decode(cur, &packet->fixed_header)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c b/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
new file mode 100644
index 0000000000..8d26079959
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/shared_constants.c
@@ -0,0 +1,22 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/shared_constants.h>
+
+#include <aws/http/request_response.h>
+
+/*
+ * These defaults were chosen because they're common in other MQTT libraries.
+ * The user can modify the request in their transform callback if they need to.
+ */
+static const struct aws_byte_cursor s_websocket_handshake_default_path = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/mqtt");
+const struct aws_byte_cursor *g_websocket_handshake_default_path = &s_websocket_handshake_default_path;
+
+static const struct aws_http_header s_websocket_handshake_default_protocol_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Sec-WebSocket-Protocol"),
+ .value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("mqtt"),
+};
+const struct aws_http_header *g_websocket_handshake_default_protocol_header =
+ &s_websocket_handshake_default_protocol_header;
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c b/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
new file mode 100644
index 0000000000..ca75c9028e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/topic_tree.c
@@ -0,0 +1,929 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/topic_tree.h>
+
+#include <aws/io/logging.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/task_scheduler.h>
+
+#ifdef _MSC_VER
+/* disables MSVC warning C4204: nonstandard extension used, non-constant aggregate initializer */
+# pragma warning(disable : 4204)
+#endif /* _MSC_VER */
+
+AWS_STATIC_STRING_FROM_LITERAL(s_single_level_wildcard, "+");
+AWS_STATIC_STRING_FROM_LITERAL(s_multi_level_wildcard, "#");
+
+/*******************************************************************************
+ * Transactions
+ ******************************************************************************/
+
+struct topic_tree_action {
+ enum {
+ AWS_MQTT_TOPIC_TREE_ADD,
+ AWS_MQTT_TOPIC_TREE_UPDATE,
+ AWS_MQTT_TOPIC_TREE_REMOVE,
+ } mode;
+
+ /* All Nodes */
+ struct aws_mqtt_topic_node *node_to_update;
+
+ /* ADD/UPDATE */
+ struct aws_byte_cursor topic;
+ const struct aws_string *topic_filter;
+ enum aws_mqtt_qos qos;
+ aws_mqtt_publish_received_fn *callback;
+ aws_mqtt_userdata_cleanup_fn *cleanup;
+ void *userdata;
+
+ /* ADD */
+ struct aws_mqtt_topic_node *last_found;
+ struct aws_mqtt_topic_node *first_created;
+
+ /* REMOVE */
+ struct aws_array_list to_remove; /* topic_tree_node* */
+};
+
+size_t aws_mqtt_topic_tree_action_size = sizeof(struct topic_tree_action);
+
+static struct topic_tree_action *s_topic_tree_action_create(struct aws_array_list *transaction) {
+
+ struct topic_tree_action *action = NULL;
+
+ /* Push an empty action into the transaction and get a pointer to it. */
+ struct topic_tree_action empty_action;
+ AWS_ZERO_STRUCT(empty_action);
+ if (aws_array_list_push_back(transaction, &empty_action)) {
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "Failed to insert action into transaction, array_list_push_back failed");
+ goto push_back_failed;
+ }
+
+ if (aws_array_list_get_at_ptr(transaction, (void **)&action, aws_array_list_length(transaction) - 1)) {
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "Failed to retrieve most recent action from transaction");
+ goto get_at_failed;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "action=%p: Created action", (void *)action);
+
+ return action;
+
+get_at_failed:
+ aws_array_list_pop_back(transaction);
+
+push_back_failed:
+ return NULL;
+}
+
+static void s_topic_tree_action_destroy(struct topic_tree_action *action) {
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "action=%p: Destroying action", (void *)action);
+
+ if (action->mode == AWS_MQTT_TOPIC_TREE_REMOVE) {
+ aws_array_list_clean_up(&action->to_remove);
+ }
+
+ AWS_ZERO_STRUCT(*action);
+}
+
+static int s_topic_tree_action_to_remove(
+ struct topic_tree_action *action,
+ struct aws_allocator *allocator,
+ size_t size_hint) {
+
+ if (action->mode != AWS_MQTT_TOPIC_TREE_REMOVE) {
+ if (aws_array_list_init_dynamic(&action->to_remove, allocator, size_hint, sizeof(void *))) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_TOPIC_TREE, "action=%p: Failed to initialize to_remove list in action", (void *)action);
+ return AWS_OP_ERR;
+ }
+ action->mode = AWS_MQTT_TOPIC_TREE_REMOVE;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static bool byte_cursor_eq(const void *a, const void *b) {
+ const struct aws_byte_cursor *cur_a = a;
+ const struct aws_byte_cursor *cur_b = b;
+
+ return aws_byte_cursor_eq(cur_a, cur_b);
+}
+
+/*******************************************************************************
+ * Init
+ ******************************************************************************/
+
+static struct aws_mqtt_topic_node *s_topic_node_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *topic_filter,
+ const struct aws_string *full_topic) {
+
+ AWS_PRECONDITION(!topic_filter || full_topic);
+
+ struct aws_mqtt_topic_node *node = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_topic_node));
+ if (!node) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "Failed to allocate new topic node");
+ return NULL;
+ }
+
+ if (topic_filter) {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "node=%p: Creating new node with topic filter " PRInSTR,
+ (void *)node,
+ AWS_BYTE_CURSOR_PRI(*topic_filter));
+ }
+
+ if (topic_filter) {
+ node->topic = *topic_filter;
+ node->topic_filter = full_topic;
+ }
+
+ /* Init the sub topics map */
+ if (aws_hash_table_init(&node->subtopics, allocator, 0, aws_hash_byte_cursor_ptr, byte_cursor_eq, NULL, NULL)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_TOPIC_TREE, "node=%p: Failed to initialize subtopics table in topic node", (void *)node);
+ aws_mem_release(allocator, node);
+ return NULL;
+ }
+
+ return node;
+}
+
+static int s_topic_node_destroy_hash_foreach_wrap(void *context, struct aws_hash_element *elem);
+
+static void s_topic_node_destroy(struct aws_mqtt_topic_node *node, struct aws_allocator *allocator) {
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "node=%p: Destroying topic tree node", (void *)node);
+
+ /* Traverse all children and remove */
+ aws_hash_table_foreach(&node->subtopics, s_topic_node_destroy_hash_foreach_wrap, allocator);
+
+ if (node->cleanup && node->userdata) {
+ node->cleanup(node->userdata);
+ }
+
+ if (node->owns_topic_filter) {
+ aws_string_destroy((void *)node->topic_filter);
+ }
+
+ aws_hash_table_clean_up(&node->subtopics);
+ aws_mem_release(allocator, node);
+}
+
+static int s_topic_node_destroy_hash_foreach_wrap(void *context, struct aws_hash_element *elem) {
+
+ s_topic_node_destroy(elem->value, context);
+
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE | AWS_COMMON_HASH_TABLE_ITER_DELETE;
+}
+
+int aws_mqtt_topic_tree_init(struct aws_mqtt_topic_tree *tree, struct aws_allocator *allocator) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(allocator);
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Creating new topic tree", (void *)tree);
+
+ tree->root = s_topic_node_new(allocator, NULL, NULL);
+ if (!tree->root) {
+ /* Error raised by s_topic_node_new */
+ return AWS_OP_ERR;
+ }
+ tree->allocator = allocator;
+
+ return AWS_OP_SUCCESS;
+}
+
+/*******************************************************************************
+ * Clean Up
+ ******************************************************************************/
+
+void aws_mqtt_topic_tree_clean_up(struct aws_mqtt_topic_tree *tree) {
+
+ AWS_PRECONDITION(tree);
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Cleaning up topic tree", (void *)tree);
+
+ if (tree->allocator && tree->root) {
+ s_topic_node_destroy(tree->root, tree->allocator);
+
+ AWS_ZERO_STRUCT(*tree);
+ }
+}
+
+/*******************************************************************************
+ * Iterate
+ ******************************************************************************/
+
+bool s_topic_node_is_subscription(const struct aws_mqtt_topic_node *node) {
+ return node->callback;
+}
+
+struct topic_tree_iterate_context {
+ bool should_continue;
+ aws_mqtt_topic_tree_iterator_fn *iterator;
+ void *user_data;
+};
+
+static int s_topic_tree_iterate_do_recurse(void *context, struct aws_hash_element *current_elem) {
+
+ struct topic_tree_iterate_context *ctx = context;
+ struct aws_mqtt_topic_node *current = current_elem->value;
+
+ if (s_topic_node_is_subscription(current)) {
+ const struct aws_byte_cursor topic_filter = aws_byte_cursor_from_string(current->topic_filter);
+ ctx->should_continue = ctx->iterator(&topic_filter, current->qos, ctx->user_data);
+ }
+
+ if (ctx->should_continue) {
+ aws_hash_table_foreach(&current->subtopics, s_topic_tree_iterate_do_recurse, context);
+ }
+
+ /* One of the children could have updated should_continue, so check again */
+ if (ctx->should_continue) {
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+ }
+
+    /* If the iterator returned false, stop iterating immediately. */
+ return 0;
+}
+
+void aws_mqtt_topic_tree_iterate(
+ const struct aws_mqtt_topic_tree *tree,
+ aws_mqtt_topic_tree_iterator_fn *iterator,
+ void *user_data) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(tree->root);
+ AWS_PRECONDITION(iterator);
+
+ struct topic_tree_iterate_context itr;
+ itr.should_continue = true;
+ itr.iterator = iterator;
+ itr.user_data = user_data;
+
+ aws_hash_table_foreach(&tree->root->subtopics, s_topic_tree_iterate_do_recurse, &itr);
+}
+
+bool s_topic_tree_sub_count_iterator(const struct aws_byte_cursor *topic, enum aws_mqtt_qos qos, void *user_data) {
+ (void)topic;
+ (void)qos;
+ size_t *sub_count = user_data;
+ *sub_count += 1;
+
+ return true;
+}
+
+size_t aws_mqtt_topic_tree_get_sub_count(const struct aws_mqtt_topic_tree *tree) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(tree->root);
+
+ size_t sub_count = 0;
+ aws_mqtt_topic_tree_iterate(tree, s_topic_tree_sub_count_iterator, &sub_count);
+
+ return sub_count;
+}
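+
+/*
+ * Editor's note: illustrative sketch, not part of the upstream aws-c-mqtt sources.
+ * Minimal lifecycle of the tree using only the functions defined so far: init,
+ * count subscriptions (zero on a fresh tree), clean up.
+ */
+static int s_topic_tree_lifecycle_example(struct aws_allocator *allocator) {
+    struct aws_mqtt_topic_tree tree;
+    if (aws_mqtt_topic_tree_init(&tree, allocator)) {
+        return AWS_OP_ERR;
+    }
+
+    size_t sub_count = aws_mqtt_topic_tree_get_sub_count(&tree); /* 0: nothing subscribed yet */
+    (void)sub_count;
+
+    aws_mqtt_topic_tree_clean_up(&tree);
+    return AWS_OP_SUCCESS;
+}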
+
+/*******************************************************************************
+ * Action Commit
+ ******************************************************************************/
+
+/* Searches subtree until a topic_filter with a different pointer value is found. */
+static int s_topic_node_string_finder(void *userdata, struct aws_hash_element *elem) {
+
+ const struct aws_string **topic_filter = userdata;
+ struct aws_mqtt_topic_node *node = elem->value;
+
+    /* We've found this node again, search its children */
+ if (*topic_filter == node->topic_filter) {
+ if (0 == aws_hash_table_get_entry_count(&node->subtopics)) {
+ /* If no children, then there must be siblings, so we can use those */
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+ }
+
+ aws_hash_table_foreach(&node->subtopics, s_topic_node_string_finder, userdata);
+
+ if (*topic_filter == node->topic_filter) {
+ /* If the topic filter still hasn't changed, continue iterating */
+ return AWS_COMMON_HASH_TABLE_ITER_CONTINUE;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, " Found matching topic string, using %s", node->topic_filter->bytes);
+
+ return 0;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, " Found matching topic string, using %s", node->topic_filter->bytes);
+ *topic_filter = node->topic_filter;
+ return 0;
+}
+
+static void s_topic_tree_action_commit(struct topic_tree_action *action, struct aws_mqtt_topic_tree *tree) {
+ (void)tree;
+
+ AWS_PRECONDITION(action->node_to_update);
+
+ switch (action->mode) {
+ case AWS_MQTT_TOPIC_TREE_ADD:
+ case AWS_MQTT_TOPIC_TREE_UPDATE: {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p action=%p: Committing %s topic tree action",
+ (void *)tree,
+ (void *)action,
+ (action->mode == AWS_MQTT_TOPIC_TREE_ADD) ? "add" : "update");
+
+ /* Destroy old userdata */
+ if (action->node_to_update->cleanup && action->node_to_update->userdata) {
+ /* If there was userdata assigned to this node, pass it out. */
+ action->node_to_update->cleanup(action->node_to_update->userdata);
+ }
+
+ /* Update data */
+ action->node_to_update->callback = action->callback;
+ action->node_to_update->cleanup = action->cleanup;
+ action->node_to_update->userdata = action->userdata;
+ action->node_to_update->qos = action->qos;
+ if (action->topic.ptr) {
+ action->node_to_update->topic = action->topic;
+ }
+ if (action->topic_filter) {
+ if (action->node_to_update->owns_topic_filter && action->node_to_update->topic_filter) {
+ /* The topic filter is already there; destroy the new filter to keep all the byte cursors valid */
+ aws_string_destroy((void *)action->topic_filter);
+ } else {
+ action->node_to_update->topic_filter = action->topic_filter;
+ action->node_to_update->owns_topic_filter = true;
+ }
+ }
+ break;
+ }
+
+ case AWS_MQTT_TOPIC_TREE_REMOVE: {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p action=%p: Committing remove topic tree action",
+ (void *)tree,
+ (void *)action);
+
+ struct aws_mqtt_topic_node *current = action->node_to_update;
+ const size_t sub_parts_len = aws_array_list_length(&action->to_remove) - 1;
+
+ if (current) {
+ /* If the node was found, traverse up and remove each node with no sub-topics.
+ * Then update all nodes that were using current's topic_filter for their topic. */
+
+ /* "unsubscribe" current. */
+ if (current->cleanup && current->userdata) {
+ AWS_LOGF_TRACE(AWS_LS_MQTT_TOPIC_TREE, "node=%p: Cleaning up node's userdata", (void *)current);
+
+ /* If there was userdata assigned to this node, pass it out. */
+ current->cleanup(current->userdata);
+ }
+ current->callback = NULL;
+ current->cleanup = NULL;
+ current->userdata = NULL;
+
+ /* Set to true if current needs to be cleaned up. */
+ bool destroy_current = false;
+
+ /* How many nodes are left after the great purge. */
+ size_t nodes_left = sub_parts_len;
+
+ /* Remove all subscription-less and child-less nodes. */
+ for (size_t i = sub_parts_len; i > 0; --i) {
+ struct aws_mqtt_topic_node *node = NULL;
+ aws_array_list_get_at(&action->to_remove, &node, i);
+ AWS_ASSUME(node); /* Must be in bounds */
+
+ if (!s_topic_node_is_subscription(node) && 0 == aws_hash_table_get_entry_count(&node->subtopics)) {
+
+ /* No subscription and no children, this node needs to go. */
+ struct aws_mqtt_topic_node *grandma = NULL;
+ aws_array_list_get_at(&action->to_remove, &grandma, i - 1);
+ AWS_ASSUME(grandma); /* Must be in bounds */
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p node=%p: Removing child node %p with topic \"" PRInSTR "\"",
+ (void *)tree,
+ (void *)grandma,
+ (void *)node,
+ AWS_BYTE_CURSOR_PRI(node->topic));
+
+ aws_hash_table_remove(&grandma->subtopics, &node->topic, NULL, NULL);
+
+ /* Make sure the following loop doesn't hit this node. */
+ --nodes_left;
+
+ if (i != sub_parts_len) {
+
+ /* Clean up and delete */
+ s_topic_node_destroy(node, tree->allocator);
+ } else {
+ destroy_current = true;
+ }
+ } else {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p: Node %p with topic \"" PRInSTR
+ "\" has children or is a subscription, leaving in place",
+ (void *)tree,
+ (void *)node,
+ AWS_BYTE_CURSOR_PRI(node->topic));
+
+ /* Once we've found one node with children, the rest are guaranteed to have children as well. */
+ break;
+ }
+ }
+
+ /* If current owns the full string, go fixup the pointer references. */
+ if (nodes_left > 0) {
+
+ /* If a new viable topic filter is found once, it can be used for all parents. */
+ const struct aws_string *new_topic_filter = NULL;
+ const struct aws_string *const old_topic_filter = current->topic_filter;
+ /* How much of new_topic_filter should be lopped off the beginning. */
+
+ struct aws_mqtt_topic_node *parent = NULL;
+ aws_array_list_get_at(&action->to_remove, &parent, nodes_left);
+ AWS_ASSUME(parent);
+
+ size_t topic_offset =
+ parent->topic.ptr - aws_string_bytes(parent->topic_filter) + parent->topic.len + 1;
+
+ /* -1 to avoid touching current */
+ for (size_t i = nodes_left; i > 0; --i) {
+ aws_array_list_get_at(&action->to_remove, &parent, i);
+ AWS_ASSUME(parent); /* Must be in bounds */
+
+ /* Remove this topic and following / from offset. */
+ topic_offset -= (parent->topic.len + 1);
+
+ if (parent->topic_filter == old_topic_filter) {
+ /* Uh oh, Mom's using my topic string again! Steal it and replace it with a new one, Indiana
+ * Jones style. */
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p: Found node %p reusing topic filter part, replacing with next child",
+ (void *)tree,
+ (void *)parent);
+
+ if (!new_topic_filter) {
+ /* Set new_tf to old_tf so it's easier to check against the existing node.
+ * Basically, it's an INOUT param. */
+ new_topic_filter = old_topic_filter;
+
+ /* Search all subtopics until we find one that isn't current. */
+ aws_hash_table_foreach(
+ &parent->subtopics, s_topic_node_string_finder, (void *)&new_topic_filter);
+
+ /* This would only happen if there is only one topic in subtopics (current's) and
+ * it has no children (in which case it should have been removed above). */
+ AWS_ASSERT(new_topic_filter != old_topic_filter);
+
+ /* Now that the new string has been found, the old one can be destroyed. */
+ aws_string_destroy((void *)current->topic_filter);
+ current->owns_topic_filter = false;
+ }
+
+ /* Update the pointers. */
+ parent->topic_filter = new_topic_filter;
+ parent->topic.ptr = (uint8_t *)aws_string_bytes(new_topic_filter) + topic_offset;
+ }
+ }
+ }
+
+ /* Now that the strings are updated, remove current. */
+ if (destroy_current) {
+ s_topic_node_destroy(current, tree->allocator);
+ }
+ current = NULL;
+ }
+
+ break;
+ }
+ }
+
+ s_topic_tree_action_destroy(action);
+}
+
+/*******************************************************************************
+ * Action Roll Back
+ ******************************************************************************/
+
+static void s_topic_tree_action_roll_back(struct topic_tree_action *action, struct aws_mqtt_topic_tree *tree) {
+
+ AWS_PRECONDITION(action);
+
+ switch (action->mode) {
+ case AWS_MQTT_TOPIC_TREE_ADD: {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p action=%p: Rolling back add transaction action",
+ (void *)tree,
+ (void *)action);
+
+ /* Remove the first new node from its parent's map */
+ aws_hash_table_remove(&action->last_found->subtopics, &action->first_created->topic, NULL, NULL);
+ /* Recursively destroy all other created nodes */
+ s_topic_node_destroy(action->first_created, tree->allocator);
+
+ if (action->topic_filter) {
+ aws_string_destroy((void *)action->topic_filter);
+ }
+
+ break;
+ }
+ case AWS_MQTT_TOPIC_TREE_REMOVE:
+ case AWS_MQTT_TOPIC_TREE_UPDATE: {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p action=%p: Rolling back remove/update transaction, no changes made",
+ (void *)tree,
+ (void *)action);
+
+ /* Aborting a remove or update doesn't require any actions. */
+ break;
+ }
+ }
+
+ s_topic_tree_action_destroy(action);
+}
+
+/*******************************************************************************
+ * Insert
+ ******************************************************************************/
+
+int aws_mqtt_topic_tree_transaction_insert(
+ struct aws_mqtt_topic_tree *tree,
+ struct aws_array_list *transaction,
+ const struct aws_string *topic_filter_ori,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_publish_received_fn *callback,
+ aws_mqtt_userdata_cleanup_fn *cleanup,
+ void *userdata) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(transaction);
+ AWS_PRECONDITION(topic_filter_ori);
+ AWS_PRECONDITION(callback);
+
+ /* Let the topic tree take ownership of the new string and leave the caller's string alone. */
+ struct aws_string *topic_filter = aws_string_new_from_string(tree->allocator, topic_filter_ori);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p: Inserting topic filter %s into topic tree",
+ (void *)tree,
+ topic_filter->bytes);
+
+ struct aws_mqtt_topic_node *current = tree->root;
+
+ struct topic_tree_action *action = s_topic_tree_action_create(transaction);
+ if (!action) {
+ return AWS_OP_ERR;
+ }
+
+ /* Default to update unless a node was added */
+ action->mode = AWS_MQTT_TOPIC_TREE_UPDATE;
+ action->qos = qos;
+ action->callback = callback;
+ action->cleanup = cleanup;
+ action->userdata = userdata;
+
+ struct aws_byte_cursor topic_filter_cur = aws_byte_cursor_from_string(topic_filter);
+ struct aws_byte_cursor sub_part;
+ AWS_ZERO_STRUCT(sub_part);
+ struct aws_byte_cursor last_part;
+ AWS_ZERO_STRUCT(last_part);
+ while (aws_byte_cursor_next_split(&topic_filter_cur, '/', &sub_part)) {
+
+ last_part = sub_part;
+
+ /* Add or find mid-node */
+ struct aws_hash_element *elem = NULL;
+ int was_created = 0;
+ aws_hash_table_create(&current->subtopics, &sub_part, &elem, &was_created);
+
+ if (was_created) {
+ if (action->mode == AWS_MQTT_TOPIC_TREE_UPDATE) {
+ /* Store the last found node */
+ action->last_found = current;
+ }
+
+ /* Node does not exist, add new one */
+ current = s_topic_node_new(tree->allocator, &sub_part, topic_filter);
+ if (!current) {
+ /* Don't run the handle_error logic; the action needs to persist so it can be rolled back */
+ return AWS_OP_ERR;
+ }
+
+ /* Stash in the hash map */
+ elem->key = &current->topic;
+ elem->value = current;
+
+ if (action->mode == AWS_MQTT_TOPIC_TREE_UPDATE) {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p: Topic part \"" PRInSTR "\" is new, it and all children will be added as new nodes",
+ (void *)tree,
+ AWS_BYTE_CURSOR_PRI(sub_part));
+
+ /* Store the node we just made, and make sure we don't store again */
+ action->mode = AWS_MQTT_TOPIC_TREE_ADD;
+ action->first_created = current;
+ }
+ } else {
+ AWS_ASSERT(action->mode == AWS_MQTT_TOPIC_TREE_UPDATE); /* Can't have found an existing node while adding */
+
+ /* If the node exists, just traverse it */
+ current = elem->value;
+ }
+ }
+
+ action->node_to_update = current;
+
+ /* Node found (or created), add the topic filter and callbacks */
+ if (current->owns_topic_filter) {
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p node=%p: Updating existing node that already owns its topic_filter, throwing out parameter",
+ (void *)tree,
+ (void *)current);
+
+ /* If the topic filter was already here, this is already a subscription.
+ Free the new topic_filter so all existing byte_cursors remain valid. */
+ aws_string_destroy(topic_filter);
+ } else {
+ /* Node already existed (or was created) but wasn't a subscription. */
+ action->topic = last_part;
+ action->topic_filter = topic_filter;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*******************************************************************************
+ * Remove
+ ******************************************************************************/
+
+int aws_mqtt_topic_tree_transaction_remove(
+ struct aws_mqtt_topic_tree *tree,
+ struct aws_array_list *transaction,
+ const struct aws_byte_cursor *topic_filter,
+ void **old_userdata) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(transaction);
+ AWS_PRECONDITION(topic_filter);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p: Removing topic filter \"" PRInSTR "\" from topic tree",
+ (void *)tree,
+ AWS_BYTE_CURSOR_PRI(*topic_filter));
+
+ /* Initialize output parameter to a safe default */
+ if (old_userdata) {
+ *old_userdata = NULL;
+ }
+
+ /* Default the result to error; the handle_error path returns it in every case except node-not-found */
+ int result = AWS_OP_ERR;
+ struct topic_tree_action *action = s_topic_tree_action_create(transaction);
+ if (!action) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_array_list sub_topic_parts;
+ AWS_ZERO_STRUCT(sub_topic_parts);
+
+ if (aws_array_list_init_dynamic(&sub_topic_parts, tree->allocator, 1, sizeof(struct aws_byte_cursor))) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to initialize topic parts array", (void *)tree);
+ goto handle_error;
+ }
+
+ if (aws_byte_cursor_split_on_char(topic_filter, '/', &sub_topic_parts)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to split topic filter", (void *)tree);
+ goto handle_error;
+ }
+ const size_t sub_parts_len = aws_array_list_length(&sub_topic_parts);
+ if (!sub_parts_len) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to get topic parts length", (void *)tree);
+ goto handle_error;
+ }
+ s_topic_tree_action_to_remove(action, tree->allocator, sub_parts_len);
+
+ struct aws_mqtt_topic_node *current = tree->root;
+ if (aws_array_list_push_back(&action->to_remove, &current)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to insert root node into to_remove list", (void *)tree);
+ goto handle_error;
+ }
+
+ for (size_t i = 0; i < sub_parts_len; ++i) {
+
+ /* Get the current topic part */
+ struct aws_byte_cursor *sub_part = NULL;
+ aws_array_list_get_at_ptr(&sub_topic_parts, (void **)&sub_part, i);
+
+ /* Find mid-node */
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&current->subtopics, sub_part, &elem);
+ if (elem) {
+ /* If the node exists, just traverse it */
+ current = elem->value;
+ if (aws_array_list_push_back(&action->to_remove, &current)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_TOPIC_TREE, "tree=%p: Failed to insert topic node into to_remove list", (void *)tree);
+ goto handle_error;
+ }
+ } else {
+ /* If not, abandon ship */
+ goto handle_not_found;
+ }
+ }
+
+ action->node_to_update = current;
+
+ aws_array_list_clean_up(&sub_topic_parts);
+
+ if (old_userdata) {
+ *old_userdata = current->userdata;
+ }
+
+ return AWS_OP_SUCCESS;
+
+handle_not_found:
+ result = AWS_OP_SUCCESS;
+
+handle_error:
+ aws_array_list_clean_up(&sub_topic_parts);
+
+ s_topic_tree_action_destroy(action);
+ aws_array_list_pop_back(transaction);
+
+ return result;
+}
+
+/*******************************************************************************
+ * Commit
+ ******************************************************************************/
+
+void aws_mqtt_topic_tree_transaction_commit(struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction) {
+
+ const size_t num_actions = aws_array_list_length(transaction);
+ for (size_t i = 0; i < num_actions; ++i) {
+ struct topic_tree_action *action = NULL;
+ aws_array_list_get_at_ptr(transaction, (void **)&action, i);
+ AWS_ASSUME(action); /* Within bounds */
+
+ s_topic_tree_action_commit(action, tree);
+ }
+ aws_array_list_clear(transaction);
+}
+
+/*******************************************************************************
+ * Roll Back
+ ******************************************************************************/
+
+void aws_mqtt_topic_tree_transaction_roll_back(struct aws_mqtt_topic_tree *tree, struct aws_array_list *transaction) {
+
+ const size_t num_actions = aws_array_list_length(transaction);
+ for (size_t i = 1; i <= num_actions; ++i) {
+ struct topic_tree_action *action = NULL;
+ aws_array_list_get_at_ptr(transaction, (void **)&action, num_actions - i);
+ AWS_ASSUME(action); /* Within bounds */
+
+ s_topic_tree_action_roll_back(action, tree);
+ }
+ aws_array_list_clear(transaction);
+}
+
+int aws_mqtt_topic_tree_insert(
+ struct aws_mqtt_topic_tree *tree,
+ const struct aws_string *topic_filter,
+ enum aws_mqtt_qos qos,
+ aws_mqtt_publish_received_fn *callback,
+ aws_mqtt_userdata_cleanup_fn *cleanup,
+ void *userdata) {
+
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size);
+ struct aws_array_list transaction;
+ aws_array_list_init_static(&transaction, transaction_buf, 1, aws_mqtt_topic_tree_action_size);
+
+ if (aws_mqtt_topic_tree_transaction_insert(tree, &transaction, topic_filter, qos, callback, cleanup, userdata)) {
+
+ aws_mqtt_topic_tree_transaction_roll_back(tree, &transaction);
+ return AWS_OP_ERR;
+ }
+
+ aws_mqtt_topic_tree_transaction_commit(tree, &transaction);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt_topic_tree_remove(struct aws_mqtt_topic_tree *tree, const struct aws_byte_cursor *topic_filter) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(topic_filter);
+
+ AWS_VARIABLE_LENGTH_ARRAY(uint8_t, transaction_buf, aws_mqtt_topic_tree_action_size);
+ struct aws_array_list transaction;
+ aws_array_list_init_static(&transaction, transaction_buf, 1, aws_mqtt_topic_tree_action_size);
+
+ if (aws_mqtt_topic_tree_transaction_remove(tree, &transaction, topic_filter, NULL)) {
+
+ aws_mqtt_topic_tree_transaction_roll_back(tree, &transaction);
+ return AWS_OP_ERR;
+ }
+
+ aws_mqtt_topic_tree_transaction_commit(tree, &transaction);
+ return AWS_OP_SUCCESS;
+}
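+
+/* The two wrappers above run single-action transactions. Callers can also batch several actions into one
+ * transaction and commit (or roll back) the whole set at once. A sketch, assuming an allocator and a
+ * hypothetical s_on_publish callback; the list is sized up front so the stored actions never reallocate:
+ *
+ *   struct aws_array_list transaction;
+ *   aws_array_list_init_dynamic(&transaction, allocator, 2, aws_mqtt_topic_tree_action_size);
+ *
+ *   struct aws_string *filter_a = aws_string_new_from_c_str(allocator, "a/b/#");
+ *   struct aws_string *filter_b = aws_string_new_from_c_str(allocator, "c/+/d");
+ *
+ *   if (aws_mqtt_topic_tree_transaction_insert(tree, &transaction, filter_a, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish, NULL, NULL) ||
+ *       aws_mqtt_topic_tree_transaction_insert(tree, &transaction, filter_b, AWS_MQTT_QOS_AT_LEAST_ONCE, s_on_publish, NULL, NULL)) {
+ *       aws_mqtt_topic_tree_transaction_roll_back(tree, &transaction);
+ *   } else {
+ *       aws_mqtt_topic_tree_transaction_commit(tree, &transaction);
+ *   }
+ *
+ *   aws_string_destroy(filter_a); // the tree copies the filter strings, so the caller still owns these
+ *   aws_string_destroy(filter_b);
+ *   aws_array_list_clean_up(&transaction);
+ */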
+
+/*******************************************************************************
+ * Publish
+ ******************************************************************************/
+
+static void s_topic_tree_publish_do_recurse(
+ const struct aws_byte_cursor *current_sub_part,
+ const struct aws_mqtt_topic_node *current,
+ const struct aws_mqtt_packet_publish *pub) {
+
+ struct aws_byte_cursor hash_cur = aws_byte_cursor_from_string(s_multi_level_wildcard);
+ struct aws_byte_cursor plus_cur = aws_byte_cursor_from_string(s_single_level_wildcard);
+
+ struct aws_hash_element *elem = NULL;
+
+ struct aws_byte_cursor sub_part = *current_sub_part;
+ if (!aws_byte_cursor_next_split(&pub->topic_name, '/', &sub_part)) {
+
+ /* If this is the last node and is a sub, call it */
+ if (s_topic_node_is_subscription(current)) {
+ bool dup = aws_mqtt_packet_publish_get_dup(pub);
+ enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(pub);
+ bool retain = aws_mqtt_packet_publish_get_retain(pub);
+ current->callback(&pub->topic_name, &pub->payload, dup, qos, retain, current->userdata);
+ }
+ return;
+ }
+
+ /* Check multi-level wildcard */
+ aws_hash_table_find(&current->subtopics, &hash_cur, &elem);
+ if (elem) {
+ /* Match! */
+ struct aws_mqtt_topic_node *multi_wildcard = elem->value;
+ /* Must be a subscription and have no children */
+ AWS_ASSERT(s_topic_node_is_subscription(multi_wildcard));
+ AWS_ASSERT(0 == aws_hash_table_get_entry_count(&multi_wildcard->subtopics));
+ bool dup = aws_mqtt_packet_publish_get_dup(pub);
+ enum aws_mqtt_qos qos = aws_mqtt_packet_publish_get_qos(pub);
+ bool retain = aws_mqtt_packet_publish_get_retain(pub);
+ multi_wildcard->callback(&pub->topic_name, &pub->payload, dup, qos, retain, multi_wildcard->userdata);
+ }
+
+ /* Check single level wildcard */
+ aws_hash_table_find(&current->subtopics, &plus_cur, &elem);
+ if (elem) {
+ /* Recurse sub topics */
+ s_topic_tree_publish_do_recurse(&sub_part, elem->value, pub);
+ }
+
+ /* Check actual topic name */
+ aws_hash_table_find(&current->subtopics, &sub_part, &elem);
+ if (elem) {
+ /* Found the actual topic, recurse to it */
+ s_topic_tree_publish_do_recurse(&sub_part, elem->value, pub);
+ }
+}
+
+void aws_mqtt_topic_tree_publish(const struct aws_mqtt_topic_tree *tree, struct aws_mqtt_packet_publish *pub) {
+
+ AWS_PRECONDITION(tree);
+ AWS_PRECONDITION(pub);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT_TOPIC_TREE,
+ "tree=%p: Publishing on topic " PRInSTR,
+ (void *)tree,
+ AWS_BYTE_CURSOR_PRI(pub->topic_name));
+
+ struct aws_byte_cursor sub_part;
+ AWS_ZERO_STRUCT(sub_part);
+ s_topic_tree_publish_do_recurse(&sub_part, tree->root, pub);
+}
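+
+/* For reference, the subscription callbacks invoked by the publish walk above have the
+ * aws_mqtt_publish_received_fn shape; this sketch (s_print_publish is a hypothetical handler,
+ * assuming <stdio.h>) just prints the topic and payload size:
+ *
+ *   static void s_print_publish(
+ *       const struct aws_byte_cursor *topic,
+ *       const struct aws_byte_cursor *payload,
+ *       bool dup,
+ *       enum aws_mqtt_qos qos,
+ *       bool retain,
+ *       void *userdata) {
+ *       (void)dup;
+ *       (void)qos;
+ *       (void)retain;
+ *       (void)userdata;
+ *       printf("publish on \"" PRInSTR "\" with %zu payload bytes\n", AWS_BYTE_CURSOR_PRI(*topic), payload->len);
+ *   }
+ *
+ * A filter of "a/+/c" matches a publish on "a/b/c" through the single-level branch, while "a/#" matches
+ * everything under "a/" through the multi-level branch checked first.
+ */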
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
new file mode 100644
index 0000000000..bb8943cd4e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_callbacks.c
@@ -0,0 +1,159 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_callbacks.h>
+
+#include <aws/io/event_loop.h>
+#include <aws/mqtt/private/v5/mqtt5_client_impl.h>
+#include <aws/mqtt/v5/mqtt5_listener.h>
+
+#include <inttypes.h>
+
+struct aws_mqtt5_callback_set_entry {
+ struct aws_allocator *allocator;
+
+ struct aws_linked_list_node node;
+
+ uint64_t id;
+
+ struct aws_mqtt5_callback_set callbacks;
+};
+
+void aws_mqtt5_callback_set_manager_init(
+ struct aws_mqtt5_callback_set_manager *manager,
+ struct aws_mqtt5_client *client) {
+
+ manager->client = client; /* no need to ref count, it's assumed to be owned by the client */
+ manager->next_callback_set_entry_id = 1;
+
+ aws_linked_list_init(&manager->callback_set_entries);
+}
+
+void aws_mqtt5_callback_set_manager_clean_up(struct aws_mqtt5_callback_set_manager *manager) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries);
+ while (node != aws_linked_list_end(&manager->callback_set_entries)) {
+ struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node);
+ node = aws_linked_list_next(node);
+
+ aws_linked_list_remove(&entry->node);
+ aws_mem_release(entry->allocator, entry);
+ }
+}
+
+static struct aws_mqtt5_callback_set_entry *s_new_callback_set_entry(
+ struct aws_mqtt5_callback_set_manager *manager,
+ struct aws_mqtt5_callback_set *callback_set) {
+ struct aws_mqtt5_callback_set_entry *entry =
+ aws_mem_calloc(manager->client->allocator, 1, sizeof(struct aws_mqtt5_callback_set_entry));
+
+ entry->allocator = manager->client->allocator;
+ entry->id = manager->next_callback_set_entry_id++;
+ entry->callbacks = *callback_set;
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: callback manager created new entry :%" PRIu64,
+ (void *)manager->client,
+ entry->id);
+
+ return entry;
+}
+
+uint64_t aws_mqtt5_callback_set_manager_push_front(
+ struct aws_mqtt5_callback_set_manager *manager,
+ struct aws_mqtt5_callback_set *callback_set) {
+
+ AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop));
+
+ struct aws_mqtt5_callback_set_entry *entry = s_new_callback_set_entry(manager, callback_set);
+
+ aws_linked_list_push_front(&manager->callback_set_entries, &entry->node);
+
+ return entry->id;
+}
+
+void aws_mqtt5_callback_set_manager_remove(struct aws_mqtt5_callback_set_manager *manager, uint64_t callback_set_id) {
+
+ AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop));
+
+ struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries);
+ while (node != aws_linked_list_end(&manager->callback_set_entries)) {
+ struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node);
+ node = aws_linked_list_next(node);
+
+ if (entry->id == callback_set_id) {
+ aws_linked_list_remove(&entry->node);
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: callback manager removed entry id=%" PRIu64,
+ (void *)manager->client,
+ entry->id);
+ aws_mem_release(entry->allocator, entry);
+ return;
+ }
+ }
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: callback manager failed to remove entry id=%" PRIu64 ", callback set id not found.",
+ (void *)manager->client,
+ callback_set_id);
+}
+
+void aws_mqtt5_callback_set_manager_on_publish_received(
+ struct aws_mqtt5_callback_set_manager *manager,
+ const struct aws_mqtt5_packet_publish_view *publish_view) {
+
+ AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop));
+
+ struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries);
+ while (node != aws_linked_list_end(&manager->callback_set_entries)) {
+ struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node);
+ node = aws_linked_list_next(node);
+
+ struct aws_mqtt5_callback_set *callback_set = &entry->callbacks;
+ if (callback_set->listener_publish_received_handler != NULL) {
+ bool handled = (*callback_set->listener_publish_received_handler)(
+ publish_view, callback_set->listener_publish_received_handler_user_data);
+ if (handled) {
+ return;
+ }
+ }
+ }
+
+ if (manager->client->config->publish_received_handler != NULL) {
+ (*manager->client->config->publish_received_handler)(
+ publish_view, manager->client->config->publish_received_handler_user_data);
+ }
+}
+
+void aws_mqtt5_callback_set_manager_on_lifecycle_event(
+ struct aws_mqtt5_callback_set_manager *manager,
+ const struct aws_mqtt5_client_lifecycle_event *lifecycle_event) {
+
+ AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(manager->client->loop));
+
+ struct aws_linked_list_node *node = aws_linked_list_begin(&manager->callback_set_entries);
+ while (node != aws_linked_list_end(&manager->callback_set_entries)) {
+ struct aws_mqtt5_callback_set_entry *entry = AWS_CONTAINER_OF(node, struct aws_mqtt5_callback_set_entry, node);
+ node = aws_linked_list_next(node);
+
+ struct aws_mqtt5_callback_set *callback_set = &entry->callbacks;
+
+ if (callback_set->lifecycle_event_handler != NULL) {
+ struct aws_mqtt5_client_lifecycle_event listener_copy = *lifecycle_event;
+ listener_copy.user_data = callback_set->lifecycle_event_handler_user_data;
+
+ (*callback_set->lifecycle_event_handler)(&listener_copy);
+ }
+ }
+
+ struct aws_mqtt5_client_lifecycle_event client_copy = *lifecycle_event;
+ client_copy.user_data = manager->client->config->lifecycle_event_handler_user_data;
+
+ if (manager->client->config->lifecycle_event_handler != NULL) {
+ (*manager->client->config->lifecycle_event_handler)(&client_copy);
+ }
+}
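+
+/* A minimal sketch of how a listener hooks into the manager (s_my_publish_handler, s_my_lifecycle_handler
+ * and my_state are hypothetical); push_front and remove must run on the client's event-loop thread, which
+ * the asserts above enforce:
+ *
+ *   struct aws_mqtt5_callback_set callbacks;
+ *   AWS_ZERO_STRUCT(callbacks);
+ *   callbacks.listener_publish_received_handler = s_my_publish_handler;
+ *   callbacks.listener_publish_received_handler_user_data = my_state;
+ *   callbacks.lifecycle_event_handler = s_my_lifecycle_handler;
+ *   callbacks.lifecycle_event_handler_user_data = my_state;
+ *
+ *   uint64_t id = aws_mqtt5_callback_set_manager_push_front(&client->callback_manager, &callbacks);
+ *   ...
+ *   aws_mqtt5_callback_set_manager_remove(&client->callback_manager, id);
+ */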
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
new file mode 100644
index 0000000000..27af76038e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_client.c
@@ -0,0 +1,3344 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/v5/mqtt5_client.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/string.h>
+#include <aws/http/proxy.h>
+#include <aws/http/request_response.h>
+#include <aws/http/websocket.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/mqtt/private/shared_constants.h>
+#include <aws/mqtt/private/v5/mqtt5_client_impl.h>
+#include <aws/mqtt/private/v5/mqtt5_options_storage.h>
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif
+
+#define AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH 4096
+#define AWS_MQTT5_DEFAULT_CONNACK_PACKET_TIMEOUT_MS 10000
+
+const char *aws_mqtt5_client_state_to_c_string(enum aws_mqtt5_client_state state) {
+ switch (state) {
+ case AWS_MCS_STOPPED:
+ return "STOPPED";
+
+ case AWS_MCS_CONNECTING:
+ return "CONNECTING";
+
+ case AWS_MCS_MQTT_CONNECT:
+ return "MQTT_CONNECT";
+
+ case AWS_MCS_CONNECTED:
+ return "CONNECTED";
+
+ case AWS_MCS_CLEAN_DISCONNECT:
+ return "CLEAN_DISCONNECT";
+
+ case AWS_MCS_CHANNEL_SHUTDOWN:
+ return "CHANNEL_SHUTDOWN";
+
+ case AWS_MCS_PENDING_RECONNECT:
+ return "PENDING_RECONNECT";
+
+ case AWS_MCS_TERMINATED:
+ return "TERMINATED";
+
+ default:
+ return "UNKNOWN";
+ }
+}
+
+static bool s_aws_mqtt5_operation_is_retainable(struct aws_mqtt5_operation *operation) {
+ switch (operation->packet_type) {
+ case AWS_MQTT5_PT_PUBLISH:
+ case AWS_MQTT5_PT_SUBSCRIBE:
+ case AWS_MQTT5_PT_UNSUBSCRIBE:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static void s_init_statistics(struct aws_mqtt5_client_operation_statistics_impl *stats) {
+ aws_atomic_store_int(&stats->incomplete_operation_count_atomic, 0);
+ aws_atomic_store_int(&stats->incomplete_operation_size_atomic, 0);
+ aws_atomic_store_int(&stats->unacked_operation_count_atomic, 0);
+ aws_atomic_store_int(&stats->unacked_operation_size_atomic, 0);
+}
+
+static bool s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(
+ struct aws_mqtt5_operation *operation,
+ enum aws_mqtt5_client_operation_queue_behavior_type queue_behavior) {
+ switch (aws_mqtt5_client_operation_queue_behavior_type_to_non_default(queue_behavior)) {
+ case AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT:
+ return false;
+
+ case AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT:
+ if (!s_aws_mqtt5_operation_is_retainable(operation)) {
+ return false;
+ }
+
+ if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) {
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return false;
+ }
+ }
+
+ return true;
+
+ case AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT:
+ if (!s_aws_mqtt5_operation_is_retainable(operation)) {
+ return false;
+ }
+
+ if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) {
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return true;
+ }
+ }
+
+ return false;
+
+ default:
+ return false;
+ }
+}
+
+typedef bool(mqtt5_operation_filter)(struct aws_mqtt5_operation *operation, void *filter_context);
+
+static void s_filter_operation_list(
+ struct aws_linked_list *source_operations,
+ mqtt5_operation_filter *filter_fn,
+ struct aws_linked_list *filtered_operations,
+ void *filter_context) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(source_operations);
+ while (node != aws_linked_list_end(source_operations)) {
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+ node = aws_linked_list_next(node);
+
+ if (filter_fn(operation, filter_context)) {
+ aws_linked_list_remove(&operation->node);
+ aws_linked_list_push_back(filtered_operations, &operation->node);
+ }
+ }
+}
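+
+/* On disconnect these helpers combine: operations that do not satisfy the offline-queue retention policy
+ * are filtered out of the queue and failed as a batch. A sketch with a hypothetical adapter predicate
+ * (the offline_queue_behavior config field name is assumed here):
+ *
+ *   static bool s_fails_retention(struct aws_mqtt5_operation *operation, void *context) {
+ *       enum aws_mqtt5_client_operation_queue_behavior_type behavior =
+ *           *(enum aws_mqtt5_client_operation_queue_behavior_type *)context;
+ *       return !s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(operation, behavior);
+ *   }
+ *
+ *   enum aws_mqtt5_client_operation_queue_behavior_type behavior = client->config->offline_queue_behavior;
+ *   struct aws_linked_list failed;
+ *   aws_linked_list_init(&failed);
+ *   s_filter_operation_list(&client->operational_state.queued_operations, s_fails_retention, &failed, &behavior);
+ *   // then complete everything in `failed` with an appropriate error code, e.g. via s_complete_operation_list
+ */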
+
+typedef void(mqtt5_operation_applicator)(struct aws_mqtt5_operation *operation, void *applicator_context);
+
+static void s_apply_to_operation_list(
+ struct aws_linked_list *operations,
+ mqtt5_operation_applicator *applicator_fn,
+ void *applicator_context) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(operations);
+ while (node != aws_linked_list_end(operations)) {
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+ node = aws_linked_list_next(node);
+
+ applicator_fn(operation, applicator_context);
+ }
+}
+
+static int s_aws_mqtt5_client_change_desired_state(
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_client_state desired_state,
+ struct aws_mqtt5_operation_disconnect *disconnect_operation);
+
+static uint64_t s_hash_uint16_t(const void *item) {
+ return *(uint16_t *)item;
+}
+
+static bool s_uint16_t_eq(const void *a, const void *b) {
+ return *(uint16_t *)a == *(uint16_t *)b;
+}
+
+static uint64_t s_aws_mqtt5_client_compute_operational_state_service_time(
+ const struct aws_mqtt5_client_operational_state *client_operational_state,
+ uint64_t now);
+
+static int s_submit_operation(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation);
+
+static void s_complete_operation(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *view) {
+ if (client != NULL) {
+ aws_mqtt5_client_statistics_change_operation_statistic_state(client, operation, AWS_MQTT5_OSS_NONE);
+ }
+
+ aws_mqtt5_operation_complete(operation, error_code, packet_type, view);
+ aws_mqtt5_operation_release(operation);
+}
+
+static void s_complete_operation_list(
+ struct aws_mqtt5_client *client,
+ struct aws_linked_list *operation_list,
+ int error_code) {
+
+ struct aws_linked_list_node *node = aws_linked_list_begin(operation_list);
+ while (node != aws_linked_list_end(operation_list)) {
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+
+ node = aws_linked_list_next(node);
+
+ s_complete_operation(client, operation, error_code, AWS_MQTT5_PT_NONE, NULL);
+ }
+
+ /* we've released everything, so reset the list to empty */
+ aws_linked_list_init(operation_list);
+}
+
+static void s_check_timeouts(struct aws_mqtt5_client *client, uint64_t now) {
+ if (client->config->ack_timeout_seconds == 0) {
+ return;
+ }
+
+ struct aws_linked_list_node *node = aws_linked_list_begin(&client->operational_state.unacked_operations);
+ while (node != aws_linked_list_end(&client->operational_state.unacked_operations)) {
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+ node = aws_linked_list_next(node);
+ if (operation->ack_timeout_timepoint_ns < now) {
+ /* Timeout for this packet has been reached */
+ aws_mqtt5_packet_id_t packet_id = aws_mqtt5_operation_get_packet_id(operation);
+
+ switch (operation->packet_type) {
+ case AWS_MQTT5_PT_SUBSCRIBE:
+ /* SUBSCRIBE has timed out. */
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: SUBSCRIBE packet with id:%d has timed out",
+ (void *)client,
+ packet_id);
+ break;
+
+ case AWS_MQTT5_PT_UNSUBSCRIBE:
+ /* UNSUBSCRIBE has timed out. */
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: UNSUBSCRIBE packet with id:%d has timed out",
+ (void *)client,
+ packet_id);
+ break;
+
+ case AWS_MQTT5_PT_PUBLISH:
+ /* PUBLISH has timed out. */
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: PUBLISH packet with id:%d has timed out",
+ (void *)client,
+ packet_id);
+
+ aws_mqtt5_client_flow_control_state_on_puback(client);
+ break;
+
+ default:
+ /* something is wrong, there should be no other packet type in this linked list */
+ break;
+ }
+
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&client->operational_state.unacked_operations_table, &packet_id, &elem);
+
+ if (elem == NULL || elem->value == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: timeout for unknown operation with id %d",
+ (void *)client,
+ (int)packet_id);
+ return;
+ }
+
+ aws_linked_list_remove(&operation->node);
+ aws_hash_table_remove(&client->operational_state.unacked_operations_table, &packet_id, NULL, NULL);
+
+ s_complete_operation(client, operation, AWS_ERROR_MQTT_TIMEOUT, AWS_MQTT5_PT_NONE, NULL);
+ } else {
+ break;
+ }
+ }
+}
+
+static void s_mqtt5_client_final_destroy(struct aws_mqtt5_client *client) {
+ if (client == NULL) {
+ return;
+ }
+
+ aws_mqtt5_client_termination_completion_fn *client_termination_handler = NULL;
+ void *client_termination_handler_user_data = NULL;
+ if (client->config != NULL) {
+ client_termination_handler = client->config->client_termination_handler;
+ client_termination_handler_user_data = client->config->client_termination_handler_user_data;
+ }
+
+ aws_mqtt5_callback_set_manager_clean_up(&client->callback_manager);
+
+ aws_mqtt5_client_operational_state_clean_up(&client->operational_state);
+
+ aws_mqtt5_client_options_storage_destroy((struct aws_mqtt5_client_options_storage *)client->config);
+
+ aws_mqtt5_negotiated_settings_clean_up(&client->negotiated_settings);
+
+ aws_http_message_release(client->handshake);
+
+ aws_mqtt5_encoder_clean_up(&client->encoder);
+ aws_mqtt5_decoder_clean_up(&client->decoder);
+
+ aws_mqtt5_inbound_topic_alias_resolver_clean_up(&client->inbound_topic_alias_resolver);
+ aws_mqtt5_outbound_topic_alias_resolver_destroy(client->outbound_topic_alias_resolver);
+
+ aws_mem_release(client->allocator, client);
+
+ if (client_termination_handler != NULL) {
+ (*client_termination_handler)(client_termination_handler_user_data);
+ }
+}
+
+static void s_on_mqtt5_client_zero_ref_count(void *user_data) {
+ struct aws_mqtt5_client *client = user_data;
+
+ s_aws_mqtt5_client_change_desired_state(client, AWS_MCS_TERMINATED, NULL);
+}
+
+static void s_aws_mqtt5_client_emit_stopped_lifecycle_event(struct aws_mqtt5_client *client) {
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: emitting stopped lifecycle event", (void *)client);
+
+ struct aws_mqtt5_client_lifecycle_event event;
+ AWS_ZERO_STRUCT(event);
+
+ event.event_type = AWS_MQTT5_CLET_STOPPED;
+ event.client = client;
+
+ aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event);
+}
+
+static void s_aws_mqtt5_client_emit_connecting_lifecycle_event(struct aws_mqtt5_client *client) {
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: emitting connecting lifecycle event", (void *)client);
+
+ client->lifecycle_state = AWS_MQTT5_LS_CONNECTING;
+
+ struct aws_mqtt5_client_lifecycle_event event;
+ AWS_ZERO_STRUCT(event);
+
+ event.event_type = AWS_MQTT5_CLET_ATTEMPTING_CONNECT;
+ event.client = client;
+
+ aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event);
+}
+
+static void s_aws_mqtt5_client_emit_connection_success_lifecycle_event(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_connack_view *connack_view) {
+
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: emitting connection success lifecycle event", (void *)client);
+
+ client->lifecycle_state = AWS_MQTT5_LS_CONNECTED;
+
+ struct aws_mqtt5_client_lifecycle_event event;
+ AWS_ZERO_STRUCT(event);
+
+ event.event_type = AWS_MQTT5_CLET_CONNECTION_SUCCESS;
+ event.client = client;
+ event.settings = &client->negotiated_settings;
+ event.connack_data = connack_view;
+
+ aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event);
+}
+
+/*
+ * Emits either a CONNECTION_FAILED or DISCONNECT event based on the current life cycle state. Once a "final"
+ * event is emitted by the client, it must attempt to reconnect before another one will be emitted, since the
+ * lifecycle state check will early out until then. It is expected that this function may get called unnecessarily
+ * often during various channel shutdown or disconnection/failure flows. This will not affect overall correctness.
+ */
+static void s_aws_mqtt5_client_emit_final_lifecycle_event(
+ struct aws_mqtt5_client *client,
+ int error_code,
+ const struct aws_mqtt5_packet_connack_view *connack_view,
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view) {
+
+ if (client->lifecycle_state == AWS_MQTT5_LS_NONE) {
+ /* we already emitted a final event earlier */
+ return;
+ }
+
+ struct aws_mqtt5_client_lifecycle_event event;
+ AWS_ZERO_STRUCT(event);
+
+ if (client->lifecycle_state == AWS_MQTT5_LS_CONNECTING) {
+ AWS_FATAL_ASSERT(disconnect_view == NULL);
+ event.event_type = AWS_MQTT5_CLET_CONNECTION_FAILURE;
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: emitting connection failure lifecycle event with error code %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+ } else {
+ AWS_FATAL_ASSERT(client->lifecycle_state == AWS_MQTT5_LS_CONNECTED);
+ AWS_FATAL_ASSERT(connack_view == NULL);
+ event.event_type = AWS_MQTT5_CLET_DISCONNECTION;
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: emitting disconnection lifecycle event with error code %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+ }
+
+ event.error_code = error_code;
+ event.connack_data = connack_view;
+ event.disconnect_data = disconnect_view;
+
+ client->lifecycle_state = AWS_MQTT5_LS_NONE;
+
+ aws_mqtt5_callback_set_manager_on_lifecycle_event(&client->callback_manager, &event);
+}
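+
+/* On the receiving side, a lifecycle handler typically switches on event_type; a minimal sketch
+ * (s_on_lifecycle_event is a hypothetical application handler):
+ *
+ *   static void s_on_lifecycle_event(const struct aws_mqtt5_client_lifecycle_event *event) {
+ *       switch (event->event_type) {
+ *           case AWS_MQTT5_CLET_CONNECTION_SUCCESS:
+ *               // event->settings and event->connack_data are valid for the duration of the callback
+ *               break;
+ *           case AWS_MQTT5_CLET_CONNECTION_FAILURE:
+ *           case AWS_MQTT5_CLET_DISCONNECTION:
+ *               // event->error_code says why; connack_data/disconnect_data may be NULL
+ *               break;
+ *           default:
+ *               break;
+ *       }
+ *   }
+ */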
+
+/*
+ * next_service_time == 0 means to not service the client, i.e. a state that only cares about external events
+ *
+ * This includes connecting and channel shutdown. Terminated is also included, but it's a state that only exists
+ * instantaneously before final destruction.
+ */
+static uint64_t s_compute_next_service_time_client_stopped(struct aws_mqtt5_client *client, uint64_t now) {
+ /* have we been told to connect or terminate? */
+ if (client->desired_state != AWS_MCS_STOPPED) {
+ return now;
+ }
+
+ return 0;
+}
+
+static uint64_t s_compute_next_service_time_client_connecting(struct aws_mqtt5_client *client, uint64_t now) {
+ (void)client;
+ (void)now;
+
+ return 0;
+}
+
+static uint64_t s_compute_next_service_time_client_mqtt_connect(struct aws_mqtt5_client *client, uint64_t now) {
+ /* This state is interruptible by a stop/terminate */
+ if (client->desired_state != AWS_MCS_CONNECTED) {
+ return now;
+ }
+
+ uint64_t operation_processing_time =
+ s_aws_mqtt5_client_compute_operational_state_service_time(&client->operational_state, now);
+ if (operation_processing_time == 0) {
+ return client->next_mqtt_connect_packet_timeout_time;
+ }
+
+ return aws_min_u64(client->next_mqtt_connect_packet_timeout_time, operation_processing_time);
+}
+
+static uint64_t s_min_non_0_64(uint64_t a, uint64_t b) {
+ if (a == 0) {
+ return b;
+ }
+
+ if (b == 0) {
+ return a;
+ }
+
+ return aws_min_u64(a, b);
+}
+
+static uint64_t s_compute_next_service_time_client_connected(struct aws_mqtt5_client *client, uint64_t now) {
+
+ /* ping and ping timeout */
+ uint64_t next_service_time = client->next_ping_time;
+ if (client->next_ping_timeout_time != 0) {
+ next_service_time = aws_min_u64(next_service_time, client->next_ping_timeout_time);
+ }
+
+ /* unacked operations timeout */
+ if (client->config->ack_timeout_seconds != 0 &&
+ !aws_linked_list_empty(&client->operational_state.unacked_operations)) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(&client->operational_state.unacked_operations);
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+ next_service_time = aws_min_u64(next_service_time, operation->ack_timeout_timepoint_ns);
+ }
+
+ if (client->desired_state != AWS_MCS_CONNECTED) {
+ next_service_time = now;
+ }
+
+ uint64_t operation_processing_time =
+ s_aws_mqtt5_client_compute_operational_state_service_time(&client->operational_state, now);
+
+ next_service_time = s_min_non_0_64(operation_processing_time, next_service_time);
+
+ /* reset reconnect delay interval */
+ next_service_time = s_min_non_0_64(client->next_reconnect_delay_reset_time_ns, next_service_time);
+
+ return next_service_time;
+}
+
+static uint64_t s_compute_next_service_time_client_clean_disconnect(struct aws_mqtt5_client *client, uint64_t now) {
+ uint64_t ack_timeout_time = 0;
+
+ /* unacked operations timeout */
+ if (client->config->ack_timeout_seconds != 0 &&
+ !aws_linked_list_empty(&client->operational_state.unacked_operations)) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(&client->operational_state.unacked_operations);
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+ ack_timeout_time = operation->ack_timeout_timepoint_ns;
+ }
+
+ uint64_t operation_processing_time =
+ s_aws_mqtt5_client_compute_operational_state_service_time(&client->operational_state, now);
+
+ return s_min_non_0_64(ack_timeout_time, operation_processing_time);
+}
+
+static uint64_t s_compute_next_service_time_client_channel_shutdown(struct aws_mqtt5_client *client, uint64_t now) {
+ (void)client;
+ (void)now;
+
+ return 0;
+}
+
+static uint64_t s_compute_next_service_time_client_pending_reconnect(struct aws_mqtt5_client *client, uint64_t now) {
+ if (client->desired_state != AWS_MCS_CONNECTED) {
+ return now;
+ }
+
+ return client->next_reconnect_time_ns;
+}
+
+static uint64_t s_compute_next_service_time_client_terminated(struct aws_mqtt5_client *client, uint64_t now) {
+ (void)client;
+ (void)now;
+
+ return 0;
+}
+
+static uint64_t s_compute_next_service_time_by_current_state(struct aws_mqtt5_client *client, uint64_t now) {
+ switch (client->current_state) {
+ case AWS_MCS_STOPPED:
+ return s_compute_next_service_time_client_stopped(client, now);
+ case AWS_MCS_CONNECTING:
+ return s_compute_next_service_time_client_connecting(client, now);
+ case AWS_MCS_MQTT_CONNECT:
+ return s_compute_next_service_time_client_mqtt_connect(client, now);
+ case AWS_MCS_CONNECTED:
+ return s_compute_next_service_time_client_connected(client, now);
+ case AWS_MCS_CLEAN_DISCONNECT:
+ return s_compute_next_service_time_client_clean_disconnect(client, now);
+ case AWS_MCS_CHANNEL_SHUTDOWN:
+ return s_compute_next_service_time_client_channel_shutdown(client, now);
+ case AWS_MCS_PENDING_RECONNECT:
+ return s_compute_next_service_time_client_pending_reconnect(client, now);
+ case AWS_MCS_TERMINATED:
+ return s_compute_next_service_time_client_terminated(client, now);
+ }
+
+ return 0;
+}
+
+static void s_reevaluate_service_task(struct aws_mqtt5_client *client) {
+ /*
+ * This causes the client to only reevaluate service schedule time at the end of the service call or in
+ * a callback from an external event.
+ */
+ if (client->in_service) {
+ return;
+ }
+
+ uint64_t now = (*client->vtable->get_current_time_fn)();
+ uint64_t next_service_time = s_compute_next_service_time_by_current_state(client, now);
+
+ /*
+ * This catches both cases where a service run is already scheduled: either we no longer want it to run
+ * (next_service_time == 0) or we need it to run at a different time than the currently scheduled one.
+ */
+ if (next_service_time != client->next_service_task_run_time && client->next_service_task_run_time > 0) {
+ aws_event_loop_cancel_task(client->loop, &client->service_task);
+ client->next_service_task_run_time = 0;
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT5_CLIENT, "id=%p: cancelling previously scheduled service task", (void *)client);
+ }
+
+ if (next_service_time > 0 &&
+ (next_service_time < client->next_service_task_run_time || client->next_service_task_run_time == 0)) {
+ aws_event_loop_schedule_task_future(client->loop, &client->service_task, next_service_time);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT5_CLIENT, "id=%p: scheduled service task for time %" PRIu64, (void *)client, next_service_time);
+ }
+
+ client->next_service_task_run_time = next_service_time;
+}
+
+static void s_enqueue_operation_back(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: enqueuing %s operation to back",
+ (void *)client,
+ aws_mqtt5_packet_type_to_c_string(operation->packet_type));
+
+ aws_linked_list_push_back(&client->operational_state.queued_operations, &operation->node);
+
+ s_reevaluate_service_task(client);
+}
+
+static void s_enqueue_operation_front(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: enqueuing %s operation to front",
+ (void *)client,
+ aws_mqtt5_packet_type_to_c_string(operation->packet_type));
+
+ aws_linked_list_push_front(&client->operational_state.queued_operations, &operation->node);
+
+ s_reevaluate_service_task(client);
+}
+
+static void s_aws_mqtt5_client_operational_state_reset(
+ struct aws_mqtt5_client_operational_state *client_operational_state,
+ int completion_error_code,
+ bool is_final) {
+
+ struct aws_mqtt5_client *client = client_operational_state->client;
+
+ s_complete_operation_list(client, &client_operational_state->queued_operations, completion_error_code);
+ s_complete_operation_list(client, &client_operational_state->write_completion_operations, completion_error_code);
+ s_complete_operation_list(client, &client_operational_state->unacked_operations, completion_error_code);
+
+ if (is_final) {
+ aws_hash_table_clean_up(&client_operational_state->unacked_operations_table);
+ } else {
+ aws_hash_table_clear(&client_operational_state->unacked_operations_table);
+ }
+}
+
+static void s_change_current_state(struct aws_mqtt5_client *client, enum aws_mqtt5_client_state next_state);
+
+static void s_change_current_state_to_stopped(struct aws_mqtt5_client *client) {
+ client->current_state = AWS_MCS_STOPPED;
+
+ s_aws_mqtt5_client_operational_state_reset(&client->operational_state, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, false);
+
+ /* Stop works as a complete session wipe, and so the next time we connect, we want it to be clean */
+ client->has_connected_successfully = false;
+
+ s_aws_mqtt5_client_emit_stopped_lifecycle_event(client);
+}
+
+static void s_aws_mqtt5_client_shutdown_channel(struct aws_mqtt5_client *client, int error_code) {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_UNKNOWN;
+ }
+
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, error_code, NULL, NULL);
+
+ if (client->current_state != AWS_MCS_MQTT_CONNECT && client->current_state != AWS_MCS_CONNECTED &&
+ client->current_state != AWS_MCS_CLEAN_DISCONNECT) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: client channel shutdown invoked from unexpected state %d(%s)",
+ (void *)client,
+ (int)client->current_state,
+ aws_mqtt5_client_state_to_c_string(client->current_state));
+ return;
+ }
+
+ if (client->slot == NULL || client->slot->channel == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: client channel shutdown invoked without a channel", (void *)client);
+ return;
+ }
+
+ s_change_current_state(client, AWS_MCS_CHANNEL_SHUTDOWN);
+ (*client->vtable->channel_shutdown_fn)(client->slot->channel, error_code);
+}
+
+static void s_aws_mqtt5_client_shutdown_channel_with_disconnect(
+ struct aws_mqtt5_client *client,
+ int error_code,
+ struct aws_mqtt5_operation_disconnect *disconnect_op) {
+ if (client->current_state != AWS_MCS_CONNECTED && client->current_state != AWS_MCS_MQTT_CONNECT) {
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+
+ aws_linked_list_push_front(&client->operational_state.queued_operations, &disconnect_op->base.node);
+ aws_mqtt5_operation_disconnect_acquire(disconnect_op);
+ client->clean_disconnect_error_code = error_code;
+
+ s_change_current_state(client, AWS_MCS_CLEAN_DISCONNECT);
+}
+
+static void s_on_disconnect_operation_complete(int error_code, void *user_data) {
+ struct aws_mqtt5_client *client = user_data;
+
+ s_aws_mqtt5_client_shutdown_channel(
+ client, (error_code != AWS_ERROR_SUCCESS) ? error_code : client->clean_disconnect_error_code);
+}
+
+static void s_aws_mqtt5_client_shutdown_channel_clean(
+ struct aws_mqtt5_client *client,
+ int error_code,
+ enum aws_mqtt5_disconnect_reason_code reason_code) {
+ struct aws_mqtt5_packet_disconnect_view disconnect_options = {
+ .reason_code = reason_code,
+ };
+
+ struct aws_mqtt5_disconnect_completion_options internal_completion_options = {
+ .completion_callback = s_on_disconnect_operation_complete,
+ .completion_user_data = client,
+ };
+
+ struct aws_mqtt5_operation_disconnect *disconnect_op =
+ aws_mqtt5_operation_disconnect_new(client->allocator, &disconnect_options, NULL, &internal_completion_options);
+ if (disconnect_op == NULL) {
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+
+ s_aws_mqtt5_client_shutdown_channel_with_disconnect(client, error_code, disconnect_op);
+ aws_mqtt5_operation_disconnect_release(disconnect_op);
+}
+
+struct aws_mqtt5_shutdown_task {
+ struct aws_task task;
+ struct aws_allocator *allocator;
+ int error_code;
+ struct aws_mqtt5_client *client;
+};
+
+static void s_mqtt5_client_shutdown_final(int error_code, struct aws_mqtt5_client *client);
+
+static void s_shutdown_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ struct aws_mqtt5_shutdown_task *shutdown_task = arg;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto done;
+ }
+
+ s_mqtt5_client_shutdown_final(shutdown_task->error_code, shutdown_task->client);
+
+done:
+
+ aws_mem_release(shutdown_task->allocator, shutdown_task);
+}
+
+static void s_mqtt5_client_shutdown_final(int error_code, struct aws_mqtt5_client *client) {
+
+ AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop));
+
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, error_code, NULL, NULL);
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: channel tore down with error code %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ if (client->slot) {
+ aws_channel_slot_remove(client->slot);
+ AWS_LOGF_TRACE(AWS_LS_MQTT5_CLIENT, "id=%p: slot removed successfully", (void *)client);
+ client->slot = NULL;
+ }
+
+ aws_mqtt5_client_on_disconnection_update_operational_state(client);
+
+ if (client->desired_state == AWS_MCS_CONNECTED) {
+ s_change_current_state(client, AWS_MCS_PENDING_RECONNECT);
+ } else {
+ s_change_current_state(client, AWS_MCS_STOPPED);
+ }
+}
+
+static void s_mqtt5_client_shutdown(
+ struct aws_client_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+ (void)channel;
+
+ struct aws_mqtt5_client *client = user_data;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ error_code = AWS_ERROR_MQTT_UNEXPECTED_HANGUP;
+ }
+
+ if (aws_event_loop_thread_is_callers_thread(client->loop)) {
+ s_mqtt5_client_shutdown_final(error_code, client);
+ return;
+ }
+
+ struct aws_mqtt5_shutdown_task *shutdown_task =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt5_shutdown_task));
+
+ aws_task_init(&shutdown_task->task, s_shutdown_task_fn, (void *)shutdown_task, "ShutdownTask");
+ shutdown_task->allocator = client->allocator;
+ shutdown_task->client = client;
+ shutdown_task->error_code = error_code;
+ aws_event_loop_schedule_task_now(client->loop, &shutdown_task->task);
+}
+
+static void s_mqtt5_client_setup(
+ struct aws_client_bootstrap *bootstrap,
+ int error_code,
+ struct aws_channel *channel,
+ void *user_data) {
+
+ (void)bootstrap;
+
+ /* Setup callback contract is: if error_code is non-zero then channel is NULL. */
+ AWS_FATAL_ASSERT((error_code != 0) == (channel == NULL));
+ struct aws_mqtt5_client *client = user_data;
+
+ if (error_code != AWS_OP_SUCCESS) {
+ /* client shutdown already handles this case, so just call that. */
+ s_mqtt5_client_shutdown(bootstrap, error_code, channel, user_data);
+ return;
+ }
+
+ AWS_FATAL_ASSERT(client->current_state == AWS_MCS_CONNECTING);
+ AWS_FATAL_ASSERT(aws_event_loop_thread_is_callers_thread(client->loop));
+
+ if (client->desired_state != AWS_MCS_CONNECTED) {
+ aws_raise_error(AWS_ERROR_MQTT5_USER_REQUESTED_STOP);
+ goto error;
+ }
+
+ client->slot = aws_channel_slot_new(channel); /* allocs or crashes */
+
+ if (aws_channel_slot_insert_end(channel, client->slot)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: Failed to insert slot into channel %p, error %d (%s).",
+ (void *)client,
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+ goto error;
+ }
+
+ if (aws_channel_slot_set_handler(client->slot, &client->handler)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: Failed to set MQTT handler into slot on channel %p, error %d (%s).",
+ (void *)client,
+ (void *)channel,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ goto error;
+ }
+
+ s_change_current_state(client, AWS_MCS_MQTT_CONNECT);
+
+ return;
+
+error:
+
+ s_change_current_state(client, AWS_MCS_CHANNEL_SHUTDOWN);
+ (*client->vtable->channel_shutdown_fn)(channel, aws_last_error());
+}
+
+static void s_on_websocket_shutdown(struct aws_websocket *websocket, int error_code, void *user_data) {
+ struct aws_mqtt5_client *client = user_data;
+
+ struct aws_channel *channel = client->slot ? client->slot->channel : NULL;
+
+ s_mqtt5_client_shutdown(client->config->bootstrap, error_code, channel, client);
+
+ if (websocket) {
+ aws_websocket_release(websocket);
+ }
+}
+
+static void s_on_websocket_setup(const struct aws_websocket_on_connection_setup_data *setup, void *user_data) {
+
+ struct aws_mqtt5_client *client = user_data;
+ client->handshake = aws_http_message_release(client->handshake);
+
+ /* Setup callback contract is: if error_code is non-zero then websocket is NULL. */
+ AWS_FATAL_ASSERT((setup->error_code != 0) == (setup->websocket == NULL));
+
+ struct aws_channel *channel = NULL;
+
+ if (setup->websocket) {
+ channel = aws_websocket_get_channel(setup->websocket);
+ AWS_ASSERT(channel);
+
+ /* Websocket must be "converted" before the MQTT handler can be installed next to it. */
+ if (aws_websocket_convert_to_midchannel_handler(setup->websocket)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: Failed converting websocket, error %d (%s)",
+ (void *)client,
+ aws_last_error(),
+ aws_error_name(aws_last_error()));
+
+ (*client->vtable->channel_shutdown_fn)(channel, aws_last_error());
+ return;
+ }
+ }
+
+ /* Call into the channel-setup callback, the rest of the logic is the same. */
+ s_mqtt5_client_setup(client->config->bootstrap, setup->error_code, channel, client);
+}
+
+struct aws_mqtt5_websocket_transform_complete_task {
+ struct aws_task task;
+ struct aws_allocator *allocator;
+ struct aws_mqtt5_client *client;
+ int error_code;
+ struct aws_http_message *handshake;
+};
+
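+/*
+ * Task that finishes the websocket handshake transform on the client's event loop.  The user transform may
+ * complete on an arbitrary thread, so its result (handshake request + error code) is marshaled back here before
+ * the websocket connection attempt is started.
+ */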
+void s_websocket_transform_complete_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ struct aws_mqtt5_websocket_transform_complete_task *websocket_transform_complete_task = arg;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto done;
+ }
+
+ struct aws_mqtt5_client *client = websocket_transform_complete_task->client;
+
+ aws_http_message_release(client->handshake);
+ client->handshake = aws_http_message_acquire(websocket_transform_complete_task->handshake);
+
+ int error_code = websocket_transform_complete_task->error_code;
+ if (error_code == 0 && client->desired_state == AWS_MCS_CONNECTED) {
+
+ struct aws_websocket_client_connection_options websocket_options = {
+ .allocator = client->allocator,
+ .bootstrap = client->config->bootstrap,
+ .socket_options = &client->config->socket_options,
+ .tls_options = client->config->tls_options_ptr,
+ .host = aws_byte_cursor_from_string(client->config->host_name),
+ .port = client->config->port,
+ .handshake_request = websocket_transform_complete_task->handshake,
+ .initial_window_size = 0, /* Prevent websocket data from arriving before the MQTT handler is installed */
+ .user_data = client,
+ .on_connection_setup = s_on_websocket_setup,
+ .on_connection_shutdown = s_on_websocket_shutdown,
+ .requested_event_loop = client->loop,
+ };
+
+ if (client->config->http_proxy_config != NULL) {
+ websocket_options.proxy_options = &client->config->http_proxy_options;
+ }
+
+ if (client->vtable->websocket_connect_fn(&websocket_options)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: Failed to initiate websocket connection.", (void *)client);
+ error_code = aws_last_error();
+ goto error;
+ }
+
+ goto done;
+
+ } else {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ AWS_ASSERT(client->desired_state != AWS_MCS_CONNECTED);
+ error_code = AWS_ERROR_MQTT5_USER_REQUESTED_STOP;
+ }
+ }
+
+error:;
+ struct aws_websocket_on_connection_setup_data websocket_setup = {.error_code = error_code};
+ s_on_websocket_setup(&websocket_setup, client);
+
+done:
+
+ aws_http_message_release(websocket_transform_complete_task->handshake);
+ aws_mqtt5_client_release(websocket_transform_complete_task->client);
+
+ aws_mem_release(websocket_transform_complete_task->allocator, websocket_transform_complete_task);
+}
+
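+/*
+ * Completion callback handed to the user's websocket handshake transform.  Packages the (possibly rewritten)
+ * handshake request and error code into a task and schedules it on the client's event loop.
+ */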
+static void s_websocket_handshake_transform_complete(
+ struct aws_http_message *handshake_request,
+ int error_code,
+ void *complete_ctx) {
+
+ struct aws_mqtt5_client *client = complete_ctx;
+
+ struct aws_mqtt5_websocket_transform_complete_task *task =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt5_websocket_transform_complete_task));
+
+ aws_task_init(
+ &task->task, s_websocket_transform_complete_task_fn, (void *)task, "WebsocketHandshakeTransformComplete");
+
+ task->allocator = client->allocator;
+ task->client = aws_mqtt5_client_acquire(client);
+ task->error_code = error_code;
+ task->handshake = handshake_request;
+
+ aws_event_loop_schedule_task_now(client->loop, &task->task);
+
+ aws_mqtt5_client_release(client);
+}
+
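+/*
+ * Builds the default websocket handshake request (default path and protocol header) and hands it to the
+ * user-supplied transform; the actual websocket connection is initiated once the transform completes.
+ */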
+static int s_websocket_connect(struct aws_mqtt5_client *client) {
+ AWS_ASSERT(client);
+ AWS_ASSERT(client->config->websocket_handshake_transform);
+
+ /* Build websocket handshake request */
+ struct aws_http_message *handshake = aws_http_message_new_websocket_handshake_request(
+ client->allocator, *g_websocket_handshake_default_path, aws_byte_cursor_from_string(client->config->host_name));
+
+ if (handshake == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: Failed to generate websocket handshake request", (void *)client);
+ return AWS_OP_ERR;
+ }
+
+ if (aws_http_message_add_header(handshake, *g_websocket_handshake_default_protocol_header)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT, "id=%p: Failed to add default header to websocket handshake request", (void *)client);
+ goto on_error;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_MQTT5_CLIENT, "id=%p: Transforming websocket handshake request.", (void *)client);
+
+ aws_mqtt5_client_acquire(client);
+ client->config->websocket_handshake_transform(
+ handshake,
+ client->config->websocket_handshake_transform_user_data,
+ s_websocket_handshake_transform_complete,
+ client);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+
+ aws_http_message_release(handshake);
+
+ return AWS_OP_ERR;
+}
+
+static void s_change_current_state_to_connecting(struct aws_mqtt5_client *client) {
+ AWS_ASSERT(client->current_state == AWS_MCS_STOPPED || client->current_state == AWS_MCS_PENDING_RECONNECT);
+
+ client->current_state = AWS_MCS_CONNECTING;
+ client->clean_disconnect_error_code = AWS_ERROR_SUCCESS;
+
+ s_aws_mqtt5_client_emit_connecting_lifecycle_event(client);
+
+ int result = 0;
+ if (client->config->websocket_handshake_transform != NULL) {
+ result = s_websocket_connect(client);
+ } else {
+ struct aws_socket_channel_bootstrap_options channel_options;
+ AWS_ZERO_STRUCT(channel_options);
+ channel_options.bootstrap = client->config->bootstrap;
+ channel_options.host_name = aws_string_c_str(client->config->host_name);
+ channel_options.port = client->config->port;
+ channel_options.socket_options = &client->config->socket_options;
+ channel_options.tls_options = client->config->tls_options_ptr;
+ channel_options.setup_callback = &s_mqtt5_client_setup;
+ channel_options.shutdown_callback = &s_mqtt5_client_shutdown;
+ channel_options.user_data = client;
+ channel_options.requested_event_loop = client->loop;
+
+ if (client->config->http_proxy_config == NULL) {
+ result = (*client->vtable->client_bootstrap_new_socket_channel_fn)(&channel_options);
+ } else {
+ result = (*client->vtable->http_proxy_new_socket_channel_fn)(
+ &channel_options, &client->config->http_proxy_options);
+ }
+ }
+
+ if (result) {
+ int error_code = aws_last_error();
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to kick off connection with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, aws_last_error(), NULL, NULL);
+
+ s_change_current_state(client, AWS_MCS_PENDING_RECONNECT);
+ }
+}
+
+static int s_aws_mqtt5_client_set_current_operation(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation) {
+
+ if (aws_mqtt5_operation_bind_packet_id(operation, &client->operational_state)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to bind mqtt packet id for current operation, with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mqtt5_encoder_append_packet_encoding(&client->encoder, operation->packet_type, operation->packet_view)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to append packet encoding sequence for current operation with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ return AWS_OP_ERR;
+ }
+
+ client->operational_state.current_operation = operation;
+
+ return AWS_OP_SUCCESS;
+}
+
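+/*
+ * Schedules the next PINGREQ one full keep-alive interval from now.  For example, with a negotiated
+ * server_keep_alive of 60 seconds, next_ping_time becomes now + 60 seconds (expressed in nanoseconds).
+ */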
+static void s_reset_ping(struct aws_mqtt5_client *client) {
+ uint64_t now = (*client->vtable->get_current_time_fn)();
+ uint16_t keep_alive_seconds = client->negotiated_settings.server_keep_alive;
+
+ uint64_t keep_alive_interval_nanos =
+ aws_timestamp_convert(keep_alive_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
+ client->next_ping_time = aws_add_u64_saturating(now, keep_alive_interval_nanos);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: next PINGREQ scheduled for time %" PRIu64, (void *)client, client->next_ping_time);
+}
+
+static void s_aws_mqtt5_on_socket_write_completion_mqtt_connect(struct aws_mqtt5_client *client, int error_code) {
+ if (error_code != AWS_ERROR_SUCCESS) {
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+
+ s_reevaluate_service_task(client);
+}
+
+static void s_aws_mqtt5_on_socket_write_completion_connected(struct aws_mqtt5_client *client, int error_code) {
+ if (error_code != AWS_ERROR_SUCCESS) {
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+
+ s_reevaluate_service_task(client);
+}
+
+static void s_aws_mqtt5_on_socket_write_completion(
+ struct aws_channel *channel,
+ struct aws_io_message *message,
+ int error_code,
+ void *user_data) {
+
+ (void)channel;
+ (void)message;
+
+ struct aws_mqtt5_client *client = user_data;
+ client->operational_state.pending_write_completion = false;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: socket write completion invoked with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+ }
+
+ switch (client->current_state) {
+ case AWS_MCS_MQTT_CONNECT:
+ s_aws_mqtt5_on_socket_write_completion_mqtt_connect(client, error_code);
+ break;
+
+ case AWS_MCS_CONNECTED:
+ s_aws_mqtt5_on_socket_write_completion_connected(client, error_code);
+ break;
+
+ case AWS_MCS_CLEAN_DISCONNECT:
+ /* the CONNECTED callback works just fine for CLEAN_DISCONNECT */
+ s_aws_mqtt5_on_socket_write_completion_connected(client, error_code);
+ break;
+
+ default:
+ break;
+ }
+
+ s_complete_operation_list(client, &client->operational_state.write_completion_operations, error_code);
+}
+
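+/*
+ * Session resumption policy: AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS only attempts to rejoin once the client has
+ * connected successfully at least once, while AWS_MQTT5_CSBT_REJOIN_ALWAYS attempts to rejoin on every connection.
+ */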
+static bool s_should_resume_session(const struct aws_mqtt5_client *client) {
+ enum aws_mqtt5_client_session_behavior_type session_behavior =
+ aws_mqtt5_client_session_behavior_type_to_non_default(client->config->session_behavior);
+
+ return (session_behavior == AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS && client->has_connected_successfully) ||
+ (session_behavior == AWS_MQTT5_CSBT_REJOIN_ALWAYS);
+}
+
+static void s_change_current_state_to_mqtt_connect(struct aws_mqtt5_client *client) {
+ AWS_FATAL_ASSERT(client->current_state == AWS_MCS_CONNECTING);
+ AWS_FATAL_ASSERT(client->operational_state.current_operation == NULL);
+
+ client->current_state = AWS_MCS_MQTT_CONNECT;
+ client->operational_state.pending_write_completion = false;
+
+ aws_mqtt5_encoder_reset(&client->encoder);
+ aws_mqtt5_decoder_reset(&client->decoder);
+
+ bool resume_session = s_should_resume_session(client);
+ struct aws_mqtt5_packet_connect_view connect_view = client->config->connect.storage_view;
+ connect_view.clean_start = !resume_session;
+
+ if (aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default(
+ client->config->topic_aliasing_options.inbound_topic_alias_behavior) == AWS_MQTT5_CITABT_ENABLED) {
+ connect_view.topic_alias_maximum = &client->config->topic_aliasing_options.inbound_alias_cache_size;
+ }
+
+ aws_mqtt5_negotiated_settings_reset(&client->negotiated_settings, &connect_view);
+ connect_view.client_id = aws_byte_cursor_from_buf(&client->negotiated_settings.client_id_storage);
+
+ struct aws_mqtt5_operation_connect *connect_op = aws_mqtt5_operation_connect_new(client->allocator, &connect_view);
+ if (connect_op == NULL) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to create CONNECT operation with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+
+ s_enqueue_operation_front(client, &connect_op->base);
+
+ uint32_t timeout_ms = client->config->connack_timeout_ms;
+ if (timeout_ms == 0) {
+ timeout_ms = AWS_MQTT5_DEFAULT_CONNACK_PACKET_TIMEOUT_MS;
+ }
+
+ uint64_t now = (*client->vtable->get_current_time_fn)();
+ client->next_mqtt_connect_packet_timeout_time =
+ now + aws_timestamp_convert(timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: setting CONNECT timeout to %" PRIu64,
+ (void *)client,
+ client->next_mqtt_connect_packet_timeout_time);
+}
+
+static void s_reset_reconnection_delay_time(struct aws_mqtt5_client *client) {
+ uint64_t now = (*client->vtable->get_current_time_fn)();
+ uint64_t reset_reconnection_delay_time_nanos = aws_timestamp_convert(
+ client->config->min_connected_time_to_reset_reconnect_delay_ms,
+ AWS_TIMESTAMP_MILLIS,
+ AWS_TIMESTAMP_NANOS,
+ NULL);
+ client->next_reconnect_delay_reset_time_ns = aws_add_u64_saturating(now, reset_reconnection_delay_time_nanos);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: reconnection delay reset time set to %" PRIu64,
+ (void *)client,
+ client->next_reconnect_delay_reset_time_ns);
+}
+
+static void s_change_current_state_to_connected(struct aws_mqtt5_client *client) {
+ AWS_FATAL_ASSERT(client->current_state == AWS_MCS_MQTT_CONNECT);
+
+ client->current_state = AWS_MCS_CONNECTED;
+
+ aws_mqtt5_client_on_connection_update_operational_state(client);
+
+ client->has_connected_successfully = true;
+ client->next_ping_timeout_time = 0;
+ s_reset_ping(client);
+ s_reset_reconnection_delay_time(client);
+}
+
+static void s_change_current_state_to_clean_disconnect(struct aws_mqtt5_client *client) {
+ (void)client;
+ AWS_FATAL_ASSERT(client->current_state == AWS_MCS_MQTT_CONNECT || client->current_state == AWS_MCS_CONNECTED);
+
+ client->current_state = AWS_MCS_CLEAN_DISCONNECT;
+}
+
+static void s_change_current_state_to_channel_shutdown(struct aws_mqtt5_client *client) {
+ enum aws_mqtt5_client_state current_state = client->current_state;
+ AWS_FATAL_ASSERT(
+ current_state == AWS_MCS_MQTT_CONNECT || current_state == AWS_MCS_CONNECTING ||
+ current_state == AWS_MCS_CONNECTED || current_state == AWS_MCS_CLEAN_DISCONNECT);
+
+ client->current_state = AWS_MCS_CHANNEL_SHUTDOWN;
+
+ /*
+ * Critical requirement: The caller must invoke the channel shutdown function themselves (with the desired error
+ * code) *after* changing state.
+ *
+ * The caller is the only one with the error context and we want to be safe and avoid the possibility of a
+ * synchronous channel shutdown (mocks) leading to a situation where we get the shutdown callback before we've
+ * transitioned into the CHANNEL_SHUTDOWN state.
+ *
+ * We could relax this if a synchronous channel shutdown is literally impossible even with mocked channels.
+ */
+}
+
+/* TODO: refactor and reunify with internals of retry strategy to expose these as usable functions in aws-c-io */
+
+static uint64_t s_aws_mqtt5_compute_reconnect_backoff_no_jitter(struct aws_mqtt5_client *client) {
+ uint64_t retry_count = aws_min_u64(client->reconnect_count, 63);
+ return aws_mul_u64_saturating((uint64_t)1 << retry_count, client->config->min_reconnect_delay_ms);
+}
+
+static uint64_t s_aws_mqtt5_compute_reconnect_backoff_full_jitter(struct aws_mqtt5_client *client) {
+ uint64_t non_jittered = s_aws_mqtt5_compute_reconnect_backoff_no_jitter(client);
+ return aws_mqtt5_client_random_in_range(0, non_jittered);
+}
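+
+/*
+ * Worked example: with min_reconnect_delay_ms = 1000 and reconnect_count = 3, the un-jittered backoff above is
+ * (1 << 3) * 1000 = 8000 ms, and full jitter then picks a delay uniformly at random between 0 and 8000 ms.
+ */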
+
+static uint64_t s_compute_decorrelated_jitter(struct aws_mqtt5_client *client) {
+ uint64_t last_backoff_val = client->current_reconnect_delay_ms;
+
+ if (!last_backoff_val) {
+ return s_aws_mqtt5_compute_reconnect_backoff_full_jitter(client);
+ }
+
+ return aws_mqtt5_client_random_in_range(
+ client->config->min_reconnect_delay_ms, aws_mul_u64_saturating(last_backoff_val, 3));
+}
+
+static void s_update_reconnect_delay_for_pending_reconnect(struct aws_mqtt5_client *client) {
+ uint64_t delay_ms = 0;
+
+ switch (client->config->retry_jitter_mode) {
+ case AWS_EXPONENTIAL_BACKOFF_JITTER_DECORRELATED:
+            delay_ms = s_compute_decorrelated_jitter(client);
+ break;
+
+ case AWS_EXPONENTIAL_BACKOFF_JITTER_NONE:
+ delay_ms = s_aws_mqtt5_compute_reconnect_backoff_no_jitter(client);
+ break;
+
+ case AWS_EXPONENTIAL_BACKOFF_JITTER_FULL:
+ case AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT:
+ default:
+ delay_ms = s_aws_mqtt5_compute_reconnect_backoff_full_jitter(client);
+ break;
+ }
+
+ delay_ms = aws_min_u64(delay_ms, client->config->max_reconnect_delay_ms);
+ uint64_t now = (*client->vtable->get_current_time_fn)();
+
+ client->next_reconnect_time_ns =
+ aws_add_u64_saturating(now, aws_timestamp_convert(delay_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL));
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: next connection attempt in %" PRIu64 " milliseconds", (void *)client, delay_ms);
+
+ client->reconnect_count++;
+}
+
+static void s_change_current_state_to_pending_reconnect(struct aws_mqtt5_client *client) {
+ client->current_state = AWS_MCS_PENDING_RECONNECT;
+
+ s_update_reconnect_delay_for_pending_reconnect(client);
+}
+
+static void s_change_current_state_to_terminated(struct aws_mqtt5_client *client) {
+ client->current_state = AWS_MCS_TERMINATED;
+
+ s_mqtt5_client_final_destroy(client);
+}
+
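+/*
+ * Central state-transition dispatcher.  Logs the transition, notifies the optional vtable state-change callback,
+ * and invokes the per-state entry function.
+ */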
+static void s_change_current_state(struct aws_mqtt5_client *client, enum aws_mqtt5_client_state next_state) {
+ AWS_ASSERT(next_state != client->current_state);
+ if (next_state == client->current_state) {
+ return;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: switching current state from %s to %s",
+ (void *)client,
+ aws_mqtt5_client_state_to_c_string(client->current_state),
+ aws_mqtt5_client_state_to_c_string(next_state));
+
+ if (client->vtable->on_client_state_change_callback_fn != NULL) {
+ (*client->vtable->on_client_state_change_callback_fn)(
+ client, client->current_state, next_state, client->vtable->vtable_user_data);
+ }
+
+ switch (next_state) {
+ case AWS_MCS_STOPPED:
+ s_change_current_state_to_stopped(client);
+ break;
+ case AWS_MCS_CONNECTING:
+ s_change_current_state_to_connecting(client);
+ break;
+ case AWS_MCS_MQTT_CONNECT:
+ s_change_current_state_to_mqtt_connect(client);
+ break;
+ case AWS_MCS_CONNECTED:
+ s_change_current_state_to_connected(client);
+ break;
+ case AWS_MCS_CLEAN_DISCONNECT:
+ s_change_current_state_to_clean_disconnect(client);
+ break;
+ case AWS_MCS_CHANNEL_SHUTDOWN:
+ s_change_current_state_to_channel_shutdown(client);
+ break;
+ case AWS_MCS_PENDING_RECONNECT:
+ s_change_current_state_to_pending_reconnect(client);
+ break;
+ case AWS_MCS_TERMINATED:
+ s_change_current_state_to_terminated(client);
+ return;
+ }
+
+ s_reevaluate_service_task(client);
+}
+
+static bool s_service_state_stopped(struct aws_mqtt5_client *client) {
+ enum aws_mqtt5_client_state desired_state = client->desired_state;
+ if (desired_state == AWS_MCS_CONNECTED) {
+ s_change_current_state(client, AWS_MCS_CONNECTING);
+ } else if (desired_state == AWS_MCS_TERMINATED) {
+ s_change_current_state(client, AWS_MCS_TERMINATED);
+ return true;
+ }
+
+ return false;
+}
+
+static void s_service_state_connecting(struct aws_mqtt5_client *client) {
+ (void)client;
+}
+
+static void s_service_state_mqtt_connect(struct aws_mqtt5_client *client, uint64_t now) {
+ enum aws_mqtt5_client_state desired_state = client->desired_state;
+ if (desired_state != AWS_MCS_CONNECTED) {
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, NULL, NULL);
+ s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP);
+ return;
+ }
+
+ if (now >= client->next_mqtt_connect_packet_timeout_time) {
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_CONNACK_TIMEOUT, NULL, NULL);
+
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: shutting down channel due to CONNACK timeout", (void *)client);
+ s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_CONNACK_TIMEOUT);
+ return;
+ }
+
+ if (aws_mqtt5_client_service_operational_state(&client->operational_state)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to service outgoing CONNECT packet to channel with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+}
+
+static int s_aws_mqtt5_client_queue_ping(struct aws_mqtt5_client *client) {
+ s_reset_ping(client);
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: queuing PINGREQ", (void *)client);
+
+ struct aws_mqtt5_operation_pingreq *pingreq_op = aws_mqtt5_operation_pingreq_new(client->allocator);
+ s_enqueue_operation_front(client, &pingreq_op->base);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_service_state_connected(struct aws_mqtt5_client *client, uint64_t now) {
+ enum aws_mqtt5_client_state desired_state = client->desired_state;
+ if (desired_state != AWS_MCS_CONNECTED) {
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, NULL, NULL);
+
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: channel shutdown due to user Stop request", (void *)client);
+ s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP);
+ return;
+ }
+
+ if (now >= client->next_ping_timeout_time && client->next_ping_timeout_time != 0) {
+ s_aws_mqtt5_client_emit_final_lifecycle_event(client, AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, NULL, NULL);
+
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: channel shutdown due to PINGRESP timeout", (void *)client);
+
+ s_aws_mqtt5_client_shutdown_channel_clean(
+ client, AWS_ERROR_MQTT5_PING_RESPONSE_TIMEOUT, AWS_MQTT5_DRC_KEEP_ALIVE_TIMEOUT);
+ return;
+ }
+
+ if (now >= client->next_ping_time) {
+ if (s_aws_mqtt5_client_queue_ping(client)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to queue PINGREQ with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+ }
+
+ if (now >= client->next_reconnect_delay_reset_time_ns && client->next_reconnect_delay_reset_time_ns != 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: connected sufficiently long that reconnect backoff delay has been reset back to "
+ "minimum value",
+ (void *)client);
+
+ client->reconnect_count = 0;
+ client->current_reconnect_delay_ms = 0;
+ client->next_reconnect_delay_reset_time_ns = 0;
+ }
+
+ s_check_timeouts(client, now);
+
+ if (aws_mqtt5_client_service_operational_state(&client->operational_state)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to service CONNECTED operation queue with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+}
+
+static void s_service_state_clean_disconnect(struct aws_mqtt5_client *client, uint64_t now) {
+ if (aws_mqtt5_client_service_operational_state(&client->operational_state)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to service CLEAN_DISCONNECT operation queue with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ return;
+ }
+
+ s_check_timeouts(client, now);
+}
+
+static void s_service_state_channel_shutdown(struct aws_mqtt5_client *client) {
+ (void)client;
+}
+
+static void s_service_state_pending_reconnect(struct aws_mqtt5_client *client, uint64_t now) {
+ if (client->desired_state != AWS_MCS_CONNECTED) {
+ s_change_current_state(client, AWS_MCS_STOPPED);
+ return;
+ }
+
+ if (now >= client->next_reconnect_time_ns) {
+ s_change_current_state(client, AWS_MCS_CONNECTING);
+ return;
+ }
+}
+
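+/*
+ * The service task drives the client's state machine.  It always runs on the client's event loop, dispatches to
+ * the per-state service function, and then reschedules itself as needed (unless the client has just transitioned
+ * to TERMINATED, in which case the client memory is already gone).
+ */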
+static void s_mqtt5_service_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ return;
+ }
+
+ struct aws_mqtt5_client *client = arg;
+ client->next_service_task_run_time = 0;
+ client->in_service = true;
+
+ uint64_t now = (*client->vtable->get_current_time_fn)();
+ bool terminated = false;
+ switch (client->current_state) {
+ case AWS_MCS_STOPPED:
+ terminated = s_service_state_stopped(client);
+ break;
+ case AWS_MCS_CONNECTING:
+ s_service_state_connecting(client);
+ break;
+ case AWS_MCS_MQTT_CONNECT:
+ s_service_state_mqtt_connect(client, now);
+ break;
+ case AWS_MCS_CONNECTED:
+ s_service_state_connected(client, now);
+ break;
+ case AWS_MCS_CLEAN_DISCONNECT:
+ s_service_state_clean_disconnect(client, now);
+ break;
+ case AWS_MCS_CHANNEL_SHUTDOWN:
+ s_service_state_channel_shutdown(client);
+ break;
+ case AWS_MCS_PENDING_RECONNECT:
+ s_service_state_pending_reconnect(client, now);
+ break;
+ default:
+ break;
+ }
+
+    /*
+     * We can only enter the terminated state from stopped. If we do so, the client memory has already been freed
+     * and touching it again would crash.
+     */
+ if (terminated) {
+ return;
+ }
+
+ /* we're not scheduled anymore, reschedule as needed */
+ client->in_service = false;
+ s_reevaluate_service_task(client);
+}
+
+static bool s_should_client_disconnect_cleanly(struct aws_mqtt5_client *client) {
+ enum aws_mqtt5_client_state current_state = client->current_state;
+
+ return current_state == AWS_MCS_CONNECTED;
+}
+
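+/*
+ * Channel handler read path: incoming io messages are fed to the MQTT5 decoder.  A protocol-level decode error on
+ * an established connection triggers a clean (DISCONNECT-based) shutdown; any other failure shuts the channel
+ * down directly.
+ */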
+static int s_process_read_message(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message) {
+
+ struct aws_mqtt5_client *client = handler->impl;
+
+ if (message->message_type != AWS_IO_MESSAGE_APPLICATION_DATA) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: unexpected io message data", (void *)client);
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT5_CLIENT, "id=%p: processing read message of size %zu", (void *)client, message->message_data.len);
+
+ struct aws_byte_cursor message_cursor = aws_byte_cursor_from_buf(&message->message_data);
+
+ int result = aws_mqtt5_decoder_on_data_received(&client->decoder, message_cursor);
+ if (result != AWS_OP_SUCCESS) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: decode failure with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ if (error_code == AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR && s_should_client_disconnect_cleanly(client)) {
+ s_aws_mqtt5_client_shutdown_channel_clean(client, error_code, AWS_MQTT5_DRC_PROTOCOL_ERROR);
+ } else {
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ }
+
+ goto done;
+ }
+
+ aws_channel_slot_increment_read_window(slot, message->message_data.len);
+
+done:
+
+ aws_mem_release(message->allocator, message);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_shutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *slot,
+ enum aws_channel_direction dir,
+ int error_code,
+ bool free_scarce_resources_immediately) {
+
+ (void)handler;
+
+ return aws_channel_slot_on_handler_shutdown_complete(slot, dir, error_code, free_scarce_resources_immediately);
+}
+
+static size_t s_initial_window_size(struct aws_channel_handler *handler) {
+ (void)handler;
+
+ return SIZE_MAX;
+}
+
+static void s_destroy(struct aws_channel_handler *handler) {
+ (void)handler;
+}
+
+static size_t s_message_overhead(struct aws_channel_handler *handler) {
+ (void)handler;
+
+ return 0;
+}
+
+static struct aws_channel_handler_vtable s_mqtt5_channel_handler_vtable = {
+ .process_read_message = &s_process_read_message,
+ .process_write_message = NULL,
+ .increment_read_window = NULL,
+ .shutdown = &s_shutdown,
+ .initial_window_size = &s_initial_window_size,
+ .message_overhead = &s_message_overhead,
+ .destroy = &s_destroy,
+};
+
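+/* Per MQTT5, reason codes of 0x80 (128) and above indicate failure; anything below is a success code. */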
+static bool s_aws_is_successful_reason_code(int value) {
+ return value < 128;
+}
+
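+/*
+ * Handles a CONNACK received while in the MQTT_CONNECT state.  A failure reason code shuts the channel down;
+ * on success, negotiated settings are applied, an unexpected session rejoin is rejected, and the client
+ * transitions to CONNECTED.
+ */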
+static void s_aws_mqtt5_client_on_connack(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_packet_connack_view *connack_view) {
+ AWS_FATAL_ASSERT(client->current_state == AWS_MCS_MQTT_CONNECT);
+
+ bool is_successful = s_aws_is_successful_reason_code((int)connack_view->reason_code);
+ if (!is_successful) {
+ s_aws_mqtt5_client_emit_final_lifecycle_event(
+ client, AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED, connack_view, NULL);
+
+ enum aws_mqtt5_connect_reason_code reason_code = connack_view->reason_code;
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: connection refused (via failed CONNACK) by remote host with reason code %d(%s)",
+ (void *)client,
+ (int)reason_code,
+ aws_mqtt5_connect_reason_code_to_c_string(reason_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_CONNACK_CONNECTION_REFUSED);
+ return;
+ }
+
+ aws_mqtt5_negotiated_settings_apply_connack(&client->negotiated_settings, connack_view);
+
+ /* Check if a session is being rejoined and perform associated rejoin connect logic here */
+ if (client->negotiated_settings.rejoined_session) {
+ /* Disconnect if the server is attempting to connect the client to an unexpected session */
+ if (!s_should_resume_session(client)) {
+ s_aws_mqtt5_client_emit_final_lifecycle_event(
+ client, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION, connack_view, NULL);
+ s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT_CANCELLED_FOR_CLEAN_SESSION);
+ return;
+ } else if (!client->has_connected_successfully) {
+            /*
+             * We were configured with REJOIN_ALWAYS and this is the first connection. This is technically unsafe,
+             * so log a warning to aid future diagnostics in case it causes problems for the user.
+             */
+ AWS_LOGF_WARN(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: initial connection rejoined existing session. This may cause packet id collisions.",
+ (void *)client);
+ }
+ }
+
+ s_change_current_state(client, AWS_MCS_CONNECTED);
+ s_aws_mqtt5_client_emit_connection_success_lifecycle_event(client, connack_view);
+}
+
+static void s_aws_mqtt5_client_log_received_packet(
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_packet_type type,
+ void *packet_view) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: Received %s packet", (void *)client, aws_mqtt5_packet_type_to_c_string(type));
+
+ switch (type) {
+ case AWS_MQTT5_PT_CONNACK:
+ aws_mqtt5_packet_connack_view_log(packet_view, AWS_LL_DEBUG);
+ break;
+
+ case AWS_MQTT5_PT_PUBLISH:
+ aws_mqtt5_packet_publish_view_log(packet_view, AWS_LL_DEBUG);
+ break;
+
+ case AWS_MQTT5_PT_PUBACK:
+ aws_mqtt5_packet_puback_view_log(packet_view, AWS_LL_DEBUG);
+ break;
+
+ case AWS_MQTT5_PT_SUBACK:
+ aws_mqtt5_packet_suback_view_log(packet_view, AWS_LL_DEBUG);
+ break;
+
+ case AWS_MQTT5_PT_UNSUBACK:
+ aws_mqtt5_packet_unsuback_view_log(packet_view, AWS_LL_DEBUG);
+ break;
+
+ case AWS_MQTT5_PT_PINGRESP:
+ break; /* nothing to log */
+
+ case AWS_MQTT5_PT_DISCONNECT:
+ aws_mqtt5_packet_disconnect_view_log(packet_view, AWS_LL_DEBUG);
+ break;
+
+ default:
+ break;
+ }
+}
+
+static void s_aws_mqtt5_client_mqtt_connect_on_packet_received(
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_packet_type type,
+ void *packet_view) {
+ if (type == AWS_MQTT5_PT_CONNACK) {
+ s_aws_mqtt5_client_on_connack(client, (struct aws_mqtt5_packet_connack_view *)packet_view);
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT, "id=%p: Invalid packet type received while in MQTT_CONNECT state", (void *)client);
+
+ s_aws_mqtt5_client_shutdown_channel_clean(
+ client, AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR, AWS_MQTT5_DRC_PROTOCOL_ERROR);
+ }
+}
+
+typedef bool(aws_linked_list_node_predicate_fn)(struct aws_linked_list_node *);
+
+/*
+ * Predicate that returns true while an operation is a PUBACK or a PINGREQ; it is used below to locate the first
+ * operation in the queue (if any) that is neither.
+ */
+static bool s_is_ping_or_puback(struct aws_linked_list_node *operation_node) {
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(operation_node, struct aws_mqtt5_operation, node);
+
+ return operation->packet_type == AWS_MQTT5_PT_PUBACK || operation->packet_type == AWS_MQTT5_PT_PINGREQ;
+}
+
+/*
+ * Helper function to insert a node (operation) into a list (operation queue) in the correct spot. Currently, this
+ * is only used to enqueue PUBACKs after existing PUBACKs and PINGREQs. This ensures that PUBACKs go out in the
+ * order the corresponding PUBLISHes were received, regardless of whether or not there was an intervening service
+ * call.
+ */
+static void s_insert_node_before_predicate_failure(
+ struct aws_linked_list *list,
+ struct aws_linked_list_node *node,
+ aws_linked_list_node_predicate_fn predicate) {
+ struct aws_linked_list_node *current_node = NULL;
+ for (current_node = aws_linked_list_begin(list); current_node != aws_linked_list_end(list);
+ current_node = aws_linked_list_next(current_node)) {
+ if (!predicate(current_node)) {
+ break;
+ }
+ }
+
+ AWS_FATAL_ASSERT(current_node != NULL);
+
+ aws_linked_list_insert_before(current_node, node);
+}
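+
+/*
+ * Example: with a queue of [PUBACK, PINGREQ, SUBSCRIBE], inserting a new PUBACK with the s_is_ping_or_puback
+ * predicate places it before SUBSCRIBE, yielding [PUBACK, PINGREQ, new PUBACK, SUBSCRIBE].  If every queued
+ * operation is a PUBACK or PINGREQ, the new node is appended at the end.
+ */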
+
+static int s_aws_mqtt5_client_queue_puback(struct aws_mqtt5_client *client, uint16_t packet_id) {
+ AWS_PRECONDITION(client != NULL);
+
+ const struct aws_mqtt5_packet_puback_view puback_view = {
+ .packet_id = packet_id,
+ .reason_code = AWS_MQTT5_PARC_SUCCESS,
+ };
+
+ struct aws_mqtt5_operation_puback *puback_op = aws_mqtt5_operation_puback_new(client->allocator, &puback_view);
+
+ if (puback_op == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: enqueuing PUBACK operation to first position in queue that is not a PUBACK or PINGREQ",
+ (void *)client);
+
+ /*
+ * Put the PUBACK ahead of all user-submitted operations (PUBLISH, SUBSCRIBE, UNSUBSCRIBE, DISCONNECT), but behind
+ * all pre-existing "internal" operations (PINGREQ, PUBACK).
+ *
+ * Qos 2 support will need to extend the predicate to include Qos 2 publish packets.
+ */
+ s_insert_node_before_predicate_failure(
+ &client->operational_state.queued_operations, &puback_op->base.node, s_is_ping_or_puback);
+
+ s_reevaluate_service_task(client);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_mqtt5_client_connected_on_packet_received(
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_packet_type type,
+ void *packet_view) {
+
+ switch (type) {
+ case AWS_MQTT5_PT_PINGRESP:
+ AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: resetting PINGREQ timer", (void *)client);
+ client->next_ping_timeout_time = 0;
+ break;
+
+ case AWS_MQTT5_PT_DISCONNECT:
+ s_aws_mqtt5_client_emit_final_lifecycle_event(
+ client, AWS_ERROR_MQTT5_DISCONNECT_RECEIVED, NULL, packet_view);
+
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "id=%p: shutting down channel due to DISCONNECT", (void *)client);
+
+ s_aws_mqtt5_client_shutdown_channel(client, AWS_ERROR_MQTT5_DISCONNECT_RECEIVED);
+ break;
+
+ case AWS_MQTT5_PT_SUBACK: {
+ uint16_t packet_id = ((const struct aws_mqtt5_packet_suback_view *)packet_view)->packet_id;
+ aws_mqtt5_client_operational_state_handle_ack(
+ &client->operational_state, packet_id, AWS_MQTT5_PT_SUBACK, packet_view, AWS_ERROR_SUCCESS);
+ break;
+ }
+
+ case AWS_MQTT5_PT_UNSUBACK: {
+ uint16_t packet_id = ((const struct aws_mqtt5_packet_unsuback_view *)packet_view)->packet_id;
+ aws_mqtt5_client_operational_state_handle_ack(
+ &client->operational_state, packet_id, AWS_MQTT5_PT_UNSUBACK, packet_view, AWS_ERROR_SUCCESS);
+ break;
+ }
+
+ case AWS_MQTT5_PT_PUBLISH: {
+ const struct aws_mqtt5_packet_publish_view *publish_view = packet_view;
+
+ aws_mqtt5_callback_set_manager_on_publish_received(&client->callback_manager, publish_view);
+
+ /* Send a puback if QoS 1+ */
+ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) {
+
+ int result = s_aws_mqtt5_client_queue_puback(client, publish_view->packet_id);
+ if (result != AWS_OP_SUCCESS) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+                        "id=%p: failed to queue PUBACK with error %d(%s)",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code));
+
+ s_aws_mqtt5_client_shutdown_channel(client, error_code);
+ }
+ }
+ break;
+ }
+
+ case AWS_MQTT5_PT_PUBACK: {
+ uint16_t packet_id = ((const struct aws_mqtt5_packet_puback_view *)packet_view)->packet_id;
+ aws_mqtt5_client_operational_state_handle_ack(
+ &client->operational_state, packet_id, AWS_MQTT5_PT_PUBACK, packet_view, AWS_ERROR_SUCCESS);
+ break;
+ }
+
+ default:
+ break;
+ }
+}
+
+static int s_aws_mqtt5_client_on_packet_received(
+ enum aws_mqtt5_packet_type type,
+ void *packet_view,
+ void *decoder_callback_user_data) {
+
+ struct aws_mqtt5_client *client = decoder_callback_user_data;
+
+ s_aws_mqtt5_client_log_received_packet(client, type, packet_view);
+
+ switch (client->current_state) {
+ case AWS_MCS_MQTT_CONNECT:
+ s_aws_mqtt5_client_mqtt_connect_on_packet_received(client, type, packet_view);
+ break;
+
+ case AWS_MCS_CONNECTED:
+ case AWS_MCS_CLEAN_DISCONNECT:
+ s_aws_mqtt5_client_connected_on_packet_received(client, type, packet_view);
+ break;
+
+ default:
+ break;
+ }
+
+ s_reevaluate_service_task(client);
+
+ return AWS_OP_SUCCESS;
+}
+
+static uint64_t s_aws_high_res_clock_get_ticks_proxy(void) {
+ uint64_t current_time = 0;
+ AWS_FATAL_ASSERT(aws_high_res_clock_get_ticks(&current_time) == AWS_OP_SUCCESS);
+
+ return current_time;
+}
+
+struct aws_io_message *s_aws_channel_acquire_message_from_pool_default(
+ struct aws_channel *channel,
+ enum aws_io_message_type message_type,
+ size_t size_hint,
+ void *user_data) {
+ (void)user_data;
+
+ return aws_channel_acquire_message_from_pool(channel, message_type, size_hint);
+}
+
+static int s_aws_channel_slot_send_message_default(
+ struct aws_channel_slot *slot,
+ struct aws_io_message *message,
+ enum aws_channel_direction dir,
+ void *user_data) {
+ (void)user_data;
+
+ return aws_channel_slot_send_message(slot, message, dir);
+}
+
+static struct aws_mqtt5_client_vtable s_default_client_vtable = {
+ .get_current_time_fn = s_aws_high_res_clock_get_ticks_proxy,
+ .channel_shutdown_fn = aws_channel_shutdown,
+ .websocket_connect_fn = aws_websocket_client_connect,
+ .client_bootstrap_new_socket_channel_fn = aws_client_bootstrap_new_socket_channel,
+ .http_proxy_new_socket_channel_fn = aws_http_proxy_new_socket_channel,
+ .on_client_state_change_callback_fn = NULL,
+ .aws_channel_acquire_message_from_pool_fn = s_aws_channel_acquire_message_from_pool_default,
+ .aws_channel_slot_send_message_fn = s_aws_channel_slot_send_message_default,
+
+ .vtable_user_data = NULL,
+};
+
+void aws_mqtt5_client_set_vtable(struct aws_mqtt5_client *client, const struct aws_mqtt5_client_vtable *vtable) {
+ client->vtable = vtable;
+}
+
+const struct aws_mqtt5_client_vtable *aws_mqtt5_client_get_default_vtable(void) {
+ return &s_default_client_vtable;
+}
+
+struct aws_mqtt5_client *aws_mqtt5_client_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client_options *options) {
+ AWS_FATAL_ASSERT(allocator != NULL);
+ AWS_FATAL_ASSERT(options != NULL);
+
+ struct aws_mqtt5_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_client));
+ if (client == NULL) {
+ return NULL;
+ }
+
+ aws_task_init(&client->service_task, s_mqtt5_service_task_fn, client, "Mqtt5Service");
+
+ client->allocator = allocator;
+ client->vtable = &s_default_client_vtable;
+
+ aws_ref_count_init(&client->ref_count, client, s_on_mqtt5_client_zero_ref_count);
+
+ aws_mqtt5_callback_set_manager_init(&client->callback_manager, client);
+
+ if (aws_mqtt5_client_operational_state_init(&client->operational_state, allocator, client)) {
+ goto on_error;
+ }
+
+ client->config = aws_mqtt5_client_options_storage_new(allocator, options);
+ if (client->config == NULL) {
+ goto on_error;
+ }
+
+ aws_mqtt5_client_flow_control_state_init(client);
+
+ /* all client activity will take place on this event loop, serializing things like reconnect, ping, etc... */
+ client->loop = aws_event_loop_group_get_next_loop(client->config->bootstrap->event_loop_group);
+ if (client->loop == NULL) {
+ goto on_error;
+ }
+
+ client->desired_state = AWS_MCS_STOPPED;
+ client->current_state = AWS_MCS_STOPPED;
+ client->lifecycle_state = AWS_MQTT5_LS_NONE;
+
+ struct aws_mqtt5_decoder_options decoder_options = {
+ .callback_user_data = client,
+ .on_packet_received = s_aws_mqtt5_client_on_packet_received,
+ };
+
+ if (aws_mqtt5_decoder_init(&client->decoder, allocator, &decoder_options)) {
+ goto on_error;
+ }
+
+ struct aws_mqtt5_encoder_options encoder_options = {
+ .client = client,
+ };
+
+ if (aws_mqtt5_encoder_init(&client->encoder, allocator, &encoder_options)) {
+ goto on_error;
+ }
+
+ if (aws_mqtt5_inbound_topic_alias_resolver_init(&client->inbound_topic_alias_resolver, allocator)) {
+ goto on_error;
+ }
+
+ client->outbound_topic_alias_resolver = aws_mqtt5_outbound_topic_alias_resolver_new(
+ allocator, client->config->topic_aliasing_options.outbound_topic_alias_behavior);
+ if (client->outbound_topic_alias_resolver == NULL) {
+ goto on_error;
+ }
+
+ if (aws_mqtt5_negotiated_settings_init(
+ allocator, &client->negotiated_settings, &options->connect_options->client_id)) {
+ goto on_error;
+ }
+
+ client->current_reconnect_delay_ms = 0;
+
+ client->handler.alloc = client->allocator;
+ client->handler.vtable = &s_mqtt5_channel_handler_vtable;
+ client->handler.impl = client;
+
+ aws_mqtt5_client_options_storage_log(client->config, AWS_LL_DEBUG);
+
+ s_init_statistics(&client->operation_statistics_impl);
+
+ return client;
+
+on_error:
+
+ /* release isn't usable here since we may not even have an event loop */
+ s_mqtt5_client_final_destroy(client);
+
+ return NULL;
+}
+
+struct aws_mqtt5_client *aws_mqtt5_client_acquire(struct aws_mqtt5_client *client) {
+ if (client != NULL) {
+ aws_ref_count_acquire(&client->ref_count);
+ }
+
+ return client;
+}
+
+struct aws_mqtt5_client *aws_mqtt5_client_release(struct aws_mqtt5_client *client) {
+ if (client != NULL) {
+ aws_ref_count_release(&client->ref_count);
+ }
+
+ return NULL;
+}
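+
+/*
+ * Illustrative usage sketch (assumes a fully-populated `options` struct; error handling elided):
+ *
+ *     struct aws_mqtt5_client *client = aws_mqtt5_client_new(allocator, &options);
+ *     aws_mqtt5_client_start(client);
+ *     ...
+ *     aws_mqtt5_client_stop(client, NULL, NULL);  // NULL options: stop immediately, no DISCONNECT packet
+ *     aws_mqtt5_client_release(client);           // final teardown completes asynchronously on the event loop
+ */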
+
+struct aws_mqtt_change_desired_state_task {
+ struct aws_task task;
+ struct aws_allocator *allocator;
+ struct aws_mqtt5_client *client;
+ enum aws_mqtt5_client_state desired_state;
+ struct aws_mqtt5_operation_disconnect *disconnect_operation;
+};
+
+static void s_change_state_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ struct aws_mqtt_change_desired_state_task *change_state_task = arg;
+ struct aws_mqtt5_client *client = change_state_task->client;
+ enum aws_mqtt5_client_state desired_state = change_state_task->desired_state;
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto done;
+ }
+
+ if (client->desired_state != desired_state) {
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: changing desired client state from %s to %s",
+ (void *)client,
+ aws_mqtt5_client_state_to_c_string(client->desired_state),
+ aws_mqtt5_client_state_to_c_string(desired_state));
+
+ client->desired_state = desired_state;
+
+ struct aws_mqtt5_operation_disconnect *disconnect_op = change_state_task->disconnect_operation;
+ if (desired_state == AWS_MCS_STOPPED && disconnect_op != NULL) {
+ s_aws_mqtt5_client_shutdown_channel_with_disconnect(
+ client, AWS_ERROR_MQTT5_USER_REQUESTED_STOP, disconnect_op);
+ }
+
+ s_reevaluate_service_task(client);
+ }
+
+done:
+
+ aws_mqtt5_operation_disconnect_release(change_state_task->disconnect_operation);
+ if (desired_state != AWS_MCS_TERMINATED) {
+ aws_mqtt5_client_release(client);
+ }
+
+ aws_mem_release(change_state_task->allocator, change_state_task);
+}
+
+static struct aws_mqtt_change_desired_state_task *s_aws_mqtt_change_desired_state_task_new(
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_client_state desired_state,
+ struct aws_mqtt5_operation_disconnect *disconnect_operation) {
+
+ struct aws_mqtt_change_desired_state_task *change_state_task =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt_change_desired_state_task));
+ if (change_state_task == NULL) {
+ return NULL;
+ }
+
+ aws_task_init(&change_state_task->task, s_change_state_task_fn, (void *)change_state_task, "ChangeStateTask");
+ change_state_task->allocator = client->allocator;
+ change_state_task->client = (desired_state == AWS_MCS_TERMINATED) ? client : aws_mqtt5_client_acquire(client);
+ change_state_task->desired_state = desired_state;
+ change_state_task->disconnect_operation = aws_mqtt5_operation_disconnect_acquire(disconnect_operation);
+
+ return change_state_task;
+}
+
+static bool s_is_valid_desired_state(enum aws_mqtt5_client_state desired_state) {
+ switch (desired_state) {
+ case AWS_MCS_STOPPED:
+ case AWS_MCS_CONNECTED:
+ case AWS_MCS_TERMINATED:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static int s_aws_mqtt5_client_change_desired_state(
+ struct aws_mqtt5_client *client,
+ enum aws_mqtt5_client_state desired_state,
+ struct aws_mqtt5_operation_disconnect *disconnect_operation) {
+ AWS_FATAL_ASSERT(client != NULL);
+ AWS_FATAL_ASSERT(client->loop != NULL);
+ AWS_FATAL_ASSERT(disconnect_operation == NULL || desired_state == AWS_MCS_STOPPED);
+
+ if (!s_is_valid_desired_state(desired_state)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: invalid desired state argument %d(%s)",
+ (void *)client,
+ (int)desired_state,
+ aws_mqtt5_client_state_to_c_string(desired_state));
+
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ struct aws_mqtt_change_desired_state_task *task =
+ s_aws_mqtt_change_desired_state_task_new(client->allocator, client, desired_state, disconnect_operation);
+ if (task == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: failed to create change desired state task", (void *)client);
+ return AWS_OP_ERR;
+ }
+
+ aws_event_loop_schedule_task_now(client->loop, &task->task);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_client_start(struct aws_mqtt5_client *client) {
+ return s_aws_mqtt5_client_change_desired_state(client, AWS_MCS_CONNECTED, NULL);
+}
+
+int aws_mqtt5_client_stop(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_disconnect_view *options,
+ const struct aws_mqtt5_disconnect_completion_options *completion_options) {
+ AWS_FATAL_ASSERT(client != NULL);
+ struct aws_mqtt5_operation_disconnect *disconnect_op = NULL;
+ if (options != NULL) {
+ struct aws_mqtt5_disconnect_completion_options internal_completion_options = {
+ .completion_callback = s_on_disconnect_operation_complete,
+ .completion_user_data = client,
+ };
+
+ disconnect_op = aws_mqtt5_operation_disconnect_new(
+ client->allocator, options, completion_options, &internal_completion_options);
+ if (disconnect_op == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT, "id=%p: failed to create requested DISCONNECT operation", (void *)client);
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: Stopping client via DISCONNECT operation (%p)",
+ (void *)client,
+ (void *)disconnect_op);
+ aws_mqtt5_packet_disconnect_view_log(disconnect_op->base.packet_view, AWS_LL_DEBUG);
+ } else {
+ AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: Stopping client immediately", (void *)client);
+ }
+
+ int result = s_aws_mqtt5_client_change_desired_state(client, AWS_MCS_STOPPED, disconnect_op);
+
+ aws_mqtt5_operation_disconnect_release(disconnect_op);
+
+ return result;
+}
+
+struct aws_mqtt5_submit_operation_task {
+ struct aws_task task;
+ struct aws_allocator *allocator;
+ struct aws_mqtt5_client *client;
+ struct aws_mqtt5_operation *operation;
+};
+
+static void s_mqtt5_submit_operation_task_fn(struct aws_task *task, void *arg, enum aws_task_status status) {
+ (void)task;
+
+ int completion_error_code = AWS_ERROR_MQTT5_CLIENT_TERMINATED;
+ struct aws_mqtt5_submit_operation_task *submit_operation_task = arg;
+
+    /*
+     * Take a ref to the operation; this represents the client taking ownership.
+     * If we subsequently reject it (task cancel or offline queue policy), then the operation completion
+     * will undo this ref acquisition.
+     */
+ aws_mqtt5_operation_acquire(submit_operation_task->operation);
+
+ if (status != AWS_TASK_STATUS_RUN_READY) {
+ goto error;
+ }
+
+ /*
+ * If we're offline and this operation doesn't meet the requirements of the offline queue retention policy,
+ * fail it immediately.
+ */
+ struct aws_mqtt5_client *client = submit_operation_task->client;
+ struct aws_mqtt5_operation *operation = submit_operation_task->operation;
+ if (client->current_state != AWS_MCS_CONNECTED) {
+ if (!s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(
+ operation, client->config->offline_queue_behavior)) {
+ completion_error_code = AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY;
+ goto error;
+ }
+ }
+
+ /* newly-submitted operations must have a 0 packet id */
+ aws_mqtt5_operation_set_packet_id(submit_operation_task->operation, 0);
+
+ s_enqueue_operation_back(submit_operation_task->client, submit_operation_task->operation);
+ aws_mqtt5_client_statistics_change_operation_statistic_state(
+ submit_operation_task->client, submit_operation_task->operation, AWS_MQTT5_OSS_INCOMPLETE);
+
+ goto done;
+
+error:
+
+ s_complete_operation(NULL, submit_operation_task->operation, completion_error_code, AWS_MQTT5_PT_NONE, NULL);
+
+done:
+
+ aws_mqtt5_operation_release(submit_operation_task->operation);
+ aws_mqtt5_client_release(submit_operation_task->client);
+
+ aws_mem_release(submit_operation_task->allocator, submit_operation_task);
+}
+
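+/*
+ * All operation submissions are marshaled onto the client's event loop via a task so that the operational state
+ * is only ever touched from that thread.
+ */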
+static int s_submit_operation(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) {
+ struct aws_mqtt5_submit_operation_task *submit_task =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct aws_mqtt5_submit_operation_task));
+ if (submit_task == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ aws_task_init(&submit_task->task, s_mqtt5_submit_operation_task_fn, submit_task, "Mqtt5SubmitOperation");
+ submit_task->allocator = client->allocator;
+ submit_task->client = aws_mqtt5_client_acquire(client);
+ submit_task->operation = operation;
+
+ aws_event_loop_schedule_task_now(client->loop, &submit_task->task);
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_client_publish(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_publish_view *publish_options,
+ const struct aws_mqtt5_publish_completion_options *completion_options) {
+
+ AWS_PRECONDITION(client != NULL);
+ AWS_PRECONDITION(publish_options != NULL);
+
+ struct aws_mqtt5_operation_publish *publish_op =
+ aws_mqtt5_operation_publish_new(client->allocator, client, publish_options, completion_options);
+
+ if (publish_op == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(AWS_LS_MQTT5_CLIENT, "id=%p: Submitting PUBLISH operation (%p)", (void *)client, (void *)publish_op);
+ aws_mqtt5_packet_publish_view_log(publish_op->base.packet_view, AWS_LL_DEBUG);
+
+ if (s_submit_operation(client, &publish_op->base)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ aws_mqtt5_operation_release(&publish_op->base);
+
+ return AWS_OP_ERR;
+}
+
+int aws_mqtt5_client_subscribe(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_options,
+ const struct aws_mqtt5_subscribe_completion_options *completion_options) {
+
+ AWS_PRECONDITION(client != NULL);
+ AWS_PRECONDITION(subscribe_options != NULL);
+
+ struct aws_mqtt5_operation_subscribe *subscribe_op =
+ aws_mqtt5_operation_subscribe_new(client->allocator, client, subscribe_options, completion_options);
+
+ if (subscribe_op == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: Submitting SUBSCRIBE operation (%p)", (void *)client, (void *)subscribe_op);
+ aws_mqtt5_packet_subscribe_view_log(subscribe_op->base.packet_view, AWS_LL_DEBUG);
+
+ if (s_submit_operation(client, &subscribe_op->base)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ aws_mqtt5_operation_release(&subscribe_op->base);
+
+ return AWS_OP_ERR;
+}
+
+int aws_mqtt5_client_unsubscribe(
+ struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options,
+ const struct aws_mqtt5_unsubscribe_completion_options *completion_options) {
+
+ AWS_PRECONDITION(client != NULL);
+ AWS_PRECONDITION(unsubscribe_options != NULL);
+
+ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op =
+ aws_mqtt5_operation_unsubscribe_new(client->allocator, client, unsubscribe_options, completion_options);
+
+ if (unsubscribe_op == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: Submitting UNSUBSCRIBE operation (%p)", (void *)client, (void *)unsubscribe_op);
+ aws_mqtt5_packet_unsubscribe_view_log(unsubscribe_op->base.packet_view, AWS_LL_DEBUG);
+
+ if (s_submit_operation(client, &unsubscribe_op->base)) {
+ goto error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ aws_mqtt5_operation_release(&unsubscribe_op->base);
+
+ return AWS_OP_ERR;
+}
+
+static bool s_needs_packet_id(const struct aws_mqtt5_operation *operation) {
+ switch (operation->packet_type) {
+ case AWS_MQTT5_PT_SUBSCRIBE:
+ case AWS_MQTT5_PT_UNSUBSCRIBE:
+ return aws_mqtt5_operation_get_packet_id(operation) == 0;
+
+ case AWS_MQTT5_PT_PUBLISH: {
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return false;
+ }
+
+ return aws_mqtt5_operation_get_packet_id(operation) == 0;
+ }
+
+ default:
+ return false;
+ }
+}
+
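+/* Packet id 0 is reserved, so the id counter wraps from 65535 back to 1. */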
+static uint16_t s_next_packet_id(uint16_t current_id) {
+ if (++current_id == 0) {
+ current_id = 1;
+ }
+
+ return current_id;
+}
+
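+/*
+ * Binds an unused packet id to the operation (if it needs one) by linearly probing the unacked-operations table,
+ * starting at next_mqtt_packet_id.  If all 65535 ids are in use, binding fails with AWS_ERROR_INVALID_STATE.
+ */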
+int aws_mqtt5_operation_bind_packet_id(
+ struct aws_mqtt5_operation *operation,
+ struct aws_mqtt5_client_operational_state *client_operational_state) {
+ if (!s_needs_packet_id(operation)) {
+ return AWS_OP_SUCCESS;
+ }
+
+ uint16_t current_id = client_operational_state->next_mqtt_packet_id;
+ struct aws_hash_element *elem = NULL;
+ for (uint16_t i = 0; i < UINT16_MAX; ++i) {
+ aws_hash_table_find(&client_operational_state->unacked_operations_table, &current_id, &elem);
+
+ if (elem == NULL) {
+ aws_mqtt5_operation_set_packet_id(operation, current_id);
+ client_operational_state->next_mqtt_packet_id = s_next_packet_id(current_id);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ current_id = s_next_packet_id(current_id);
+ }
+
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ return AWS_OP_ERR;
+}
+
+int aws_mqtt5_client_operational_state_init(
+ struct aws_mqtt5_client_operational_state *client_operational_state,
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_client *client) {
+
+ aws_linked_list_init(&client_operational_state->queued_operations);
+ aws_linked_list_init(&client_operational_state->write_completion_operations);
+ aws_linked_list_init(&client_operational_state->unacked_operations);
+
+ if (aws_hash_table_init(
+ &client_operational_state->unacked_operations_table,
+ allocator,
+ sizeof(struct aws_mqtt5_operation *),
+ s_hash_uint16_t,
+ s_uint16_t_eq,
+ NULL,
+ NULL)) {
+ return AWS_OP_ERR;
+ }
+
+ client_operational_state->next_mqtt_packet_id = 1;
+ client_operational_state->current_operation = NULL;
+ client_operational_state->client = client;
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_client_operational_state_clean_up(struct aws_mqtt5_client_operational_state *client_operational_state) {
+ AWS_ASSERT(client_operational_state->current_operation == NULL);
+
+ s_aws_mqtt5_client_operational_state_reset(client_operational_state, AWS_ERROR_MQTT5_CLIENT_TERMINATED, true);
+}
+
+static bool s_filter_queued_operations_for_offline(struct aws_mqtt5_operation *operation, void *context) {
+ struct aws_mqtt5_client *client = context;
+ enum aws_mqtt5_client_operation_queue_behavior_type queue_behavior = client->config->offline_queue_behavior;
+
+ return !s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(operation, queue_behavior);
+}
+
+static void s_process_unacked_operations_for_disconnect(struct aws_mqtt5_operation *operation, void *context) {
+ (void)context;
+
+ if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) {
+ struct aws_mqtt5_packet_publish_view *publish_view =
+ (struct aws_mqtt5_packet_publish_view *)operation->packet_view;
+ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ publish_view->duplicate = true;
+ return;
+ }
+ }
+
+ aws_mqtt5_operation_set_packet_id(operation, 0);
+}
+
+static bool s_filter_unacked_operations_for_offline(struct aws_mqtt5_operation *operation, void *context) {
+ struct aws_mqtt5_client *client = context;
+ enum aws_mqtt5_client_operation_queue_behavior_type queue_behavior = client->config->offline_queue_behavior;
+
+ if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) {
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return false;
+ }
+ }
+
+ return !s_aws_mqtt5_operation_satisfies_offline_queue_retention_policy(operation, queue_behavior);
+}
+
+/*
+ * Resets the client's operational state based on a disconnection (from above comment):
+ *
+ * If current_operation
+ * move current_operation to head of queued_operations
+ * Fail all operations in the pending write completion list
+ * Fail, remove, and release operations in queued_operations where they fail the offline queue policy
+ * Iterate unacked_operations:
+ * If qos1+ publish
+ * set dup flag
+ * else
+ * unset/release packet id
+ * Fail, remove, and release unacked_operations if:
+ * (1) They fail the offline queue policy AND
+ * (2) the operation is not Qos 1+ publish
+ *
+ * Clears the unacked_operations table
+ */
+void aws_mqtt5_client_on_disconnection_update_operational_state(struct aws_mqtt5_client *client) {
+ struct aws_mqtt5_client_operational_state *client_operational_state = &client->operational_state;
+
+ /* move current operation to the head of the queue */
+ if (client_operational_state->current_operation != NULL) {
+ aws_linked_list_push_front(
+ &client_operational_state->queued_operations, &client_operational_state->current_operation->node);
+ client_operational_state->current_operation = NULL;
+ }
+
+ /* fail everything in pending write completion */
+ s_complete_operation_list(
+ client,
+ &client_operational_state->write_completion_operations,
+ AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY);
+
+ struct aws_linked_list operations_to_fail;
+ AWS_ZERO_STRUCT(operations_to_fail);
+ aws_linked_list_init(&operations_to_fail);
+
+ /* fail everything in the pending queue that doesn't meet the offline queue behavior retention requirements */
+ s_filter_operation_list(
+ &client_operational_state->queued_operations,
+ s_filter_queued_operations_for_offline,
+ &operations_to_fail,
+ client);
+ s_complete_operation_list(
+ client, &operations_to_fail, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY);
+
+ /* Mark unacked qos1+ publishes as duplicate and release packet ids for non qos1+ publish */
+ s_apply_to_operation_list(
+ &client_operational_state->unacked_operations, s_process_unacked_operations_for_disconnect, NULL);
+
+ /*
+ * fail everything in the pending queue that
+ * (1) isn't a qos1+ publish AND
+ * (2) doesn't meet the offline queue behavior retention requirements
+ */
+ s_filter_operation_list(
+ &client_operational_state->unacked_operations,
+ s_filter_unacked_operations_for_offline,
+ &operations_to_fail,
+ client);
+ s_complete_operation_list(
+ client, &operations_to_fail, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY);
+
+ aws_hash_table_clear(&client->operational_state.unacked_operations_table);
+
+ /*
+ * Prevents inbound resolution on the highly unlikely, illegal server behavior of sending a PUBLISH before
+ * a CONNACK on next connection establishment.
+ */
+ aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&client->decoder, NULL);
+}
+
+static void s_set_operation_list_statistic_state(
+ struct aws_mqtt5_client *client,
+ struct aws_linked_list *operation_list,
+ enum aws_mqtt5_operation_statistic_state_flags new_state_flags) {
+ struct aws_linked_list_node *node = aws_linked_list_begin(operation_list);
+ while (node != aws_linked_list_end(operation_list)) {
+ struct aws_mqtt5_operation *operation = AWS_CONTAINER_OF(node, struct aws_mqtt5_operation, node);
+ node = aws_linked_list_next(node);
+
+ aws_mqtt5_client_statistics_change_operation_statistic_state(client, operation, new_state_flags);
+ }
+}
+
+static bool s_filter_unacked_operations_for_session_rejoin(struct aws_mqtt5_operation *operation, void *context) {
+ (void)context;
+
+ if (operation->packet_type == AWS_MQTT5_PT_PUBLISH) {
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
+ * Updates the client's operational state based on a successfully established connection event:
+ *
+ * if rejoined_session:
+ * Move-and-append all non-qos1+-publishes in unacked_operations to the front of queued_operations
+ * Move-and-append remaining operations (qos1+ publishes) to the front of queued_operations
+ * else:
+ * Fail, remove, and release unacked_operations that fail the offline queue policy
+ * Move and append unacked operations to front of queued_operations
+ */
+void aws_mqtt5_client_on_connection_update_operational_state(struct aws_mqtt5_client *client) {
+
+ struct aws_mqtt5_client_operational_state *client_operational_state = &client->operational_state;
+
+ if (client->negotiated_settings.rejoined_session) {
+ struct aws_linked_list requeued_operations;
+ AWS_ZERO_STRUCT(requeued_operations);
+ aws_linked_list_init(&requeued_operations);
+
+ /*
+ * qos1+ publishes must go out first, so split the unacked operation list into two sets: qos1+ publishes and
+ * everything else.
+ */
+ s_filter_operation_list(
+ &client_operational_state->unacked_operations,
+ s_filter_unacked_operations_for_session_rejoin,
+ &requeued_operations,
+ client);
+
+ /*
+ * Put non-qos1+ publishes on the front of the pending queue
+ */
+ aws_linked_list_move_all_front(&client->operational_state.queued_operations, &requeued_operations);
+
+ /*
+ * Put qos1+ publishes on the front of the pending queue
+ */
+ aws_linked_list_move_all_front(
+ &client->operational_state.queued_operations, &client_operational_state->unacked_operations);
+ } else {
+ struct aws_linked_list failed_operations;
+ AWS_ZERO_STRUCT(failed_operations);
+ aws_linked_list_init(&failed_operations);
+
+ s_filter_operation_list(
+ &client_operational_state->unacked_operations,
+ s_filter_queued_operations_for_offline,
+ &failed_operations,
+ client);
+
+        /*
+         * fail operations that we aren't going to requeue. In this particular case that's only the qos1+ publishes
+         * we didn't fail earlier because we didn't yet know whether we would rejoin a session.
+         */
+ s_complete_operation_list(
+ client, &failed_operations, AWS_ERROR_MQTT5_OPERATION_FAILED_DUE_TO_OFFLINE_QUEUE_POLICY);
+
+ /* requeue operations that we are going to perform again */
+ aws_linked_list_move_all_front(
+ &client->operational_state.queued_operations, &client->operational_state.unacked_operations);
+ }
+
+ /* set everything remaining to incomplete */
+ s_set_operation_list_statistic_state(
+ client, &client->operational_state.queued_operations, AWS_MQTT5_OSS_INCOMPLETE);
+
+ aws_mqtt5_client_flow_control_state_reset(client);
+
+ uint16_t inbound_alias_maximum = client->negotiated_settings.topic_alias_maximum_to_client;
+
+ if (aws_mqtt5_inbound_topic_alias_resolver_reset(&client->inbound_topic_alias_resolver, inbound_alias_maximum)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: client unable to reset inbound alias resolver",
+ (void *)client_operational_state->client);
+ goto on_error;
+ }
+
+ if (inbound_alias_maximum > 0) {
+ aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&client->decoder, &client->inbound_topic_alias_resolver);
+ } else {
+ aws_mqtt5_decoder_set_inbound_topic_alias_resolver(&client->decoder, NULL);
+ }
+
+ uint16_t outbound_alias_maximum = client->negotiated_settings.topic_alias_maximum_to_server;
+ if (aws_mqtt5_outbound_topic_alias_resolver_reset(client->outbound_topic_alias_resolver, outbound_alias_maximum)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: client unable to reset outbound alias resolver",
+ (void *)client_operational_state->client);
+ goto on_error;
+ }
+
+ aws_mqtt5_encoder_set_outbound_topic_alias_resolver(&client->encoder, client->outbound_topic_alias_resolver);
+
+ return;
+
+on_error:
+
+ s_aws_mqtt5_client_shutdown_channel(client, aws_last_error());
+}
+
+static bool s_aws_mqtt5_client_has_pending_operational_work(
+ const struct aws_mqtt5_client_operational_state *client_operational_state,
+ enum aws_mqtt5_client_state client_state) {
+ if (aws_linked_list_empty(&client_operational_state->queued_operations)) {
+ return false;
+ }
+
+ struct aws_linked_list_node *next_operation_node =
+ aws_linked_list_front(&client_operational_state->queued_operations);
+ struct aws_mqtt5_operation *next_operation =
+ AWS_CONTAINER_OF(next_operation_node, struct aws_mqtt5_operation, node);
+
+ switch (client_state) {
+ case AWS_MCS_MQTT_CONNECT:
+ /* Only allowed to send a CONNECT packet in this state */
+ return next_operation->packet_type == AWS_MQTT5_PT_CONNECT;
+
+ case AWS_MCS_CLEAN_DISCONNECT:
+ /* Except for finishing the current operation, only allowed to send a DISCONNECT packet in this state */
+ return next_operation->packet_type == AWS_MQTT5_PT_DISCONNECT;
+
+ case AWS_MCS_CONNECTED:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static uint64_t s_aws_mqtt5_client_compute_next_operation_flow_control_service_time(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation,
+ uint64_t now) {
+ (void)operation;
+
+ switch (client->current_state) {
+ case AWS_MCS_MQTT_CONNECT:
+ case AWS_MCS_CLEAN_DISCONNECT:
+ return now;
+
+ case AWS_MCS_CONNECTED:
+ return aws_mqtt5_client_flow_control_state_get_next_operation_service_time(client, operation, now);
+
+ default:
+ /* no outbound traffic is allowed outside of the above states */
+ return 0;
+ }
+}
+
+/*
+ * We don't presently know if IoT Core's throughput limit is on the plaintext or encrypted data stream. Assume
+ * it's on the encrypted stream for now and make a reasonable guess at the additional cost TLS imposes on data size:
+ *
+ * This calculation is intended to be a reasonable default but will not be accurate in all cases
+ *
+ * Estimate the # of ethernet frames (max 1444 bytes) and add in potential TLS framing and padding values per frame.
+ *
+ * TODO: query IoT Core to determine if this calculation is needed after all
+ * TODO: may eventually want to expose the ethernet frame size here as a configurable option for networks that have a
+ * lower MTU
+ *
+ * References:
+ * https://tools.ietf.org/id/draft-mattsson-uta-tls-overhead-01.xml#rfc.section.3
+ *
+ */
+
+#define ETHERNET_FRAME_MAX_PAYLOAD_SIZE 1500
+#define TCP_SIZE_OVERESTIMATE 72
+#define TLS_FRAMING_AND_PADDING_OVERESTIMATE 64
+#define AVAILABLE_ETHERNET_FRAME_SIZE \
+ (ETHERNET_FRAME_MAX_PAYLOAD_SIZE - (TCP_SIZE_OVERESTIMATE + TLS_FRAMING_AND_PADDING_OVERESTIMATE))
+#define ETHERNET_FRAMES_PER_IO_MESSAGE_ESTIMATE \
+ ((AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH + AVAILABLE_ETHERNET_FRAME_SIZE - 1) / AVAILABLE_ETHERNET_FRAME_SIZE)
+#define THROUGHPUT_TOKENS_PER_IO_MESSAGE_OVERESTIMATE \
+ (AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH + \
+ ETHERNET_FRAMES_PER_IO_MESSAGE_ESTIMATE * TLS_FRAMING_AND_PADDING_OVERESTIMATE)
+
+static uint64_t s_compute_throughput_throttle_wait(const struct aws_mqtt5_client *client, uint64_t now) {
+
+ /* flow control only applies during CONNECTED/CLEAN_DISCONNECT */
+ if (!aws_mqtt5_client_are_negotiated_settings_valid(client)) {
+ return now;
+ }
+
+ uint64_t throughput_wait = 0;
+ if (client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) {
+ throughput_wait = aws_rate_limiter_token_bucket_compute_wait_for_tokens(
+ (struct aws_rate_limiter_token_bucket *)&client->flow_control_state.throughput_throttle,
+ THROUGHPUT_TOKENS_PER_IO_MESSAGE_OVERESTIMATE);
+ }
+
+ return aws_add_u64_saturating(now, throughput_wait);
+}
+
+static uint64_t s_aws_mqtt5_client_compute_operational_state_service_time(
+ const struct aws_mqtt5_client_operational_state *client_operational_state,
+ uint64_t now) {
+ /* If an io message is in transit down the channel, then wait for it to complete */
+ if (client_operational_state->pending_write_completion) {
+ return 0;
+ }
+
+ /* Throughput flow control check */
+ uint64_t next_throttled_time = s_compute_throughput_throttle_wait(client_operational_state->client, now);
+ if (next_throttled_time > now) {
+ return next_throttled_time;
+ }
+
+ /* If we're in the middle of something, keep going */
+ if (client_operational_state->current_operation != NULL) {
+ return now;
+ }
+
+ /* If nothing is queued, there's nothing to do */
+ enum aws_mqtt5_client_state client_state = client_operational_state->client->current_state;
+ if (!s_aws_mqtt5_client_has_pending_operational_work(client_operational_state, client_state)) {
+ return 0;
+ }
+
+ AWS_FATAL_ASSERT(!aws_linked_list_empty(&client_operational_state->queued_operations));
+
+ struct aws_linked_list_node *next_operation_node =
+ aws_linked_list_front(&client_operational_state->queued_operations);
+ struct aws_mqtt5_operation *next_operation =
+ AWS_CONTAINER_OF(next_operation_node, struct aws_mqtt5_operation, node);
+
+ AWS_FATAL_ASSERT(next_operation != NULL);
+
+ /*
+ * Check the head of the pending operation queue against flow control and client state restrictions
+ */
+ return s_aws_mqtt5_client_compute_next_operation_flow_control_service_time(
+ client_operational_state->client, next_operation, now);
+}
+
+static bool s_aws_mqtt5_client_should_service_operational_state(
+ const struct aws_mqtt5_client_operational_state *client_operational_state,
+ uint64_t now) {
+
+ return now == s_aws_mqtt5_client_compute_operational_state_service_time(client_operational_state, now);
+}
+
+static bool s_operation_requires_ack(const struct aws_mqtt5_operation *operation) {
+ switch (operation->packet_type) {
+ case AWS_MQTT5_PT_SUBSCRIBE:
+ case AWS_MQTT5_PT_UNSUBSCRIBE:
+ return true;
+
+ case AWS_MQTT5_PT_PUBLISH: {
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ return publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE;
+ }
+
+ default:
+ return false;
+ }
+}
+
+static void s_on_pingreq_send(struct aws_mqtt5_client *client) {
+ uint64_t now = client->vtable->get_current_time_fn();
+ uint64_t ping_timeout_nanos =
+ aws_timestamp_convert(client->config->ping_timeout_ms, AWS_TIMESTAMP_MILLIS, AWS_TIMESTAMP_NANOS, NULL);
+ client->next_ping_timeout_time = aws_add_u64_saturating(now, ping_timeout_nanos);
+}
+
+static int s_apply_throughput_flow_control(struct aws_mqtt5_client *client) {
+ /* flow control only applies during CONNECTED/CLEAN_DISCONNECT */
+ if (!aws_mqtt5_client_are_negotiated_settings_valid(client)) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (client->config->extended_validation_and_flow_control_options == AWS_MQTT5_EVAFCO_NONE) {
+ return AWS_OP_SUCCESS;
+ }
+
+ return aws_rate_limiter_token_bucket_take_tokens(
+ (struct aws_rate_limiter_token_bucket *)&client->flow_control_state.throughput_throttle,
+ THROUGHPUT_TOKENS_PER_IO_MESSAGE_OVERESTIMATE);
+}
+
+static int s_apply_publish_tps_flow_control(struct aws_mqtt5_client *client, struct aws_mqtt5_operation *operation) {
+ if (client->config->extended_validation_and_flow_control_options == AWS_MQTT5_EVAFCO_NONE) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (operation->packet_type != AWS_MQTT5_PT_PUBLISH) {
+ return AWS_OP_SUCCESS;
+ }
+
+ return aws_rate_limiter_token_bucket_take_tokens(
+ (struct aws_rate_limiter_token_bucket *)&client->flow_control_state.publish_throttle, 1);
+}
+
+int aws_mqtt5_client_service_operational_state(struct aws_mqtt5_client_operational_state *client_operational_state) {
+ struct aws_mqtt5_client *client = client_operational_state->client;
+ struct aws_channel_slot *slot = client->slot;
+ const struct aws_mqtt5_client_vtable *vtable = client->vtable;
+ uint64_t now = (*vtable->get_current_time_fn)();
+
+ /* Should we write data? */
+ bool should_service = s_aws_mqtt5_client_should_service_operational_state(client_operational_state, now);
+ if (!should_service) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (s_apply_throughput_flow_control(client)) {
+ return AWS_OP_SUCCESS;
+ }
+
+ /* If we're going to write data, we need something to write to */
+ struct aws_io_message *io_message = (*vtable->aws_channel_acquire_message_from_pool_fn)(
+ slot->channel, AWS_IO_MESSAGE_APPLICATION_DATA, AWS_MQTT5_IO_MESSAGE_DEFAULT_LENGTH, vtable->vtable_user_data);
+ if (io_message == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ int operational_error_code = AWS_ERROR_SUCCESS;
+
+ do {
+ /* if no current operation, pull one in and setup encode */
+ if (client_operational_state->current_operation == NULL) {
+
+ /*
+ * Loop through queued operations, discarding ones that fail validation, until we run out or find
+ * a good one. Failing validation against negotiated settings is expected to be a rare event.
+ */
+ struct aws_mqtt5_operation *next_operation = NULL;
+ while (!aws_linked_list_empty(&client_operational_state->queued_operations)) {
+ struct aws_linked_list_node *next_operation_node =
+ aws_linked_list_pop_front(&client_operational_state->queued_operations);
+ struct aws_mqtt5_operation *operation =
+ AWS_CONTAINER_OF(next_operation_node, struct aws_mqtt5_operation, node);
+
+ if (s_apply_publish_tps_flow_control(client, operation)) {
+ break;
+ }
+
+ if (!aws_mqtt5_operation_validate_vs_connection_settings(operation, client)) {
+ next_operation = operation;
+ break;
+ }
+
+ enum aws_mqtt5_packet_type packet_type = operation->packet_type;
+ int validation_error_code = aws_last_error();
+ s_complete_operation(client, operation, validation_error_code, AWS_MQTT5_PT_NONE, NULL);
+
+ /* A DISCONNECT packet failing dynamic validation should shut down the whole channel */
+ if (packet_type == AWS_MQTT5_PT_DISCONNECT) {
+ operational_error_code = AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE;
+ break;
+ }
+ }
+
+ if (next_operation != NULL && s_aws_mqtt5_client_set_current_operation(client, next_operation)) {
+ operational_error_code = AWS_ERROR_MQTT5_OPERATION_PROCESSING_FAILURE;
+ break;
+ }
+ }
+
+ struct aws_mqtt5_operation *current_operation = client_operational_state->current_operation;
+ if (current_operation == NULL) {
+ break;
+ }
+
+ /* write current operation to message, handle errors */
+ enum aws_mqtt5_encoding_result encoding_result =
+ aws_mqtt5_encoder_encode_to_buffer(&client->encoder, &io_message->message_data);
+ if (encoding_result == AWS_MQTT5_ER_ERROR) {
+ operational_error_code = AWS_ERROR_MQTT5_ENCODE_FAILURE;
+ break;
+ }
+
+ /* if encoding finished:
+ * push to write completion or unacked
+ * clear current
+ * else (message full)
+ * break
+ */
+ if (encoding_result == AWS_MQTT5_ER_FINISHED) {
+ aws_mqtt5_client_flow_control_state_on_outbound_operation(client, current_operation);
+
+ if (s_operation_requires_ack(current_operation)) {
+ /* track the operation in the unacked data structures by packet id */
+ AWS_FATAL_ASSERT(aws_mqtt5_operation_get_packet_id(current_operation) != 0);
+
+ if (aws_hash_table_put(
+ &client_operational_state->unacked_operations_table,
+ aws_mqtt5_operation_get_packet_id_address(current_operation),
+ current_operation,
+ NULL)) {
+ operational_error_code = aws_last_error();
+ break;
+ }
+
+ if (client->config->ack_timeout_seconds != 0) {
+ current_operation->ack_timeout_timepoint_ns =
+ now + aws_timestamp_convert(
+ client->config->ack_timeout_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_NANOS, NULL);
+ }
+
+ aws_linked_list_push_back(&client_operational_state->unacked_operations, &current_operation->node);
+ aws_mqtt5_client_statistics_change_operation_statistic_state(
+ client, current_operation, AWS_MQTT5_OSS_INCOMPLETE | AWS_MQTT5_OSS_UNACKED);
+ } else {
+ /* no ack is necessary, just add to socket write completion list */
+ aws_linked_list_push_back(
+ &client_operational_state->write_completion_operations, &current_operation->node);
+
+ /*
+ * We special-case setting the ping timeout here. Other possible places are not appropriate:
+ *
+ * (1) Socket write completion - this leads to a race condition where our domain socket tests can
+ * sporadically fail because the PINGRESP is processed before the write completion callback is
+ * invoked.
+ *
+ * (2) Enqueue the ping - if the current operation is a large payload over a poor connection, it may
+ * be an arbitrarily long time before the current operation completes and the ping even has a chance
+                 * to go out, meaning we will trigger a ping timeout before it's even sent.
+ *
+ * Given a reasonable io message size, this is the best place to set the timeout.
+ */
+ if (current_operation->packet_type == AWS_MQTT5_PT_PINGREQ) {
+ s_on_pingreq_send(client);
+ }
+ }
+
+ client->operational_state.current_operation = NULL;
+ } else {
+ AWS_FATAL_ASSERT(encoding_result == AWS_MQTT5_ER_OUT_OF_ROOM);
+ break;
+ }
+
+ now = (*vtable->get_current_time_fn)();
+ should_service = s_aws_mqtt5_client_should_service_operational_state(client_operational_state, now);
+ } while (should_service);
+
+ if (operational_error_code != AWS_ERROR_SUCCESS) {
+ aws_mem_release(io_message->allocator, io_message);
+ return aws_raise_error(operational_error_code);
+ }
+
+ /* It's possible for there to be no data if we serviced operations that failed validation */
+ if (io_message->message_data.len == 0) {
+ aws_mem_release(io_message->allocator, io_message);
+ return AWS_OP_SUCCESS;
+ }
+
+ /* send io_message down channel in write direction, handle errors */
+ io_message->on_completion = s_aws_mqtt5_on_socket_write_completion;
+ io_message->user_data = client_operational_state->client;
+ client_operational_state->pending_write_completion = true;
+
+ if ((*vtable->aws_channel_slot_send_message_fn)(
+ slot, io_message, AWS_CHANNEL_DIR_WRITE, vtable->vtable_user_data)) {
+ client_operational_state->pending_write_completion = false;
+ aws_mem_release(io_message->allocator, io_message);
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_client_operational_state_handle_ack(
+ struct aws_mqtt5_client_operational_state *client_operational_state,
+ aws_mqtt5_packet_id_t packet_id,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet_view,
+ int error_code) {
+
+ if (packet_type == AWS_MQTT5_PT_PUBACK) {
+ aws_mqtt5_client_flow_control_state_on_puback(client_operational_state->client);
+ }
+
+ struct aws_hash_element *elem = NULL;
+ aws_hash_table_find(&client_operational_state->unacked_operations_table, &packet_id, &elem);
+
+ if (elem == NULL || elem->value == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: received an ACK for an unknown operation with id %d",
+ (void *)client_operational_state->client,
+ (int)packet_id);
+ return;
+ } else {
+ AWS_LOGF_TRACE(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: Processing ACK with id %d",
+ (void *)client_operational_state->client,
+ (int)packet_id);
+ }
+
+ struct aws_mqtt5_operation *operation = elem->value;
+
+ aws_linked_list_remove(&operation->node);
+ aws_hash_table_remove(&client_operational_state->unacked_operations_table, &packet_id, NULL, NULL);
+
+ s_complete_operation(client_operational_state->client, operation, error_code, packet_type, packet_view);
+}
+
+bool aws_mqtt5_client_are_negotiated_settings_valid(const struct aws_mqtt5_client *client) {
+ return client->current_state == AWS_MCS_CONNECTED || client->current_state == AWS_MCS_CLEAN_DISCONNECT;
+}
+
+void aws_mqtt5_client_flow_control_state_init(struct aws_mqtt5_client *client) {
+ struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state;
+
+ struct aws_rate_limiter_token_bucket_options publish_throttle_config = {
+ .tokens_per_second = AWS_IOT_CORE_PUBLISH_PER_SECOND_LIMIT,
+ .maximum_token_count = AWS_IOT_CORE_PUBLISH_PER_SECOND_LIMIT,
+ .initial_token_count = 0,
+ };
+ aws_rate_limiter_token_bucket_init(&flow_control->publish_throttle, &publish_throttle_config);
+
+ struct aws_rate_limiter_token_bucket_options throughput_throttle_config = {
+ .tokens_per_second = AWS_IOT_CORE_THROUGHPUT_LIMIT,
+ .maximum_token_count = AWS_IOT_CORE_THROUGHPUT_LIMIT,
+ .initial_token_count = 0,
+ };
+ aws_rate_limiter_token_bucket_init(&flow_control->throughput_throttle, &throughput_throttle_config);
+}
+
+void aws_mqtt5_client_flow_control_state_reset(struct aws_mqtt5_client *client) {
+ struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state;
+
+ AWS_FATAL_ASSERT(aws_mqtt5_client_are_negotiated_settings_valid(client));
+
+ flow_control->unacked_publish_token_count = client->negotiated_settings.receive_maximum_from_server;
+
+ aws_rate_limiter_token_bucket_reset(&client->flow_control_state.publish_throttle);
+ aws_rate_limiter_token_bucket_reset(&client->flow_control_state.throughput_throttle);
+}
+
+void aws_mqtt5_client_flow_control_state_on_puback(struct aws_mqtt5_client *client) {
+ struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state;
+
+ bool was_zero = flow_control->unacked_publish_token_count == 0;
+ flow_control->unacked_publish_token_count = aws_min_u32(
+ client->negotiated_settings.receive_maximum_from_server, flow_control->unacked_publish_token_count + 1);
+
+ if (was_zero) {
+ s_reevaluate_service_task(client);
+ }
+}
+
+void aws_mqtt5_client_flow_control_state_on_outbound_operation(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation) {
+ if (operation->packet_type != AWS_MQTT5_PT_PUBLISH) {
+ return;
+ }
+
+ const struct aws_mqtt5_packet_publish_view *publish_view = operation->packet_view;
+ if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return;
+ }
+
+ struct aws_mqtt5_client_flow_control_state *flow_control = &client->flow_control_state;
+
+ AWS_FATAL_ASSERT(flow_control->unacked_publish_token_count > 0);
+ --flow_control->unacked_publish_token_count;
+}
+
+uint64_t aws_mqtt5_client_flow_control_state_get_next_operation_service_time(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *next_operation,
+ uint64_t now) {
+
+ if (next_operation->packet_type != AWS_MQTT5_PT_PUBLISH) {
+ return now;
+ }
+
+ /* publish tps check */
+ if (client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) {
+ uint64_t publish_wait =
+ aws_rate_limiter_token_bucket_compute_wait_for_tokens(&client->flow_control_state.publish_throttle, 1);
+ if (publish_wait > 0) {
+ return now + publish_wait;
+ }
+ }
+
+ /* receive maximum check */
+ const struct aws_mqtt5_packet_publish_view *publish_view = next_operation->packet_view;
+ if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ return now;
+ }
+
+ if (client->flow_control_state.unacked_publish_token_count > 0) {
+ return now;
+ }
+
+ return 0;
+}
+
+void aws_mqtt5_client_statistics_change_operation_statistic_state(
+ struct aws_mqtt5_client *client,
+ struct aws_mqtt5_operation *operation,
+ enum aws_mqtt5_operation_statistic_state_flags new_state_flags) {
+ enum aws_mqtt5_packet_type packet_type = operation->packet_type;
+ if (packet_type != AWS_MQTT5_PT_PUBLISH && packet_type != AWS_MQTT5_PT_SUBSCRIBE &&
+ packet_type != AWS_MQTT5_PT_UNSUBSCRIBE) {
+ return;
+ }
+
+ if (operation->packet_size == 0) {
+ if (aws_mqtt5_packet_view_get_encoded_size(packet_type, operation->packet_view, &operation->packet_size)) {
+ return;
+ }
+ }
+
+ AWS_FATAL_ASSERT(operation->packet_size > 0);
+ uint64_t packet_size = (uint64_t)operation->packet_size;
+
+ enum aws_mqtt5_operation_statistic_state_flags old_state_flags = operation->statistic_state_flags;
+ if (new_state_flags == old_state_flags) {
+ return;
+ }
+
+ struct aws_mqtt5_client_operation_statistics_impl *stats = &client->operation_statistics_impl;
+
+ if ((old_state_flags & AWS_MQTT5_OSS_INCOMPLETE) != (new_state_flags & AWS_MQTT5_OSS_INCOMPLETE)) {
+ if ((new_state_flags & AWS_MQTT5_OSS_INCOMPLETE) != 0) {
+ aws_atomic_fetch_add(&stats->incomplete_operation_count_atomic, 1);
+ aws_atomic_fetch_add(&stats->incomplete_operation_size_atomic, (size_t)packet_size);
+ } else {
+ aws_atomic_fetch_sub(&stats->incomplete_operation_count_atomic, 1);
+ aws_atomic_fetch_sub(&stats->incomplete_operation_size_atomic, (size_t)packet_size);
+ }
+ }
+
+ if ((old_state_flags & AWS_MQTT5_OSS_UNACKED) != (new_state_flags & AWS_MQTT5_OSS_UNACKED)) {
+ if ((new_state_flags & AWS_MQTT5_OSS_UNACKED) != 0) {
+ aws_atomic_fetch_add(&stats->unacked_operation_count_atomic, 1);
+ aws_atomic_fetch_add(&stats->unacked_operation_size_atomic, (size_t)packet_size);
+ } else {
+ aws_atomic_fetch_sub(&stats->unacked_operation_count_atomic, 1);
+ aws_atomic_fetch_sub(&stats->unacked_operation_size_atomic, (size_t)packet_size);
+ }
+ }
+
+ operation->statistic_state_flags = new_state_flags;
+
+ if (client->vtable != NULL && client->vtable->on_client_statistics_changed_callback_fn != NULL) {
+ (*client->vtable->on_client_statistics_changed_callback_fn)(
+ client, operation, client->vtable->vtable_user_data);
+ }
+}
+
+void aws_mqtt5_client_get_stats(struct aws_mqtt5_client *client, struct aws_mqtt5_client_operation_statistics *stats) {
+ stats->incomplete_operation_count =
+ (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.incomplete_operation_count_atomic);
+ stats->incomplete_operation_size =
+ (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.incomplete_operation_size_atomic);
+ stats->unacked_operation_count =
+ (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.unacked_operation_count_atomic);
+ stats->unacked_operation_size =
+ (uint64_t)aws_atomic_load_int(&client->operation_statistics_impl.unacked_operation_size_atomic);
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
new file mode 100644
index 0000000000..1df6be0b51
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_decoder.c
@@ -0,0 +1,1174 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_decoder.h>
+
+#include <aws/mqtt/private/v5/mqtt5_topic_alias.h>
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+
+#define AWS_MQTT5_DECODER_BUFFER_START_SIZE 2048
+#define PUBLISH_PACKET_FIXED_HEADER_DUPLICATE_FLAG 8
+#define PUBLISH_PACKET_FIXED_HEADER_RETAIN_FLAG 1
+#define PUBLISH_PACKET_FIXED_HEADER_QOS_FLAG 3
+
+static void s_reset_decoder_for_new_packet(struct aws_mqtt5_decoder *decoder) {
+ aws_byte_buf_reset(&decoder->scratch_space, false);
+
+ decoder->packet_first_byte = 0;
+ decoder->remaining_length = 0;
+ AWS_ZERO_STRUCT(decoder->packet_cursor);
+}
+
+static void s_enter_state(struct aws_mqtt5_decoder *decoder, enum aws_mqtt5_decoder_state state) {
+ decoder->state = state;
+
+ if (state == AWS_MQTT5_DS_READ_PACKET_TYPE) {
+ s_reset_decoder_for_new_packet(decoder);
+ } else {
+ aws_byte_buf_reset(&decoder->scratch_space, false);
+ }
+}
+
+static bool s_is_decodable_packet_type(struct aws_mqtt5_decoder *decoder, enum aws_mqtt5_packet_type type) {
+ return (uint32_t)type < AWS_ARRAY_SIZE(decoder->options.decoder_table->decoders_by_packet_type) &&
+ decoder->options.decoder_table->decoders_by_packet_type[type] != NULL;
+}
+
+/*
+ * Every mqtt packet has a first byte that, amongst other things, determines the packet type
+ */
+static int s_aws_mqtt5_decoder_read_packet_type_on_data(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_byte_cursor *data) {
+
+ if (data->len == 0) {
+ return AWS_MQTT5_DRT_MORE_DATA;
+ }
+
+ uint8_t byte = *data->ptr;
+ aws_byte_cursor_advance(data, 1);
+ aws_byte_buf_append_byte_dynamic(&decoder->scratch_space, byte);
+
+ enum aws_mqtt5_packet_type packet_type = (byte >> 4);
+
+ if (!s_is_decodable_packet_type(decoder, packet_type)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: unsupported or illegal packet type value: %d",
+ decoder->options.callback_user_data,
+ (int)packet_type);
+ return AWS_MQTT5_DRT_ERROR;
+ }
+
+ decoder->packet_first_byte = byte;
+
+ s_enter_state(decoder, AWS_MQTT5_DS_READ_REMAINING_LENGTH);
+
+ return AWS_MQTT5_DRT_SUCCESS;
+}
+
+/*
+ * non-streaming variable length integer decode. cursor is updated only if the value was successfully read
+ */
+enum aws_mqtt5_decode_result_type aws_mqtt5_decode_vli(struct aws_byte_cursor *cursor, uint32_t *dest) {
+ uint32_t value = 0;
+ bool more_data = false;
+ size_t bytes_used = 0;
+
+ uint32_t shift = 0;
+
+ struct aws_byte_cursor cursor_copy = *cursor;
+ for (; bytes_used < 4; ++bytes_used) {
+ uint8_t byte = 0;
+ if (!aws_byte_cursor_read_u8(&cursor_copy, &byte)) {
+ return AWS_MQTT5_DRT_MORE_DATA;
+ }
+
+ value |= ((byte & 0x7F) << shift);
+ shift += 7;
+
+ more_data = (byte & 0x80) != 0;
+ if (!more_data) {
+ break;
+ }
+ }
+
+ if (more_data) {
+ /* A variable length integer with the 4th byte high bit set is not valid */
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "(static) aws_mqtt5_decoder - illegal variable length integer encoding");
+ return AWS_MQTT5_DRT_ERROR;
+ }
+
+ aws_byte_cursor_advance(cursor, bytes_used + 1);
+ *dest = value;
+
+ return AWS_MQTT5_DRT_SUCCESS;
+}
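+
+/*
+ * Worked example (editorial note, not part of the upstream source): the MQTT variable length integer carries
+ * 7 bits of value per byte and uses the high bit as a continuation flag. The value 321 is encoded as the two
+ * bytes 0xC1 0x02: the loop above reads 0xC1 (low 7 bits = 65, continuation set), then 0x02 (2 << 7 = 256,
+ * continuation clear), yielding 65 + 256 = 321 and advancing the cursor by bytes_used + 1 == 2 bytes.
+ */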
+
+/* "streaming" variable length integer decode */
+static enum aws_mqtt5_decode_result_type s_aws_mqtt5_decoder_read_vli_on_data(
+ struct aws_mqtt5_decoder *decoder,
+ uint32_t *vli_dest,
+ struct aws_byte_cursor *data) {
+
+ enum aws_mqtt5_decode_result_type decode_vli_result = AWS_MQTT5_DRT_MORE_DATA;
+
+ /* try to decode the vli integer one byte at a time */
+ while (data->len > 0 && decode_vli_result == AWS_MQTT5_DRT_MORE_DATA) {
+ /* append a single byte to the scratch buffer */
+ struct aws_byte_cursor byte_cursor = aws_byte_cursor_advance(data, 1);
+ aws_byte_buf_append_dynamic(&decoder->scratch_space, &byte_cursor);
+
+ /* now try and decode a vli integer based on the range implied by the offset into the buffer */
+ struct aws_byte_cursor vli_cursor = {
+ .ptr = decoder->scratch_space.buffer,
+ .len = decoder->scratch_space.len,
+ };
+
+ decode_vli_result = aws_mqtt5_decode_vli(&vli_cursor, vli_dest);
+ }
+
+ return decode_vli_result;
+}
+
+/* attempts to read the variable length integer that is always the second piece of data in an mqtt packet */
+static enum aws_mqtt5_decode_result_type s_aws_mqtt5_decoder_read_remaining_length_on_data(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_byte_cursor *data) {
+
+ enum aws_mqtt5_decode_result_type result =
+ s_aws_mqtt5_decoder_read_vli_on_data(decoder, &decoder->remaining_length, data);
+ if (result != AWS_MQTT5_DRT_SUCCESS) {
+ return result;
+ }
+
+ s_enter_state(decoder, AWS_MQTT5_DS_READ_PACKET);
+
+ return AWS_MQTT5_DRT_SUCCESS;
+}
+
+/* non-streaming decode of a user property; failure implies connection termination */
+int aws_mqtt5_decode_user_property(
+ struct aws_byte_cursor *packet_cursor,
+ struct aws_mqtt5_user_property_set *properties) {
+ struct aws_mqtt5_user_property property;
+
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(packet_cursor, &property.name, error);
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(packet_cursor, &property.value, error);
+
+ if (aws_array_list_push_back(&properties->properties, &property)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ return AWS_OP_ERR;
+}
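+
+/*
+ * Illustrative example (editorial note, not part of the upstream source): a user property with name "a" and
+ * value "b" arrives as two length-prefixed UTF-8 strings -- 00 01 61 00 01 62 -- after the property identifier
+ * byte (0x26) has already been consumed by the caller's property-type switch.
+ */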
+
+/* decode function for all CONNACK properties */
+static int s_read_connack_property(
+ struct aws_mqtt5_packet_connack_storage *storage,
+ struct aws_byte_cursor *packet_cursor) {
+ int result = AWS_OP_ERR;
+
+ uint8_t property_type = 0;
+ AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done);
+
+ struct aws_mqtt5_packet_connack_view *storage_view = &storage->storage_view;
+
+ switch (property_type) {
+ case AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL:
+ AWS_MQTT5_DECODE_U32_OPTIONAL(
+ packet_cursor, &storage->session_expiry_interval, &storage_view->session_expiry_interval, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM:
+ AWS_MQTT5_DECODE_U16_OPTIONAL(
+ packet_cursor, &storage->receive_maximum, &storage_view->receive_maximum, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_QOS:
+ AWS_MQTT5_DECODE_U8_OPTIONAL(packet_cursor, &storage->maximum_qos, &storage_view->maximum_qos, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_RETAIN_AVAILABLE:
+ AWS_MQTT5_DECODE_U8_OPTIONAL(
+ packet_cursor, &storage->retain_available, &storage_view->retain_available, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE:
+ AWS_MQTT5_DECODE_U32_OPTIONAL(
+ packet_cursor, &storage->maximum_packet_size, &storage_view->maximum_packet_size, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_ASSIGNED_CLIENT_IDENTIFIER:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->assigned_client_identifier, &storage_view->assigned_client_identifier, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM:
+ AWS_MQTT5_DECODE_U16_OPTIONAL(
+ packet_cursor, &storage->topic_alias_maximum, &storage_view->topic_alias_maximum, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->reason_string, &storage_view->reason_string, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_WILDCARD_SUBSCRIPTIONS_AVAILABLE:
+ AWS_MQTT5_DECODE_U8_OPTIONAL(
+ packet_cursor,
+ &storage->wildcard_subscriptions_available,
+ &storage_view->wildcard_subscriptions_available,
+ done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIERS_AVAILABLE:
+ AWS_MQTT5_DECODE_U8_OPTIONAL(
+ packet_cursor,
+ &storage->subscription_identifiers_available,
+ &storage_view->subscription_identifiers_available,
+ done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_SHARED_SUBSCRIPTIONS_AVAILABLE:
+ AWS_MQTT5_DECODE_U8_OPTIONAL(
+ packet_cursor,
+ &storage->shared_subscriptions_available,
+ &storage_view->shared_subscriptions_available,
+ done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_SERVER_KEEP_ALIVE:
+ AWS_MQTT5_DECODE_U16_OPTIONAL(
+ packet_cursor, &storage->server_keep_alive, &storage_view->server_keep_alive, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_RESPONSE_INFORMATION:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->response_information, &storage_view->response_information, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->server_reference, &storage_view->server_reference, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->authentication_method, &storage_view->authentication_method, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->authentication_data, &storage_view->authentication_data, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY:
+ if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) {
+ goto done;
+ }
+ break;
+
+ default:
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read CONNACK property decode failure");
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return result;
+}
+
+/* decodes a CONNACK packet whose data must be in the scratch buffer */
+static int s_aws_mqtt5_decoder_decode_connack(struct aws_mqtt5_decoder *decoder) {
+ struct aws_mqtt5_packet_connack_storage storage;
+ if (aws_mqtt5_packet_connack_storage_init_from_external_storage(&storage, decoder->allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ int result = AWS_OP_ERR;
+
+ uint8_t first_byte = decoder->packet_first_byte;
+ /* CONNACK flags must be zero by protocol */
+ if ((first_byte & 0x0F) != 0) {
+ goto done;
+ }
+
+ struct aws_byte_cursor packet_cursor = decoder->packet_cursor;
+ uint32_t remaining_length = decoder->remaining_length;
+ if (remaining_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+
+ uint8_t connect_flags = 0;
+ AWS_MQTT5_DECODE_U8(&packet_cursor, &connect_flags, done);
+
+ /* everything but the 0-bit must be 0 */
+ if ((connect_flags & 0xFE) != 0) {
+ goto done;
+ }
+
+ struct aws_mqtt5_packet_connack_view *storage_view = &storage.storage_view;
+
+ storage_view->session_present = (connect_flags & 0x01) != 0;
+
+ uint8_t reason_code = 0;
+ AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done);
+ storage_view->reason_code = reason_code;
+
+ uint32_t property_length = 0;
+ AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done);
+ if (property_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+
+ while (packet_cursor.len > 0) {
+ if (s_read_connack_property(&storage, &packet_cursor)) {
+ goto done;
+ }
+ }
+
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties);
+ storage_view->user_properties = storage.user_properties.properties.data;
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_SUCCESS) {
+ if (decoder->options.on_packet_received != NULL) {
+ result = (*decoder->options.on_packet_received)(
+ AWS_MQTT5_PT_CONNACK, &storage.storage_view, decoder->options.callback_user_data);
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: CONNACK decode failure", decoder->options.callback_user_data);
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ aws_mqtt5_packet_connack_storage_clean_up(&storage);
+
+ return result;
+}
+
+/* decode function for all PUBLISH properties */
+static int s_read_publish_property(
+ struct aws_mqtt5_packet_publish_storage *storage,
+ struct aws_byte_cursor *packet_cursor) {
+ int result = AWS_OP_ERR;
+
+ uint8_t property_type = 0;
+ AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done);
+
+ struct aws_mqtt5_packet_publish_view *storage_view = &storage->storage_view;
+
+ switch (property_type) {
+ case AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR:
+ AWS_MQTT5_DECODE_U8_OPTIONAL(packet_cursor, &storage->payload_format, &storage_view->payload_format, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL:
+ AWS_MQTT5_DECODE_U32_OPTIONAL(
+ packet_cursor,
+ &storage->message_expiry_interval_seconds,
+ &storage_view->message_expiry_interval_seconds,
+ done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS:
+ AWS_MQTT5_DECODE_U16_OPTIONAL(packet_cursor, &storage->topic_alias, &storage_view->topic_alias, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->response_topic, &storage_view->response_topic, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->correlation_data, &storage_view->correlation_data, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY:
+ if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) {
+ goto done;
+ }
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER: {
+ uint32_t subscription_identifier = 0;
+ AWS_MQTT5_DECODE_VLI(packet_cursor, &subscription_identifier, done);
+ aws_array_list_push_back(&storage->subscription_identifiers, &subscription_identifier);
+ break;
+ }
+
+ case AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->content_type, &storage_view->content_type, done);
+ break;
+
+ default:
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read PUBLISH property decode failure");
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return result;
+}
+
+/* decodes a PUBLISH packet whose data must be in the scratch buffer */
+static int s_aws_mqtt5_decoder_decode_publish(struct aws_mqtt5_decoder *decoder) {
+ struct aws_mqtt5_packet_publish_storage storage;
+ if (aws_mqtt5_packet_publish_storage_init_from_external_storage(&storage, decoder->allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ int result = AWS_OP_ERR;
+ struct aws_mqtt5_packet_publish_view *storage_view = &storage.storage_view;
+
+ /*
+ * Fixed Header
+ * byte 1:
+ * bits 4-7: MQTT Control Packet Type
+ * bit 3: DUP flag
+     *      bits 1-2: QoS level
+ * bit 0: RETAIN
+ * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes)
+ */
+
+ uint8_t first_byte = decoder->packet_first_byte;
+ if ((first_byte & PUBLISH_PACKET_FIXED_HEADER_DUPLICATE_FLAG) != 0) {
+ storage_view->duplicate = true;
+ }
+ if ((first_byte & PUBLISH_PACKET_FIXED_HEADER_RETAIN_FLAG) != 0) {
+ storage_view->retain = true;
+ }
+ storage_view->qos = (enum aws_mqtt5_qos)((first_byte >> 1) & PUBLISH_PACKET_FIXED_HEADER_QOS_FLAG);
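+
+    /*
+     * Worked example (editorial comment, not part of the upstream source): a first byte of 0x3B is
+     * 0011 1011 in binary -> packet type 0x3 (PUBLISH), DUP bit (0x08) set, QoS bits ((0x3B >> 1) & 0x03) = 1,
+     * and RETAIN bit (0x01) set, so the view above would record duplicate = true, retain = true, qos = 1.
+     */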
+
+ struct aws_byte_cursor packet_cursor = decoder->packet_cursor;
+ uint32_t remaining_length = decoder->remaining_length;
+ if (remaining_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+
+ /*
+ * Topic Name
+ * Packet Identifier (only present for > QoS 0)
+ * Properties
+ * - Property Length
+ * - Properties
+ * Payload
+ */
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR(&packet_cursor, &storage_view->topic, done);
+
+ if (storage_view->qos > 0) {
+ AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done);
+ }
+
+ uint32_t property_length = 0;
+ AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done);
+ if (property_length > (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+ struct aws_byte_cursor properties_cursor = aws_byte_cursor_advance(&packet_cursor, property_length);
+
+ while (properties_cursor.len > 0) {
+ if (s_read_publish_property(&storage, &properties_cursor)) {
+ goto done;
+ }
+ }
+
+ storage_view->subscription_identifier_count = aws_array_list_length(&storage.subscription_identifiers);
+ storage_view->subscription_identifiers = storage.subscription_identifiers.data;
+
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties);
+ storage_view->user_properties = storage.user_properties.properties.data;
+ storage_view->payload = packet_cursor;
+
+ if (storage_view->topic_alias != NULL) {
+ if (decoder->topic_alias_resolver == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: PUBLISH packet contained topic alias when not allowed",
+ decoder->options.callback_user_data);
+ goto done;
+ }
+
+ uint16_t topic_alias_id = *storage_view->topic_alias;
+ if (topic_alias_id == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: PUBLISH packet contained illegal topic alias",
+ decoder->options.callback_user_data);
+ goto done;
+ }
+
+ if (storage_view->topic.len > 0) {
+ if (aws_mqtt5_inbound_topic_alias_resolver_register_alias(
+ decoder->topic_alias_resolver, topic_alias_id, storage_view->topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT, "id=%p: unable to register topic alias", decoder->options.callback_user_data);
+ goto done;
+ }
+ } else {
+ if (aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(
+ decoder->topic_alias_resolver, topic_alias_id, &storage_view->topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: PUBLISH packet contained unknown topic alias",
+ decoder->options.callback_user_data);
+ goto done;
+ }
+ }
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_SUCCESS) {
+ if (decoder->options.on_packet_received != NULL) {
+ result = (*decoder->options.on_packet_received)(
+ AWS_MQTT5_PT_PUBLISH, &storage.storage_view, decoder->options.callback_user_data);
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: PUBLISH decode failure", decoder->options.callback_user_data);
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ aws_mqtt5_packet_publish_storage_clean_up(&storage);
+
+ return result;
+}
+
+/* decode function for all PUBACK properties */
+static int s_read_puback_property(
+ struct aws_mqtt5_packet_puback_storage *storage,
+ struct aws_byte_cursor *packet_cursor) {
+ int result = AWS_OP_ERR;
+
+ uint8_t property_type = 0;
+ AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done);
+
+ struct aws_mqtt5_packet_puback_view *storage_view = &storage->storage_view;
+
+ switch (property_type) {
+ case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->reason_string, &storage_view->reason_string, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY:
+ if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) {
+ goto done;
+ }
+ break;
+
+ default:
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read PUBACK property decode failure");
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return result;
+}
+
+/* decodes a PUBACK packet whose data must be in the scratch buffer */
+static int s_aws_mqtt5_decoder_decode_puback(struct aws_mqtt5_decoder *decoder) {
+ struct aws_mqtt5_packet_puback_storage storage;
+ if (aws_mqtt5_packet_puback_storage_init_from_external_storage(&storage, decoder->allocator)) {
+ return AWS_OP_ERR;
+ }
+ int result = AWS_OP_ERR;
+
+ uint8_t first_byte = decoder->packet_first_byte;
+ /* PUBACK flags must be zero by protocol */
+ if ((first_byte & 0x0F) != 0) {
+ goto done;
+ }
+
+ struct aws_byte_cursor packet_cursor = decoder->packet_cursor;
+ uint32_t remaining_length = decoder->remaining_length;
+ if (remaining_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+
+ struct aws_mqtt5_packet_puback_view *storage_view = &storage.storage_view;
+
+ AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done);
+
+ /* Packet can end immediately following packet id with default success reason code */
+ uint8_t reason_code = 0;
+ if (packet_cursor.len > 0) {
+ AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done);
+
+ /* Packet can end immediately following reason code */
+ if (packet_cursor.len > 0) {
+ uint32_t property_length = 0;
+ AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done);
+ if (property_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+ while (packet_cursor.len > 0) {
+ if (s_read_puback_property(&storage, &packet_cursor)) {
+ goto done;
+ }
+ }
+ }
+ }
+
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties);
+ storage_view->user_properties = storage.user_properties.properties.data;
+ storage_view->reason_code = reason_code;
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_SUCCESS) {
+ if (decoder->options.on_packet_received != NULL) {
+ result = (*decoder->options.on_packet_received)(
+ AWS_MQTT5_PT_PUBACK, &storage.storage_view, decoder->options.callback_user_data);
+ }
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) aws_mqtt5_decoder - PUBACK decode failure",
+ decoder->options.callback_user_data);
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ aws_mqtt5_packet_puback_storage_clean_up(&storage);
+
+ return result;
+}
+
+/* decode function for all SUBACK properties */
+static int s_read_suback_property(
+ struct aws_mqtt5_packet_suback_storage *storage,
+ struct aws_byte_cursor *packet_cursor) {
+ int result = AWS_OP_ERR;
+
+ uint8_t property_type = 0;
+ AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done);
+
+ struct aws_mqtt5_packet_suback_view *storage_view = &storage->storage_view;
+
+ switch (property_type) {
+ case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->reason_string, &storage_view->reason_string, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY:
+ if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) {
+ goto done;
+ }
+ break;
+
+ default:
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read SUBACK property decode failure");
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return result;
+}
+
+/* decodes a SUBACK packet whose data must be in the scratch buffer */
+static int s_aws_mqtt5_decoder_decode_suback(struct aws_mqtt5_decoder *decoder) {
+ struct aws_mqtt5_packet_suback_storage storage;
+ if (aws_mqtt5_packet_suback_storage_init_from_external_storage(&storage, decoder->allocator)) {
+ return AWS_OP_ERR;
+ }
+ int result = AWS_OP_ERR;
+
+ struct aws_mqtt5_packet_suback_view *storage_view = &storage.storage_view;
+
+ struct aws_byte_cursor packet_cursor = decoder->packet_cursor;
+
+ AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done);
+
+ uint32_t property_length = 0;
+ AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done);
+ struct aws_byte_cursor properties_cursor = aws_byte_cursor_advance(&packet_cursor, property_length);
+ while (properties_cursor.len > 0) {
+ if (s_read_suback_property(&storage, &properties_cursor)) {
+ goto done;
+ }
+ }
+
+ aws_array_list_init_dynamic(
+ &storage.reason_codes, decoder->allocator, packet_cursor.len, sizeof(enum aws_mqtt5_suback_reason_code));
+
+ while (packet_cursor.len > 0) {
+ uint8_t reason_code;
+ AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done);
+ enum aws_mqtt5_suback_reason_code reason_code_enum = reason_code;
+ aws_array_list_push_back(&storage.reason_codes, &reason_code_enum);
+ }
+
+ storage_view->reason_code_count = aws_array_list_length(&storage.reason_codes);
+ storage_view->reason_codes = storage.reason_codes.data;
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties);
+ storage_view->user_properties = storage.user_properties.properties.data;
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_SUCCESS) {
+ if (decoder->options.on_packet_received != NULL) {
+ result = (*decoder->options.on_packet_received)(
+ AWS_MQTT5_PT_SUBACK, &storage.storage_view, decoder->options.callback_user_data);
+ }
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) aws_mqtt5_decoder - SUBACK decode failure",
+ decoder->options.callback_user_data);
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ aws_mqtt5_packet_suback_storage_clean_up(&storage);
+
+ return result;
+}
+
+/* decode function for all UNSUBACK properties */
+static int s_read_unsuback_property(
+ struct aws_mqtt5_packet_unsuback_storage *storage,
+ struct aws_byte_cursor *packet_cursor) {
+ int result = AWS_OP_ERR;
+
+ uint8_t property_type = 0;
+ AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done);
+
+ struct aws_mqtt5_packet_unsuback_view *storage_view = &storage->storage_view;
+
+ switch (property_type) {
+ case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->reason_string, &storage_view->reason_string, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY:
+ if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) {
+ goto done;
+ }
+ break;
+
+ default:
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read UNSUBACK property decode failure");
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return result;
+}
+
+/* decodes an UNSUBACK packet whose data must be in the scratch buffer */
+static int s_aws_mqtt5_decoder_decode_unsuback(struct aws_mqtt5_decoder *decoder) {
+ struct aws_mqtt5_packet_unsuback_storage storage;
+
+ /*
+ * Fixed Header
+ * byte 1: MQTT5 Control Packet - Reserved 0
+ * byte 2 - x: VLI Remaining Length
+ *
+ * Variable Header
+ * byte 1-2: Packet Identifier
+ * Byte 3 - x: VLI Property Length
+ *
+ * Properties
+     * byte 1: Identifier
+ * bytes 2 - x: Property content
+ *
+ * Payload
+ * 1 byte per reason code in order of unsub requests
+ */
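+
+    /*
+     * Illustrative example (editorial comment, not part of the upstream source): a minimal UNSUBACK
+     * acknowledging one unsubscribe with packet id 1 and reason code 0x00 (Success) would arrive as the
+     * six bytes B0 04 00 01 00 00 -- fixed header 0xB0 (type 11, flags 0), remaining length 4,
+     * packet id 0x0001, property length 0, and a single reason code byte.
+     */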
+
+ if (aws_mqtt5_packet_unsuback_storage_init_from_external_storage(&storage, decoder->allocator)) {
+ return AWS_OP_ERR;
+ }
+ int result = AWS_OP_ERR;
+
+ struct aws_byte_cursor packet_cursor = decoder->packet_cursor;
+
+ struct aws_mqtt5_packet_unsuback_view *storage_view = &storage.storage_view;
+
+ AWS_MQTT5_DECODE_U16(&packet_cursor, &storage_view->packet_id, done);
+
+ uint32_t property_length = 0;
+ AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done);
+ struct aws_byte_cursor properties_cursor = aws_byte_cursor_advance(&packet_cursor, property_length);
+ while (properties_cursor.len > 0) {
+ if (s_read_unsuback_property(&storage, &properties_cursor)) {
+ goto done;
+ }
+ }
+
+ aws_array_list_init_dynamic(
+ &storage.reason_codes, decoder->allocator, packet_cursor.len, sizeof(enum aws_mqtt5_unsuback_reason_code));
+
+ while (packet_cursor.len > 0) {
+ uint8_t reason_code;
+ AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done);
+ enum aws_mqtt5_unsuback_reason_code reason_code_enum = reason_code;
+ aws_array_list_push_back(&storage.reason_codes, &reason_code_enum);
+ }
+
+ storage_view->reason_code_count = aws_array_list_length(&storage.reason_codes);
+ storage_view->reason_codes = storage.reason_codes.data;
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties);
+ storage_view->user_properties = storage.user_properties.properties.data;
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_SUCCESS) {
+ if (decoder->options.on_packet_received != NULL) {
+ result = (*decoder->options.on_packet_received)(
+ AWS_MQTT5_PT_UNSUBACK, &storage.storage_view, decoder->options.callback_user_data);
+ }
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) aws_mqtt5_decoder - UNSUBACK decode failure",
+ decoder->options.callback_user_data);
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ aws_mqtt5_packet_unsuback_storage_clean_up(&storage);
+
+ return result;
+}
+
+/* decode function for all DISCONNECT properties */
+static int s_read_disconnect_property(
+ struct aws_mqtt5_packet_disconnect_storage *storage,
+ struct aws_byte_cursor *packet_cursor) {
+ int result = AWS_OP_ERR;
+
+ uint8_t property_type = 0;
+ AWS_MQTT5_DECODE_U8(packet_cursor, &property_type, done);
+
+ struct aws_mqtt5_packet_disconnect_view *storage_view = &storage->storage_view;
+
+ switch (property_type) {
+ case AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL:
+ AWS_MQTT5_DECODE_U32_OPTIONAL(
+ packet_cursor,
+ &storage->session_expiry_interval_seconds,
+ &storage_view->session_expiry_interval_seconds,
+ done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->server_reference, &storage_view->server_reference, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_REASON_STRING:
+ AWS_MQTT5_DECODE_LENGTH_PREFIXED_CURSOR_OPTIONAL(
+ packet_cursor, &storage->reason_string, &storage_view->reason_string, done);
+ break;
+
+ case AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY:
+ if (aws_mqtt5_decode_user_property(packet_cursor, &storage->user_properties)) {
+ goto done;
+ }
+ break;
+
+ default:
+ goto done;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_ERR) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Read DISCONNECT property decode failure");
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return result;
+}
+
+/* decodes a DISCONNECT packet whose data must be in the scratch buffer */
+static int s_aws_mqtt5_decoder_decode_disconnect(struct aws_mqtt5_decoder *decoder) {
+ struct aws_mqtt5_packet_disconnect_storage storage;
+ if (aws_mqtt5_packet_disconnect_storage_init_from_external_storage(&storage, decoder->allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ int result = AWS_OP_ERR;
+
+ uint8_t first_byte = decoder->packet_first_byte;
+ /* DISCONNECT flags must be zero by protocol */
+ if ((first_byte & 0x0F) != 0) {
+ goto done;
+ }
+
+ struct aws_byte_cursor packet_cursor = decoder->packet_cursor;
+ uint32_t remaining_length = decoder->remaining_length;
+ if (remaining_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+
+ struct aws_mqtt5_packet_disconnect_view *storage_view = &storage.storage_view;
+ if (remaining_length > 0) {
+ uint8_t reason_code = 0;
+ AWS_MQTT5_DECODE_U8(&packet_cursor, &reason_code, done);
+ storage_view->reason_code = reason_code;
+ if (packet_cursor.len == 0) {
+ result = AWS_OP_SUCCESS;
+ goto done;
+ }
+
+ uint32_t property_length = 0;
+ AWS_MQTT5_DECODE_VLI(&packet_cursor, &property_length, done);
+ if (property_length != (uint32_t)packet_cursor.len) {
+ goto done;
+ }
+
+ while (packet_cursor.len > 0) {
+ if (s_read_disconnect_property(&storage, &packet_cursor)) {
+ goto done;
+ }
+ }
+ }
+
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage.user_properties);
+ storage_view->user_properties = storage.user_properties.properties.data;
+
+ result = AWS_OP_SUCCESS;
+
+done:
+
+ if (result == AWS_OP_SUCCESS) {
+ if (decoder->options.on_packet_received != NULL) {
+ result = (*decoder->options.on_packet_received)(
+ AWS_MQTT5_PT_DISCONNECT, &storage.storage_view, decoder->options.callback_user_data);
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: DISCONNECT decode failure", decoder->options.callback_user_data);
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ aws_mqtt5_packet_disconnect_storage_clean_up(&storage);
+
+ return result;
+}
+
+static int s_aws_mqtt5_decoder_decode_pingresp(struct aws_mqtt5_decoder *decoder) {
+ if (decoder->packet_cursor.len != 0) {
+ goto error;
+ }
+
+ uint8_t expected_first_byte = aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PINGRESP, 0);
+ if (decoder->packet_first_byte != expected_first_byte || decoder->remaining_length != 0) {
+ goto error;
+ }
+
+ int result = AWS_OP_SUCCESS;
+ if (decoder->options.on_packet_received != NULL) {
+ result =
+ (*decoder->options.on_packet_received)(AWS_MQTT5_PT_PINGRESP, NULL, decoder->options.callback_user_data);
+ }
+
+ return result;
+
+error:
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: PINGRESP decode failure", decoder->options.callback_user_data);
+ return aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+}
+
+static int s_aws_mqtt5_decoder_decode_packet(struct aws_mqtt5_decoder *decoder) {
+ enum aws_mqtt5_packet_type packet_type = (enum aws_mqtt5_packet_type)(decoder->packet_first_byte >> 4);
+ aws_mqtt5_decoding_fn *decoder_fn = decoder->options.decoder_table->decoders_by_packet_type[packet_type];
+ if (decoder_fn == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Decoder function missing for packet type: %d", packet_type);
+ return aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ }
+
+ return (*decoder_fn)(decoder);
+}
+
+/*
+ * (Streaming) Given a packet type and a variable length integer specifying the packet length, this state either
+ * (1) decodes directly from the cursor if possible, or
+ * (2) reads the packet into the scratch buffer and then decodes it once it is completely present.
+ */
+static enum aws_mqtt5_decode_result_type s_aws_mqtt5_decoder_read_packet_on_data(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_byte_cursor *data) {
+
+ /* Are we able to decode directly from the channel message data buffer? */
+ if (decoder->scratch_space.len == 0 && decoder->remaining_length <= data->len) {
+ /* The cursor contains the entire packet, so decode directly from the backing io message buffer */
+ decoder->packet_cursor = aws_byte_cursor_advance(data, decoder->remaining_length);
+ } else {
+ /* If the packet is fragmented across multiple io messages, then we buffer it internally */
+ size_t unread_length = decoder->remaining_length - decoder->scratch_space.len;
+ size_t copy_length = aws_min_size(unread_length, data->len);
+
+ struct aws_byte_cursor copy_cursor = aws_byte_cursor_advance(data, copy_length);
+ if (aws_byte_buf_append_dynamic(&decoder->scratch_space, &copy_cursor)) {
+ return AWS_MQTT5_DRT_ERROR;
+ }
+
+ if (copy_length < unread_length) {
+ return AWS_MQTT5_DRT_MORE_DATA;
+ }
+
+ decoder->packet_cursor = aws_byte_cursor_from_buf(&decoder->scratch_space);
+ }
+
+ if (s_aws_mqtt5_decoder_decode_packet(decoder)) {
+ return AWS_MQTT5_DRT_ERROR;
+ }
+
+ s_enter_state(decoder, AWS_MQTT5_DS_READ_PACKET_TYPE);
+
+ return AWS_MQTT5_DRT_SUCCESS;
+}
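+
+/*
+ * Worked example (illustrative): suppose a packet with remaining length 300 arrives split across io messages of
+ * 180 and 200 bytes. The first call finds the scratch buffer empty but only 180 of 300 bytes available, so it
+ * copies all 180 bytes into scratch and returns AWS_MQTT5_DRT_MORE_DATA. The second call copies the remaining
+ * 120 bytes (leaving the other 80 bytes, the start of the next packet, in the cursor), points packet_cursor at
+ * the scratch buffer, and decodes. Had all 300 bytes arrived in a single io message with scratch empty, decoding
+ * would have happened directly from the io message buffer with no copy.
+ */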
+
+/* top-level entry function for all new data received from the remote mqtt endpoint */
+int aws_mqtt5_decoder_on_data_received(struct aws_mqtt5_decoder *decoder, struct aws_byte_cursor data) {
+ enum aws_mqtt5_decode_result_type result = AWS_MQTT5_DRT_SUCCESS;
+ while (result == AWS_MQTT5_DRT_SUCCESS) {
+ switch (decoder->state) {
+ case AWS_MQTT5_DS_READ_PACKET_TYPE:
+ result = s_aws_mqtt5_decoder_read_packet_type_on_data(decoder, &data);
+ break;
+
+ case AWS_MQTT5_DS_READ_REMAINING_LENGTH:
+ result = s_aws_mqtt5_decoder_read_remaining_length_on_data(decoder, &data);
+ break;
+
+ case AWS_MQTT5_DS_READ_PACKET:
+ result = s_aws_mqtt5_decoder_read_packet_on_data(decoder, &data);
+ break;
+
+ default:
+ result = AWS_MQTT5_DRT_ERROR;
+ break;
+ }
+ }
+
+ if (result == AWS_MQTT5_DRT_ERROR) {
+ aws_raise_error(AWS_ERROR_MQTT5_DECODE_PROTOCOL_ERROR);
+ decoder->state = AWS_MQTT5_DS_FATAL_ERROR;
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_mqtt5_decoder_function_table s_aws_mqtt5_decoder_default_function_table = {
+ .decoders_by_packet_type =
+ {
+ NULL, /* RESERVED = 0 */
+ NULL, /* CONNECT */
+ &s_aws_mqtt5_decoder_decode_connack, /* CONNACK */
+ &s_aws_mqtt5_decoder_decode_publish, /* PUBLISH */
+ &s_aws_mqtt5_decoder_decode_puback, /* PUBACK */
+ NULL, /* PUBREC */
+ NULL, /* PUBREL */
+ NULL, /* PUBCOMP */
+ NULL, /* SUBSCRIBE */
+ &s_aws_mqtt5_decoder_decode_suback, /* SUBACK */
+ NULL, /* UNSUBSCRIBE */
+ &s_aws_mqtt5_decoder_decode_unsuback, /* UNSUBACK */
+ NULL, /* PINGREQ */
+ &s_aws_mqtt5_decoder_decode_pingresp, /* PINGRESP */
+ &s_aws_mqtt5_decoder_decode_disconnect, /* DISCONNECT */
+ NULL /* AUTH */
+ },
+};
+
+const struct aws_mqtt5_decoder_function_table *g_aws_mqtt5_default_decoder_table =
+ &s_aws_mqtt5_decoder_default_function_table;
+
+int aws_mqtt5_decoder_init(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_decoder_options *options) {
+ AWS_ZERO_STRUCT(*decoder);
+
+ decoder->options = *options;
+
+ if (decoder->options.decoder_table == NULL) {
+ decoder->options.decoder_table = g_aws_mqtt5_default_decoder_table;
+ }
+
+ decoder->allocator = allocator;
+
+ decoder->state = AWS_MQTT5_DS_READ_PACKET_TYPE;
+
+ if (aws_byte_buf_init(&decoder->scratch_space, allocator, AWS_MQTT5_DECODER_BUFFER_START_SIZE)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_decoder_reset(struct aws_mqtt5_decoder *decoder) {
+ s_reset_decoder_for_new_packet(decoder);
+
+ decoder->state = AWS_MQTT5_DS_READ_PACKET_TYPE;
+}
+
+void aws_mqtt5_decoder_clean_up(struct aws_mqtt5_decoder *decoder) {
+ aws_byte_buf_clean_up(&decoder->scratch_space);
+}
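+
+/*
+ * Typical usage (illustrative sketch, error handling omitted; s_on_packet_received and my_context are
+ * hypothetical):
+ *
+ *   struct aws_mqtt5_decoder_options options = {
+ *       .callback_user_data = my_context,
+ *       .on_packet_received = s_on_packet_received,
+ *   };
+ *   struct aws_mqtt5_decoder decoder;
+ *   aws_mqtt5_decoder_init(&decoder, allocator, &options);
+ *   // for each io message received from the channel:
+ *   aws_mqtt5_decoder_on_data_received(&decoder, message_cursor);
+ *   ...
+ *   aws_mqtt5_decoder_clean_up(&decoder);
+ */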
+
+void aws_mqtt5_decoder_set_inbound_topic_alias_resolver(
+ struct aws_mqtt5_decoder *decoder,
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver) {
+ decoder->topic_alias_resolver = resolver;
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
new file mode 100644
index 0000000000..b9a3ec56ec
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_encoder.c
@@ -0,0 +1,1283 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_encoder.h>
+
+#include <aws/io/stream.h>
+#include <aws/mqtt/private/v5/mqtt5_topic_alias.h>
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+#include <inttypes.h>
+
+#define INITIAL_ENCODING_STEP_COUNT 64
+#define SUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS 2
+#define UNSUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS 2
+
+int aws_mqtt5_encode_variable_length_integer(struct aws_byte_buf *buf, uint32_t value) {
+ AWS_PRECONDITION(buf);
+
+ if (value > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ do {
+ uint8_t encoded_byte = value % 128;
+ value /= 128;
+ if (value) {
+ encoded_byte |= 128;
+ }
+ if (!aws_byte_buf_write_u8(buf, encoded_byte)) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ } while (value);
+
+ return AWS_OP_SUCCESS;
+}
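+
+/*
+ * Worked example (illustrative): the loop above emits 7 bits per byte, least significant group first, setting the
+ * continuation bit (0x80) whenever more groups follow. Encoding 200 yields 0xC8 0x01 (200 % 128 = 72, 72 | 0x80 =
+ * 0xC8, then 200 / 128 = 1 -> 0x01); encoding 16384 yields 0x80 0x80 0x01.
+ */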
+
+int aws_mqtt5_get_variable_length_encode_size(size_t value, size_t *encode_size) {
+ if (value > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (value < 128) {
+ *encode_size = 1;
+ } else if (value < 16384) {
+ *encode_size = 2;
+ } else if (value < 2097152) {
+ *encode_size = 3;
+ } else {
+ *encode_size = 4;
+ }
+
+ return AWS_OP_SUCCESS;
+}
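+
+/*
+ * For reference, these thresholds correspond to the standard variable byte integer ranges: 0-127 encode in
+ * 1 byte, 128-16383 in 2 bytes, 16384-2097151 in 3 bytes, and larger values (up to the protocol maximum of
+ * 268435455) in 4 bytes.
+ */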
+
+/* helper functions that add a single type of encoding step to the list of steps in an encoder */
+
+void aws_mqtt5_encoder_push_step_u8(struct aws_mqtt5_encoder *encoder, uint8_t value) {
+ struct aws_mqtt5_encoding_step step;
+ AWS_ZERO_STRUCT(step);
+
+ step.type = AWS_MQTT5_EST_U8;
+ step.value.value_u8 = value;
+
+ aws_array_list_push_back(&encoder->encoding_steps, &step);
+}
+
+void aws_mqtt5_encoder_push_step_u16(struct aws_mqtt5_encoder *encoder, uint16_t value) {
+ struct aws_mqtt5_encoding_step step;
+ AWS_ZERO_STRUCT(step);
+
+ step.type = AWS_MQTT5_EST_U16;
+ step.value.value_u16 = value;
+
+ aws_array_list_push_back(&encoder->encoding_steps, &step);
+}
+
+void aws_mqtt5_encoder_push_step_u32(struct aws_mqtt5_encoder *encoder, uint32_t value) {
+ struct aws_mqtt5_encoding_step step;
+ AWS_ZERO_STRUCT(step);
+
+ step.type = AWS_MQTT5_EST_U32;
+ step.value.value_u32 = value;
+
+ aws_array_list_push_back(&encoder->encoding_steps, &step);
+}
+
+int aws_mqtt5_encoder_push_step_vli(struct aws_mqtt5_encoder *encoder, uint32_t value) {
+ if (value > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) {
+ return aws_raise_error(AWS_ERROR_MQTT5_ENCODE_FAILURE);
+ }
+
+ struct aws_mqtt5_encoding_step step;
+ AWS_ZERO_STRUCT(step);
+
+ step.type = AWS_MQTT5_EST_VLI;
+ step.value.value_u32 = value;
+
+ aws_array_list_push_back(&encoder->encoding_steps, &step);
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_encoder_push_step_cursor(struct aws_mqtt5_encoder *encoder, struct aws_byte_cursor value) {
+ struct aws_mqtt5_encoding_step step;
+ AWS_ZERO_STRUCT(step);
+
+ step.type = AWS_MQTT5_EST_CURSOR;
+ step.value.value_cursor = value;
+
+ aws_array_list_push_back(&encoder->encoding_steps, &step);
+}
+
+/*
+ * All size calculations are done with size_t. We assume that view validation will catch and fail all packets
+ * that violate length constraints either from the MQTT5 spec or additional constraints that we impose on packets
+ * to ensure that the size calculations do not need to perform checked arithmetic. The only place where we need
+ * to use checked arithmetic is a PUBLISH packet when combining the payload size and "sizeof everything else"
+ *
+ * The additional beyond-spec constraints we apply to view validation ensure our results actually fit in 32 bits.
+ */
+size_t aws_mqtt5_compute_user_property_encode_length(
+ const struct aws_mqtt5_user_property *properties,
+ size_t user_property_count) {
+ /*
+ * for each user property, in addition to the raw name-value bytes, we also have 5 bytes of prefix required:
+ * 1 byte for the property type
+ * 2 bytes for the name length
+ * 2 bytes for the value length
+ */
+ size_t length = 5 * user_property_count;
+
+ for (size_t i = 0; i < user_property_count; ++i) {
+ const struct aws_mqtt5_user_property *property = &properties[i];
+
+ length += property->name.len;
+ length += property->value.len;
+ }
+
+ return length;
+}
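+
+/*
+ * Worked example (illustrative): a single user property with name "region" (6 bytes) and value "us-east-1"
+ * (9 bytes) contributes 5 + 6 + 9 = 20 bytes to the enclosing property section.
+ */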
+
+void aws_mqtt5_add_user_property_encoding_steps(
+ struct aws_mqtt5_encoder *encoder,
+ const struct aws_mqtt5_user_property *user_properties,
+ size_t user_property_count) {
+ for (size_t i = 0; i < user_property_count; ++i) {
+ const struct aws_mqtt5_user_property *property = &user_properties[i];
+
+ /* https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901054 */
+ ADD_ENCODE_STEP_U8(encoder, AWS_MQTT5_PROPERTY_TYPE_USER_PROPERTY);
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)property->name.len);
+ ADD_ENCODE_STEP_CURSOR(encoder, property->name);
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)property->value.len);
+ ADD_ENCODE_STEP_CURSOR(encoder, property->value);
+ }
+}
+
+static int s_aws_mqtt5_encoder_begin_pingreq(struct aws_mqtt5_encoder *encoder, const void *view) {
+ (void)view;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: setting up encode for a PINGREQ packet", (void *)encoder->config.client);
+
+ /* A ping is just a fixed header with a 0-valued remaining length which we encode as a 0 u8 rather than a 0 vli */
+ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PINGREQ, 0));
+ ADD_ENCODE_STEP_U8(encoder, 0);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_compute_disconnect_variable_length_fields(
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view,
+ size_t *total_remaining_length,
+ size_t *property_length) {
+ size_t local_property_length = aws_mqtt5_compute_user_property_encode_length(
+ disconnect_view->user_properties, disconnect_view->user_property_count);
+
+ ADD_OPTIONAL_U32_PROPERTY_LENGTH(disconnect_view->session_expiry_interval_seconds, local_property_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(disconnect_view->server_reference, local_property_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(disconnect_view->reason_string, local_property_length);
+
+ *property_length = local_property_length;
+
+ size_t property_length_encoding_length = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &property_length_encoding_length)) {
+ return AWS_OP_ERR;
+ }
+
+ /* reason code is the only other thing to worry about */
+ *total_remaining_length = 1 + *property_length + property_length_encoding_length;
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_encoder_begin_disconnect(struct aws_mqtt5_encoder *encoder, const void *view) {
+
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view = view;
+
+ size_t total_remaining_length = 0;
+ size_t property_length = 0;
+ if (s_compute_disconnect_variable_length_fields(disconnect_view, &total_remaining_length, &property_length)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to compute variable length values for DISCONNECT packet with error "
+ "%d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length;
+ uint32_t property_length_u32 = (uint32_t)property_length;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: setting up encode for a DISCONNECT packet with remaining length %" PRIu32,
+ (void *)encoder->config.client,
+ total_remaining_length_u32);
+
+ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_DISCONNECT, 0));
+ ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32);
+ ADD_ENCODE_STEP_U8(encoder, (uint8_t)disconnect_view->reason_code);
+ ADD_ENCODE_STEP_VLI(encoder, property_length_u32);
+
+ if (property_length > 0) {
+ ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL, disconnect_view->session_expiry_interval_seconds);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, disconnect_view->reason_string);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_SERVER_REFERENCE, disconnect_view->server_reference);
+
+ aws_mqtt5_add_user_property_encoding_steps(
+ encoder, disconnect_view->user_properties, disconnect_view->user_property_count);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_compute_connect_variable_length_fields(
+ const struct aws_mqtt5_packet_connect_view *connect_view,
+ size_t *total_remaining_length,
+ size_t *connect_property_length,
+ size_t *will_property_length) {
+
+ size_t connect_property_section_length =
+ aws_mqtt5_compute_user_property_encode_length(connect_view->user_properties, connect_view->user_property_count);
+
+ ADD_OPTIONAL_U32_PROPERTY_LENGTH(connect_view->session_expiry_interval_seconds, connect_property_section_length);
+ ADD_OPTIONAL_U16_PROPERTY_LENGTH(connect_view->receive_maximum, connect_property_section_length);
+ ADD_OPTIONAL_U32_PROPERTY_LENGTH(connect_view->maximum_packet_size_bytes, connect_property_section_length);
+ ADD_OPTIONAL_U16_PROPERTY_LENGTH(connect_view->topic_alias_maximum, connect_property_section_length);
+ ADD_OPTIONAL_U8_PROPERTY_LENGTH(connect_view->request_response_information, connect_property_section_length);
+ ADD_OPTIONAL_U8_PROPERTY_LENGTH(connect_view->request_problem_information, connect_property_section_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connect_view->authentication_method, connect_property_section_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(connect_view->authentication_data, connect_property_section_length);
+
+ *connect_property_length = (uint32_t)connect_property_section_length;
+
+ /* variable header length =
+ * 10 bytes (6 for mqtt string, 1 for protocol version, 1 for flags, 2 for keep alive)
+ * + # bytes(variable_length_encoding(connect_property_section_length))
+ * + connect_property_section_length
+ */
+ size_t variable_header_length = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(connect_property_section_length, &variable_header_length)) {
+ return AWS_OP_ERR;
+ }
+
+ variable_header_length += 10 + connect_property_section_length;
+
+ size_t payload_length = 2 + connect_view->client_id.len;
+
+ *will_property_length = 0;
+ if (connect_view->will != NULL) {
+ const struct aws_mqtt5_packet_publish_view *publish_view = connect_view->will;
+
+ *will_property_length = aws_mqtt5_compute_user_property_encode_length(
+ publish_view->user_properties, publish_view->user_property_count);
+
+ ADD_OPTIONAL_U32_PROPERTY_LENGTH(connect_view->will_delay_interval_seconds, *will_property_length);
+ ADD_OPTIONAL_U8_PROPERTY_LENGTH(publish_view->payload_format, *will_property_length);
+ ADD_OPTIONAL_U32_PROPERTY_LENGTH(publish_view->message_expiry_interval_seconds, *will_property_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->content_type, *will_property_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->response_topic, *will_property_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->correlation_data, *will_property_length);
+
+ size_t will_properties_length_encode_size = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(
+ (uint32_t)*will_property_length, &will_properties_length_encode_size)) {
+ return AWS_OP_ERR;
+ }
+
+ payload_length += *will_property_length;
+ payload_length += will_properties_length_encode_size;
+
+ payload_length += 2 + publish_view->topic.len;
+ payload_length += 2 + publish_view->payload.len;
+ }
+
+ /* Can't use the optional property macros because these don't have a leading property type byte */
+ if (connect_view->username != NULL) {
+ payload_length += connect_view->username->len + 2;
+ }
+
+ if (connect_view->password != NULL) {
+ payload_length += connect_view->password->len + 2;
+ }
+
+ *total_remaining_length = payload_length + variable_header_length;
+
+ return AWS_OP_SUCCESS;
+}
+
+static uint8_t s_aws_mqtt5_connect_compute_connect_flags(const struct aws_mqtt5_packet_connect_view *connect_view) {
+ uint8_t flags = 0;
+
+ if (connect_view->clean_start) {
+ flags |= 1 << 1;
+ }
+
+ const struct aws_mqtt5_packet_publish_view *will = connect_view->will;
+ if (will != NULL) {
+ flags |= 1 << 2;
+ flags |= ((uint8_t)will->qos) << 3;
+
+ if (will->retain) {
+ flags |= 1 << 5;
+ }
+ }
+
+ if (connect_view->password != NULL) {
+ flags |= 1 << 6;
+ }
+
+ if (connect_view->username != NULL) {
+ flags |= 1 << 7;
+ }
+
+ return flags;
+}
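+
+/*
+ * Worked example (illustrative): clean start, a QoS 1 will, and both username and password present would yield
+ * flags = 0x02 | 0x04 | (1 << 3) | 0x40 | 0x80 = 0xCE; bit 0 stays reserved and zero.
+ */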
+
+static int s_aws_mqtt5_encoder_begin_connect(struct aws_mqtt5_encoder *encoder, const void *view) {
+
+ const struct aws_mqtt5_packet_connect_view *connect_view = view;
+ const struct aws_mqtt5_packet_publish_view *will = connect_view->will;
+
+ size_t total_remaining_length = 0;
+ size_t connect_property_length = 0;
+ size_t will_property_length = 0;
+ if (s_compute_connect_variable_length_fields(
+ connect_view, &total_remaining_length, &connect_property_length, &will_property_length)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to compute variable length values for CONNECT packet with error %d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: setting up encode for a CONNECT packet with remaining length %zu",
+ (void *)encoder->config.client,
+ total_remaining_length);
+
+ uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length;
+ uint32_t connect_property_length_u32 = (uint32_t)connect_property_length;
+ uint32_t will_property_length_u32 = (uint32_t)will_property_length;
+
+ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_CONNECT, 0));
+ ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32);
+ ADD_ENCODE_STEP_CURSOR(encoder, g_aws_mqtt5_connect_protocol_cursor);
+ ADD_ENCODE_STEP_U8(encoder, s_aws_mqtt5_connect_compute_connect_flags(connect_view));
+ ADD_ENCODE_STEP_U16(encoder, connect_view->keep_alive_interval_seconds);
+
+ ADD_ENCODE_STEP_VLI(encoder, connect_property_length_u32);
+ ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_SESSION_EXPIRY_INTERVAL, connect_view->session_expiry_interval_seconds);
+ ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_RECEIVE_MAXIMUM, connect_view->receive_maximum);
+ ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_MAXIMUM_PACKET_SIZE, connect_view->maximum_packet_size_bytes);
+ ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS_MAXIMUM, connect_view->topic_alias_maximum);
+ ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_REQUEST_RESPONSE_INFORMATION, connect_view->request_response_information);
+ ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_REQUEST_PROBLEM_INFORMATION, connect_view->request_problem_information);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_METHOD, connect_view->authentication_method);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_AUTHENTICATION_DATA, connect_view->authentication_data);
+
+ aws_mqtt5_add_user_property_encoding_steps(
+ encoder, connect_view->user_properties, connect_view->user_property_count);
+
+ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, connect_view->client_id);
+
+ if (will != NULL) {
+ ADD_ENCODE_STEP_VLI(encoder, will_property_length_u32);
+ ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_WILL_DELAY_INTERVAL, connect_view->will_delay_interval_seconds);
+ ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR, will->payload_format);
+ ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL, will->message_expiry_interval_seconds);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE, will->content_type);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC, will->response_topic);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA, will->correlation_data);
+
+ aws_mqtt5_add_user_property_encoding_steps(encoder, will->user_properties, will->user_property_count);
+
+ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, will->topic);
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)will->payload.len);
+ ADD_ENCODE_STEP_CURSOR(encoder, will->payload);
+ }
+
+ ADD_ENCODE_STEP_OPTIONAL_LENGTH_PREFIXED_CURSOR(encoder, connect_view->username);
+ ADD_ENCODE_STEP_OPTIONAL_LENGTH_PREFIXED_CURSOR(encoder, connect_view->password);
+
+ return AWS_OP_SUCCESS;
+}
+
+static uint8_t s_aws_mqtt5_subscribe_compute_subscription_flags(
+ const struct aws_mqtt5_subscription_view *subscription_view) {
+
+ uint8_t flags = (uint8_t)subscription_view->qos;
+
+ if (subscription_view->no_local) {
+ flags |= 1 << 2;
+ }
+
+ if (subscription_view->retain_as_published) {
+ flags |= 1 << 3;
+ }
+
+ flags |= ((uint8_t)subscription_view->retain_handling_type) << 4;
+
+ return flags;
+}
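+
+/*
+ * Worked example (illustrative): a subscription with maximum QoS 1, no_local and retain_as_published set, and
+ * retain handling type 1 encodes as 0x01 | 0x04 | 0x08 | 0x10 = 0x1D.
+ */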
+
+static void aws_mqtt5_add_subscribe_topic_filter_encoding_steps(
+ struct aws_mqtt5_encoder *encoder,
+ const struct aws_mqtt5_subscription_view *subscriptions,
+ size_t subscription_count) {
+ /* https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169 */
+ for (size_t i = 0; i < subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *subscription = &subscriptions[i];
+ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, subscription->topic_filter);
+ ADD_ENCODE_STEP_U8(encoder, s_aws_mqtt5_subscribe_compute_subscription_flags(subscription));
+ }
+}
+
+static void aws_mqtt5_add_unsubscribe_topic_filter_encoding_steps(
+ struct aws_mqtt5_encoder *encoder,
+ const struct aws_byte_cursor *topics,
+ size_t unsubscription_count) {
+ /* https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901185 */
+ for (size_t i = 0; i < unsubscription_count; ++i) {
+ const struct aws_byte_cursor topic_filter = topics[i];
+ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, topic_filter);
+ }
+}
+
+static int s_compute_subscribe_variable_length_fields(
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_view,
+ size_t *total_remaining_length,
+ size_t *subscribe_properties_length) {
+
+ size_t subscribe_variable_header_property_length = aws_mqtt5_compute_user_property_encode_length(
+ subscribe_view->user_properties, subscribe_view->user_property_count);
+
+ /*
+ * Add the length of 1 byte for the identifier of a Subscription Identifier property
+ * and the VLI of the subscription_identifier itself
+ */
+ if (subscribe_view->subscription_identifier != 0) {
+ size_t subscription_identifier_length = 0;
+ aws_mqtt5_get_variable_length_encode_size(
+ *subscribe_view->subscription_identifier, &subscription_identifier_length);
+ subscribe_variable_header_property_length += subscription_identifier_length + 1;
+ }
+
+ *subscribe_properties_length = subscribe_variable_header_property_length;
+
+ /* variable header total length =
+ * 2 bytes for Packet Identifier
+ * + # bytes (variable_length_encoding(subscribe_variable_header_property_length))
+ * + subscribe_variable_header_property_length
+ */
+ size_t variable_header_length = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(subscribe_variable_header_property_length, &variable_header_length)) {
+ return AWS_OP_ERR;
+ }
+ variable_header_length += 2 + subscribe_variable_header_property_length;
+
+ size_t payload_length = 0;
+
+ /*
+ * for each subscription view, in addition to the raw topic filter bytes, we also have a 2 byte prefix and a
+ * 1 byte suffix:
+ * 2 bytes for the Topic Filter length
+ * 1 byte for the Subscription Options Flags
+ */
+
+ for (size_t i = 0; i < subscribe_view->subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i];
+ payload_length += subscription->topic_filter.len;
+ }
+ payload_length += (3 * subscribe_view->subscription_count);
+
+ *total_remaining_length = variable_header_length + payload_length;
+
+ return AWS_OP_SUCCESS;
+}
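+
+/*
+ * Worked example (illustrative): a SUBSCRIBE with a single topic filter "sensors/+/temp" (14 bytes), no user
+ * properties, and no subscription identifier has a property length of 0 (a 1-byte VLI), so the variable header is
+ * 2 + 1 = 3 bytes, the payload is 14 + 3 = 17 bytes, and the remaining length is 20.
+ */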
+
+static int s_aws_mqtt5_encoder_begin_subscribe(struct aws_mqtt5_encoder *encoder, const void *view) {
+
+ const struct aws_mqtt5_packet_subscribe_view *subscription_view = view;
+
+ size_t total_remaining_length = 0;
+ size_t subscribe_properties_length = 0;
+
+ if (s_compute_subscribe_variable_length_fields(
+ subscription_view, &total_remaining_length, &subscribe_properties_length)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - failed to compute variable length values for SUBSCRIBE packet with error "
+ "%d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - setting up encode for a SUBSCRIBE packet with remaining length %zu",
+ (void *)encoder->config.client,
+ total_remaining_length);
+
+ uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length;
+ uint32_t subscribe_property_length_u32 = (uint32_t)subscribe_properties_length;
+
+ /*
+ * Fixed Header
+ * byte 1:
+ * bits 7-4 MQTT Control Packet Type
+ * bits 3-0 Reserved, must be set to 0, 0, 1, 0
+ * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes)
+ */
+ ADD_ENCODE_STEP_U8(
+ encoder,
+ aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_SUBSCRIBE, SUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS));
+ ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32);
+
+ /*
+ * Variable Header
+ * byte 1-2: Packet Identifier
+ * byte 3-x: Property Length as Variable Byte Integer (1-4 bytes)
+ */
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)subscription_view->packet_id);
+ ADD_ENCODE_STEP_VLI(encoder, subscribe_property_length_u32);
+
+ /*
+ * Subscribe Properties
+ * (optional) Subscription Identifier
+ * (optional) User Properties
+ */
+ if (subscription_view->subscription_identifier != 0) {
+ ADD_ENCODE_STEP_U8(encoder, AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER);
+ ADD_ENCODE_STEP_VLI(encoder, *subscription_view->subscription_identifier);
+ }
+
+ aws_mqtt5_add_user_property_encoding_steps(
+ encoder, subscription_view->user_properties, subscription_view->user_property_count);
+
+ /*
+ * Payload
+ * n Topic Filters
+ * byte 1-2: Length
+ * byte 3..N: UTF-8 encoded Topic Filter
+ * byte N+1:
+ * bits 7-6 Reserved
+ * bits 5-4 Retain Handling
+ * bit 3 Retain as Published
+ * bit 2 No Local
+ * bits 1-0 Maximum QoS
+ */
+ aws_mqtt5_add_subscribe_topic_filter_encoding_steps(
+ encoder, subscription_view->subscriptions, subscription_view->subscription_count);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_compute_unsubscribe_variable_length_fields(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view,
+ size_t *total_remaining_length,
+ size_t *unsubscribe_properties_length) {
+
+ size_t unsubscribe_variable_header_property_length = aws_mqtt5_compute_user_property_encode_length(
+ unsubscribe_view->user_properties, unsubscribe_view->user_property_count);
+
+ *unsubscribe_properties_length = unsubscribe_variable_header_property_length;
+
+ /* variable header total length =
+ * 2 bytes for Packet Identifier
+ * + # bytes (variable_length_encoding(unsubscribe_variable_header_property_length))
+ * + unsubscribe_variable_header_property_length
+ */
+ size_t variable_header_length = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(
+ unsubscribe_variable_header_property_length, &variable_header_length)) {
+ return AWS_OP_ERR;
+ }
+ variable_header_length += 2 + unsubscribe_variable_header_property_length;
+
+ size_t payload_length = 0;
+
+ /*
+ * for each unsubscribe topic filter
+ * 2 bytes for the Topic Filter length
+ * n bytes for Topic Filter
+ */
+
+ for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) {
+ const struct aws_byte_cursor topic_filter = unsubscribe_view->topic_filters[i];
+ payload_length += topic_filter.len;
+ }
+
+ payload_length += (2 * unsubscribe_view->topic_filter_count);
+
+ *total_remaining_length = variable_header_length + payload_length;
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_encoder_begin_unsubscribe(struct aws_mqtt5_encoder *encoder, const void *view) {
+
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view = view;
+
+ size_t total_remaining_length = 0;
+ size_t unsubscribe_properties_length = 0;
+
+ if (s_compute_unsubscribe_variable_length_fields(
+ unsubscribe_view, &total_remaining_length, &unsubscribe_properties_length)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - failed to compute variable length values for UNSUBSCRIBE packet with error "
+ "%d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - setting up encode for a UNSUBSCRIBE packet with remaining length %zu",
+ (void *)encoder->config.client,
+ total_remaining_length);
+
+ uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length;
+ uint32_t unsubscribe_property_length_u32 = (uint32_t)unsubscribe_properties_length;
+
+ /*
+ * Fixed Header
+ * byte 1:
+ * bits 7-4 MQTT Control Packet type (10)
+ * bits 3-0 Reserved, must be set to 0, 0, 1, 0
+ * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes)
+ */
+ ADD_ENCODE_STEP_U8(
+ encoder,
+ aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_UNSUBSCRIBE, UNSUBSCRIBE_PACKET_FIXED_HEADER_RESERVED_BITS));
+ ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32);
+
+ /*
+ * Variable Header
+ * byte 1-2: Packet Identifier
+ * byte 3-x: Properties length as Variable Byte Integer (1-4 bytes)
+ */
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)unsubscribe_view->packet_id);
+ ADD_ENCODE_STEP_VLI(encoder, unsubscribe_property_length_u32);
+
+ /*
+ * (optional) User Properties
+ */
+ aws_mqtt5_add_user_property_encoding_steps(
+ encoder, unsubscribe_view->user_properties, unsubscribe_view->user_property_count);
+
+ /*
+ * Payload
+ * n Topic Filters
+ * byte 1-2: Length
+ * byte 3..N: UTF-8 encoded Topic Filter
+ */
+
+ aws_mqtt5_add_unsubscribe_topic_filter_encoding_steps(
+ encoder, unsubscribe_view->topic_filters, unsubscribe_view->topic_filter_count);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_compute_publish_variable_length_fields(
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ size_t *total_remaining_length,
+ size_t *publish_properties_length) {
+
+ size_t publish_property_section_length =
+ aws_mqtt5_compute_user_property_encode_length(publish_view->user_properties, publish_view->user_property_count);
+
+ ADD_OPTIONAL_U8_PROPERTY_LENGTH(publish_view->payload_format, publish_property_section_length);
+ ADD_OPTIONAL_U32_PROPERTY_LENGTH(publish_view->message_expiry_interval_seconds, publish_property_section_length);
+ ADD_OPTIONAL_U16_PROPERTY_LENGTH(publish_view->topic_alias, publish_property_section_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->response_topic, publish_property_section_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->correlation_data, publish_property_section_length);
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(publish_view->content_type, publish_property_section_length);
+
+ for (size_t i = 0; i < publish_view->subscription_identifier_count; ++i) {
+ size_t encoding_size = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(publish_view->subscription_identifiers[i], &encoding_size)) {
+ return AWS_OP_ERR;
+ }
+ publish_property_section_length += 1 + encoding_size;
+ }
+
+ *publish_properties_length = (uint32_t)publish_property_section_length;
+
+ /*
+ * Remaining Length:
+ * Variable Header
+ * - Topic Name
+ * - Packet Identifier
+ * - Property Length as VLI x
+ * - All Properties x
+ * Payload
+ */
+
+ size_t remaining_length = 0;
+
+ /* Property Length VLI size */
+ if (aws_mqtt5_get_variable_length_encode_size(publish_property_section_length, &remaining_length)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Topic name */
+ remaining_length += 2 + publish_view->topic.len;
+
+ /* Optional packet id */
+ if (publish_view->packet_id != 0) {
+ remaining_length += 2;
+ }
+
+ /* Properties */
+ remaining_length += publish_property_section_length;
+
+ /* Payload */
+ remaining_length += publish_view->payload.len;
+
+ *total_remaining_length = remaining_length;
+
+ return AWS_OP_SUCCESS;
+}
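+
+/*
+ * Worked example (illustrative): a QoS 1 PUBLISH to topic "a/b" (3 bytes) with a 10-byte payload and no
+ * properties has a property length of 0 (a 1-byte VLI), so the remaining length is 1 + (2 + 3) + 2 + 0 + 10 = 18.
+ */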
+
+static int s_aws_mqtt5_encoder_begin_publish(struct aws_mqtt5_encoder *encoder, const void *view) {
+
+ /* We make a shallow copy of the stored view so we can temporarily modify it for topic aliasing */
+ struct aws_mqtt5_packet_publish_view local_publish_view = *((const struct aws_mqtt5_packet_publish_view *)view);
+
+ uint16_t outbound_topic_alias = 0;
+ struct aws_byte_cursor outbound_topic;
+
+ if (encoder->topic_alias_resolver != NULL) {
+ AWS_ZERO_STRUCT(outbound_topic);
+ if (aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish(
+ encoder->topic_alias_resolver, &local_publish_view, &outbound_topic_alias, &outbound_topic)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - failed to perform outbound topic alias resolution on PUBLISH packet with "
+ "error "
+ "%d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ local_publish_view.topic = outbound_topic;
+ if (outbound_topic_alias != 0) {
+ local_publish_view.topic_alias = &outbound_topic_alias;
+ }
+ }
+
+ /*
+ * We're going to encode the local mutated view copy, not the stored view. This lets the original packet stay
+ * unchanged for the entire time it is owned by the client. Otherwise, events that disrupt the alias cache
+ * (like disconnections) would make correct aliasing impossible (because we'd have mutated and potentially lost
+ * topic information).
+ */
+ const struct aws_mqtt5_packet_publish_view *publish_view = &local_publish_view;
+
+ size_t total_remaining_length = 0;
+ size_t publish_properties_length = 0;
+
+ if (s_compute_publish_variable_length_fields(publish_view, &total_remaining_length, &publish_properties_length)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - failed to compute variable length values for PUBLISH packet with error "
+ "%d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - setting up encode for a PUBLISH packet with remaining length %zu",
+ (void *)encoder->config.client,
+ total_remaining_length);
+
+ uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length;
+ uint32_t publish_property_length_u32 = (uint32_t)publish_properties_length;
+
+ /*
+ * Fixed Header
+ * byte 1:
+ * bits 4-7: MQTT Control Packet Type
+ * bit 3: DUP flag
+ * bits 1-2: QoS level
+ * bit 0: RETAIN
+ * byte 2-x: Remaining Length as Variable Byte Integer (1-4 bytes)
+ */
+
+ uint8_t flags = 0;
+
+ if (publish_view->duplicate) {
+ flags |= 1 << 3;
+ }
+
+ flags |= ((uint8_t)publish_view->qos) << 1;
+
+ if (publish_view->retain) {
+ flags |= 1;
+ }
+
+ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PUBLISH, flags));
+
+ ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32);
+
+ /*
+ * Variable Header
+ * UTF-8 Encoded Topic Name
+ * 2 byte Packet Identifier
+ * 1-4 byte Property Length as Variable Byte Integer
+ * n bytes Properties
+ */
+
+ ADD_ENCODE_STEP_LENGTH_PREFIXED_CURSOR(encoder, publish_view->topic);
+ if (publish_view->qos != AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)publish_view->packet_id);
+ }
+ ADD_ENCODE_STEP_VLI(encoder, publish_property_length_u32);
+
+ ADD_ENCODE_STEP_OPTIONAL_U8_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_PAYLOAD_FORMAT_INDICATOR, publish_view->payload_format);
+ ADD_ENCODE_STEP_OPTIONAL_U32_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_MESSAGE_EXPIRY_INTERVAL, publish_view->message_expiry_interval_seconds);
+ ADD_ENCODE_STEP_OPTIONAL_U16_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_TOPIC_ALIAS, publish_view->topic_alias);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_RESPONSE_TOPIC, publish_view->response_topic);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_CORRELATION_DATA, publish_view->correlation_data);
+
+ for (size_t i = 0; i < publish_view->subscription_identifier_count; ++i) {
+ ADD_ENCODE_STEP_OPTIONAL_VLI_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_SUBSCRIPTION_IDENTIFIER, &publish_view->subscription_identifiers[i]);
+ }
+
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(encoder, AWS_MQTT5_PROPERTY_TYPE_CONTENT_TYPE, publish_view->content_type);
+
+ aws_mqtt5_add_user_property_encoding_steps(
+ encoder, publish_view->user_properties, publish_view->user_property_count);
+
+ /*
+ * Payload
+ * Content and format of data is application specific
+ */
+ if (publish_view->payload.len > 0) {
+ ADD_ENCODE_STEP_CURSOR(encoder, publish_view->payload);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_compute_puback_variable_length_fields(
+ const struct aws_mqtt5_packet_puback_view *puback_view,
+ size_t *total_remaining_length,
+ size_t *puback_properties_length) {
+
+ size_t local_property_length =
+ aws_mqtt5_compute_user_property_encode_length(puback_view->user_properties, puback_view->user_property_count);
+
+ ADD_OPTIONAL_CURSOR_PROPERTY_LENGTH(puback_view->reason_string, local_property_length);
+
+ *puback_properties_length = (uint32_t)local_property_length;
+
+ /* variable header total length =
+ * 2 bytes for Packet Identifier
+ * + 1 byte for PUBACK reason code if it exists
+ * + puback property length (local_property_length)
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124
+ * If there are no properties and Reason Code is success, PUBACK ends with the packet id
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124
+ * If there are no properties and Reason Code is not success, PUBACK ends with the reason code
+ */
+ if (local_property_length == 0) {
+ if (puback_view->reason_code == AWS_MQTT5_PARC_SUCCESS) {
+ *total_remaining_length = 2;
+ } else {
+ *total_remaining_length = 3;
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ size_t variable_property_length_size = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(local_property_length, &variable_property_length_size)) {
+ return AWS_OP_ERR;
+ }
+ /* vli of property length + packet id + reason code + properties length */
+ *total_remaining_length = variable_property_length_size + 3 + local_property_length;
+
+ return AWS_OP_SUCCESS;
+}
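+
+/*
+ * Worked examples (illustrative): a successful PUBACK with no properties encodes only the packet id (remaining
+ * length 2); a non-success reason code with no properties adds the reason code byte (remaining length 3); a
+ * PUBACK whose only property is a 4-byte reason string has a property length of 1 + 2 + 4 = 7, giving a remaining
+ * length of 1 + 3 + 7 = 11.
+ */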
+
+static int s_aws_mqtt5_encoder_begin_puback(struct aws_mqtt5_encoder *encoder, const void *view) {
+ const struct aws_mqtt5_packet_puback_view *puback_view = view;
+
+ size_t total_remaining_length = 0;
+ size_t puback_properties_length = 0;
+
+ if (s_compute_puback_variable_length_fields(puback_view, &total_remaining_length, &puback_properties_length)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - failed to compute variable length values for PUBACK packet with error "
+ "%d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "(%p) mqtt5 client encoder - setting up encode for a PUBACK packet with remaining length %zu",
+ (void *)encoder->config.client,
+ total_remaining_length);
+
+ uint32_t total_remaining_length_u32 = (uint32_t)total_remaining_length;
+ uint32_t puback_property_length_u32 = (uint32_t)puback_properties_length;
+
+ /*
+ * Fixed Header
+ * byte 1:
+ * bits 7-4 MQTT Control Packet Type
+ * bits 3-0 Reserved, must be set to 0, 0, 0, 0
+ * byte 2-x: Remaining Length as a Variable Byte Integer (1-4 bytes)
+ */
+
+ ADD_ENCODE_STEP_U8(encoder, aws_mqtt5_compute_fixed_header_byte1(AWS_MQTT5_PT_PUBACK, 0));
+ ADD_ENCODE_STEP_VLI(encoder, total_remaining_length_u32);
+
+ /*
+ * Variable Header
+ * byte 1-2: Packet Identifier
+ * byte 3: PUBACK Reason Code
+ * byte 4-x: Property Length
+ * Properties
+ */
+ ADD_ENCODE_STEP_U16(encoder, (uint16_t)puback_view->packet_id);
+ /*
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124
+ * If Reason Code is success and there are no properties, PUBACK ends with the packet id
+ */
+ if (total_remaining_length == 2) {
+ return AWS_OP_SUCCESS;
+ }
+
+ ADD_ENCODE_STEP_U8(encoder, puback_view->reason_code);
+
+ /*
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901126
+ * If remaining length < 4 there is no property length
+ */
+ if (total_remaining_length < 4) {
+ return AWS_OP_SUCCESS;
+ }
+
+ ADD_ENCODE_STEP_VLI(encoder, puback_property_length_u32);
+ ADD_ENCODE_STEP_OPTIONAL_CURSOR_PROPERTY(
+ encoder, AWS_MQTT5_PROPERTY_TYPE_REASON_STRING, puback_view->reason_string);
+ aws_mqtt5_add_user_property_encoding_steps(encoder, puback_view->user_properties, puback_view->user_property_count);
+
+ return AWS_OP_SUCCESS;
+}
+
+static enum aws_mqtt5_encoding_result s_execute_encode_step(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_mqtt5_encoding_step *step,
+ struct aws_byte_buf *buffer) {
+ size_t buffer_room = buffer->capacity - buffer->len;
+
+ switch (step->type) {
+ case AWS_MQTT5_EST_U8:
+ if (buffer_room < 1) {
+ return AWS_MQTT5_ER_OUT_OF_ROOM;
+ }
+
+ aws_byte_buf_write_u8(buffer, step->value.value_u8);
+
+ return AWS_MQTT5_ER_FINISHED;
+
+ case AWS_MQTT5_EST_U16:
+ if (buffer_room < 2) {
+ return AWS_MQTT5_ER_OUT_OF_ROOM;
+ }
+
+ aws_byte_buf_write_be16(buffer, step->value.value_u16);
+
+ return AWS_MQTT5_ER_FINISHED;
+
+ case AWS_MQTT5_EST_U32:
+ if (buffer_room < 4) {
+ return AWS_MQTT5_ER_OUT_OF_ROOM;
+ }
+
+ aws_byte_buf_write_be32(buffer, step->value.value_u32);
+
+ return AWS_MQTT5_ER_FINISHED;
+
+ case AWS_MQTT5_EST_VLI:
+ /* being lazy here and just assuming the worst case */
+ if (buffer_room < 4) {
+ return AWS_MQTT5_ER_OUT_OF_ROOM;
+ }
+
+ /* This can't fail. We've already validated the vli value when we made the step */
+ aws_mqtt5_encode_variable_length_integer(buffer, step->value.value_u32);
+
+ return AWS_MQTT5_ER_FINISHED;
+
+ case AWS_MQTT5_EST_CURSOR:
+ if (buffer_room < 1) {
+ return AWS_MQTT5_ER_OUT_OF_ROOM;
+ }
+
+ aws_byte_buf_write_to_capacity(buffer, &step->value.value_cursor);
+
+ return (step->value.value_cursor.len == 0) ? AWS_MQTT5_ER_FINISHED : AWS_MQTT5_ER_OUT_OF_ROOM;
+
+ case AWS_MQTT5_EST_STREAM:
+ while (buffer->len < buffer->capacity) {
+ if (aws_input_stream_read(step->value.value_stream, buffer)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to read from stream with error %d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_MQTT5_ER_ERROR;
+ }
+
+ struct aws_stream_status status;
+ if (aws_input_stream_get_status(step->value.value_stream, &status)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: failed to query stream status with error %d(%s)",
+ (void *)encoder->config.client,
+ error_code,
+ aws_error_debug_str(error_code));
+ return AWS_MQTT5_ER_ERROR;
+ }
+
+ if (status.is_end_of_stream) {
+ return AWS_MQTT5_ER_FINISHED;
+ }
+ }
+
+ if (buffer->len == buffer->capacity) {
+ return AWS_MQTT5_ER_OUT_OF_ROOM;
+ }
+
+ /* fall through intentional */
+ }
+
+ /* shouldn't be reachable */
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "id=%p: encoder reached an unreachable state", (void *)encoder->config.client);
+ aws_raise_error(AWS_ERROR_INVALID_STATE);
+ return AWS_MQTT5_ER_ERROR;
+}
+
+enum aws_mqtt5_encoding_result aws_mqtt5_encoder_encode_to_buffer(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_byte_buf *buffer) {
+
+ enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_FINISHED;
+ size_t step_count = aws_array_list_length(&encoder->encoding_steps);
+ while (result == AWS_MQTT5_ER_FINISHED && encoder->current_encoding_step_index < step_count) {
+ struct aws_mqtt5_encoding_step *step = NULL;
+ aws_array_list_get_at_ptr(&encoder->encoding_steps, (void **)&step, encoder->current_encoding_step_index);
+
+ result = s_execute_encode_step(encoder, step, buffer);
+ if (result == AWS_MQTT5_ER_FINISHED) {
+ encoder->current_encoding_step_index++;
+ }
+ }
+
+ if (result == AWS_MQTT5_ER_FINISHED) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_CLIENT, "id=%p: finished encoding current operation", (void *)encoder->config.client);
+ aws_mqtt5_encoder_reset(encoder);
+ }
+
+ return result;
+}
+
+static struct aws_mqtt5_encoder_function_table s_aws_mqtt5_encoder_default_function_table = {
+ .encoders_by_packet_type =
+ {
+ NULL, /* RESERVED = 0 */
+ &s_aws_mqtt5_encoder_begin_connect, /* CONNECT */
+ NULL, /* CONNACK */
+ &s_aws_mqtt5_encoder_begin_publish, /* PUBLISH */
+ &s_aws_mqtt5_encoder_begin_puback, /* PUBACK */
+ NULL, /* PUBREC */
+ NULL, /* PUBREL */
+ NULL, /* PUBCOMP */
+ &s_aws_mqtt5_encoder_begin_subscribe, /* SUBSCRIBE */
+ NULL, /* SUBACK */
+ &s_aws_mqtt5_encoder_begin_unsubscribe, /* UNSUBSCRIBE */
+ NULL, /* UNSUBACK */
+ &s_aws_mqtt5_encoder_begin_pingreq, /* PINGREQ */
+ NULL, /* PINGRESP */
+ &s_aws_mqtt5_encoder_begin_disconnect, /* DISCONNECT */
+ NULL /* AUTH */
+ },
+};
+
+const struct aws_mqtt5_encoder_function_table *g_aws_mqtt5_encoder_default_function_table =
+ &s_aws_mqtt5_encoder_default_function_table;
+
+int aws_mqtt5_encoder_init(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_encoder_options *options) {
+ AWS_ZERO_STRUCT(*encoder);
+
+ encoder->config = *options;
+ if (encoder->config.encoders == NULL) {
+ encoder->config.encoders = &s_aws_mqtt5_encoder_default_function_table;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &encoder->encoding_steps, allocator, INITIAL_ENCODING_STEP_COUNT, sizeof(struct aws_mqtt5_encoding_step))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_encoder_clean_up(struct aws_mqtt5_encoder *encoder) {
+ aws_array_list_clean_up(&encoder->encoding_steps);
+}
+
+void aws_mqtt5_encoder_reset(struct aws_mqtt5_encoder *encoder) {
+ aws_array_list_clear(&encoder->encoding_steps);
+ encoder->current_encoding_step_index = 0;
+}
+
+int aws_mqtt5_encoder_append_packet_encoding(
+ struct aws_mqtt5_encoder *encoder,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet_view) {
+ aws_mqtt5_encode_begin_packet_type_fn *encoding_fn = encoder->config.encoders->encoders_by_packet_type[packet_type];
+ if (encoding_fn == NULL) {
+ return aws_raise_error(AWS_ERROR_MQTT5_ENCODE_FAILURE);
+ }
+
+ return (*encoding_fn)(encoder, packet_view);
+}
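+
+/*
+ * Typical usage (illustrative sketch, error handling omitted; encoder_options, my_publish_view and
+ * message_buffer are hypothetical):
+ *
+ *   struct aws_mqtt5_encoder encoder;
+ *   aws_mqtt5_encoder_init(&encoder, allocator, &encoder_options);
+ *   aws_mqtt5_encoder_append_packet_encoding(&encoder, AWS_MQTT5_PT_PUBLISH, &my_publish_view);
+ *
+ *   enum aws_mqtt5_encoding_result result = AWS_MQTT5_ER_OUT_OF_ROOM;
+ *   while (result == AWS_MQTT5_ER_OUT_OF_ROOM) {
+ *       result = aws_mqtt5_encoder_encode_to_buffer(&encoder, &message_buffer);
+ *       // send message_buffer downstream and acquire a fresh buffer before continuing
+ *   }
+ *   aws_mqtt5_encoder_clean_up(&encoder);
+ */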
+
+static int s_compute_packet_size(size_t total_remaining_length, size_t *packet_size) {
+ /* 1 (packet type + flags) + vli_length(total_remaining_length) + total_remaining_length */
+ size_t encode_size = 0;
+ if (aws_mqtt5_get_variable_length_encode_size(total_remaining_length, &encode_size)) {
+ return AWS_OP_ERR;
+ }
+
+ size_t prefix = (size_t)1 + encode_size;
+
+ if (aws_add_size_checked(prefix, total_remaining_length, packet_size)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
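+
+/*
+ * Worked example (illustrative): a remaining length of 300 needs a 2-byte VLI, so the full packet size is
+ * 1 + 2 + 300 = 303 bytes.
+ */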
+
+int aws_mqtt5_packet_view_get_encoded_size(
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet_view,
+ size_t *packet_size) {
+ size_t total_remaining_length = 0;
+ size_t properties_length = 0;
+
+ if (packet_type == AWS_MQTT5_PT_PINGREQ) {
+ *packet_size = AWS_MQTT5_PINGREQ_ENCODED_SIZE;
+ return AWS_OP_SUCCESS;
+ }
+
+ switch (packet_type) {
+ case AWS_MQTT5_PT_PUBLISH:
+ if (s_compute_publish_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) {
+ return AWS_OP_ERR;
+ }
+ break;
+
+ case AWS_MQTT5_PT_SUBSCRIBE:
+ if (s_compute_subscribe_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) {
+ return AWS_OP_ERR;
+ }
+ break;
+
+ case AWS_MQTT5_PT_UNSUBSCRIBE:
+ if (s_compute_unsubscribe_variable_length_fields(
+ packet_view, &total_remaining_length, &properties_length)) {
+ return AWS_OP_ERR;
+ }
+ break;
+
+ case AWS_MQTT5_PT_DISCONNECT:
+ if (s_compute_disconnect_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) {
+ return AWS_OP_ERR;
+ }
+ break;
+
+ case AWS_MQTT5_PT_PUBACK:
+ if (s_compute_puback_variable_length_fields(packet_view, &total_remaining_length, &properties_length)) {
+ return AWS_OP_ERR;
+ }
+ break;
+
+ default:
+ return aws_raise_error(AWS_ERROR_MQTT5_ENCODE_SIZE_UNSUPPORTED_PACKET_TYPE);
+ }
+
+ return s_compute_packet_size(total_remaining_length, packet_size);
+}
+
+void aws_mqtt5_encoder_set_outbound_topic_alias_resolver(
+ struct aws_mqtt5_encoder *encoder,
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver) {
+
+ encoder->topic_alias_resolver = resolver;
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
new file mode 100644
index 0000000000..04170394e3
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_listener.c
@@ -0,0 +1,121 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/v5/mqtt5_listener.h>
+
+#include <aws/common/ref_count.h>
+#include <aws/common/task_scheduler.h>
+#include <aws/io/event_loop.h>
+#include <aws/mqtt/private/v5/mqtt5_client_impl.h>
+
+struct aws_mqtt5_listener {
+ struct aws_allocator *allocator;
+
+ struct aws_ref_count ref_count;
+
+ struct aws_mqtt5_listener_config config;
+
+ uint64_t callback_set_id;
+
+ struct aws_task initialize_task;
+ struct aws_task terminate_task;
+};
+
+static void s_mqtt5_listener_destroy(struct aws_mqtt5_listener *listener) {
+
+ aws_mqtt5_client_release(listener->config.client);
+
+ aws_mqtt5_listener_termination_completion_fn *termination_callback = listener->config.termination_callback;
+ void *termination_callback_user_data = listener->config.termination_callback_user_data;
+
+ aws_mem_release(listener->allocator, listener);
+
+ if (termination_callback != NULL) {
+ (*termination_callback)(termination_callback_user_data);
+ }
+}
+
+static void s_mqtt5_listener_initialize_task_fn(struct aws_task *task, void *arg, enum aws_task_status task_status) {
+ (void)task;
+
+ struct aws_mqtt5_listener *listener = arg;
+
+ if (task_status == AWS_TASK_STATUS_RUN_READY) {
+ listener->callback_set_id = aws_mqtt5_callback_set_manager_push_front(
+ &listener->config.client->callback_manager, &listener->config.listener_callbacks);
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: Mqtt5 Listener initialized, listener id=%p",
+ (void *)listener->config.client,
+ (void *)listener);
+ aws_mqtt5_listener_release(listener);
+ } else {
+ s_mqtt5_listener_destroy(listener);
+ }
+}
+
+static void s_mqtt5_listener_terminate_task_fn(struct aws_task *task, void *arg, enum aws_task_status task_status) {
+ (void)task;
+
+ struct aws_mqtt5_listener *listener = arg;
+
+ if (task_status == AWS_TASK_STATUS_RUN_READY) {
+ aws_mqtt5_callback_set_manager_remove(&listener->config.client->callback_manager, listener->callback_set_id);
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: Mqtt5 Listener terminated, listener id=%p",
+ (void *)listener->config.client,
+ (void *)listener);
+
+ s_mqtt5_listener_destroy(listener);
+}
+
+static void s_aws_mqtt5_listener_on_zero_ref_count(void *context) {
+ struct aws_mqtt5_listener *listener = context;
+
+ aws_event_loop_schedule_task_now(listener->config.client->loop, &listener->terminate_task);
+}
+
+struct aws_mqtt5_listener *aws_mqtt5_listener_new(
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_listener_config *config) {
+ if (config->client == NULL) {
+ return NULL;
+ }
+
+ struct aws_mqtt5_listener *listener = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_listener));
+
+ listener->allocator = allocator;
+ listener->config = *config;
+
+ aws_mqtt5_client_acquire(config->client);
+ aws_ref_count_init(&listener->ref_count, listener, s_aws_mqtt5_listener_on_zero_ref_count);
+
+ aws_task_init(&listener->initialize_task, s_mqtt5_listener_initialize_task_fn, listener, "Mqtt5ListenerInitialize");
+ aws_task_init(&listener->terminate_task, s_mqtt5_listener_terminate_task_fn, listener, "Mqtt5ListenerTerminate");
+
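+ /* Hold an extra reference on behalf of the initialize task; the task releases it after registering the
+ * listener's callbacks, or destroys the listener outright if the task is canceled before running. */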
+ aws_mqtt5_listener_acquire(listener);
+ aws_event_loop_schedule_task_now(config->client->loop, &listener->initialize_task);
+
+ return listener;
+}
+
+struct aws_mqtt5_listener *aws_mqtt5_listener_acquire(struct aws_mqtt5_listener *listener) {
+ if (listener != NULL) {
+ aws_ref_count_acquire(&listener->ref_count);
+ }
+
+ return listener;
+}
+
+struct aws_mqtt5_listener *aws_mqtt5_listener_release(struct aws_mqtt5_listener *listener) {
+ if (listener != NULL) {
+ aws_ref_count_release(&listener->ref_count);
+ }
+
+ return NULL;
+}
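+
+/*
+ * Illustrative usage sketch only -- not part of the upstream aws-c-mqtt source. It assumes an already-created
+ * `client` and shows the intended lifecycle: create a listener bound to the client, then drop the reference;
+ * the termination callback fires once cleanup has completed on the client's event loop.
+ */
+static void s_example_on_listener_termination(void *user_data) {
+ (void)user_data; /* last chance to release any state tied to the listener */
+}
+
+static void s_example_listener_usage(struct aws_allocator *allocator, struct aws_mqtt5_client *client) {
+ struct aws_mqtt5_listener_config config = {
+ .client = client,
+ /* .listener_callbacks would be filled in with packet/lifecycle handlers here */
+ .termination_callback = s_example_on_listener_termination,
+ .termination_callback_user_data = NULL,
+ };
+
+ struct aws_mqtt5_listener *listener = aws_mqtt5_listener_new(allocator, &config);
+
+ /* ... events arrive through the registered callbacks ... */
+
+ aws_mqtt5_listener_release(listener); /* triggers asynchronous termination */
+}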
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
new file mode 100644
index 0000000000..b8b566e910
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_options_storage.c
@@ -0,0 +1,3984 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_options_storage.h>
+
+#include <aws/common/clock.h>
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/stream.h>
+#include <aws/mqtt/private/v5/mqtt5_client_impl.h>
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+#include <aws/mqtt/v5/mqtt5_client.h>
+
+#include <inttypes.h>
+
+/*********************************************************************************************************************
+ * Property set
+ ********************************************************************************************************************/
+
+int aws_mqtt5_user_property_set_init(
+ struct aws_mqtt5_user_property_set *property_set,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*property_set);
+
+ if (aws_array_list_init_dynamic(&property_set->properties, allocator, 0, sizeof(struct aws_mqtt5_user_property))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_user_property_set_init_with_storage(
+ struct aws_mqtt5_user_property_set *property_set,
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *storage,
+ size_t property_count,
+ const struct aws_mqtt5_user_property *properties) {
+ AWS_ZERO_STRUCT(*property_set);
+
+ if (aws_array_list_init_dynamic(
+ &property_set->properties, allocator, property_count, sizeof(struct aws_mqtt5_user_property))) {
+ goto error;
+ }
+
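+ /* Deep-copy each name/value into the shared storage buffer; aws_byte_buf_append_and_update re-points the
+ * cloned cursors at the stable copies before they are pushed into the array list. */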
+ for (size_t i = 0; i < property_count; ++i) {
+ const struct aws_mqtt5_user_property *property = &properties[i];
+ struct aws_mqtt5_user_property property_clone = *property;
+
+ if (aws_byte_buf_append_and_update(storage, &property_clone.name)) {
+ goto error;
+ }
+
+ if (aws_byte_buf_append_and_update(storage, &property_clone.value)) {
+ goto error;
+ }
+
+ if (aws_array_list_push_back(&property_set->properties, &property_clone)) {
+ goto error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+error:
+
+ aws_mqtt5_user_property_set_clean_up(property_set);
+
+ return AWS_OP_ERR;
+}
+
+void aws_mqtt5_user_property_set_clean_up(struct aws_mqtt5_user_property_set *property_set) {
+ aws_array_list_clean_up(&property_set->properties);
+}
+
+size_t aws_mqtt5_user_property_set_size(const struct aws_mqtt5_user_property_set *property_set) {
+ return aws_array_list_length(&property_set->properties);
+}
+
+int aws_mqtt5_user_property_set_get_property(
+ const struct aws_mqtt5_user_property_set *property_set,
+ size_t index,
+ struct aws_mqtt5_user_property *property_out) {
+ return aws_array_list_get_at(&property_set->properties, property_out, index);
+}
+
+int aws_mqtt5_user_property_set_add_stored_property(
+ struct aws_mqtt5_user_property_set *property_set,
+ struct aws_mqtt5_user_property *property) {
+ return aws_array_list_push_back(&property_set->properties, property);
+}
+
+static void s_aws_mqtt5_user_property_set_log(
+ struct aws_logger *log_handle,
+ const struct aws_mqtt5_user_property *properties,
+ size_t property_count,
+ void *log_context,
+ enum aws_log_level level,
+ const char *log_prefix) {
+
+ if (property_count == 0) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s with %zu user properties:",
+ log_context,
+ log_prefix,
+ property_count);
+
+ for (size_t i = 0; i < property_count; ++i) {
+ const struct aws_mqtt5_user_property *property = &properties[i];
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s user property %zu with name \"" PRInSTR "\", value \"" PRInSTR "\"",
+ log_context,
+ log_prefix,
+ i,
+ AWS_BYTE_CURSOR_PRI(property->name),
+ AWS_BYTE_CURSOR_PRI(property->value));
+ }
+}
+
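+/*
+ * The storage-size helpers add up every referenced byte so the backing buffer can be allocated with exact
+ * capacity up front; later appends never need to reallocate, which keeps previously-updated cursors valid.
+ */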
+static size_t s_aws_mqtt5_user_property_set_compute_storage_size(
+ const struct aws_mqtt5_user_property *properties,
+ size_t property_count) {
+ size_t storage_size = 0;
+ for (size_t i = 0; i < property_count; ++i) {
+ const struct aws_mqtt5_user_property *property = &properties[i];
+ storage_size += property->name.len;
+ storage_size += property->value.len;
+ }
+
+ return storage_size;
+}
+
+static int s_aws_mqtt5_user_property_set_validate(
+ const struct aws_mqtt5_user_property *properties,
+ size_t property_count,
+ const char *log_prefix,
+ void *log_context) {
+ if (properties == NULL) {
+ if (property_count == 0) {
+ return AWS_OP_SUCCESS;
+ } else {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s - Invalid user property configuration, null properties, non-zero property count",
+ log_context,
+ log_prefix);
+ return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION);
+ }
+ }
+
+ if (property_count > AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s - user property limit (%d) exceeded (%zu)",
+ log_context,
+ log_prefix,
+ (int)AWS_MQTT5_CLIENT_MAXIMUM_USER_PROPERTIES,
+ property_count);
+ return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION);
+ }
+
+ for (size_t i = 0; i < property_count; ++i) {
+ const struct aws_mqtt5_user_property *property = &properties[i];
+ if (property->name.len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s - user property #%zu name too long (%zu)",
+ log_context,
+ log_prefix,
+ i,
+ property->name.len);
+ return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION);
+ }
+
+ if (aws_mqtt5_validate_utf8_text(property->name)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL, "id=%p: %s - user property #%zu name not valid UTF8", log_context, log_prefix, i);
+ return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION);
+ }
+ if (property->value.len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s - user property #%zu value too long (%zu)",
+ log_context,
+ log_prefix,
+ i,
+ property->value.len);
+ return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION);
+ }
+ if (aws_mqtt5_validate_utf8_text(property->value)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: %s - user property #%zu value not valid UTF8",
+ log_context,
+ log_prefix,
+ i);
+ return aws_raise_error(AWS_ERROR_MQTT5_USER_PROPERTY_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*********************************************************************************************************************
+ * Operation base
+ ********************************************************************************************************************/
+
+struct aws_mqtt5_operation *aws_mqtt5_operation_acquire(struct aws_mqtt5_operation *operation) {
+ if (operation == NULL) {
+ return NULL;
+ }
+
+ aws_ref_count_acquire(&operation->ref_count);
+
+ return operation;
+}
+
+struct aws_mqtt5_operation *aws_mqtt5_operation_release(struct aws_mqtt5_operation *operation) {
+ if (operation != NULL) {
+ aws_ref_count_release(&operation->ref_count);
+ }
+
+ return NULL;
+}
+
+void aws_mqtt5_operation_complete(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *associated_view) {
+ AWS_FATAL_ASSERT(operation->vtable != NULL);
+ if (operation->vtable->aws_mqtt5_operation_completion_fn != NULL) {
+ (*operation->vtable->aws_mqtt5_operation_completion_fn)(operation, error_code, packet_type, associated_view);
+ }
+}
+
+void aws_mqtt5_operation_set_packet_id(struct aws_mqtt5_operation *operation, aws_mqtt5_packet_id_t packet_id) {
+ AWS_FATAL_ASSERT(operation->vtable != NULL);
+ if (operation->vtable->aws_mqtt5_operation_set_packet_id_fn != NULL) {
+ (*operation->vtable->aws_mqtt5_operation_set_packet_id_fn)(operation, packet_id);
+ }
+}
+
+aws_mqtt5_packet_id_t aws_mqtt5_operation_get_packet_id(const struct aws_mqtt5_operation *operation) {
+ AWS_FATAL_ASSERT(operation->vtable != NULL);
+ if (operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn != NULL) {
+ aws_mqtt5_packet_id_t *packet_id_ptr =
+ (*operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn)(operation);
+ if (packet_id_ptr != NULL) {
+ return *packet_id_ptr;
+ }
+ }
+
+ return 0;
+}
+
+aws_mqtt5_packet_id_t *aws_mqtt5_operation_get_packet_id_address(const struct aws_mqtt5_operation *operation) {
+ AWS_FATAL_ASSERT(operation->vtable != NULL);
+ if (operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn != NULL) {
+ return (*operation->vtable->aws_mqtt5_operation_get_packet_id_address_fn)(operation);
+ }
+
+ return NULL;
+}
+
+int aws_mqtt5_operation_validate_vs_connection_settings(
+ const struct aws_mqtt5_operation *operation,
+ const struct aws_mqtt5_client *client) {
+
+ AWS_FATAL_ASSERT(operation->vtable != NULL);
+ AWS_FATAL_ASSERT(client->loop == NULL || aws_event_loop_thread_is_callers_thread(client->loop));
+
+ /* If we have valid negotiated settings, check against them as well */
+ if (aws_mqtt5_client_are_negotiated_settings_valid(client)) {
+ const struct aws_mqtt5_negotiated_settings *settings = &client->negotiated_settings;
+
+ size_t packet_size_in_bytes = 0;
+ if (aws_mqtt5_packet_view_get_encoded_size(
+ operation->packet_type, operation->packet_view, &packet_size_in_bytes)) {
+ int error_code = aws_last_error();
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: error %d (%s) computing %s packet size",
+ (void *)client,
+ error_code,
+ aws_error_debug_str(error_code),
+ aws_mqtt5_packet_type_to_c_string(operation->packet_type));
+ return aws_raise_error(AWS_ERROR_MQTT5_PACKET_VALIDATION);
+ }
+
+ if (packet_size_in_bytes > settings->maximum_packet_size_to_server) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "id=%p: encoded %s packet size (%zu) exceeds server's maximum "
+ "packet size (%" PRIu32 ")",
+ (void *)client,
+ aws_mqtt5_packet_type_to_c_string(operation->packet_type),
+ packet_size_in_bytes,
+ settings->maximum_packet_size_to_server);
+ return aws_raise_error(AWS_ERROR_MQTT5_PACKET_VALIDATION);
+ }
+ }
+
+ if (operation->vtable->aws_mqtt5_operation_validate_vs_connection_settings_fn != NULL) {
+ return (*operation->vtable->aws_mqtt5_operation_validate_vs_connection_settings_fn)(
+ operation->packet_view, client);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
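+/*
+ * Operations that need no completion callback, packet id, or per-packet connection-settings validation (for
+ * example CONNECT) use an all-NULL vtable; the dispatch helpers above treat NULL entries as no-ops.
+ */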
+static struct aws_mqtt5_operation_vtable s_empty_operation_vtable = {
+ .aws_mqtt5_operation_completion_fn = NULL,
+ .aws_mqtt5_operation_set_packet_id_fn = NULL,
+ .aws_mqtt5_operation_get_packet_id_address_fn = NULL,
+ .aws_mqtt5_operation_validate_vs_connection_settings_fn = NULL,
+};
+
+/*********************************************************************************************************************
+ * Connect
+ ********************************************************************************************************************/
+
+int aws_mqtt5_packet_connect_view_validate(const struct aws_mqtt5_packet_connect_view *connect_options) {
+ if (connect_options == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "Null CONNECT options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (connect_options->client_id.len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_connect_view - client id too long", (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+
+ if (aws_mqtt5_validate_utf8_text(connect_options->client_id)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - client id not valid UTF-8",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+
+ if (connect_options->username != NULL) {
+ if (connect_options->username->len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - username too long",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+
+ if (aws_mqtt5_validate_utf8_text(*connect_options->username)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - username not valid UTF-8",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (connect_options->password != NULL) {
+ if (connect_options->password->len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - password too long",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (connect_options->receive_maximum != NULL) {
+ if (*connect_options->receive_maximum == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - receive maximum property of CONNECT packet may not be zero.",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (connect_options->maximum_packet_size_bytes != NULL) {
+ if (*connect_options->maximum_packet_size_bytes == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - maximum packet size property of CONNECT packet may not be "
+ "zero.",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (connect_options->will != NULL) {
+ const struct aws_mqtt5_packet_publish_view *will_options = connect_options->will;
+ if (aws_mqtt5_packet_publish_view_validate(will_options)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet Will message failed validation",
+ (void *)connect_options);
+ return AWS_OP_ERR;
+ }
+
+ if (will_options->payload.len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - will payload larger than %d",
+ (void *)connect_options,
+ (int)UINT16_MAX);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (connect_options->request_problem_information != NULL) {
+ if (*connect_options->request_problem_information > 1) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet request problem information has invalid value",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (connect_options->request_response_information != NULL) {
+ if (*connect_options->request_response_information > 1) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet request response information has invalid value",
+ (void *)connect_options);
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (s_aws_mqtt5_user_property_set_validate(
+ connect_options->user_properties,
+ connect_options->user_property_count,
+ "aws_mqtt5_packet_connect_view",
+ (void *)connect_options)) {
+ return AWS_OP_ERR;
+ }
+
+ if (connect_options->authentication_method != NULL || connect_options->authentication_data != NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view - CONNECT packet has unsupported authentication fields set.",
+ (void *)connect_options);
+ /* TODO: UTF-8 validation for authentication_method once supported. */
+ return aws_raise_error(AWS_ERROR_MQTT5_CONNECT_OPTIONS_VALIDATION);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_connect_view_log(
+ const struct aws_mqtt5_packet_connect_view *connect_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view keep alive interval set to %" PRIu16,
+ (void *)connect_view,
+ connect_view->keep_alive_interval_seconds);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view client id set to \"" PRInSTR "\"",
+ (void *)connect_view,
+ AWS_BYTE_CURSOR_PRI(connect_view->client_id));
+
+ if (connect_view->username != NULL) {
+ /* Intentionally do not log username since it too can contain sensitive information */
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view username set",
+ (void *)connect_view);
+ }
+
+ if (connect_view->password != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view password set",
+ (void *)connect_view);
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view clean start set to %d",
+ (void *)connect_view,
+ (int)(connect_view->clean_start ? 1 : 0));
+
+ if (connect_view->session_expiry_interval_seconds != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view session expiry interval set to %" PRIu32,
+ (void *)connect_view,
+ *connect_view->session_expiry_interval_seconds);
+ }
+
+ if (connect_view->request_response_information != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view request response information set to %d",
+ (void *)connect_view,
+ (int)*connect_view->request_response_information);
+ }
+
+ if (connect_view->request_problem_information != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view request problem information set to %d",
+ (void *)connect_view,
+ (int)*connect_view->request_problem_information);
+ }
+
+ if (connect_view->receive_maximum != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view receive maximum set to %" PRIu16,
+ (void *)connect_view,
+ *connect_view->receive_maximum);
+ }
+
+ if (connect_view->topic_alias_maximum != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view topic alias maximum set to %" PRIu16,
+ (void *)connect_view,
+ *connect_view->topic_alias_maximum);
+ }
+
+ if (connect_view->maximum_packet_size_bytes != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view maximum packet size set to %" PRIu32,
+ (void *)connect_view,
+ *connect_view->maximum_packet_size_bytes);
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view set will to (%p)",
+ (void *)connect_view,
+ (void *)connect_view->will);
+
+ if (connect_view->will != NULL) {
+ aws_mqtt5_packet_publish_view_log(connect_view->will, level);
+ }
+
+ if (connect_view->will_delay_interval_seconds != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view will delay interval set to %" PRIu32,
+ (void *)connect_view,
+ *connect_view->will_delay_interval_seconds);
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ connect_view->user_properties,
+ connect_view->user_property_count,
+ (void *)connect_view,
+ level,
+ "aws_mqtt5_packet_connect_view");
+
+ if (connect_view->authentication_method != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view authentication method set",
+ (void *)connect_view);
+ }
+
+ if (connect_view->authentication_data != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connect_view authentication data set",
+ (void *)connect_view);
+ }
+}
+
+void aws_mqtt5_packet_connect_storage_clean_up(struct aws_mqtt5_packet_connect_storage *storage) {
+ if (storage == NULL) {
+ return;
+ }
+
+ if (storage->will != NULL) {
+ aws_mqtt5_packet_publish_storage_clean_up(storage->will);
+ aws_mem_release(storage->allocator, storage->will);
+ }
+
+ aws_mqtt5_user_property_set_clean_up(&storage->user_properties);
+
+ aws_byte_buf_clean_up_secure(&storage->storage);
+}
+
+static size_t s_aws_mqtt5_packet_connect_compute_storage_size(const struct aws_mqtt5_packet_connect_view *view) {
+ if (view == NULL) {
+ return 0;
+ }
+
+ size_t storage_size = 0;
+
+ storage_size += view->client_id.len;
+ if (view->username != NULL) {
+ storage_size += view->username->len;
+ }
+ if (view->password != NULL) {
+ storage_size += view->password->len;
+ }
+
+ storage_size +=
+ s_aws_mqtt5_user_property_set_compute_storage_size(view->user_properties, view->user_property_count);
+
+ if (view->authentication_method != NULL) {
+ storage_size += view->authentication_method->len;
+ }
+
+ if (view->authentication_data != NULL) {
+ storage_size += view->authentication_data->len;
+ }
+
+ return storage_size;
+}
+
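+/*
+ * Storage initialization deep-copies a caller-supplied view: every referenced byte lands in the single backing
+ * buffer sized above, and `storage_view` is rebuilt to point at those copies, so the stored view stays valid
+ * after the caller's view goes away.
+ */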
+int aws_mqtt5_packet_connect_storage_init(
+ struct aws_mqtt5_packet_connect_storage *storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_connect_view *view) {
+ AWS_ZERO_STRUCT(*storage);
+
+ struct aws_mqtt5_packet_connect_view *storage_view = &storage->storage_view;
+
+ size_t storage_capacity = s_aws_mqtt5_packet_connect_compute_storage_size(view);
+ if (aws_byte_buf_init(&storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ storage->allocator = allocator;
+ storage_view->keep_alive_interval_seconds = view->keep_alive_interval_seconds;
+
+ storage_view->client_id = view->client_id;
+ if (aws_byte_buf_append_and_update(&storage->storage, &storage_view->client_id)) {
+ return AWS_OP_ERR;
+ }
+
+ if (view->username != NULL) {
+ storage->username = *view->username;
+ if (aws_byte_buf_append_and_update(&storage->storage, &storage->username)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->username = &storage->username;
+ }
+
+ if (view->password != NULL) {
+ storage->password = *view->password;
+ if (aws_byte_buf_append_and_update(&storage->storage, &storage->password)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->password = &storage->password;
+ }
+
+ storage_view->clean_start = view->clean_start;
+
+ if (view->session_expiry_interval_seconds != NULL) {
+ storage->session_expiry_interval_seconds = *view->session_expiry_interval_seconds;
+ storage_view->session_expiry_interval_seconds = &storage->session_expiry_interval_seconds;
+ }
+
+ if (view->request_response_information != NULL) {
+ storage->request_response_information = *view->request_response_information;
+ storage_view->request_response_information = &storage->request_response_information;
+ }
+
+ if (view->request_problem_information != NULL) {
+ storage->request_problem_information = *view->request_problem_information;
+ storage_view->request_problem_information = &storage->request_problem_information;
+ }
+
+ if (view->receive_maximum != NULL) {
+ storage->receive_maximum = *view->receive_maximum;
+ storage_view->receive_maximum = &storage->receive_maximum;
+ }
+
+ if (view->topic_alias_maximum != NULL) {
+ storage->topic_alias_maximum = *view->topic_alias_maximum;
+ storage_view->topic_alias_maximum = &storage->topic_alias_maximum;
+ }
+
+ if (view->maximum_packet_size_bytes != NULL) {
+ storage->maximum_packet_size_bytes = *view->maximum_packet_size_bytes;
+ storage_view->maximum_packet_size_bytes = &storage->maximum_packet_size_bytes;
+ }
+
+ if (view->will != NULL) {
+ storage->will = aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_packet_publish_storage));
+ if (storage->will == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_mqtt5_packet_publish_storage_init(storage->will, allocator, view->will)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->will = &storage->will->storage_view;
+ }
+
+ if (view->will_delay_interval_seconds != NULL) {
+ storage->will_delay_interval_seconds = *view->will_delay_interval_seconds;
+ storage_view->will_delay_interval_seconds = &storage->will_delay_interval_seconds;
+ }
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &storage->user_properties,
+ allocator,
+ &storage->storage,
+ view->user_property_count,
+ view->user_properties)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&storage->user_properties);
+ storage_view->user_properties = storage->user_properties.properties.data;
+
+ if (view->authentication_method != NULL) {
+ storage->authentication_method = *view->authentication_method;
+ if (aws_byte_buf_append_and_update(&storage->storage, &storage->authentication_method)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->authentication_method = &storage->authentication_method;
+ }
+
+ if (view->authentication_data != NULL) {
+ storage->authentication_data = *view->authentication_data;
+ if (aws_byte_buf_append_and_update(&storage->storage, &storage->authentication_data)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->authentication_data = &storage->authentication_data;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_connect_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_connect_storage *connect_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*connect_storage);
+
+ if (aws_mqtt5_user_property_set_init(&connect_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_destroy_operation_connect(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_connect *connect_op = object;
+
+ aws_mqtt5_packet_connect_storage_clean_up(&connect_op->options_storage);
+
+ aws_mem_release(connect_op->allocator, connect_op);
+}
+
+struct aws_mqtt5_operation_connect *aws_mqtt5_operation_connect_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_connect_view *connect_options) {
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(connect_options != NULL);
+
+ if (aws_mqtt5_packet_connect_view_validate(connect_options)) {
+ return NULL;
+ }
+
+ struct aws_mqtt5_operation_connect *connect_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_connect));
+ if (connect_op == NULL) {
+ return NULL;
+ }
+
+ connect_op->allocator = allocator;
+ connect_op->base.vtable = &s_empty_operation_vtable;
+ connect_op->base.packet_type = AWS_MQTT5_PT_CONNECT;
+ aws_ref_count_init(&connect_op->base.ref_count, connect_op, s_destroy_operation_connect);
+ connect_op->base.impl = connect_op;
+
+ if (aws_mqtt5_packet_connect_storage_init(&connect_op->options_storage, allocator, connect_options)) {
+ goto error;
+ }
+
+ connect_op->base.packet_view = &connect_op->options_storage.storage_view;
+
+ return connect_op;
+
+error:
+
+ aws_mqtt5_operation_release(&connect_op->base);
+
+ return NULL;
+}
+
+/*********************************************************************************************************************
+ * Connack
+ ********************************************************************************************************************/
+
+static size_t s_aws_mqtt5_packet_connack_compute_storage_size(const struct aws_mqtt5_packet_connack_view *view) {
+ if (view == NULL) {
+ return 0;
+ }
+
+ size_t storage_size = 0;
+
+ if (view->assigned_client_identifier != NULL) {
+ storage_size += view->assigned_client_identifier->len;
+ }
+
+ if (view->reason_string != NULL) {
+ storage_size += view->reason_string->len;
+ }
+
+ if (view->response_information != NULL) {
+ storage_size += view->response_information->len;
+ }
+
+ if (view->server_reference != NULL) {
+ storage_size += view->server_reference->len;
+ }
+
+ if (view->authentication_method != NULL) {
+ storage_size += view->authentication_method->len;
+ }
+
+ if (view->authentication_data != NULL) {
+ storage_size += view->authentication_data->len;
+ }
+
+ storage_size +=
+ s_aws_mqtt5_user_property_set_compute_storage_size(view->user_properties, view->user_property_count);
+
+ return storage_size;
+}
+
+int aws_mqtt5_packet_connack_storage_init(
+ struct aws_mqtt5_packet_connack_storage *connack_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_connack_view *connack_view) {
+
+ AWS_ZERO_STRUCT(*connack_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_connack_compute_storage_size(connack_view);
+ if (aws_byte_buf_init(&connack_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_connack_view *stored_view = &connack_storage->storage_view;
+
+ connack_storage->allocator = allocator;
+
+ stored_view->session_present = connack_view->session_present;
+ stored_view->reason_code = connack_view->reason_code;
+
+ if (connack_view->session_expiry_interval != NULL) {
+ connack_storage->session_expiry_interval = *connack_view->session_expiry_interval;
+ stored_view->session_expiry_interval = &connack_storage->session_expiry_interval;
+ }
+
+ if (connack_view->receive_maximum != NULL) {
+ connack_storage->receive_maximum = *connack_view->receive_maximum;
+ stored_view->receive_maximum = &connack_storage->receive_maximum;
+ }
+
+ if (connack_view->maximum_qos != NULL) {
+ connack_storage->maximum_qos = *connack_view->maximum_qos;
+ stored_view->maximum_qos = &connack_storage->maximum_qos;
+ }
+
+ if (connack_view->retain_available != NULL) {
+ connack_storage->retain_available = *connack_view->retain_available;
+ stored_view->retain_available = &connack_storage->retain_available;
+ }
+
+ if (connack_view->maximum_packet_size != NULL) {
+ connack_storage->maximum_packet_size = *connack_view->maximum_packet_size;
+ stored_view->maximum_packet_size = &connack_storage->maximum_packet_size;
+ }
+
+ if (connack_view->assigned_client_identifier != NULL) {
+ connack_storage->assigned_client_identifier = *connack_view->assigned_client_identifier;
+ if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->assigned_client_identifier)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->assigned_client_identifier = &connack_storage->assigned_client_identifier;
+ }
+
+ if (connack_view->topic_alias_maximum != NULL) {
+ connack_storage->topic_alias_maximum = *connack_view->topic_alias_maximum;
+ stored_view->topic_alias_maximum = &connack_storage->topic_alias_maximum;
+ }
+
+ if (connack_view->reason_string != NULL) {
+ connack_storage->reason_string = *connack_view->reason_string;
+ if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->reason_string)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->reason_string = &connack_storage->reason_string;
+ }
+
+ if (connack_view->wildcard_subscriptions_available != NULL) {
+ connack_storage->wildcard_subscriptions_available = *connack_view->wildcard_subscriptions_available;
+ stored_view->wildcard_subscriptions_available = &connack_storage->wildcard_subscriptions_available;
+ }
+
+ if (connack_view->subscription_identifiers_available != NULL) {
+ connack_storage->subscription_identifiers_available = *connack_view->subscription_identifiers_available;
+ stored_view->subscription_identifiers_available = &connack_storage->subscription_identifiers_available;
+ }
+
+ if (connack_view->shared_subscriptions_available != NULL) {
+ connack_storage->shared_subscriptions_available = *connack_view->shared_subscriptions_available;
+ stored_view->shared_subscriptions_available = &connack_storage->shared_subscriptions_available;
+ }
+
+ if (connack_view->server_keep_alive != NULL) {
+ connack_storage->server_keep_alive = *connack_view->server_keep_alive;
+ stored_view->server_keep_alive = &connack_storage->server_keep_alive;
+ }
+
+ if (connack_view->response_information != NULL) {
+ connack_storage->response_information = *connack_view->response_information;
+ if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->response_information)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->response_information = &connack_storage->response_information;
+ }
+
+ if (connack_view->server_reference != NULL) {
+ connack_storage->server_reference = *connack_view->server_reference;
+ if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->server_reference)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->server_reference = &connack_storage->server_reference;
+ }
+
+ if (connack_view->authentication_method != NULL) {
+ connack_storage->authentication_method = *connack_view->authentication_method;
+ if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->authentication_method)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->authentication_method = &connack_storage->authentication_method;
+ }
+
+ if (connack_view->authentication_data != NULL) {
+ connack_storage->authentication_data = *connack_view->authentication_data;
+ if (aws_byte_buf_append_and_update(&connack_storage->storage, &connack_storage->authentication_data)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->authentication_data = &connack_storage->authentication_data;
+ }
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &connack_storage->user_properties,
+ allocator,
+ &connack_storage->storage,
+ connack_view->user_property_count,
+ connack_view->user_properties)) {
+ return AWS_OP_ERR;
+ }
+
+ stored_view->user_property_count = aws_mqtt5_user_property_set_size(&connack_storage->user_properties);
+ stored_view->user_properties = connack_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_connack_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_connack_storage *connack_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*connack_storage);
+
+ if (aws_mqtt5_user_property_set_init(&connack_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_connack_storage_clean_up(struct aws_mqtt5_packet_connack_storage *connack_storage) {
+ if (connack_storage == NULL) {
+ return;
+ }
+
+ aws_mqtt5_user_property_set_clean_up(&connack_storage->user_properties);
+ aws_byte_buf_clean_up(&connack_storage->storage);
+}
+
+void aws_mqtt5_packet_connack_view_log(
+ const struct aws_mqtt5_packet_connack_view *connack_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view reason code set to %d (%s)",
+ (void *)connack_view,
+ (int)connack_view->reason_code,
+ aws_mqtt5_connect_reason_code_to_c_string(connack_view->reason_code));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view session present set to %d",
+ (void *)connack_view,
+ (int)connack_view->session_present);
+
+ if (connack_view->session_expiry_interval != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view session expiry interval set to %" PRIu32,
+ (void *)connack_view,
+ *connack_view->session_expiry_interval);
+ }
+
+ if (connack_view->receive_maximum != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view receive maximum set to %" PRIu16,
+ (void *)connack_view,
+ *connack_view->receive_maximum);
+ }
+
+ if (connack_view->maximum_qos != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view maximum qos set to %d",
+ (void *)connack_view,
+ (int)(*connack_view->maximum_qos));
+ }
+
+ if (connack_view->retain_available != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view retain available set to %d",
+ (void *)connack_view,
+ (int)(*connack_view->retain_available));
+ }
+
+ if (connack_view->maximum_packet_size != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view maximum packet size set to %" PRIu32,
+ (void *)connack_view,
+ *connack_view->maximum_packet_size);
+ }
+
+ if (connack_view->assigned_client_identifier != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view assigned client identifier set to \"" PRInSTR "\"",
+ (void *)connack_view,
+ AWS_BYTE_CURSOR_PRI(*connack_view->assigned_client_identifier));
+ }
+
+ if (connack_view->topic_alias_maximum != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view topic alias maximum set to %" PRIu16,
+ (void *)connack_view,
+ *connack_view->topic_alias_maximum);
+ }
+
+ if (connack_view->reason_string != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view reason string set to \"" PRInSTR "\"",
+ (void *)connack_view,
+ AWS_BYTE_CURSOR_PRI(*connack_view->reason_string));
+ }
+
+ if (connack_view->wildcard_subscriptions_available != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view wildcard subscriptions available set to %d",
+ (void *)connack_view,
+ (int)(*connack_view->wildcard_subscriptions_available));
+ }
+
+ if (connack_view->subscription_identifiers_available != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view subscription identifiers available set to %d",
+ (void *)connack_view,
+ (int)(*connack_view->subscription_identifiers_available));
+ }
+
+ if (connack_view->shared_subscriptions_available != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view shared subscriptions available set to %d",
+ (void *)connack_view,
+ (int)(*connack_view->shared_subscriptions_available));
+ }
+
+ if (connack_view->server_keep_alive != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view server keep alive set to %" PRIu16,
+ (void *)connack_view,
+ *connack_view->server_keep_alive);
+ }
+
+ if (connack_view->response_information != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view response information set to \"" PRInSTR "\"",
+ (void *)connack_view,
+ AWS_BYTE_CURSOR_PRI(*connack_view->response_information));
+ }
+
+ if (connack_view->server_reference != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view server reference set to \"" PRInSTR "\"",
+ (void *)connack_view,
+ AWS_BYTE_CURSOR_PRI(*connack_view->server_reference));
+ }
+
+ if (connack_view->authentication_method != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view authentication method set",
+ (void *)connack_view);
+ }
+
+ if (connack_view->authentication_data != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_connack_view authentication data set",
+ (void *)connack_view);
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ connack_view->user_properties,
+ connack_view->user_property_count,
+ (void *)connack_view,
+ level,
+ "aws_mqtt5_packet_connack_view");
+}
+
+/*********************************************************************************************************************
+ * Disconnect
+ ********************************************************************************************************************/
+
+int aws_mqtt5_packet_disconnect_view_validate(const struct aws_mqtt5_packet_disconnect_view *disconnect_view) {
+
+ if (disconnect_view == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null DISCONNECT packet options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ bool is_valid_reason_code = true;
+ aws_mqtt5_disconnect_reason_code_to_c_string(disconnect_view->reason_code, &is_valid_reason_code);
+ if (!is_valid_reason_code) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view - invalid DISCONNECT reason code:%d",
+ (void *)disconnect_view,
+ (int)disconnect_view->reason_code);
+ return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION);
+ }
+
+ if (disconnect_view->reason_string != NULL) {
+ if (disconnect_view->reason_string->len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view - reason string too long",
+ (void *)disconnect_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION);
+ }
+
+ if (aws_mqtt5_validate_utf8_text(*disconnect_view->reason_string)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view - reason string not valid UTF-8",
+ (void *)disconnect_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (disconnect_view->server_reference != NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view - sending a server reference with a client-sourced DISCONNECT is "
+ "not allowed",
+ (void *)disconnect_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION);
+ }
+
+ if (s_aws_mqtt5_user_property_set_validate(
+ disconnect_view->user_properties,
+ disconnect_view->user_property_count,
+ "aws_mqtt5_packet_disconnect_view",
+ (void *)disconnect_view)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_packet_disconnect_view_validate_vs_connection_settings(
+ const void *packet_view,
+ const struct aws_mqtt5_client *client) {
+
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view = packet_view;
+
+ if (disconnect_view->session_expiry_interval_seconds != NULL) {
+ /*
+ * By spec (https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901211), a client may not
+ * send a non-zero session expiry here if it sent a zero (or absent) session expiry in the CONNECT; a zero
+ * value presumably lets the server skip tracking session state, and that choice cannot be undone now.
+ */
+ const uint32_t *session_expiry_ptr = client->config->connect.storage_view.session_expiry_interval_seconds;
+ if (*disconnect_view->session_expiry_interval_seconds > 0 &&
+ (session_expiry_ptr == NULL || *session_expiry_ptr == 0)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view - cannot specify a positive session expiry after "
+ "committing "
+ "to 0-valued session expiry in CONNECT",
+ (void *)disconnect_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_DISCONNECT_OPTIONS_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_disconnect_view_log(
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view reason code set to %d (%s)",
+ (void *)disconnect_view,
+ (int)disconnect_view->reason_code,
+ aws_mqtt5_disconnect_reason_code_to_c_string(disconnect_view->reason_code, NULL));
+
+ if (disconnect_view->session_expiry_interval_seconds != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view session expiry interval set to %" PRIu32,
+ (void *)disconnect_view,
+ *disconnect_view->session_expiry_interval_seconds);
+ }
+
+ if (disconnect_view->reason_string != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view reason string set to \"" PRInSTR "\"",
+ (void *)disconnect_view,
+ AWS_BYTE_CURSOR_PRI(*disconnect_view->reason_string));
+ }
+
+ if (disconnect_view->server_reference != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_disconnect_view server reference set to \"" PRInSTR "\"",
+ (void *)disconnect_view,
+ AWS_BYTE_CURSOR_PRI(*disconnect_view->server_reference));
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ disconnect_view->user_properties,
+ disconnect_view->user_property_count,
+ (void *)disconnect_view,
+ level,
+ "aws_mqtt5_packet_disconnect_view");
+}
+
+void aws_mqtt5_packet_disconnect_storage_clean_up(struct aws_mqtt5_packet_disconnect_storage *disconnect_storage) {
+ if (disconnect_storage == NULL) {
+ return;
+ }
+
+ aws_mqtt5_user_property_set_clean_up(&disconnect_storage->user_properties);
+ aws_byte_buf_clean_up(&disconnect_storage->storage);
+}
+
+static size_t s_aws_mqtt5_packet_disconnect_compute_storage_size(
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ disconnect_view->user_properties, disconnect_view->user_property_count);
+
+ if (disconnect_view->reason_string != NULL) {
+ storage_size += disconnect_view->reason_string->len;
+ }
+
+ if (disconnect_view->server_reference != NULL) {
+ storage_size += disconnect_view->server_reference->len;
+ }
+
+ return storage_size;
+}
+
+int aws_mqtt5_packet_disconnect_storage_init(
+ struct aws_mqtt5_packet_disconnect_storage *disconnect_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_options) {
+
+ AWS_ZERO_STRUCT(*disconnect_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_disconnect_compute_storage_size(disconnect_options);
+ if (aws_byte_buf_init(&disconnect_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_disconnect_view *storage_view = &disconnect_storage->storage_view;
+
+ storage_view->reason_code = disconnect_options->reason_code;
+
+ if (disconnect_options->session_expiry_interval_seconds != NULL) {
+ disconnect_storage->session_expiry_interval_seconds = *disconnect_options->session_expiry_interval_seconds;
+ storage_view->session_expiry_interval_seconds = &disconnect_storage->session_expiry_interval_seconds;
+ }
+
+ if (disconnect_options->reason_string != NULL) {
+ disconnect_storage->reason_string = *disconnect_options->reason_string;
+ if (aws_byte_buf_append_and_update(&disconnect_storage->storage, &disconnect_storage->reason_string)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->reason_string = &disconnect_storage->reason_string;
+ }
+
+ if (disconnect_options->server_reference != NULL) {
+ disconnect_storage->server_reference = *disconnect_options->server_reference;
+ if (aws_byte_buf_append_and_update(&disconnect_storage->storage, &disconnect_storage->server_reference)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->server_reference = &disconnect_storage->server_reference;
+ }
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &disconnect_storage->user_properties,
+ allocator,
+ &disconnect_storage->storage,
+ disconnect_options->user_property_count,
+ disconnect_options->user_properties)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&disconnect_storage->user_properties);
+ storage_view->user_properties = disconnect_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_disconnect_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_disconnect_storage *disconnect_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*disconnect_storage);
+
+ if (aws_mqtt5_user_property_set_init(&disconnect_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_destroy_operation_disconnect(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_disconnect *disconnect_op = object;
+
+ aws_mqtt5_packet_disconnect_storage_clean_up(&disconnect_op->options_storage);
+
+ aws_mem_release(disconnect_op->allocator, disconnect_op);
+}
+
+static void s_aws_mqtt5_disconnect_operation_completion(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *completion_view) {
+
+ (void)completion_view;
+ (void)packet_type;
+
+ struct aws_mqtt5_operation_disconnect *disconnect_op = operation->impl;
+
+ if (disconnect_op->internal_completion_options.completion_callback != NULL) {
+ (*disconnect_op->internal_completion_options.completion_callback)(
+ error_code, disconnect_op->internal_completion_options.completion_user_data);
+ }
+
+ if (disconnect_op->external_completion_options.completion_callback != NULL) {
+ (*disconnect_op->external_completion_options.completion_callback)(
+ error_code, disconnect_op->external_completion_options.completion_user_data);
+ }
+}
+
+static struct aws_mqtt5_operation_vtable s_disconnect_operation_vtable = {
+ .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_disconnect_operation_completion,
+ .aws_mqtt5_operation_set_packet_id_fn = NULL,
+ .aws_mqtt5_operation_get_packet_id_address_fn = NULL,
+ .aws_mqtt5_operation_validate_vs_connection_settings_fn =
+ s_aws_mqtt5_packet_disconnect_view_validate_vs_connection_settings,
+};
+
+struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_disconnect_view *disconnect_options,
+ const struct aws_mqtt5_disconnect_completion_options *external_completion_options,
+ const struct aws_mqtt5_disconnect_completion_options *internal_completion_options) {
+ AWS_PRECONDITION(allocator != NULL);
+
+ if (aws_mqtt5_packet_disconnect_view_validate(disconnect_options)) {
+ return NULL;
+ }
+
+ struct aws_mqtt5_operation_disconnect *disconnect_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_disconnect));
+ if (disconnect_op == NULL) {
+ return NULL;
+ }
+
+ disconnect_op->allocator = allocator;
+ disconnect_op->base.vtable = &s_disconnect_operation_vtable;
+ disconnect_op->base.packet_type = AWS_MQTT5_PT_DISCONNECT;
+ aws_ref_count_init(&disconnect_op->base.ref_count, disconnect_op, s_destroy_operation_disconnect);
+ disconnect_op->base.impl = disconnect_op;
+
+ if (aws_mqtt5_packet_disconnect_storage_init(&disconnect_op->options_storage, allocator, disconnect_options)) {
+ goto error;
+ }
+
+ disconnect_op->base.packet_view = &disconnect_op->options_storage.storage_view;
+ if (external_completion_options != NULL) {
+ disconnect_op->external_completion_options = *external_completion_options;
+ }
+
+ if (internal_completion_options != NULL) {
+ disconnect_op->internal_completion_options = *internal_completion_options;
+ }
+
+ return disconnect_op;
+
+error:
+
+ aws_mqtt5_operation_release(&disconnect_op->base);
+
+ return NULL;
+}
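+
+/*
+ * Illustrative sketch only -- not part of the upstream aws-c-mqtt source. It shows how a DISCONNECT operation
+ * might be built from a transient, stack-allocated view; the storage init above copies everything the view
+ * references, so none of these cursors need to outlive the call. Assumes the public
+ * AWS_MQTT5_DRC_NORMAL_DISCONNECTION reason code and aws_byte_cursor_from_c_str() from aws-c-common.
+ */
+static struct aws_mqtt5_operation_disconnect *s_example_make_disconnect(struct aws_allocator *allocator) {
+ struct aws_mqtt5_user_property example_property = {
+ .name = aws_byte_cursor_from_c_str("reason-detail"),
+ .value = aws_byte_cursor_from_c_str("client shutting down"),
+ };
+
+ struct aws_mqtt5_packet_disconnect_view disconnect_view = {
+ .reason_code = AWS_MQTT5_DRC_NORMAL_DISCONNECTION,
+ .user_property_count = 1,
+ .user_properties = &example_property,
+ };
+
+ return aws_mqtt5_operation_disconnect_new(allocator, &disconnect_view, NULL, NULL);
+}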
+
+/*********************************************************************************************************************
+ * Publish
+ ********************************************************************************************************************/
+
+int aws_mqtt5_packet_publish_view_validate(const struct aws_mqtt5_packet_publish_view *publish_view) {
+
+ if (publish_view == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null PUBLISH packet options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (publish_view->qos < AWS_MQTT5_QOS_AT_MOST_ONCE || publish_view->qos > AWS_MQTT5_QOS_EXACTLY_ONCE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - unsupported QoS value in PUBLISH packet options: %d",
+ (void *)publish_view,
+ (int)publish_view->qos);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (publish_view->qos == AWS_MQTT5_QOS_AT_MOST_ONCE) {
+ if (publish_view->duplicate) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - duplicate flag must be set to 0 for QoS 0 messages",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ if (publish_view->packet_id != 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - Packet ID must not be set for QoS 0 messages",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ /* 0-length topic is never valid, even with user-controlled outbound aliasing */
+ if (publish_view->topic.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - missing topic", (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ } else if (aws_mqtt5_validate_utf8_text(publish_view->topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL, "id=%p: aws_mqtt5_packet_publish_view - topic not valid UTF-8", (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ } else if (!aws_mqtt_is_valid_topic(&publish_view->topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - invalid topic: \"" PRInSTR "\"",
+ (void *)publish_view,
+ AWS_BYTE_CURSOR_PRI(publish_view->topic));
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (publish_view->topic_alias != NULL) {
+ if (*publish_view->topic_alias == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - topic alias may not be zero",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (publish_view->payload_format != NULL) {
+ if (*publish_view->payload_format < AWS_MQTT5_PFI_BYTES || *publish_view->payload_format > AWS_MQTT5_PFI_UTF8) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - invalid payload format value: %d",
+ (void *)publish_view,
+ (int)*publish_view->payload_format);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ /* Make sure the payload data is valid UTF-8 if the payload format indicator is set to UTF-8 */
+ if (*publish_view->payload_format == AWS_MQTT5_PFI_UTF8) {
+ if (aws_mqtt5_validate_utf8_text(publish_view->payload)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - payload value is not valid UTF-8 while payload format "
+ "set to UTF-8",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+ }
+
+ if (publish_view->response_topic != NULL) {
+ if (publish_view->response_topic->len >= UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - response topic too long",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (aws_mqtt5_validate_utf8_text(*publish_view->response_topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - response topic not valid UTF-8",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (!aws_mqtt_is_valid_topic(publish_view->response_topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - response topic must be a valid mqtt topic",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (publish_view->correlation_data != NULL) {
+ if (publish_view->correlation_data->len >= UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - correlation data too long",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ /*
+ * Validation is done from the client's perspective, and a client should never generate subscription
+ * identifiers in a PUBLISH packet.
+ */
+ if (publish_view->subscription_identifier_count != 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL, "Client-initiated PUBLISH packets may not contain subscription identifiers");
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (publish_view->content_type != NULL) {
+ if (publish_view->content_type->len >= UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - content type too long",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (aws_mqtt5_validate_utf8_text(*publish_view->content_type)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - content type not valid UTF-8",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (s_aws_mqtt5_user_property_set_validate(
+ publish_view->user_properties,
+ publish_view->user_property_count,
+ "aws_mqtt5_packet_publish_view",
+ (void *)publish_view)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
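+
+ /*
+ * Usage sketch (illustrative only, not part of the SDK; field values are hypothetical): validating a
+ * caller-built publish view before handing it to the client.
+ *
+ *     struct aws_mqtt5_packet_publish_view publish_view;
+ *     AWS_ZERO_STRUCT(publish_view);
+ *     publish_view.topic = aws_byte_cursor_from_c_str("sensors/device1/state");
+ *     publish_view.qos = AWS_MQTT5_QOS_AT_LEAST_ONCE;
+ *     publish_view.payload = aws_byte_cursor_from_c_str("{\"on\":true}");
+ *
+ *     if (aws_mqtt5_packet_publish_view_validate(&publish_view)) {
+ *         // aws_last_error() is AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION, or
+ *         // AWS_ERROR_INVALID_ARGUMENT for a NULL view
+ *     }
+ */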
+
+int aws_mqtt5_packet_publish_view_validate_vs_iot_core(const struct aws_mqtt5_packet_publish_view *publish_view) {
+ if (!aws_mqtt_is_valid_topic_for_iot_core(publish_view->topic)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - topic not valid for AWS Iot Core limits: \"" PRInSTR "\"",
+ (void *)publish_view,
+ AWS_BYTE_CURSOR_PRI(publish_view->topic));
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_packet_publish_view_validate_vs_connection_settings(
+ const void *packet_view,
+ const struct aws_mqtt5_client *client) {
+ const struct aws_mqtt5_packet_publish_view *publish_view = packet_view;
+
+ /* If we have valid negotiated settings, check against them as well */
+ if (aws_mqtt5_client_are_negotiated_settings_valid(client)) {
+ const struct aws_mqtt5_negotiated_settings *settings = &client->negotiated_settings;
+
+ if (publish_view->qos > settings->maximum_qos) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - QoS value %d exceeds negotiated maximum qos %d",
+ (void *)publish_view,
+ (int)publish_view->qos,
+ (int)settings->maximum_qos);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (publish_view->topic_alias != NULL) {
+ const struct aws_mqtt5_client_options_storage *client_options = client->config;
+ if (client_options->topic_aliasing_options.outbound_topic_alias_behavior != AWS_MQTT5_COTABT_USER) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - topic alias set but outbound topic alias behavior has not "
+ "been set to user controlled",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ if (*publish_view->topic_alias > settings->topic_alias_maximum_to_server) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - outbound topic alias (%d) exceeds server's topic alias "
+ "maximum "
+ "(%d)",
+ (void *)publish_view,
+ (int)(*publish_view->topic_alias),
+ (int)settings->topic_alias_maximum_to_server);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (publish_view->retain && settings->retain_available == false) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - server does not support Retain",
+ (void *)publish_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
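+
+ /*
+ * Sketch (illustrative; variable names hypothetical): the dynamic check above only accepts a
+ * caller-supplied topic alias when the client's outbound topic alias behavior is
+ * AWS_MQTT5_COTABT_USER and the alias fits within the server's negotiated maximum.  Attaching an
+ * alias to a view:
+ *
+ *     uint16_t topic_alias = 1; // must be non-zero and <= topic_alias_maximum_to_server
+ *     publish_view.topic_alias = &topic_alias;
+ */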
+
+void aws_mqtt5_packet_publish_view_log(
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view packet id set to %d",
+ (void *)publish_view,
+ (int)publish_view->packet_id);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view payload set containing %zu bytes",
+ (void *)publish_view,
+ publish_view->payload.len);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view qos set to %d",
+ (void *)publish_view,
+ (int)publish_view->qos);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view retain set to %d",
+ (void *)publish_view,
+ (int)publish_view->retain);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view topic set to \"" PRInSTR "\"",
+ (void *)publish_view,
+ AWS_BYTE_CURSOR_PRI(publish_view->topic));
+
+ if (publish_view->payload_format != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view payload format indicator set to %d (%s)",
+ (void *)publish_view,
+ (int)*publish_view->payload_format,
+ aws_mqtt5_payload_format_indicator_to_c_string(*publish_view->payload_format));
+ }
+
+ if (publish_view->message_expiry_interval_seconds != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view message expiry interval set to %" PRIu32,
+ (void *)publish_view,
+ *publish_view->message_expiry_interval_seconds);
+ }
+
+ if (publish_view->topic_alias != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view topic alias set to %" PRIu16,
+ (void *)publish_view,
+ *publish_view->topic_alias);
+ }
+
+ if (publish_view->response_topic != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view response topic set to \"" PRInSTR "\"",
+ (void *)publish_view,
+ AWS_BYTE_CURSOR_PRI(*publish_view->response_topic));
+ }
+
+ if (publish_view->correlation_data != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view - set correlation data",
+ (void *)publish_view);
+ }
+
+ if (publish_view->content_type != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view content type set to \"" PRInSTR "\"",
+ (void *)publish_view,
+ AWS_BYTE_CURSOR_PRI(*publish_view->content_type));
+ }
+
+ for (size_t i = 0; i < publish_view->subscription_identifier_count; ++i) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view subscription identifier %d: %" PRIu32,
+ (void *)publish_view,
+ (int)i,
+ publish_view->subscription_identifiers[i]);
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ publish_view->user_properties,
+ publish_view->user_property_count,
+ (void *)publish_view,
+ level,
+ "aws_mqtt5_packet_publish_view");
+}
+
+static size_t s_aws_mqtt5_packet_publish_compute_storage_size(
+ const struct aws_mqtt5_packet_publish_view *publish_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ publish_view->user_properties, publish_view->user_property_count);
+
+ storage_size += publish_view->topic.len;
+ storage_size += publish_view->payload.len;
+
+ if (publish_view->response_topic != NULL) {
+ storage_size += publish_view->response_topic->len;
+ }
+
+ if (publish_view->correlation_data != NULL) {
+ storage_size += publish_view->correlation_data->len;
+ }
+
+ if (publish_view->content_type != NULL) {
+ storage_size += publish_view->content_type->len;
+ }
+
+ return storage_size;
+}
+
+int aws_mqtt5_packet_publish_storage_init(
+ struct aws_mqtt5_packet_publish_storage *publish_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_publish_view *publish_options) {
+
+ AWS_ZERO_STRUCT(*publish_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_publish_compute_storage_size(publish_options);
+ if (aws_byte_buf_init(&publish_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_init_dynamic(&publish_storage->subscription_identifiers, allocator, 0, sizeof(uint32_t))) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_publish_view *storage_view = &publish_storage->storage_view;
+
+ storage_view->packet_id = publish_options->packet_id;
+
+ storage_view->payload = publish_options->payload;
+ if (aws_byte_buf_append_and_update(&publish_storage->storage, &storage_view->payload)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->qos = publish_options->qos;
+ storage_view->retain = publish_options->retain;
+ storage_view->duplicate = publish_options->duplicate;
+
+ storage_view->topic = publish_options->topic;
+ if (aws_byte_buf_append_and_update(&publish_storage->storage, &storage_view->topic)) {
+ return AWS_OP_ERR;
+ }
+
+ if (publish_options->payload_format != NULL) {
+ publish_storage->payload_format = *publish_options->payload_format;
+ storage_view->payload_format = &publish_storage->payload_format;
+ }
+
+ if (publish_options->message_expiry_interval_seconds != NULL) {
+ publish_storage->message_expiry_interval_seconds = *publish_options->message_expiry_interval_seconds;
+ storage_view->message_expiry_interval_seconds = &publish_storage->message_expiry_interval_seconds;
+ }
+
+ if (publish_options->topic_alias != NULL) {
+ publish_storage->topic_alias = *publish_options->topic_alias;
+ storage_view->topic_alias = &publish_storage->topic_alias;
+ }
+
+ if (publish_options->response_topic != NULL) {
+ publish_storage->response_topic = *publish_options->response_topic;
+ if (aws_byte_buf_append_and_update(&publish_storage->storage, &publish_storage->response_topic)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->response_topic = &publish_storage->response_topic;
+ }
+
+ if (publish_options->correlation_data != NULL) {
+ publish_storage->correlation_data = *publish_options->correlation_data;
+ if (aws_byte_buf_append_and_update(&publish_storage->storage, &publish_storage->correlation_data)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->correlation_data = &publish_storage->correlation_data;
+ }
+
+ for (size_t i = 0; i < publish_options->subscription_identifier_count; ++i) {
+ aws_array_list_push_back(
+ &publish_storage->subscription_identifiers, &publish_options->subscription_identifiers[i]);
+ }
+
+ storage_view->subscription_identifier_count = aws_array_list_length(&publish_storage->subscription_identifiers);
+ storage_view->subscription_identifiers = publish_storage->subscription_identifiers.data;
+
+ if (publish_options->content_type != NULL) {
+ publish_storage->content_type = *publish_options->content_type;
+ if (aws_byte_buf_append_and_update(&publish_storage->storage, &publish_storage->content_type)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->content_type = &publish_storage->content_type;
+ }
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &publish_storage->user_properties,
+ allocator,
+ &publish_storage->storage,
+ publish_options->user_property_count,
+ publish_options->user_properties)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&publish_storage->user_properties);
+ storage_view->user_properties = publish_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
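+
+ /*
+ * Lifecycle sketch (illustrative only; allocator and view names hypothetical): the storage deep-copies
+ * every cursor in the view into its backing byte buffer, so the caller's buffers may be released once
+ * init returns.
+ *
+ *     struct aws_mqtt5_packet_publish_storage storage;
+ *     if (aws_mqtt5_packet_publish_storage_init(&storage, allocator, &publish_view)) {
+ *         aws_mqtt5_packet_publish_storage_clean_up(&storage); // failure paths leave a cleanable state
+ *         return AWS_OP_ERR;
+ *     }
+ *     // ... use storage.storage_view ...
+ *     aws_mqtt5_packet_publish_storage_clean_up(&storage);
+ */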
+
+int aws_mqtt5_packet_publish_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_publish_storage *publish_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*publish_storage);
+
+ if (aws_mqtt5_user_property_set_init(&publish_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_init_dynamic(&publish_storage->subscription_identifiers, allocator, 0, sizeof(uint32_t))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_publish_storage_clean_up(struct aws_mqtt5_packet_publish_storage *publish_storage) {
+ aws_mqtt5_user_property_set_clean_up(&publish_storage->user_properties);
+ aws_array_list_clean_up(&publish_storage->subscription_identifiers);
+ aws_byte_buf_clean_up(&publish_storage->storage);
+}
+
+static void s_aws_mqtt5_operation_publish_complete(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *completion_view) {
+ struct aws_mqtt5_operation_publish *publish_op = operation->impl;
+
+ if (publish_op->completion_options.completion_callback != NULL) {
+ (*publish_op->completion_options.completion_callback)(
+ packet_type, completion_view, error_code, publish_op->completion_options.completion_user_data);
+ }
+}
+
+static void s_aws_mqtt5_operation_publish_set_packet_id(
+ struct aws_mqtt5_operation *operation,
+ aws_mqtt5_packet_id_t packet_id) {
+ struct aws_mqtt5_operation_publish *publish_op = operation->impl;
+ publish_op->options_storage.storage_view.packet_id = packet_id;
+}
+
+static aws_mqtt5_packet_id_t *s_aws_mqtt5_operation_publish_get_packet_id_address(
+ const struct aws_mqtt5_operation *operation) {
+ struct aws_mqtt5_operation_publish *publish_op = operation->impl;
+ return &publish_op->options_storage.storage_view.packet_id;
+}
+
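+ /*
+ * Per-packet-type behavior is dispatched through this vtable: operation completion, packet id
+ * assignment and lookup, and (for PUBLISH) validation against the live negotiated connection
+ * settings.
+ */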
+static struct aws_mqtt5_operation_vtable s_publish_operation_vtable = {
+ .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_operation_publish_complete,
+ .aws_mqtt5_operation_set_packet_id_fn = s_aws_mqtt5_operation_publish_set_packet_id,
+ .aws_mqtt5_operation_get_packet_id_address_fn = s_aws_mqtt5_operation_publish_get_packet_id_address,
+ .aws_mqtt5_operation_validate_vs_connection_settings_fn =
+ s_aws_mqtt5_packet_publish_view_validate_vs_connection_settings,
+};
+
+static void s_destroy_operation_publish(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_publish *publish_op = object;
+
+ aws_mqtt5_packet_publish_storage_clean_up(&publish_op->options_storage);
+
+ aws_mem_release(publish_op->allocator, publish_op);
+}
+
+struct aws_mqtt5_operation_publish *aws_mqtt5_operation_publish_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_publish_view *publish_options,
+ const struct aws_mqtt5_publish_completion_options *completion_options) {
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(publish_options != NULL);
+
+ if (aws_mqtt5_packet_publish_view_validate(publish_options)) {
+ return NULL;
+ }
+
+ if (publish_options->packet_id != 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_publish_view packet id must be zero",
+ (void *)publish_options);
+ aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ return NULL;
+ }
+
+ if (client != NULL && client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) {
+ if (aws_mqtt5_packet_publish_view_validate_vs_iot_core(publish_options)) {
+ return NULL;
+ }
+ }
+
+ struct aws_mqtt5_operation_publish *publish_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_publish));
+ if (publish_op == NULL) {
+ return NULL;
+ }
+
+ publish_op->allocator = allocator;
+ publish_op->base.vtable = &s_publish_operation_vtable;
+ publish_op->base.packet_type = AWS_MQTT5_PT_PUBLISH;
+ aws_ref_count_init(&publish_op->base.ref_count, publish_op, s_destroy_operation_publish);
+ publish_op->base.impl = publish_op;
+
+ if (aws_mqtt5_packet_publish_storage_init(&publish_op->options_storage, allocator, publish_options)) {
+ goto error;
+ }
+
+ publish_op->base.packet_view = &publish_op->options_storage.storage_view;
+
+ if (completion_options != NULL) {
+ publish_op->completion_options = *completion_options;
+ }
+
+ return publish_op;
+
+error:
+
+ aws_mqtt5_operation_release(&publish_op->base);
+
+ return NULL;
+}
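+
+ /*
+ * Usage sketch (illustrative only; names hypothetical): building a PUBLISH operation and dropping the
+ * caller's reference.  The view's packet_id must be zero here; a real id is assigned later through the
+ * operation vtable.
+ *
+ *     struct aws_mqtt5_operation_publish *publish_op =
+ *         aws_mqtt5_operation_publish_new(allocator, client, &publish_view, &completion_options);
+ *     if (publish_op == NULL) {
+ *         return AWS_OP_ERR; // aws_last_error() describes the validation failure
+ *     }
+ *     // ... enqueue the operation on the client ...
+ *     aws_mqtt5_operation_release(&publish_op->base);
+ */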
+
+/*********************************************************************************************************************
+ * Puback
+ ********************************************************************************************************************/
+
+static size_t s_aws_mqtt5_packet_puback_compute_storage_size(const struct aws_mqtt5_packet_puback_view *puback_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ puback_view->user_properties, puback_view->user_property_count);
+
+ if (puback_view->reason_string != NULL) {
+ storage_size += puback_view->reason_string->len;
+ }
+
+ return storage_size;
+}
+
+AWS_MQTT_API int aws_mqtt5_packet_puback_storage_init(
+ struct aws_mqtt5_packet_puback_storage *puback_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_puback_view *puback_view) {
+ AWS_ZERO_STRUCT(*puback_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_puback_compute_storage_size(puback_view);
+ if (aws_byte_buf_init(&puback_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_puback_view *storage_view = &puback_storage->storage_view;
+
+ storage_view->packet_id = puback_view->packet_id;
+ storage_view->reason_code = puback_view->reason_code;
+
+ if (puback_view->reason_string != NULL) {
+ puback_storage->reason_string = *puback_view->reason_string;
+ if (aws_byte_buf_append_and_update(&puback_storage->storage, &puback_storage->reason_string)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->reason_string = &puback_storage->reason_string;
+ }
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &puback_storage->user_properties,
+ allocator,
+ &puback_storage->storage,
+ puback_view->user_property_count,
+ puback_view->user_properties)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&puback_storage->user_properties);
+ storage_view->user_properties = puback_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_puback_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_puback_storage *puback_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*puback_storage);
+
+ if (aws_mqtt5_user_property_set_init(&puback_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_puback_storage_clean_up(struct aws_mqtt5_packet_puback_storage *puback_storage) {
+
+ if (puback_storage == NULL) {
+ return;
+ }
+
+ aws_mqtt5_user_property_set_clean_up(&puback_storage->user_properties);
+
+ aws_byte_buf_clean_up(&puback_storage->storage);
+}
+
+void aws_mqtt5_packet_puback_view_log(
+ const struct aws_mqtt5_packet_puback_view *puback_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_puback_view packet id set to %d",
+ (void *)puback_view,
+ (int)puback_view->packet_id);
+
+ enum aws_mqtt5_puback_reason_code reason_code = puback_view->reason_code;
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: puback %d reason code: %s",
+ (void *)puback_view,
+ (int)reason_code,
+ aws_mqtt5_puback_reason_code_to_c_string(reason_code));
+
+ if (puback_view->reason_string != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_puback_view reason string set to \"" PRInSTR "\"",
+ (void *)puback_view,
+ AWS_BYTE_CURSOR_PRI(*puback_view->reason_string));
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ puback_view->user_properties,
+ puback_view->user_property_count,
+ (void *)puback_view,
+ level,
+ "aws_mqtt5_packet_puback_view");
+}
+
+static void s_destroy_operation_puback(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_puback *puback_op = object;
+
+ aws_mqtt5_packet_puback_storage_clean_up(&puback_op->options_storage);
+
+ aws_mem_release(puback_op->allocator, puback_op);
+}
+
+struct aws_mqtt5_operation_puback *aws_mqtt5_operation_puback_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_puback_view *puback_options) {
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(puback_options != NULL);
+
+ struct aws_mqtt5_operation_puback *puback_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_puback));
+ if (puback_op == NULL) {
+ return NULL;
+ }
+
+ puback_op->allocator = allocator;
+ puback_op->base.vtable = &s_empty_operation_vtable;
+ puback_op->base.packet_type = AWS_MQTT5_PT_PUBACK;
+ aws_ref_count_init(&puback_op->base.ref_count, puback_op, s_destroy_operation_puback);
+ puback_op->base.impl = puback_op;
+
+ if (aws_mqtt5_packet_puback_storage_init(&puback_op->options_storage, allocator, puback_options)) {
+ goto error;
+ }
+
+ puback_op->base.packet_view = &puback_op->options_storage.storage_view;
+
+ return puback_op;
+
+error:
+
+ aws_mqtt5_operation_release(&puback_op->base);
+
+ return NULL;
+}
+
+/*********************************************************************************************************************
+ * Unsubscribe
+ ********************************************************************************************************************/
+
+int aws_mqtt5_packet_unsubscribe_view_validate(const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view) {
+
+ if (unsubscribe_view == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null UNSUBSCRIBE packet options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (unsubscribe_view->topic_filter_count == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view - must contain at least one topic",
+ (void *)unsubscribe_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ if (unsubscribe_view->topic_filter_count > AWS_MQTT5_CLIENT_MAXIMUM_TOPIC_FILTERS_PER_UNSUBSCRIBE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view - contains too many topics (%zu)",
+ (void *)unsubscribe_view,
+ unsubscribe_view->topic_filter_count);
+ return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) {
+ const struct aws_byte_cursor *topic_filter = &unsubscribe_view->topic_filters[i];
+ if (aws_mqtt5_validate_utf8_text(*topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view - topic filter not valid UTF-8: \"" PRInSTR "\"",
+ (void *)unsubscribe_view,
+ AWS_BYTE_CURSOR_PRI(*topic_filter));
+ return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ if (!aws_mqtt_is_valid_topic_filter(topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view - invalid topic filter: \"" PRInSTR "\"",
+ (void *)unsubscribe_view,
+ AWS_BYTE_CURSOR_PRI(*topic_filter));
+ return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (s_aws_mqtt5_user_property_set_validate(
+ unsubscribe_view->user_properties,
+ unsubscribe_view->user_property_count,
+ "aws_mqtt5_packet_unsubscribe_view",
+ (void *)unsubscribe_view)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
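+
+ /*
+ * Usage sketch (illustrative only; topic filters hypothetical): an UNSUBSCRIBE view is a list of
+ * topic-filter cursors plus optional user properties.
+ *
+ *     struct aws_byte_cursor topic_filters[] = {
+ *         aws_byte_cursor_from_c_str("sensors/+/state"),
+ *     };
+ *     struct aws_mqtt5_packet_unsubscribe_view unsubscribe_view = {
+ *         .topic_filters = topic_filters,
+ *         .topic_filter_count = AWS_ARRAY_SIZE(topic_filters),
+ *     };
+ *     if (aws_mqtt5_packet_unsubscribe_view_validate(&unsubscribe_view)) {
+ *         // aws_last_error() is AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION
+ *     }
+ */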
+
+AWS_MQTT_API int aws_mqtt5_packet_unsubscribe_view_validate_vs_iot_core(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view) {
+
+ for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) {
+ const struct aws_byte_cursor *topic_filter = &unsubscribe_view->topic_filters[i];
+ if (!aws_mqtt_is_valid_topic_filter_for_iot_core(*topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view - topic filter not valid for AWS Iot Core limits: \"" PRInSTR
+ "\"",
+ (void *)unsubscribe_view,
+ AWS_BYTE_CURSOR_PRI(*topic_filter));
+ return aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_unsubscribe_view_log(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ size_t topic_count = unsubscribe_view->topic_filter_count;
+ for (size_t i = 0; i < topic_count; ++i) {
+ const struct aws_byte_cursor *topic_cursor = &unsubscribe_view->topic_filters[i];
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view topic #%zu: \"" PRInSTR "\"",
+ (void *)unsubscribe_view,
+ i,
+ AWS_BYTE_CURSOR_PRI(*topic_cursor));
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ unsubscribe_view->user_properties,
+ unsubscribe_view->user_property_count,
+ (void *)unsubscribe_view,
+ level,
+ "aws_mqtt5_packet_unsubscribe_view");
+}
+
+void aws_mqtt5_packet_unsubscribe_storage_clean_up(struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage) {
+ if (unsubscribe_storage == NULL) {
+ return;
+ }
+
+ aws_array_list_clean_up(&unsubscribe_storage->topic_filters);
+ aws_mqtt5_user_property_set_clean_up(&unsubscribe_storage->user_properties);
+ aws_byte_buf_clean_up(&unsubscribe_storage->storage);
+}
+
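+ /*
+ * Copies each topic filter cursor into the storage's backing byte buffer (re-pointing the cursor at the
+ * copy via aws_byte_buf_append_and_update) and records the re-pointed cursor in the topic_filters
+ * array list.
+ */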
+static int s_aws_mqtt5_packet_unsubscribe_build_topic_list(
+ struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage,
+ struct aws_allocator *allocator,
+ size_t topic_count,
+ const struct aws_byte_cursor *topics) {
+
+ if (aws_array_list_init_dynamic(
+ &unsubscribe_storage->topic_filters, allocator, topic_count, sizeof(struct aws_byte_cursor))) {
+ return AWS_OP_ERR;
+ }
+
+ for (size_t i = 0; i < topic_count; ++i) {
+ const struct aws_byte_cursor *topic_cursor_ptr = &topics[i];
+ struct aws_byte_cursor topic_cursor = *topic_cursor_ptr;
+
+ if (aws_byte_buf_append_and_update(&unsubscribe_storage->storage, &topic_cursor)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_push_back(&unsubscribe_storage->topic_filters, &topic_cursor)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static size_t s_aws_mqtt5_packet_unsubscribe_compute_storage_size(
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ unsubscribe_view->user_properties, unsubscribe_view->user_property_count);
+
+ for (size_t i = 0; i < unsubscribe_view->topic_filter_count; ++i) {
+ const struct aws_byte_cursor *topic = &unsubscribe_view->topic_filters[i];
+ storage_size += topic->len;
+ }
+
+ return storage_size;
+}
+
+int aws_mqtt5_packet_unsubscribe_storage_init(
+ struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options) {
+
+ AWS_ZERO_STRUCT(*unsubscribe_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_unsubscribe_compute_storage_size(unsubscribe_options);
+ if (aws_byte_buf_init(&unsubscribe_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_unsubscribe_view *storage_view = &unsubscribe_storage->storage_view;
+
+ if (s_aws_mqtt5_packet_unsubscribe_build_topic_list(
+ unsubscribe_storage,
+ allocator,
+ unsubscribe_options->topic_filter_count,
+ unsubscribe_options->topic_filters)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->topic_filter_count = aws_array_list_length(&unsubscribe_storage->topic_filters);
+ storage_view->topic_filters = unsubscribe_storage->topic_filters.data;
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &unsubscribe_storage->user_properties,
+ allocator,
+ &unsubscribe_storage->storage,
+ unsubscribe_options->user_property_count,
+ unsubscribe_options->user_properties)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&unsubscribe_storage->user_properties);
+ storage_view->user_properties = unsubscribe_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_unsubscribe_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_unsubscribe_storage *unsubscribe_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*unsubscribe_storage);
+
+ if (aws_mqtt5_user_property_set_init(&unsubscribe_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &unsubscribe_storage->topic_filters, allocator, 0, sizeof(struct aws_byte_cursor))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_mqtt5_operation_unsubscribe_complete(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *completion_view) {
+ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl;
+ (void)packet_type;
+
+ if (unsubscribe_op->completion_options.completion_callback != NULL) {
+ (*unsubscribe_op->completion_options.completion_callback)(
+ completion_view, error_code, unsubscribe_op->completion_options.completion_user_data);
+ }
+}
+
+static void s_aws_mqtt5_operation_unsubscribe_set_packet_id(
+ struct aws_mqtt5_operation *operation,
+ aws_mqtt5_packet_id_t packet_id) {
+ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl;
+ unsubscribe_op->options_storage.storage_view.packet_id = packet_id;
+}
+
+static aws_mqtt5_packet_id_t *s_aws_mqtt5_operation_unsubscribe_get_packet_id_address(
+ const struct aws_mqtt5_operation *operation) {
+ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = operation->impl;
+ return &unsubscribe_op->options_storage.storage_view.packet_id;
+}
+
+static struct aws_mqtt5_operation_vtable s_unsubscribe_operation_vtable = {
+ .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_operation_unsubscribe_complete,
+ .aws_mqtt5_operation_set_packet_id_fn = s_aws_mqtt5_operation_unsubscribe_set_packet_id,
+ .aws_mqtt5_operation_get_packet_id_address_fn = s_aws_mqtt5_operation_unsubscribe_get_packet_id_address,
+ .aws_mqtt5_operation_validate_vs_connection_settings_fn = NULL,
+};
+
+static void s_destroy_operation_unsubscribe(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op = object;
+
+ aws_mqtt5_packet_unsubscribe_storage_clean_up(&unsubscribe_op->options_storage);
+
+ aws_mem_release(unsubscribe_op->allocator, unsubscribe_op);
+}
+
+struct aws_mqtt5_operation_unsubscribe *aws_mqtt5_operation_unsubscribe_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_unsubscribe_view *unsubscribe_options,
+ const struct aws_mqtt5_unsubscribe_completion_options *completion_options) {
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(unsubscribe_options != NULL);
+
+ if (aws_mqtt5_packet_unsubscribe_view_validate(unsubscribe_options)) {
+ return NULL;
+ }
+
+ if (unsubscribe_options->packet_id != 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsubscribe_view packet id must be zero",
+ (void *)unsubscribe_options);
+ aws_raise_error(AWS_ERROR_MQTT5_UNSUBSCRIBE_OPTIONS_VALIDATION);
+ return NULL;
+ }
+
+ if (client != NULL && client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) {
+ if (aws_mqtt5_packet_unsubscribe_view_validate_vs_iot_core(unsubscribe_options)) {
+ return NULL;
+ }
+ }
+
+ struct aws_mqtt5_operation_unsubscribe *unsubscribe_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_unsubscribe));
+ if (unsubscribe_op == NULL) {
+ return NULL;
+ }
+
+ unsubscribe_op->allocator = allocator;
+ unsubscribe_op->base.vtable = &s_unsubscribe_operation_vtable;
+ unsubscribe_op->base.packet_type = AWS_MQTT5_PT_UNSUBSCRIBE;
+ aws_ref_count_init(&unsubscribe_op->base.ref_count, unsubscribe_op, s_destroy_operation_unsubscribe);
+ unsubscribe_op->base.impl = unsubscribe_op;
+
+ if (aws_mqtt5_packet_unsubscribe_storage_init(&unsubscribe_op->options_storage, allocator, unsubscribe_options)) {
+ goto error;
+ }
+
+ unsubscribe_op->base.packet_view = &unsubscribe_op->options_storage.storage_view;
+
+ if (completion_options != NULL) {
+ unsubscribe_op->completion_options = *completion_options;
+ }
+
+ return unsubscribe_op;
+
+error:
+
+ aws_mqtt5_operation_release(&unsubscribe_op->base);
+
+ return NULL;
+}
+
+/*********************************************************************************************************************
+ * Subscribe
+ ********************************************************************************************************************/
+
+static int s_aws_mqtt5_validate_subscription(
+ const struct aws_mqtt5_subscription_view *subscription,
+ void *log_context) {
+
+ if (aws_mqtt5_validate_utf8_text(subscription->topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - topic filter \"" PRInSTR "\" not valid UTF-8 in subscription",
+ log_context,
+ AWS_BYTE_CURSOR_PRI(subscription->topic_filter));
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ if (!aws_mqtt_is_valid_topic_filter(&subscription->topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - invalid topic filter \"" PRInSTR "\" in subscription",
+ log_context,
+ AWS_BYTE_CURSOR_PRI(subscription->topic_filter));
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ if (subscription->topic_filter.len > UINT16_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - subscription contains too-long topic filter",
+ log_context);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ if (subscription->qos < AWS_MQTT5_QOS_AT_MOST_ONCE || subscription->qos > AWS_MQTT5_QOS_AT_LEAST_ONCE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - unsupported QoS value: %d",
+ log_context,
+ (int)subscription->qos);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ if (subscription->retain_handling_type < AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE ||
+ subscription->retain_handling_type > AWS_MQTT5_RHT_DONT_SEND) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - unsupported retain handling value: %d",
+ log_context,
+ (int)subscription->retain_handling_type);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ /* MQTT5 forbids setting no_local to 1 when the topic filter represents a shared subscription */
+ if (subscription->no_local) {
+ if (aws_mqtt_is_topic_filter_shared_subscription(subscription->topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - no_local cannot be 1 if the topic filter is a shared"
+ "subscription",
+ log_context);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_subscribe_view_validate(const struct aws_mqtt5_packet_subscribe_view *subscribe_view) {
+
+ if (subscribe_view == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null SUBSCRIBE packet options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (subscribe_view->subscription_count == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - must contain at least one subscription",
+ (void *)subscribe_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ if (subscribe_view->subscription_count > AWS_MQTT5_CLIENT_MAXIMUM_SUBSCRIPTIONS_PER_SUBSCRIBE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - too many subscriptions",
+ (void *)subscribe_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+
+ for (size_t i = 0; i < subscribe_view->subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i];
+ if (s_aws_mqtt5_validate_subscription(subscription, (void *)subscribe_view)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - invalid subscription",
+ (void *)subscribe_view);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (subscribe_view->subscription_identifier != NULL) {
+ if (*subscribe_view->subscription_identifier > AWS_MQTT5_MAXIMUM_VARIABLE_LENGTH_INTEGER) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - subscription identifier (%" PRIu32 ") too large",
+ (void *)subscribe_view,
+ *subscribe_view->subscription_identifier);
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (s_aws_mqtt5_user_property_set_validate(
+ subscribe_view->user_properties,
+ subscribe_view->user_property_count,
+ "aws_mqtt5_packet_subscribe_view",
+ (void *)subscribe_view)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
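+
+ /*
+ * Usage sketch (illustrative only; filter and option values hypothetical): a SUBSCRIBE view wraps an
+ * array of subscription views, each of which must satisfy s_aws_mqtt5_validate_subscription above.
+ *
+ *     struct aws_mqtt5_subscription_view subscriptions[] = {
+ *         {
+ *             .topic_filter = aws_byte_cursor_from_c_str("sensors/+/state"),
+ *             .qos = AWS_MQTT5_QOS_AT_LEAST_ONCE,
+ *             .no_local = false,
+ *             .retain_as_published = false,
+ *             .retain_handling_type = AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE,
+ *         },
+ *     };
+ *     struct aws_mqtt5_packet_subscribe_view subscribe_view = {
+ *         .subscriptions = subscriptions,
+ *         .subscription_count = AWS_ARRAY_SIZE(subscriptions),
+ *     };
+ *     if (aws_mqtt5_packet_subscribe_view_validate(&subscribe_view)) {
+ *         // aws_last_error() is AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION
+ *     }
+ */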
+
+AWS_MQTT_API int aws_mqtt5_packet_subscribe_view_validate_vs_iot_core(
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_view) {
+
+ if (subscribe_view->subscription_count > AWS_IOT_CORE_MAXIMUM_SUSBCRIPTIONS_PER_SUBSCRIBE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - number of subscriptions (%zu) exceeds default AWS IoT Core limit "
+ "(%d)",
+ (void *)subscribe_view,
+ subscribe_view->subscription_count,
+ (int)AWS_IOT_CORE_MAXIMUM_SUSBCRIPTIONS_PER_SUBSCRIBE);
+ return AWS_OP_ERR;
+ }
+
+ for (size_t i = 0; i < subscribe_view->subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i];
+ const struct aws_byte_cursor *topic_filter = &subscription->topic_filter;
+ if (!aws_mqtt_is_valid_topic_filter_for_iot_core(*topic_filter)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view - topic filter not valid for AWS Iot Core limits: \"" PRInSTR
+ "\"",
+ (void *)subscribe_view,
+ AWS_BYTE_CURSOR_PRI(*topic_filter));
+ return aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_subscribe_view_log(
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ size_t subscription_count = subscribe_view->subscription_count;
+ for (size_t i = 0; i < subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *view = &subscribe_view->subscriptions[i];
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view subscription #%zu, topic filter \"" PRInSTR
+ "\", qos %d, no local %d, retain as "
+ "published %d, retain handling %d (%s)",
+ (void *)subscribe_view,
+ i,
+ AWS_BYTE_CURSOR_PRI(view->topic_filter),
+ (int)view->qos,
+ (int)view->no_local,
+ (int)view->retain_as_published,
+ (int)view->retain_handling_type,
+ aws_mqtt5_retain_handling_type_to_c_string(view->retain_handling_type));
+ }
+
+ if (subscribe_view->subscription_identifier != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view subscription identifier set to %" PRIu32,
+ (void *)subscribe_view,
+ *subscribe_view->subscription_identifier);
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ subscribe_view->user_properties,
+ subscribe_view->user_property_count,
+ (void *)subscribe_view,
+ level,
+ "aws_mqtt5_packet_subscribe_view");
+}
+
+void aws_mqtt5_packet_subscribe_storage_clean_up(struct aws_mqtt5_packet_subscribe_storage *subscribe_storage) {
+ if (subscribe_storage == NULL) {
+ return;
+ }
+
+ aws_array_list_clean_up(&subscribe_storage->subscriptions);
+
+ aws_mqtt5_user_property_set_clean_up(&subscribe_storage->user_properties);
+ aws_byte_buf_clean_up(&subscribe_storage->storage);
+}
+
+static int s_aws_mqtt5_packet_subscribe_storage_init_subscriptions(
+ struct aws_mqtt5_packet_subscribe_storage *subscribe_storage,
+ struct aws_allocator *allocator,
+ size_t subscription_count,
+ const struct aws_mqtt5_subscription_view *subscriptions) {
+
+ if (aws_array_list_init_dynamic(
+ &subscribe_storage->subscriptions,
+ allocator,
+ subscription_count,
+ sizeof(struct aws_mqtt5_subscription_view))) {
+ return AWS_OP_ERR;
+ }
+
+ for (size_t i = 0; i < subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *source = &subscriptions[i];
+ struct aws_mqtt5_subscription_view copy = *source;
+
+ if (aws_byte_buf_append_and_update(&subscribe_storage->storage, &copy.topic_filter)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_push_back(&subscribe_storage->subscriptions, &copy)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static size_t s_aws_mqtt5_packet_subscribe_compute_storage_size(
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ subscribe_view->user_properties, subscribe_view->user_property_count);
+
+ for (size_t i = 0; i < subscribe_view->subscription_count; ++i) {
+ const struct aws_mqtt5_subscription_view *subscription = &subscribe_view->subscriptions[i];
+ storage_size += subscription->topic_filter.len;
+ }
+
+ return storage_size;
+}
+
+int aws_mqtt5_packet_subscribe_storage_init(
+ struct aws_mqtt5_packet_subscribe_storage *subscribe_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_options) {
+
+ AWS_ZERO_STRUCT(*subscribe_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_subscribe_compute_storage_size(subscribe_options);
+ if (aws_byte_buf_init(&subscribe_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_subscribe_view *storage_view = &subscribe_storage->storage_view;
+ storage_view->packet_id = subscribe_options->packet_id;
+
+ if (subscribe_options->subscription_identifier != NULL) {
+ subscribe_storage->subscription_identifier = *subscribe_options->subscription_identifier;
+ storage_view->subscription_identifier = &subscribe_storage->subscription_identifier;
+ }
+
+ if (s_aws_mqtt5_packet_subscribe_storage_init_subscriptions(
+ subscribe_storage, allocator, subscribe_options->subscription_count, subscribe_options->subscriptions)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->subscription_count = aws_array_list_length(&subscribe_storage->subscriptions);
+ storage_view->subscriptions = subscribe_storage->subscriptions.data;
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &subscribe_storage->user_properties,
+ allocator,
+ &subscribe_storage->storage,
+ subscribe_options->user_property_count,
+ subscribe_options->user_properties)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&subscribe_storage->user_properties);
+ storage_view->user_properties = subscribe_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_subscribe_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_subscribe_storage *subscribe_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*subscribe_storage);
+
+ if (aws_mqtt5_user_property_set_init(&subscribe_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &subscribe_storage->subscriptions, allocator, 0, sizeof(struct aws_mqtt5_subscription_view))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_mqtt5_operation_subscribe_complete(
+ struct aws_mqtt5_operation *operation,
+ int error_code,
+ enum aws_mqtt5_packet_type packet_type,
+ const void *completion_view) {
+ (void)packet_type;
+
+ struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl;
+
+ if (subscribe_op->completion_options.completion_callback != NULL) {
+ (*subscribe_op->completion_options.completion_callback)(
+ completion_view, error_code, subscribe_op->completion_options.completion_user_data);
+ }
+}
+
+static void s_aws_mqtt5_operation_subscribe_set_packet_id(
+ struct aws_mqtt5_operation *operation,
+ aws_mqtt5_packet_id_t packet_id) {
+ struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl;
+ subscribe_op->options_storage.storage_view.packet_id = packet_id;
+}
+
+static aws_mqtt5_packet_id_t *s_aws_mqtt5_operation_subscribe_get_packet_id_address(
+ const struct aws_mqtt5_operation *operation) {
+ struct aws_mqtt5_operation_subscribe *subscribe_op = operation->impl;
+ return &subscribe_op->options_storage.storage_view.packet_id;
+}
+
+static struct aws_mqtt5_operation_vtable s_subscribe_operation_vtable = {
+ .aws_mqtt5_operation_completion_fn = s_aws_mqtt5_operation_subscribe_complete,
+ .aws_mqtt5_operation_set_packet_id_fn = s_aws_mqtt5_operation_subscribe_set_packet_id,
+ .aws_mqtt5_operation_get_packet_id_address_fn = s_aws_mqtt5_operation_subscribe_get_packet_id_address,
+ .aws_mqtt5_operation_validate_vs_connection_settings_fn = NULL,
+};
+
+static void s_destroy_operation_subscribe(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_subscribe *subscribe_op = object;
+
+ aws_mqtt5_packet_subscribe_storage_clean_up(&subscribe_op->options_storage);
+
+ aws_mem_release(subscribe_op->allocator, subscribe_op);
+}
+
+struct aws_mqtt5_operation_subscribe *aws_mqtt5_operation_subscribe_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client *client,
+ const struct aws_mqtt5_packet_subscribe_view *subscribe_options,
+ const struct aws_mqtt5_subscribe_completion_options *completion_options) {
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(subscribe_options != NULL);
+
+ if (aws_mqtt5_packet_subscribe_view_validate(subscribe_options)) {
+ return NULL;
+ }
+
+ if (subscribe_options->packet_id != 0) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_subscribe_view packet id must be zero",
+ (void *)subscribe_options);
+ aws_raise_error(AWS_ERROR_MQTT5_SUBSCRIBE_OPTIONS_VALIDATION);
+ return NULL;
+ }
+
+ if (client != NULL && client->config->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) {
+ if (aws_mqtt5_packet_subscribe_view_validate_vs_iot_core(subscribe_options)) {
+ return NULL;
+ }
+ }
+
+ struct aws_mqtt5_operation_subscribe *subscribe_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_subscribe));
+ if (subscribe_op == NULL) {
+ return NULL;
+ }
+
+ subscribe_op->allocator = allocator;
+ subscribe_op->base.vtable = &s_subscribe_operation_vtable;
+ subscribe_op->base.packet_type = AWS_MQTT5_PT_SUBSCRIBE;
+ aws_ref_count_init(&subscribe_op->base.ref_count, subscribe_op, s_destroy_operation_subscribe);
+ subscribe_op->base.impl = subscribe_op;
+
+ if (aws_mqtt5_packet_subscribe_storage_init(&subscribe_op->options_storage, allocator, subscribe_options)) {
+ goto error;
+ }
+
+ subscribe_op->base.packet_view = &subscribe_op->options_storage.storage_view;
+
+ if (completion_options != NULL) {
+ subscribe_op->completion_options = *completion_options;
+ }
+
+ return subscribe_op;
+
+error:
+
+ aws_mqtt5_operation_release(&subscribe_op->base);
+
+ return NULL;
+}
+
+/*********************************************************************************************************************
+ * Suback
+ ********************************************************************************************************************/
+
+static size_t s_aws_mqtt5_packet_suback_compute_storage_size(const struct aws_mqtt5_packet_suback_view *suback_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ suback_view->user_properties, suback_view->user_property_count);
+
+ if (suback_view->reason_string != NULL) {
+ storage_size += suback_view->reason_string->len;
+ }
+
+ return storage_size;
+}
+
+static int s_aws_mqtt5_packet_suback_storage_init_reason_codes(
+ struct aws_mqtt5_packet_suback_storage *suback_storage,
+ struct aws_allocator *allocator,
+ size_t reason_code_count,
+ const enum aws_mqtt5_suback_reason_code *reason_codes) {
+
+ if (aws_array_list_init_dynamic(
+ &suback_storage->reason_codes, allocator, reason_code_count, sizeof(enum aws_mqtt5_suback_reason_code))) {
+ return AWS_OP_ERR;
+ }
+
+ for (size_t i = 0; i < reason_code_count; ++i) {
+ aws_array_list_push_back(&suback_storage->reason_codes, &reason_codes[i]);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+AWS_MQTT_API int aws_mqtt5_packet_suback_storage_init(
+ struct aws_mqtt5_packet_suback_storage *suback_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_suback_view *suback_view) {
+ AWS_ZERO_STRUCT(*suback_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_suback_compute_storage_size(suback_view);
+ if (aws_byte_buf_init(&suback_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_suback_view *storage_view = &suback_storage->storage_view;
+
+ storage_view->packet_id = suback_view->packet_id;
+
+ if (suback_view->reason_string != NULL) {
+ suback_storage->reason_string = *suback_view->reason_string;
+ if (aws_byte_buf_append_and_update(&suback_storage->storage, &suback_storage->reason_string)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->reason_string = &suback_storage->reason_string;
+ }
+
+ if (s_aws_mqtt5_packet_suback_storage_init_reason_codes(
+ suback_storage, allocator, suback_view->reason_code_count, suback_view->reason_codes)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->reason_code_count = aws_array_list_length(&suback_storage->reason_codes);
+ storage_view->reason_codes = suback_storage->reason_codes.data;
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &suback_storage->user_properties,
+ allocator,
+ &suback_storage->storage,
+ suback_view->user_property_count,
+ suback_view->user_properties)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&suback_storage->user_properties);
+ storage_view->user_properties = suback_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_suback_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_suback_storage *suback_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*suback_storage);
+
+ if (aws_mqtt5_user_property_set_init(&suback_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &suback_storage->reason_codes, allocator, 0, sizeof(enum aws_mqtt5_suback_reason_code))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_suback_storage_clean_up(struct aws_mqtt5_packet_suback_storage *suback_storage) {
+ if (suback_storage == NULL) {
+ return;
+ }
+ aws_mqtt5_user_property_set_clean_up(&suback_storage->user_properties);
+
+ aws_array_list_clean_up(&suback_storage->reason_codes);
+
+ aws_byte_buf_clean_up(&suback_storage->storage);
+}
+
+void aws_mqtt5_packet_suback_view_log(
+ const struct aws_mqtt5_packet_suback_view *suback_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_suback_view packet id set to %d",
+ (void *)suback_view,
+ (int)suback_view->packet_id);
+
+ for (size_t i = 0; i < suback_view->reason_code_count; ++i) {
+ enum aws_mqtt5_suback_reason_code reason_code = suback_view->reason_codes[i];
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_suback_view topic #%zu, reason code %d (%s)",
+ (void *)suback_view,
+ i,
+ (int)reason_code,
+ aws_mqtt5_suback_reason_code_to_c_string(reason_code));
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ suback_view->user_properties,
+ suback_view->user_property_count,
+ (void *)suback_view,
+ level,
+ "aws_mqtt5_packet_suback_view");
+}
+
+/*********************************************************************************************************************
+ * Unsuback
+ ********************************************************************************************************************/
+
+static size_t s_aws_mqtt5_packet_unsuback_compute_storage_size(
+ const struct aws_mqtt5_packet_unsuback_view *unsuback_view) {
+ size_t storage_size = s_aws_mqtt5_user_property_set_compute_storage_size(
+ unsuback_view->user_properties, unsuback_view->user_property_count);
+
+ if (unsuback_view->reason_string != NULL) {
+ storage_size += unsuback_view->reason_string->len;
+ }
+
+ return storage_size;
+}
+
+static int s_aws_mqtt5_packet_unsuback_storage_init_reason_codes(
+ struct aws_mqtt5_packet_unsuback_storage *unsuback_storage,
+ struct aws_allocator *allocator,
+ size_t reason_code_count,
+ const enum aws_mqtt5_unsuback_reason_code *reason_codes) {
+
+ if (aws_array_list_init_dynamic(
+ &unsuback_storage->reason_codes,
+ allocator,
+ reason_code_count,
+ sizeof(enum aws_mqtt5_unsuback_reason_code))) {
+ return AWS_OP_ERR;
+ }
+
+ for (size_t i = 0; i < reason_code_count; ++i) {
+ aws_array_list_push_back(&unsuback_storage->reason_codes, &reason_codes[i]);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+AWS_MQTT_API int aws_mqtt5_packet_unsuback_storage_init(
+ struct aws_mqtt5_packet_unsuback_storage *unsuback_storage,
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_packet_unsuback_view *unsuback_view) {
+ AWS_ZERO_STRUCT(*unsuback_storage);
+ size_t storage_capacity = s_aws_mqtt5_packet_unsuback_compute_storage_size(unsuback_view);
+ if (aws_byte_buf_init(&unsuback_storage->storage, allocator, storage_capacity)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_mqtt5_packet_unsuback_view *storage_view = &unsuback_storage->storage_view;
+
+ storage_view->packet_id = unsuback_view->packet_id;
+
+ if (unsuback_view->reason_string != NULL) {
+ unsuback_storage->reason_string = *unsuback_view->reason_string;
+ if (aws_byte_buf_append_and_update(&unsuback_storage->storage, &unsuback_storage->reason_string)) {
+ return AWS_OP_ERR;
+ }
+
+ storage_view->reason_string = &unsuback_storage->reason_string;
+ }
+
+ if (s_aws_mqtt5_packet_unsuback_storage_init_reason_codes(
+ unsuback_storage, allocator, unsuback_view->reason_code_count, unsuback_view->reason_codes)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->reason_code_count = aws_array_list_length(&unsuback_storage->reason_codes);
+ storage_view->reason_codes = unsuback_storage->reason_codes.data;
+
+ if (aws_mqtt5_user_property_set_init_with_storage(
+ &unsuback_storage->user_properties,
+ allocator,
+ &unsuback_storage->storage,
+ unsuback_view->user_property_count,
+ unsuback_view->user_properties)) {
+ return AWS_OP_ERR;
+ }
+ storage_view->user_property_count = aws_mqtt5_user_property_set_size(&unsuback_storage->user_properties);
+ storage_view->user_properties = unsuback_storage->user_properties.properties.data;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_packet_unsuback_storage_init_from_external_storage(
+ struct aws_mqtt5_packet_unsuback_storage *unsuback_storage,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*unsuback_storage);
+
+ if (aws_mqtt5_user_property_set_init(&unsuback_storage->user_properties, allocator)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_array_list_init_dynamic(
+ &unsuback_storage->reason_codes, allocator, 0, sizeof(enum aws_mqtt5_unsuback_reason_code))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_packet_unsuback_storage_clean_up(struct aws_mqtt5_packet_unsuback_storage *unsuback_storage) {
+ if (unsuback_storage == NULL) {
+ return;
+ }
+ aws_mqtt5_user_property_set_clean_up(&unsuback_storage->user_properties);
+
+ aws_array_list_clean_up(&unsuback_storage->reason_codes);
+
+ aws_byte_buf_clean_up(&unsuback_storage->storage);
+}
+
+void aws_mqtt5_packet_unsuback_view_log(
+ const struct aws_mqtt5_packet_unsuback_view *unsuback_view,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsuback_view packet id set to %d",
+ (void *)unsuback_view,
+ (int)unsuback_view->packet_id);
+
+ for (size_t i = 0; i < unsuback_view->reason_code_count; ++i) {
+ enum aws_mqtt5_unsuback_reason_code reason_code = unsuback_view->reason_codes[i];
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_packet_unsuback_view topic #%zu, reason code %d (%s)",
+ (void *)unsuback_view,
+ i,
+ (int)reason_code,
+ aws_mqtt5_unsuback_reason_code_to_c_string(reason_code));
+ }
+
+ s_aws_mqtt5_user_property_set_log(
+ log_handle,
+ unsuback_view->user_properties,
+ unsuback_view->user_property_count,
+ (void *)unsuback_view,
+ level,
+ "aws_mqtt5_packet_unsuback_view");
+}
+
+/*********************************************************************************************************************
+ * PINGREQ
+ ********************************************************************************************************************/
+
+static void s_destroy_operation_pingreq(void *object) {
+ if (object == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_operation_pingreq *pingreq_op = object;
+ aws_mem_release(pingreq_op->allocator, pingreq_op);
+}
+
+struct aws_mqtt5_operation_pingreq *aws_mqtt5_operation_pingreq_new(struct aws_allocator *allocator) {
+ struct aws_mqtt5_operation_pingreq *pingreq_op =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_operation_pingreq));
+ if (pingreq_op == NULL) {
+ return NULL;
+ }
+
+ pingreq_op->allocator = allocator;
+ pingreq_op->base.vtable = &s_empty_operation_vtable;
+ pingreq_op->base.packet_type = AWS_MQTT5_PT_PINGREQ;
+ aws_ref_count_init(&pingreq_op->base.ref_count, pingreq_op, s_destroy_operation_pingreq);
+ pingreq_op->base.impl = pingreq_op;
+
+ return pingreq_op;
+}
+
+/*********************************************************************************************************************
+ * Client storage options
+ ********************************************************************************************************************/
+
+int aws_mqtt5_client_options_validate(const struct aws_mqtt5_client_options *options) {
+ if (options == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "null mqtt5 client configuration options");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (options->host_name.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "host name not set in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+
+ if (options->bootstrap == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "client bootstrap not set in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+
+ /* forbid no-timeout until someone convinces me otherwise */
+ if (options->socket_options != NULL) {
+ if (options->socket_options->type == AWS_SOCKET_DGRAM || options->socket_options->connect_timeout_ms == 0) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid socket options in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+ }
+
+ if (options->http_proxy_options != NULL) {
+ if (options->http_proxy_options->host.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "proxy host name not set in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+
+ if (options->http_proxy_options->port == 0) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "proxy port not set in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+ }
+
+ /* can't think of why you'd ever want an MQTT client without lifecycle event notifications */
+ if (options->lifecycle_event_handler == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "lifecycle event handler not set in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+
+ if (options->publish_received_handler == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "publish received not set in mqtt5 client configuration");
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+
+ if (aws_mqtt5_packet_connect_view_validate(options->connect_options)) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "invalid CONNECT options in mqtt5 client configuration");
+ return AWS_OP_ERR;
+ }
+
+ /* The client will not behave properly if ping timeout is not significantly shorter than the keep alive interval */
+ if (options->connect_options->keep_alive_interval_seconds > 0) {
+ uint64_t keep_alive_ms = aws_timestamp_convert(
+ options->connect_options->keep_alive_interval_seconds, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+ uint64_t one_second_ms = aws_timestamp_convert(1, AWS_TIMESTAMP_SECS, AWS_TIMESTAMP_MILLIS, NULL);
+
+ uint64_t ping_timeout_ms = options->ping_timeout_ms;
+ if (ping_timeout_ms == 0) {
+ ping_timeout_ms = AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS;
+ }
+
+ if (ping_timeout_ms + one_second_ms > keep_alive_ms) {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "keep alive interval is too small relative to ping timeout interval");
+ return AWS_OP_ERR;
+ }
+ }
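+
+ /*
+ * A worked example of the check above (the keep alive value is chosen for illustration only): with
+ * keep_alive_interval_seconds set to 60, validation requires ping_timeout_ms + 1000 <= 60000, so any effective
+ * ping timeout up to 59000 ms passes while anything larger is rejected.
+ */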
+
+ if (options->extended_validation_and_flow_control_options != AWS_MQTT5_EVAFCO_NONE) {
+ if (options->connect_options->client_id.len > AWS_IOT_CORE_MAXIMUM_CLIENT_ID_LENGTH) {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_GENERAL,
+ "AWS IoT Core limits client_id to be less than or equal to %d bytes in length",
+ (int)AWS_IOT_CORE_MAXIMUM_CLIENT_ID_LENGTH);
+ return aws_raise_error(AWS_ERROR_MQTT5_CLIENT_OPTIONS_VALIDATION);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_log_tls_connection_options(
+ struct aws_logger *log_handle,
+ const struct aws_mqtt5_client_options_storage *options_storage,
+ const struct aws_tls_connection_options *tls_options,
+ enum aws_log_level level,
+ const char *log_text) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options set:",
+ (void *)options_storage,
+ log_text);
+
+ if (tls_options->advertise_alpn_message && tls_options->alpn_list) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options alpn protocol list set to \"%s\"",
+ (void *)options_storage,
+ log_text,
+ aws_string_c_str(tls_options->alpn_list));
+ } else {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options alpn not used",
+ (void *)options_storage,
+ log_text);
+ }
+
+ if (tls_options->server_name) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options SNI value set to \"%s\"",
+ (void *)options_storage,
+ log_text,
+ aws_string_c_str(tls_options->server_name));
+ } else {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options SNI not used",
+ (void *)options_storage,
+ log_text);
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options tls context set to (%p)",
+ (void *)options_storage,
+ log_text,
+ (void *)(tls_options->ctx));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage %s tls options handshake timeout set to %" PRIu32,
+ (void *)options_storage,
+ log_text,
+ tls_options->timeout_ms);
+}
+
+static void s_log_topic_aliasing_options(
+ struct aws_logger *log_handle,
+ const struct aws_mqtt5_client_options_storage *options_storage,
+ const struct aws_mqtt5_client_topic_alias_options *topic_aliasing_options,
+ enum aws_log_level level) {
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage outbound topic aliasing behavior set to %d (%s)",
+ (void *)options_storage,
+ (int)topic_aliasing_options->outbound_topic_alias_behavior,
+ aws_mqtt5_outbound_topic_alias_behavior_type_to_c_string(
+ topic_aliasing_options->outbound_topic_alias_behavior));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage maximum outbound topic alias cache size set to %" PRIu16,
+ (void *)options_storage,
+ topic_aliasing_options->outbound_alias_cache_max_size);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage inbound topic aliasing behavior set to %d (%s)",
+ (void *)options_storage,
+ (int)topic_aliasing_options->inbound_topic_alias_behavior,
+ aws_mqtt5_inbound_topic_alias_behavior_type_to_c_string(topic_aliasing_options->inbound_topic_alias_behavior));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage inbound topic alias cache size set to %" PRIu16,
+ (void *)options_storage,
+ topic_aliasing_options->inbound_alias_cache_size);
+}
+
+void aws_mqtt5_client_options_storage_log(
+ const struct aws_mqtt5_client_options_storage *options_storage,
+ enum aws_log_level level) {
+ struct aws_logger *log_handle = aws_logger_get_conditional(AWS_LS_MQTT5_GENERAL, level);
+ if (log_handle == NULL) {
+ return;
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage host name set to %s",
+ (void *)options_storage,
+ aws_string_c_str(options_storage->host_name));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage port set to %" PRIu16,
+ (void *)options_storage,
+ options_storage->port);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage client bootstrap set to (%p)",
+ (void *)options_storage,
+ (void *)options_storage->bootstrap);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage socket options set to: type = %d, domain = %d, connect_timeout_ms = "
+ "%" PRIu32,
+ (void *)options_storage,
+ (int)options_storage->socket_options.type,
+ (int)options_storage->socket_options.domain,
+ options_storage->socket_options.connect_timeout_ms);
+
+ if (options_storage->socket_options.keepalive) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage socket keepalive options set to: keep_alive_interval_sec = "
+ "%" PRIu16 ", "
+ "keep_alive_timeout_sec = %" PRIu16 ", keep_alive_max_failed_probes = %" PRIu16,
+ (void *)options_storage,
+ options_storage->socket_options.keep_alive_interval_sec,
+ options_storage->socket_options.keep_alive_timeout_sec,
+ options_storage->socket_options.keep_alive_max_failed_probes);
+ }
+
+ if (options_storage->tls_options_ptr != NULL) {
+ s_log_tls_connection_options(log_handle, options_storage, options_storage->tls_options_ptr, level, "");
+ }
+
+ if (options_storage->http_proxy_config != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage using http proxy:",
+ (void *)options_storage);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage http proxy host name set to " PRInSTR,
+ (void *)options_storage,
+ AWS_BYTE_CURSOR_PRI(options_storage->http_proxy_options.host));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage http proxy port set to %" PRIu16,
+ (void *)options_storage,
+ options_storage->http_proxy_options.port);
+
+ if (options_storage->http_proxy_options.tls_options != NULL) {
+ s_log_tls_connection_options(
+ log_handle, options_storage, options_storage->tls_options_ptr, level, "http proxy");
+ }
+
+ /* ToDo: add (and use) an API to proxy strategy that returns a debug string (Basic, Adaptive, etc...) */
+ if (options_storage->http_proxy_options.proxy_strategy != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage http proxy strategy set to (%p)",
+ (void *)options_storage,
+ (void *)options_storage->http_proxy_options.proxy_strategy);
+ }
+ }
+
+ if (options_storage->websocket_handshake_transform != NULL) {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage enabling websockets",
+ (void *)options_storage);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage websocket handshake transform user data set to (%p)",
+ (void *)options_storage,
+ options_storage->websocket_handshake_transform_user_data);
+ } else {
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: mqtt5_client_options_storage disabling websockets",
+ (void *)options_storage);
+ }
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage session behavior set to %d (%s)",
+ (void *)options_storage,
+ (int)options_storage->session_behavior,
+ aws_mqtt5_client_session_behavior_type_to_c_string(options_storage->session_behavior));
+
+ s_log_topic_aliasing_options(log_handle, options_storage, &options_storage->topic_aliasing_options, level);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage extended validation and flow control options set to %d (%s)",
+ (void *)options_storage,
+ (int)options_storage->extended_validation_and_flow_control_options,
+ aws_mqtt5_extended_validation_and_flow_control_options_to_c_string(
+ options_storage->extended_validation_and_flow_control_options));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage operation queue behavior set to %d (%s)",
+ (void *)options_storage,
+ (int)options_storage->offline_queue_behavior,
+ aws_mqtt5_client_operation_queue_behavior_type_to_c_string(options_storage->offline_queue_behavior));
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage reconnect jitter mode set to %d",
+ (void *)options_storage,
+ (int)options_storage->retry_jitter_mode);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: mqtt5_client_options_storage reconnect delay min set to %" PRIu64 " ms, max set to %" PRIu64 " ms",
+ (void *)options_storage,
+ options_storage->min_reconnect_delay_ms,
+ options_storage->max_reconnect_delay_ms);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage minimum necessary connection time in order to reset the reconnect "
+ "delay "
+ "set "
+ "to %" PRIu64 " ms",
+ (void *)options_storage,
+ options_storage->min_connected_time_to_reset_reconnect_delay_ms);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage ping timeout interval set to %" PRIu32 " ms",
+ (void *)options_storage,
+ options_storage->ping_timeout_ms);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage connack timeout interval set to %" PRIu32 " ms",
+ (void *)options_storage,
+ options_storage->connack_timeout_ms);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage connect options:",
+ (void *)options_storage);
+
+ aws_mqtt5_packet_connect_view_log(&options_storage->connect.storage_view, level);
+
+ AWS_LOGUF(
+ log_handle,
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_client_options_storage lifecycle event handler user data set to (%p)",
+ (void *)options_storage,
+ options_storage->lifecycle_event_handler_user_data);
+}
+
+void aws_mqtt5_client_options_storage_destroy(struct aws_mqtt5_client_options_storage *options_storage) {
+ if (options_storage == NULL) {
+ return;
+ }
+
+ aws_string_destroy(options_storage->host_name);
+ aws_client_bootstrap_release(options_storage->bootstrap);
+
+ aws_tls_connection_options_clean_up(&options_storage->tls_options);
+ aws_http_proxy_config_destroy(options_storage->http_proxy_config);
+
+ aws_mqtt5_packet_connect_storage_clean_up(&options_storage->connect);
+
+ aws_mem_release(options_storage->allocator, options_storage);
+}
+
+static void s_apply_zero_valued_defaults_to_client_options_storage(
+ struct aws_mqtt5_client_options_storage *options_storage) {
+ if (options_storage->min_reconnect_delay_ms == 0) {
+ options_storage->min_reconnect_delay_ms = AWS_MQTT5_CLIENT_DEFAULT_MIN_RECONNECT_DELAY_MS;
+ }
+
+ if (options_storage->max_reconnect_delay_ms == 0) {
+ options_storage->max_reconnect_delay_ms = AWS_MQTT5_CLIENT_DEFAULT_MAX_RECONNECT_DELAY_MS;
+ }
+
+ if (options_storage->min_connected_time_to_reset_reconnect_delay_ms == 0) {
+ options_storage->min_connected_time_to_reset_reconnect_delay_ms =
+ AWS_MQTT5_CLIENT_DEFAULT_MIN_CONNECTED_TIME_TO_RESET_RECONNECT_DELAY_MS;
+ }
+
+ if (options_storage->ping_timeout_ms == 0) {
+ options_storage->ping_timeout_ms = AWS_MQTT5_CLIENT_DEFAULT_PING_TIMEOUT_MS;
+ }
+
+ if (options_storage->connack_timeout_ms == 0) {
+ options_storage->connack_timeout_ms = AWS_MQTT5_CLIENT_DEFAULT_CONNACK_TIMEOUT_MS;
+ }
+
+ if (options_storage->ack_timeout_seconds == 0) {
+ options_storage->ack_timeout_seconds = AWS_MQTT5_CLIENT_DEFAULT_OPERATION_TIMEOUNT_SECONDS;
+ }
+
+ if (options_storage->topic_aliasing_options.inbound_alias_cache_size == 0) {
+ options_storage->topic_aliasing_options.inbound_alias_cache_size =
+ AWS_MQTT5_CLIENT_DEFAULT_INBOUND_TOPIC_ALIAS_CACHE_SIZE;
+ }
+
+ if (options_storage->topic_aliasing_options.outbound_alias_cache_max_size == 0) {
+ options_storage->topic_aliasing_options.outbound_alias_cache_max_size =
+ AWS_MQTT5_CLIENT_DEFAULT_OUTBOUND_TOPIC_ALIAS_CACHE_SIZE;
+ }
+}
+
+struct aws_mqtt5_client_options_storage *aws_mqtt5_client_options_storage_new(
+ struct aws_allocator *allocator,
+ const struct aws_mqtt5_client_options *options) {
+ AWS_PRECONDITION(allocator != NULL);
+ AWS_PRECONDITION(options != NULL);
+
+ if (aws_mqtt5_client_options_validate(options)) {
+ return NULL;
+ }
+
+ struct aws_mqtt5_client_options_storage *options_storage =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_client_options_storage));
+ if (options_storage == NULL) {
+ return NULL;
+ }
+
+ options_storage->allocator = allocator;
+ options_storage->host_name = aws_string_new_from_cursor(allocator, &options->host_name);
+ if (options_storage->host_name == NULL) {
+ goto error;
+ }
+
+ options_storage->port = options->port;
+ options_storage->bootstrap = aws_client_bootstrap_acquire(options->bootstrap);
+
+ if (options->socket_options != NULL) {
+ options_storage->socket_options = *options->socket_options;
+ } else {
+ options_storage->socket_options.type = AWS_SOCKET_STREAM;
+ options_storage->socket_options.connect_timeout_ms = AWS_MQTT5_DEFAULT_SOCKET_CONNECT_TIMEOUT_MS;
+ }
+
+ if (options->tls_options != NULL) {
+ if (aws_tls_connection_options_copy(&options_storage->tls_options, options->tls_options)) {
+ goto error;
+ }
+ options_storage->tls_options_ptr = &options_storage->tls_options;
+
+ if (!options_storage->tls_options.server_name) {
+ struct aws_byte_cursor host_name_cur = aws_byte_cursor_from_string(options_storage->host_name);
+ if (aws_tls_connection_options_set_server_name(&options_storage->tls_options, allocator, &host_name_cur)) {
+
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_GENERAL, "Failed to set TLS Connection Options server name");
+ goto error;
+ }
+ }
+ }
+
+ if (options->http_proxy_options != NULL) {
+ options_storage->http_proxy_config =
+ aws_http_proxy_config_new_from_proxy_options(allocator, options->http_proxy_options);
+ if (options_storage->http_proxy_config == NULL) {
+ goto error;
+ }
+
+ aws_http_proxy_options_init_from_config(
+ &options_storage->http_proxy_options, options_storage->http_proxy_config);
+ }
+
+ options_storage->websocket_handshake_transform = options->websocket_handshake_transform;
+ options_storage->websocket_handshake_transform_user_data = options->websocket_handshake_transform_user_data;
+
+ options_storage->publish_received_handler = options->publish_received_handler;
+ options_storage->publish_received_handler_user_data = options->publish_received_handler_user_data;
+
+ options_storage->session_behavior = options->session_behavior;
+ options_storage->extended_validation_and_flow_control_options =
+ options->extended_validation_and_flow_control_options;
+ options_storage->offline_queue_behavior = options->offline_queue_behavior;
+
+ options_storage->retry_jitter_mode = options->retry_jitter_mode;
+ options_storage->min_reconnect_delay_ms = options->min_reconnect_delay_ms;
+ options_storage->max_reconnect_delay_ms = options->max_reconnect_delay_ms;
+ options_storage->min_connected_time_to_reset_reconnect_delay_ms =
+ options->min_connected_time_to_reset_reconnect_delay_ms;
+
+ options_storage->ping_timeout_ms = options->ping_timeout_ms;
+ options_storage->connack_timeout_ms = options->connack_timeout_ms;
+
+ options_storage->ack_timeout_seconds = options->ack_timeout_seconds;
+
+ if (options->topic_aliasing_options != NULL) {
+ options_storage->topic_aliasing_options = *options->topic_aliasing_options;
+ }
+
+ if (aws_mqtt5_packet_connect_storage_init(&options_storage->connect, allocator, options->connect_options)) {
+ goto error;
+ }
+
+ options_storage->lifecycle_event_handler = options->lifecycle_event_handler;
+ options_storage->lifecycle_event_handler_user_data = options->lifecycle_event_handler_user_data;
+
+ options_storage->client_termination_handler = options->client_termination_handler;
+ options_storage->client_termination_handler_user_data = options->client_termination_handler_user_data;
+
+ s_apply_zero_valued_defaults_to_client_options_storage(options_storage);
+
+ return options_storage;
+
+error:
+
+ aws_mqtt5_client_options_storage_destroy(options_storage);
+
+ return NULL;
+}
+
+struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_acquire(
+ struct aws_mqtt5_operation_disconnect *disconnect_op) {
+ if (disconnect_op != NULL) {
+ aws_mqtt5_operation_acquire(&disconnect_op->base);
+ }
+
+ return disconnect_op;
+}
+
+struct aws_mqtt5_operation_disconnect *aws_mqtt5_operation_disconnect_release(
+ struct aws_mqtt5_operation_disconnect *disconnect_op) {
+ if (disconnect_op != NULL) {
+ aws_mqtt5_operation_release(&disconnect_op->base);
+ }
+
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
new file mode 100644
index 0000000000..928c771928
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_topic_alias.c
@@ -0,0 +1,586 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_topic_alias.h>
+
+#include <aws/common/lru_cache.h>
+#include <aws/common/string.h>
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+
+int aws_mqtt5_inbound_topic_alias_resolver_init(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ struct aws_allocator *allocator) {
+ AWS_ZERO_STRUCT(*resolver);
+ resolver->allocator = allocator;
+
+ if (aws_array_list_init_dynamic(&resolver->topic_aliases, allocator, 0, sizeof(struct aws_string *))) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_release_aliases(struct aws_mqtt5_inbound_topic_alias_resolver *resolver) {
+ if (!aws_array_list_is_valid(&resolver->topic_aliases)) {
+ return;
+ }
+
+ size_t cache_size = aws_array_list_length(&resolver->topic_aliases);
+ for (size_t i = 0; i < cache_size; ++i) {
+ struct aws_string *topic = NULL;
+
+ aws_array_list_get_at(&resolver->topic_aliases, &topic, i);
+ aws_string_destroy(topic);
+ }
+}
+
+void aws_mqtt5_inbound_topic_alias_resolver_clean_up(struct aws_mqtt5_inbound_topic_alias_resolver *resolver) {
+ s_release_aliases(resolver);
+ aws_array_list_clean_up(&resolver->topic_aliases);
+}
+
+int aws_mqtt5_inbound_topic_alias_resolver_reset(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ uint16_t cache_size) {
+
+ aws_mqtt5_inbound_topic_alias_resolver_clean_up(resolver);
+ AWS_ZERO_STRUCT(resolver->topic_aliases);
+
+ if (aws_array_list_init_dynamic(
+ &resolver->topic_aliases, resolver->allocator, cache_size, sizeof(struct aws_string *))) {
+ return AWS_OP_ERR;
+ }
+
+ for (size_t i = 0; i < cache_size; ++i) {
+ struct aws_string *topic = NULL;
+ aws_array_list_push_back(&resolver->topic_aliases, &topic);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ uint16_t alias,
+ struct aws_byte_cursor *topic_out) {
+ size_t cache_size = aws_array_list_length(&resolver->topic_aliases);
+
+ if (alias > cache_size || alias == 0) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS);
+ }
+
+ size_t alias_index = alias - 1;
+ struct aws_string *topic = NULL;
+ aws_array_list_get_at(&resolver->topic_aliases, &topic, alias_index);
+
+ if (topic == NULL) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS);
+ }
+
+ *topic_out = aws_byte_cursor_from_string(topic);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_inbound_topic_alias_resolver_register_alias(
+ struct aws_mqtt5_inbound_topic_alias_resolver *resolver,
+ uint16_t alias,
+ struct aws_byte_cursor topic) {
+ size_t cache_size = aws_array_list_length(&resolver->topic_aliases);
+
+ if (alias > cache_size || alias == 0) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS);
+ }
+
+ struct aws_string *new_entry = aws_string_new_from_cursor(resolver->allocator, &topic);
+ if (new_entry == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ size_t alias_index = alias - 1;
+ struct aws_string *existing_entry = NULL;
+ aws_array_list_get_at(&resolver->topic_aliases, &existing_entry, alias_index);
+ aws_string_destroy(existing_entry);
+
+ aws_array_list_set_at(&resolver->topic_aliases, &new_entry, alias_index);
+
+ return AWS_OP_SUCCESS;
+}
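+
+/*
+ * Illustrative flow (the cache size of 10 is a hypothetical value): with a negotiated inbound alias maximum of 10,
+ * an inbound PUBLISH that carries both a topic and alias 1 is recorded via
+ *
+ *     aws_mqtt5_inbound_topic_alias_resolver_register_alias(resolver, 1, topic);
+ *
+ * and a later PUBLISH carrying only alias 1 is mapped back to that topic via
+ *
+ *     aws_mqtt5_inbound_topic_alias_resolver_resolve_alias(resolver, 1, &topic_out);
+ *
+ * Alias 0, or any alias greater than the cache size, fails with AWS_ERROR_MQTT5_INVALID_INBOUND_TOPIC_ALIAS.
+ */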
+
+/****************************************************************************************************************/
+
+struct aws_mqtt5_outbound_topic_alias_resolver_vtable {
+ void (*destroy_fn)(struct aws_mqtt5_outbound_topic_alias_resolver *);
+ int (*reset_fn)(struct aws_mqtt5_outbound_topic_alias_resolver *, uint16_t);
+ int (*resolve_outbound_publish_fn)(
+ struct aws_mqtt5_outbound_topic_alias_resolver *,
+ const struct aws_mqtt5_packet_publish_view *,
+ uint16_t *,
+ struct aws_byte_cursor *);
+};
+
+struct aws_mqtt5_outbound_topic_alias_resolver {
+ struct aws_allocator *allocator;
+
+ struct aws_mqtt5_outbound_topic_alias_resolver_vtable *vtable;
+ void *impl;
+};
+
+static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_disabled_new(
+ struct aws_allocator *allocator);
+static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_lru_new(
+ struct aws_allocator *allocator);
+static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_user_new(
+ struct aws_allocator *allocator);
+
+struct aws_mqtt5_outbound_topic_alias_resolver *aws_mqtt5_outbound_topic_alias_resolver_new(
+ struct aws_allocator *allocator,
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_alias_behavior) {
+
+ switch (aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default(outbound_alias_behavior)) {
+ case AWS_MQTT5_COTABT_USER:
+ return s_aws_mqtt5_outbound_topic_alias_resolver_user_new(allocator);
+
+ case AWS_MQTT5_COTABT_LRU:
+ return s_aws_mqtt5_outbound_topic_alias_resolver_lru_new(allocator);
+
+ case AWS_MQTT5_COTABT_DISABLED:
+ return s_aws_mqtt5_outbound_topic_alias_resolver_disabled_new(allocator);
+
+ default:
+ return NULL;
+ }
+}
+
+void aws_mqtt5_outbound_topic_alias_resolver_destroy(struct aws_mqtt5_outbound_topic_alias_resolver *resolver) {
+ if (resolver == NULL) {
+ return;
+ }
+
+ (*resolver->vtable->destroy_fn)(resolver);
+}
+
+int aws_mqtt5_outbound_topic_alias_resolver_reset(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ uint16_t topic_alias_maximum) {
+
+ if (resolver == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ return (*resolver->vtable->reset_fn)(resolver, topic_alias_maximum);
+}
+
+int aws_mqtt5_outbound_topic_alias_resolver_resolve_outbound_publish(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ uint16_t *topic_alias_out,
+ struct aws_byte_cursor *topic_out) {
+ if (resolver == NULL) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return (*resolver->vtable->resolve_outbound_publish_fn)(resolver, publish_view, topic_alias_out, topic_out);
+}
+
+/*
+ * Disabled resolver
+ */
+
+static void s_aws_mqtt5_outbound_topic_alias_resolver_disabled_destroy(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver) {
+ if (resolver == NULL) {
+ return;
+ }
+
+ aws_mem_release(resolver->allocator, resolver);
+}
+
+static int s_aws_mqtt5_outbound_topic_alias_resolver_disabled_reset(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ uint16_t topic_alias_maximum) {
+ (void)resolver;
+ (void)topic_alias_maximum;
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_outbound_topic_alias_resolver_disabled_resolve_outbound_publish_fn(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ uint16_t *topic_alias_out,
+ struct aws_byte_cursor *topic_out) {
+ (void)resolver;
+
+ if (publish_view->topic.len == 0) {
+ return aws_raise_error(AWS_ERROR_MQTT5_PUBLISH_OPTIONS_VALIDATION);
+ }
+
+ *topic_alias_out = 0;
+ *topic_out = publish_view->topic;
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_mqtt5_outbound_topic_alias_resolver_vtable s_aws_mqtt5_outbound_topic_alias_resolver_disabled_vtable =
+ {
+ .destroy_fn = s_aws_mqtt5_outbound_topic_alias_resolver_disabled_destroy,
+ .reset_fn = s_aws_mqtt5_outbound_topic_alias_resolver_disabled_reset,
+ .resolve_outbound_publish_fn = s_aws_mqtt5_outbound_topic_alias_resolver_disabled_resolve_outbound_publish_fn,
+};
+
+static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_disabled_new(
+ struct aws_allocator *allocator) {
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_outbound_topic_alias_resolver));
+
+ resolver->allocator = allocator;
+ resolver->vtable = &s_aws_mqtt5_outbound_topic_alias_resolver_disabled_vtable;
+
+ return resolver;
+}
+
+/*
+ * User resolver
+ *
+ * User resolution implies the user is controlling the topic alias assignments, but we still want to validate their
+ * actions. In particular, we track the currently valid set of aliases (based on previous outbound publishes)
+ * and only use an alias when the submitted publish is an exact match for the current assignment.
+ */
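+
+/*
+ * A minimal sketch of the intended flow (topic and alias values are hypothetical): the first PUBLISH of
+ * "sensors/temp" with *topic_alias == 3 resolves to the full topic plus alias 3 and the binding is recorded; a
+ * later PUBLISH of the same topic with alias 3 resolves to an empty outbound topic, so only the alias goes on the
+ * wire. Reusing alias 3 for a different topic sends the full topic again and overwrites the recorded binding.
+ */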
+
+struct aws_mqtt5_outbound_topic_alias_resolver_user {
+ struct aws_mqtt5_outbound_topic_alias_resolver base;
+
+ struct aws_array_list aliases;
+};
+
+static void s_cleanup_user_aliases(struct aws_mqtt5_outbound_topic_alias_resolver_user *user_resolver) {
+ for (size_t i = 0; i < aws_array_list_length(&user_resolver->aliases); ++i) {
+ struct aws_string *alias = NULL;
+ aws_array_list_get_at(&user_resolver->aliases, &alias, i);
+
+ aws_string_destroy(alias);
+ }
+
+ aws_array_list_clean_up(&user_resolver->aliases);
+ AWS_ZERO_STRUCT(user_resolver->aliases);
+}
+
+static void s_aws_mqtt5_outbound_topic_alias_resolver_user_destroy(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver) {
+ if (resolver == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_outbound_topic_alias_resolver_user *user_resolver = resolver->impl;
+ s_cleanup_user_aliases(user_resolver);
+
+ aws_mem_release(resolver->allocator, user_resolver);
+}
+
+static int s_aws_mqtt5_outbound_topic_alias_resolver_user_reset(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ uint16_t topic_alias_maximum) {
+ struct aws_mqtt5_outbound_topic_alias_resolver_user *user_resolver = resolver->impl;
+ s_cleanup_user_aliases(user_resolver);
+
+ aws_array_list_init_dynamic(
+ &user_resolver->aliases, resolver->allocator, topic_alias_maximum, sizeof(struct aws_string *));
+ for (size_t i = 0; i < topic_alias_maximum; ++i) {
+ struct aws_string *invalid_alias = NULL;
+ aws_array_list_push_back(&user_resolver->aliases, &invalid_alias);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_outbound_topic_alias_resolver_user_resolve_outbound_publish_fn(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ uint16_t *topic_alias_out,
+ struct aws_byte_cursor *topic_out) {
+
+ if (publish_view->topic_alias == NULL) {
+ /* not using a topic alias, nothing to do */
+ *topic_alias_out = 0;
+ *topic_out = publish_view->topic;
+
+ return AWS_OP_SUCCESS;
+ }
+
+ uint16_t user_alias = *publish_view->topic_alias;
+ if (user_alias == 0) {
+ /* should have been caught by publish validation */
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS);
+ }
+
+ struct aws_mqtt5_outbound_topic_alias_resolver_user *user_resolver = resolver->impl;
+ uint16_t user_alias_index = user_alias - 1;
+ if (user_alias_index >= aws_array_list_length(&user_resolver->aliases)) {
+ /* should have been caught by dynamic publish validation */
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_OUTBOUND_TOPIC_ALIAS);
+ }
+
+ struct aws_string *current_assignment = NULL;
+ aws_array_list_get_at(&user_resolver->aliases, &current_assignment, user_alias_index);
+
+ *topic_alias_out = user_alias;
+
+ bool can_use_alias = false;
+ if (current_assignment != NULL) {
+ struct aws_byte_cursor assignment_cursor = aws_byte_cursor_from_string(current_assignment);
+ if (aws_byte_cursor_eq(&assignment_cursor, &publish_view->topic)) {
+ can_use_alias = true;
+ }
+ }
+
+ if (can_use_alias) {
+ AWS_ZERO_STRUCT(*topic_out);
+ } else {
+ *topic_out = publish_view->topic;
+ }
+
+ /* mark this alias as seen */
+ if (!can_use_alias) {
+ aws_string_destroy(current_assignment);
+ current_assignment = aws_string_new_from_cursor(resolver->allocator, &publish_view->topic);
+ aws_array_list_set_at(&user_resolver->aliases, &current_assignment, user_alias_index);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_mqtt5_outbound_topic_alias_resolver_vtable s_aws_mqtt5_outbound_topic_alias_resolver_user_vtable = {
+ .destroy_fn = s_aws_mqtt5_outbound_topic_alias_resolver_user_destroy,
+ .reset_fn = s_aws_mqtt5_outbound_topic_alias_resolver_user_reset,
+ .resolve_outbound_publish_fn = s_aws_mqtt5_outbound_topic_alias_resolver_user_resolve_outbound_publish_fn,
+};
+
+static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_user_new(
+ struct aws_allocator *allocator) {
+ struct aws_mqtt5_outbound_topic_alias_resolver_user *resolver =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_outbound_topic_alias_resolver_user));
+
+ resolver->base.allocator = allocator;
+ resolver->base.vtable = &s_aws_mqtt5_outbound_topic_alias_resolver_user_vtable;
+ resolver->base.impl = resolver;
+
+ aws_array_list_init_dynamic(&resolver->aliases, allocator, 0, sizeof(struct aws_string *));
+
+ return &resolver->base;
+}
+
+/*
+ * LRU resolver
+ *
+ * This resolver uses an LRU cache to automatically create topic alias assignments for the user. With a reasonable
+ * cache size, this should perform well for the majority of MQTT workloads. For workloads where it does not perform
+ * well, the user should control the assignment (or disable aliasing entirely). Even for workloads where the LRU cache fails
+ * to reuse an assignment every single time, the overall cost is 3 extra bytes per publish. As a rough estimate, this
+ * means that LRU topic aliasing is "worth it" if an existing alias can be used at least once every
+ * (AverageTopicLength / 3) publishes.
+ */
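+
+/*
+ * A worked instance of the estimate above (the topic length is a hypothetical value): with an average topic length
+ * of 30 bytes, the break-even point is 30 / 3 = 10, i.e. LRU aliasing pays off when a cached alias is reused at
+ * least once every 10 publishes; below that reuse rate the 3 extra bytes per publish outweigh the topic bytes saved.
+ */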
+
+struct aws_mqtt5_outbound_topic_alias_resolver_lru {
+ struct aws_mqtt5_outbound_topic_alias_resolver base;
+
+ struct aws_cache *lru_cache;
+ size_t max_aliases;
+};
+
+static void s_aws_mqtt5_outbound_topic_alias_resolver_lru_destroy(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver) {
+ if (resolver == NULL) {
+ return;
+ }
+
+ struct aws_mqtt5_outbound_topic_alias_resolver_lru *lru_resolver = resolver->impl;
+
+ if (lru_resolver->lru_cache != NULL) {
+ aws_cache_destroy(lru_resolver->lru_cache);
+ }
+
+ aws_mem_release(resolver->allocator, lru_resolver);
+}
+
+struct aws_topic_alias_assignment {
+ struct aws_byte_cursor topic_cursor;
+ struct aws_byte_buf topic;
+ uint16_t alias;
+ struct aws_allocator *allocator;
+};
+
+static void s_aws_topic_alias_assignment_destroy(struct aws_topic_alias_assignment *alias_assignment) {
+ if (alias_assignment == NULL) {
+ return;
+ }
+
+ aws_byte_buf_clean_up(&alias_assignment->topic);
+
+ aws_mem_release(alias_assignment->allocator, alias_assignment);
+}
+
+static struct aws_topic_alias_assignment *s_aws_topic_alias_assignment_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor topic,
+ uint16_t alias) {
+ struct aws_topic_alias_assignment *assignment =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_topic_alias_assignment));
+
+ assignment->allocator = allocator;
+ assignment->alias = alias;
+
+ if (aws_byte_buf_init_copy_from_cursor(&assignment->topic, allocator, topic)) {
+ goto on_error;
+ }
+
+ assignment->topic_cursor = aws_byte_cursor_from_buf(&assignment->topic);
+
+ return assignment;
+
+on_error:
+
+ s_aws_topic_alias_assignment_destroy(assignment);
+
+ return NULL;
+}
+
+static void s_destroy_assignment_value(void *value) {
+ s_aws_topic_alias_assignment_destroy(value);
+}
+
+static bool s_topic_hash_equality_fn(const void *a, const void *b) {
+ const struct aws_byte_cursor *a_cursor = a;
+ const struct aws_byte_cursor *b_cursor = b;
+
+ return aws_byte_cursor_eq(a_cursor, b_cursor);
+}
+
+static int s_aws_mqtt5_outbound_topic_alias_resolver_lru_reset(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ uint16_t topic_alias_maximum) {
+ struct aws_mqtt5_outbound_topic_alias_resolver_lru *lru_resolver = resolver->impl;
+
+ if (lru_resolver->lru_cache != NULL) {
+ aws_cache_destroy(lru_resolver->lru_cache);
+ lru_resolver->lru_cache = NULL;
+ }
+
+ if (topic_alias_maximum > 0) {
+ lru_resolver->lru_cache = aws_cache_new_lru(
+ lru_resolver->base.allocator,
+ aws_hash_byte_cursor_ptr,
+ s_topic_hash_equality_fn,
+ NULL,
+ s_destroy_assignment_value,
+ topic_alias_maximum);
+ }
+
+ lru_resolver->max_aliases = topic_alias_maximum;
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_mqtt5_outbound_topic_alias_resolver_lru_resolve_outbound_publish_fn(
+ struct aws_mqtt5_outbound_topic_alias_resolver *resolver,
+ const struct aws_mqtt5_packet_publish_view *publish_view,
+ uint16_t *topic_alias_out,
+ struct aws_byte_cursor *topic_out) {
+
+ /* No cache => no aliasing done */
+ struct aws_mqtt5_outbound_topic_alias_resolver_lru *lru_resolver = resolver->impl;
+ if (lru_resolver->lru_cache == NULL || lru_resolver->max_aliases == 0) {
+ *topic_alias_out = 0;
+ *topic_out = publish_view->topic;
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Look for the topic in the cache */
+ struct aws_byte_cursor topic = publish_view->topic;
+ void *existing_element = NULL;
+ if (aws_cache_find(lru_resolver->lru_cache, &topic, &existing_element)) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_topic_alias_assignment *existing_assignment = existing_element;
+ if (existing_assignment != NULL) {
+ /*
+ * Topic exists, so use the assignment. The LRU cache find implementation has already promoted the element
+ * to MRU.
+ */
+ *topic_alias_out = existing_assignment->alias;
+ AWS_ZERO_STRUCT(*topic_out);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ /* Topic doesn't exist in the cache. */
+ uint16_t new_alias_id = 0;
+ size_t assignment_count = aws_cache_get_element_count(lru_resolver->lru_cache);
+ if (assignment_count == lru_resolver->max_aliases) {
+ /*
+ * The cache is full. Get the LRU element to figure out what id we're going to reuse. There's no way to get
+ * the LRU element without promoting it. So we get the element, save the discovered alias id, then remove
+ * the element.
+ */
+ void *lru_element = aws_lru_cache_use_lru_element(lru_resolver->lru_cache);
+
+ struct aws_topic_alias_assignment *replaced_assignment = lru_element;
+ new_alias_id = replaced_assignment->alias;
+ struct aws_byte_cursor replaced_topic = replaced_assignment->topic_cursor;
+
+ /*
+ * This is a little uncomfortable but valid. The cursor we're passing in will get invalidated (and the backing
+ * memory deleted) as part of the removal process but it is only used to find the element to remove. Once
+ * destruction begins it is no longer accessed.
+ */
+ aws_cache_remove(lru_resolver->lru_cache, &replaced_topic);
+ } else {
+ /*
+ * The cache never shrinks and the first N adds are the N valid topic aliases. Since the cache isn't full,
+ * we know the next alias that hasn't been used. This invariant only holds given that we will tear down
+ * the connection (invalidating the cache) on errors from this function (i.e., continuing on from a put
+ * error would break the invariant and create duplicated ids).
+ */
+ new_alias_id = (uint16_t)(assignment_count + 1);
+ }
+
+ /*
+ * We have a topic alias to use. Add our new assignment.
+ */
+ struct aws_topic_alias_assignment *new_assignment =
+ s_aws_topic_alias_assignment_new(resolver->allocator, topic, new_alias_id);
+ if (new_assignment == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ /* the LRU cache put implementation automatically makes the newly added element MRU */
+ if (aws_cache_put(lru_resolver->lru_cache, &new_assignment->topic_cursor, new_assignment)) {
+ s_aws_topic_alias_assignment_destroy(new_assignment);
+ return AWS_OP_ERR;
+ }
+
+ *topic_alias_out = new_assignment->alias;
+ *topic_out = topic; /* this is a new assignment so topic must go out too */
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_mqtt5_outbound_topic_alias_resolver_vtable s_aws_mqtt5_outbound_topic_alias_resolver_lru_vtable = {
+ .destroy_fn = s_aws_mqtt5_outbound_topic_alias_resolver_lru_destroy,
+ .reset_fn = s_aws_mqtt5_outbound_topic_alias_resolver_lru_reset,
+ .resolve_outbound_publish_fn = s_aws_mqtt5_outbound_topic_alias_resolver_lru_resolve_outbound_publish_fn,
+};
+
+static struct aws_mqtt5_outbound_topic_alias_resolver *s_aws_mqtt5_outbound_topic_alias_resolver_lru_new(
+ struct aws_allocator *allocator) {
+ struct aws_mqtt5_outbound_topic_alias_resolver_lru *resolver =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_mqtt5_outbound_topic_alias_resolver_lru));
+
+ resolver->base.allocator = allocator;
+ resolver->base.vtable = &s_aws_mqtt5_outbound_topic_alias_resolver_lru_vtable;
+ resolver->base.impl = resolver;
+
+ return &resolver->base;
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
new file mode 100644
index 0000000000..c1eb16552c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_types.c
@@ -0,0 +1,333 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+
+/* disconnect and shared reason codes */
+static const char *s_normal_disconnection = "Normal Disconnection";
+static const char *s_disconnect_with_will_message = "Disconnect With Will Message";
+static const char *s_unspecified_error = "Unspecified Error";
+static const char *s_malformed_packet = "Malformed Packet";
+static const char *s_protocol_error = "Protocol Error";
+static const char *s_implementation_specific_error = "Implementation Specific Error";
+static const char *s_not_authorized = "Not Authorized";
+static const char *s_server_busy = "Server Busy";
+static const char *s_server_shutting_down = "Server Shutting Down";
+static const char *s_keep_alive_timeout = "Keep Alive Timeout";
+static const char *s_session_taken_over = "Session Taken Over";
+static const char *s_topic_filter_invalid = "Topic Filter Invalid";
+static const char *s_topic_name_invalid = "Topic Name Invalid";
+static const char *s_receive_maximum_exceeded = "Receive Maximum Exceeded";
+static const char *s_topic_alias_invalid = "Topic Alias Invalid";
+static const char *s_packet_too_large = "Packet Too Large";
+static const char *s_message_rate_too_high = "Message Rate Too High";
+static const char *s_quota_exceeded = "Quota Exceeded";
+static const char *s_administrative_action = "Administrative Action";
+static const char *s_payload_format_invalid = "Payload Format Invalid";
+static const char *s_retain_not_supported = "Retain Not Supported";
+static const char *s_qos_not_supported = "QoS Not Supported";
+static const char *s_use_another_server = "Use Another Server";
+static const char *s_server_moved = "Server Moved";
+static const char *s_shared_subscriptions_not_supported = "Shared Subscriptions Not Supported";
+static const char *s_connection_rate_exceeded = "Connection Rate Exceeded";
+static const char *s_maximum_connect_time = "Maximum Connect Time";
+static const char *s_subscription_identifiers_not_supported = "Subscription Identifiers Not Supported";
+static const char *s_wildcard_subscriptions_not_supported = "Wildcard Subscriptions Not Supported";
+static const char *s_success = "Success";
+static const char *s_unsupported_protocol_version = "Unsupported Protocol Version";
+static const char *s_client_identifier_not_valid = "Client Identifier Not Valid";
+static const char *s_bad_username_or_password = "Bad Username Or Password";
+static const char *s_server_unavailable = "Server Unavailable";
+static const char *s_banned = "Banned";
+static const char *s_bad_authentication_method = "Bad Authentication Method";
+static const char *s_unknown_reason = "Unknown Reason";
+static const char *s_no_subscription_existed = "No Subscription Existed";
+static const char *s_packet_identifier_in_use = "Packet Identifier In Use";
+static const char *s_granted_qos_0 = "Granted QoS 0";
+static const char *s_granted_qos_1 = "Granted QoS 1";
+static const char *s_granted_qos_2 = "Granted QoS 2";
+static const char *s_no_matching_subscribers = "No Matching Subscribers";
+
+const char *aws_mqtt5_connect_reason_code_to_c_string(enum aws_mqtt5_connect_reason_code reason_code) {
+ switch (reason_code) {
+ case AWS_MQTT5_CRC_SUCCESS:
+ return s_success;
+ case AWS_MQTT5_CRC_UNSPECIFIED_ERROR:
+ return s_unspecified_error;
+ case AWS_MQTT5_CRC_MALFORMED_PACKET:
+ return s_malformed_packet;
+ case AWS_MQTT5_CRC_PROTOCOL_ERROR:
+ return s_protocol_error;
+ case AWS_MQTT5_CRC_IMPLEMENTATION_SPECIFIC_ERROR:
+ return s_implementation_specific_error;
+ case AWS_MQTT5_CRC_UNSUPPORTED_PROTOCOL_VERSION:
+ return s_unsupported_protocol_version;
+ case AWS_MQTT5_CRC_CLIENT_IDENTIFIER_NOT_VALID:
+ return s_client_identifier_not_valid;
+ case AWS_MQTT5_CRC_BAD_USERNAME_OR_PASSWORD:
+ return s_bad_username_or_password;
+ case AWS_MQTT5_CRC_NOT_AUTHORIZED:
+ return s_not_authorized;
+ case AWS_MQTT5_CRC_SERVER_UNAVAILABLE:
+ return s_server_unavailable;
+ case AWS_MQTT5_CRC_SERVER_BUSY:
+ return s_server_busy;
+ case AWS_MQTT5_CRC_BANNED:
+ return s_banned;
+ case AWS_MQTT5_CRC_BAD_AUTHENTICATION_METHOD:
+ return s_bad_authentication_method;
+ case AWS_MQTT5_CRC_TOPIC_NAME_INVALID:
+ return s_topic_name_invalid;
+ case AWS_MQTT5_CRC_PACKET_TOO_LARGE:
+ return s_packet_too_large;
+ case AWS_MQTT5_CRC_QUOTA_EXCEEDED:
+ return s_quota_exceeded;
+ case AWS_MQTT5_CRC_PAYLOAD_FORMAT_INVALID:
+ return s_payload_format_invalid;
+ case AWS_MQTT5_CRC_RETAIN_NOT_SUPPORTED:
+ return s_retain_not_supported;
+ case AWS_MQTT5_CRC_QOS_NOT_SUPPORTED:
+ return s_qos_not_supported;
+ case AWS_MQTT5_CRC_USE_ANOTHER_SERVER:
+ return s_use_another_server;
+ case AWS_MQTT5_CRC_SERVER_MOVED:
+ return s_server_moved;
+ case AWS_MQTT5_CRC_CONNECTION_RATE_EXCEEDED:
+ return s_connection_rate_exceeded;
+ }
+
+ return s_unknown_reason;
+}
+
+const char *aws_mqtt5_disconnect_reason_code_to_c_string(
+ enum aws_mqtt5_disconnect_reason_code reason_code,
+ bool *is_valid) {
+ if (is_valid != NULL) {
+ *is_valid = true;
+ }
+
+ switch (reason_code) {
+ case AWS_MQTT5_DRC_NORMAL_DISCONNECTION:
+ return s_normal_disconnection;
+ case AWS_MQTT5_DRC_DISCONNECT_WITH_WILL_MESSAGE:
+ return s_disconnect_with_will_message;
+ case AWS_MQTT5_DRC_UNSPECIFIED_ERROR:
+ return s_unspecified_error;
+ case AWS_MQTT5_DRC_MALFORMED_PACKET:
+ return s_malformed_packet;
+ case AWS_MQTT5_DRC_PROTOCOL_ERROR:
+ return s_protocol_error;
+ case AWS_MQTT5_DRC_IMPLEMENTATION_SPECIFIC_ERROR:
+ return s_implementation_specific_error;
+ case AWS_MQTT5_DRC_NOT_AUTHORIZED:
+ return s_not_authorized;
+ case AWS_MQTT5_DRC_SERVER_BUSY:
+ return s_server_busy;
+ case AWS_MQTT5_DRC_SERVER_SHUTTING_DOWN:
+ return s_server_shutting_down;
+ case AWS_MQTT5_DRC_KEEP_ALIVE_TIMEOUT:
+ return s_keep_alive_timeout;
+ case AWS_MQTT5_DRC_SESSION_TAKEN_OVER:
+ return s_session_taken_over;
+ case AWS_MQTT5_DRC_TOPIC_FILTER_INVALID:
+ return s_topic_filter_invalid;
+ case AWS_MQTT5_DRC_TOPIC_NAME_INVALID:
+ return s_topic_name_invalid;
+ case AWS_MQTT5_DRC_RECEIVE_MAXIMUM_EXCEEDED:
+ return s_receive_maximum_exceeded;
+ case AWS_MQTT5_DRC_TOPIC_ALIAS_INVALID:
+ return s_topic_alias_invalid;
+ case AWS_MQTT5_DRC_PACKET_TOO_LARGE:
+ return s_packet_too_large;
+ case AWS_MQTT5_DRC_MESSAGE_RATE_TOO_HIGH:
+ return s_message_rate_too_high;
+ case AWS_MQTT5_DRC_QUOTA_EXCEEDED:
+ return s_quota_exceeded;
+ case AWS_MQTT5_DRC_ADMINISTRATIVE_ACTION:
+ return s_administrative_action;
+ case AWS_MQTT5_DRC_PAYLOAD_FORMAT_INVALID:
+ return s_payload_format_invalid;
+ case AWS_MQTT5_DRC_RETAIN_NOT_SUPPORTED:
+ return s_retain_not_supported;
+ case AWS_MQTT5_DRC_QOS_NOT_SUPPORTED:
+ return s_qos_not_supported;
+ case AWS_MQTT5_DRC_USE_ANOTHER_SERVER:
+ return s_use_another_server;
+ case AWS_MQTT5_DRC_SERVER_MOVED:
+ return s_server_moved;
+ case AWS_MQTT5_DRC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED:
+ return s_shared_subscriptions_not_supported;
+ case AWS_MQTT5_DRC_CONNECTION_RATE_EXCEEDED:
+ return s_connection_rate_exceeded;
+ case AWS_MQTT5_DRC_MAXIMUM_CONNECT_TIME:
+ return s_maximum_connect_time;
+ case AWS_MQTT5_DRC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED:
+ return s_subscription_identifiers_not_supported;
+ case AWS_MQTT5_DRC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED:
+ return s_wildcard_subscriptions_not_supported;
+ }
+
+ if (is_valid != NULL) {
+ *is_valid = false;
+ }
+
+ return s_unknown_reason;
+}
+
+const char *aws_mqtt5_puback_reason_code_to_c_string(enum aws_mqtt5_puback_reason_code reason_code) {
+ switch (reason_code) {
+ case AWS_MQTT5_PARC_SUCCESS:
+ return s_success;
+ case AWS_MQTT5_PARC_NO_MATCHING_SUBSCRIBERS:
+ return s_no_matching_subscribers;
+ case AWS_MQTT5_PARC_UNSPECIFIED_ERROR:
+ return s_unspecified_error;
+ case AWS_MQTT5_PARC_IMPLEMENTATION_SPECIFIC_ERROR:
+ return s_implementation_specific_error;
+ case AWS_MQTT5_PARC_NOT_AUTHORIZED:
+ return s_not_authorized;
+ case AWS_MQTT5_PARC_TOPIC_NAME_INVALID:
+ return s_topic_name_invalid;
+ case AWS_MQTT5_PARC_PACKET_IDENTIFIER_IN_USE:
+ return s_packet_identifier_in_use;
+ case AWS_MQTT5_PARC_QUOTA_EXCEEDED:
+ return s_quota_exceeded;
+ case AWS_MQTT5_PARC_PAYLOAD_FORMAT_INVALID:
+ return s_payload_format_invalid;
+ }
+
+ return s_unknown_reason;
+}
+
+const char *aws_mqtt5_suback_reason_code_to_c_string(enum aws_mqtt5_suback_reason_code reason_code) {
+ switch (reason_code) {
+ case AWS_MQTT5_SARC_GRANTED_QOS_0:
+ return s_granted_qos_0;
+ case AWS_MQTT5_SARC_GRANTED_QOS_1:
+ return s_granted_qos_1;
+ case AWS_MQTT5_SARC_GRANTED_QOS_2:
+ return s_granted_qos_2;
+ case AWS_MQTT5_SARC_UNSPECIFIED_ERROR:
+ return s_unspecified_error;
+ case AWS_MQTT5_SARC_IMPLEMENTATION_SPECIFIC_ERROR:
+ return s_implementation_specific_error;
+ case AWS_MQTT5_SARC_NOT_AUTHORIZED:
+ return s_not_authorized;
+ case AWS_MQTT5_SARC_TOPIC_FILTER_INVALID:
+ return s_topic_filter_invalid;
+ case AWS_MQTT5_SARC_PACKET_IDENTIFIER_IN_USE:
+ return s_packet_identifier_in_use;
+ case AWS_MQTT5_SARC_QUOTA_EXCEEDED:
+ return s_quota_exceeded;
+ case AWS_MQTT5_SARC_SHARED_SUBSCRIPTIONS_NOT_SUPPORTED:
+ return s_shared_subscriptions_not_supported;
+ case AWS_MQTT5_SARC_SUBSCRIPTION_IDENTIFIERS_NOT_SUPPORTED:
+ return s_subscription_identifiers_not_supported;
+ case AWS_MQTT5_SARC_WILDCARD_SUBSCRIPTIONS_NOT_SUPPORTED:
+ return s_wildcard_subscriptions_not_supported;
+ }
+
+ return s_unknown_reason;
+}
+
+const char *aws_mqtt5_unsuback_reason_code_to_c_string(enum aws_mqtt5_unsuback_reason_code reason_code) {
+ switch (reason_code) {
+ case AWS_MQTT5_UARC_SUCCESS:
+ return s_success;
+ case AWS_MQTT5_UARC_NO_SUBSCRIPTION_EXISTED:
+ return s_no_subscription_existed;
+ case AWS_MQTT5_UARC_UNSPECIFIED_ERROR:
+ return s_unspecified_error;
+ case AWS_MQTT5_UARC_IMPLEMENTATION_SPECIFIC_ERROR:
+ return s_implementation_specific_error;
+ case AWS_MQTT5_UARC_NOT_AUTHORIZED:
+ return s_not_authorized;
+ case AWS_MQTT5_UARC_TOPIC_FILTER_INVALID:
+ return s_topic_filter_invalid;
+ case AWS_MQTT5_UARC_PACKET_IDENTIFIER_IN_USE:
+ return s_packet_identifier_in_use;
+ }
+
+ return s_unknown_reason;
+}
+
+const char *aws_mqtt5_payload_format_indicator_to_c_string(enum aws_mqtt5_payload_format_indicator format_indicator) {
+ switch (format_indicator) {
+ case AWS_MQTT5_PFI_BYTES:
+ return "Bytes";
+ case AWS_MQTT5_PFI_UTF8:
+ return "Utf-8";
+ }
+
+ return "Unknown Payload Format";
+}
+
+const char *aws_mqtt5_retain_handling_type_to_c_string(enum aws_mqtt5_retain_handling_type retain_handling_type) {
+ switch (retain_handling_type) {
+ case AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE:
+ return "Send retained on any subscribe";
+ case AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE_IF_NEW:
+ return "Send retained on subscribe if not already subscribed";
+ case AWS_MQTT5_RHT_DONT_SEND:
+ return "Dont send retained at all";
+ }
+
+ return "Unknown Retain Handling Type";
+}
+
+const char *aws_mqtt5_packet_type_to_c_string(enum aws_mqtt5_packet_type packet_type) {
+ switch (packet_type) {
+ case AWS_MQTT5_PT_RESERVED:
+ return "RESERVED(INVALID)";
+
+ case AWS_MQTT5_PT_CONNECT:
+ return "CONNECT";
+
+ case AWS_MQTT5_PT_CONNACK:
+ return "CONNACK";
+
+ case AWS_MQTT5_PT_PUBLISH:
+ return "PUBLISH";
+
+ case AWS_MQTT5_PT_PUBACK:
+ return "PUBACK";
+
+ case AWS_MQTT5_PT_PUBREC:
+ return "PUBREC";
+
+ case AWS_MQTT5_PT_PUBREL:
+ return "PUBREL";
+
+ case AWS_MQTT5_PT_PUBCOMP:
+ return "PUBCOMP";
+
+ case AWS_MQTT5_PT_SUBSCRIBE:
+ return "SUBSCRIBE";
+
+ case AWS_MQTT5_PT_SUBACK:
+ return "SUBACK";
+
+ case AWS_MQTT5_PT_UNSUBSCRIBE:
+ return "UNSUBSCRIBE";
+
+ case AWS_MQTT5_PT_UNSUBACK:
+ return "UNSUBACK";
+
+ case AWS_MQTT5_PT_PINGREQ:
+ return "PINGREQ";
+
+ case AWS_MQTT5_PT_PINGRESP:
+ return "PINGRESP";
+
+ case AWS_MQTT5_PT_DISCONNECT:
+ return "DISCONNECT";
+
+ case AWS_MQTT5_PT_AUTH:
+ return "AUTH";
+
+ default:
+ return "UNKNOWN";
+ }
+}
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
new file mode 100644
index 0000000000..88de757c65
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/mqtt5_utils.c
@@ -0,0 +1,574 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/mqtt5_utils.h>
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/device_random.h>
+#include <aws/common/encoding.h>
+#include <inttypes.h>
+
+uint8_t aws_mqtt5_compute_fixed_header_byte1(enum aws_mqtt5_packet_type packet_type, uint8_t flags) {
+ return flags | ((uint8_t)packet_type << 4);
+}
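+
+/*
+ * For example (packet type and flag values are from the MQTT5 specification, not defined in this file): a PUBLISH
+ * packet (type 3) with flags 0x02 (QoS 1, no DUP, no RETAIN) produces 0x02 | (3 << 4) = 0x32 as the first byte of
+ * the fixed header.
+ */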
+
+/* encodes a utf8-string (2 byte length + "MQTT") + the version value (5) */
+static uint8_t s_connect_variable_length_header_prefix[7] = {0x00, 0x04, 0x4D, 0x51, 0x54, 0x54, 0x05};
+
+struct aws_byte_cursor g_aws_mqtt5_connect_protocol_cursor = {
+ .ptr = &s_connect_variable_length_header_prefix[0],
+ .len = AWS_ARRAY_SIZE(s_connect_variable_length_header_prefix),
+};
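+
+/*
+ * Byte for byte: 0x00 0x04 is the two-byte length prefix (4), 0x4D 0x51 0x54 0x54 is "MQTT" in ASCII, and 0x05 is
+ * the MQTT protocol version.
+ */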
+
+void aws_mqtt5_negotiated_settings_log(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ enum aws_log_level level) {
+
+ struct aws_logger *temp_logger = aws_logger_get();
+ if (temp_logger == NULL || temp_logger->vtable->get_log_level(temp_logger, AWS_LS_MQTT5_GENERAL) < level) {
+ return;
+ }
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings maxiumum qos set to %d",
+ (void *)negotiated_settings,
+ negotiated_settings->maximum_qos);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings session expiry interval set to %" PRIu32,
+ (void *)negotiated_settings,
+ negotiated_settings->session_expiry_interval);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings receive maximum from server set to %" PRIu16,
+ (void *)negotiated_settings,
+ negotiated_settings->receive_maximum_from_server);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings maximum packet size to server set to %" PRIu32,
+ (void *)negotiated_settings,
+ negotiated_settings->maximum_packet_size_to_server);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings topic alias maximum to server set to %" PRIu16,
+ (void *)negotiated_settings,
+ negotiated_settings->topic_alias_maximum_to_server);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings topic alias maximum to client set to %" PRIu16,
+ (void *)negotiated_settings,
+ negotiated_settings->topic_alias_maximum_to_client);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings server keep alive set to %" PRIu16,
+ (void *)negotiated_settings,
+ negotiated_settings->server_keep_alive);
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings retain available set to %s",
+ (void *)negotiated_settings,
+ negotiated_settings->retain_available ? "true" : "false");
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings wildcard subscriptions available set to %s",
+ (void *)negotiated_settings,
+ negotiated_settings->wildcard_subscriptions_available ? "true" : "false");
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings subscription identifiers available set to %s",
+ (void *)negotiated_settings,
+ negotiated_settings->subscription_identifiers_available ? "true" : "false");
+
+ AWS_LOGF(
+ level,
+ AWS_LS_MQTT5_GENERAL,
+ "id=%p: aws_mqtt5_negotiated_settings shared subscriptions available set to %s",
+ (void *)negotiated_settings,
+ negotiated_settings->shared_subscriptions_available ? "true" : "false");
+}
+
+int aws_mqtt5_negotiated_settings_init(
+ struct aws_allocator *allocator,
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_byte_cursor *client_id) {
+ if (aws_byte_buf_init(&negotiated_settings->client_id_storage, allocator, client_id->len)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_dynamic(&negotiated_settings->client_id_storage, client_id)) {
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_negotiated_settings_copy(
+ const struct aws_mqtt5_negotiated_settings *source,
+ struct aws_mqtt5_negotiated_settings *dest) {
+ aws_mqtt5_negotiated_settings_clean_up(dest);
+
+ *dest = *source;
+ AWS_ZERO_STRUCT(dest->client_id_storage);
+
+ if (source->client_id_storage.allocator != NULL) {
+ return aws_byte_buf_init_copy_from_cursor(
+ &dest->client_id_storage,
+ source->client_id_storage.allocator,
+ aws_byte_cursor_from_buf(&source->client_id_storage));
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_mqtt5_negotiated_settings_apply_client_id(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_byte_cursor *client_id) {
+
+ if (negotiated_settings->client_id_storage.len == 0) {
+ if (aws_byte_buf_append_dynamic(&negotiated_settings->client_id_storage, client_id)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_mqtt5_negotiated_settings_clean_up(struct aws_mqtt5_negotiated_settings *negotiated_settings) {
+ aws_byte_buf_clean_up(&negotiated_settings->client_id_storage);
+}
+
+/** Assign default values to negotiated_settings */
+void aws_mqtt5_negotiated_settings_reset(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_mqtt5_packet_connect_view *packet_connect_view) {
+ AWS_PRECONDITION(negotiated_settings != NULL);
+ AWS_PRECONDITION(packet_connect_view != NULL);
+
+ /* Properties that may be sent in CONNECT to Server. These should only be sent if Client
+ changes them from their default values.
+ */
+ negotiated_settings->server_keep_alive = packet_connect_view->keep_alive_interval_seconds;
+ negotiated_settings->session_expiry_interval = 0;
+ negotiated_settings->receive_maximum_from_server = AWS_MQTT5_RECEIVE_MAXIMUM;
+ negotiated_settings->maximum_packet_size_to_server = AWS_MQTT5_MAXIMUM_PACKET_SIZE;
+ negotiated_settings->topic_alias_maximum_to_client = 0;
+
+ // Default for Client is QoS 1. Server default is 2.
+ // This should only be changed if server returns a 0 in the CONNACK
+ negotiated_settings->maximum_qos = AWS_MQTT5_QOS_AT_LEAST_ONCE;
+ negotiated_settings->topic_alias_maximum_to_server = 0;
+
+ // Default is true for following settings but can be changed by Server on CONNACK
+ negotiated_settings->retain_available = true;
+ negotiated_settings->wildcard_subscriptions_available = true;
+ negotiated_settings->subscription_identifiers_available = true;
+ negotiated_settings->shared_subscriptions_available = true;
+
+ negotiated_settings->rejoined_session = false;
+
+ /**
+ * Apply user set properties to negotiated_settings
+ * NULL pointers indicate user has not set a property and it should remain the default value.
+ */
+
+ if (packet_connect_view->session_expiry_interval_seconds != NULL) {
+ negotiated_settings->session_expiry_interval = *packet_connect_view->session_expiry_interval_seconds;
+ }
+
+ if (packet_connect_view->topic_alias_maximum != NULL) {
+ negotiated_settings->topic_alias_maximum_to_client = *packet_connect_view->topic_alias_maximum;
+ }
+}
+
+void aws_mqtt5_negotiated_settings_apply_connack(
+ struct aws_mqtt5_negotiated_settings *negotiated_settings,
+ const struct aws_mqtt5_packet_connack_view *connack_data) {
+ AWS_PRECONDITION(negotiated_settings != NULL);
+ AWS_PRECONDITION(connack_data != NULL);
+
+ /**
+ * Reconcile CONNACK set properties with current negotiated_settings values
+ * NULL pointers indicate Server has not set a property
+ */
+
+ if (connack_data->session_expiry_interval != NULL) {
+ negotiated_settings->session_expiry_interval = *connack_data->session_expiry_interval;
+ }
+
+ if (connack_data->receive_maximum != NULL) {
+ negotiated_settings->receive_maximum_from_server = *connack_data->receive_maximum;
+ }
+
+ // NULL = Maximum QoS of 2.
+ if (connack_data->maximum_qos != NULL) {
+ if (*connack_data->maximum_qos < negotiated_settings->maximum_qos) {
+ negotiated_settings->maximum_qos = *connack_data->maximum_qos;
+ }
+ }
+
+ if (connack_data->retain_available != NULL) {
+ negotiated_settings->retain_available = *connack_data->retain_available;
+ }
+
+ if (connack_data->maximum_packet_size != NULL) {
+ negotiated_settings->maximum_packet_size_to_server = *connack_data->maximum_packet_size;
+ }
+
+ // If a value is not sent by Server, the Client must not send any Topic Aliases to the Server.
+ if (connack_data->topic_alias_maximum != NULL) {
+ negotiated_settings->topic_alias_maximum_to_server = *connack_data->topic_alias_maximum;
+ }
+
+ if (connack_data->wildcard_subscriptions_available != NULL) {
+ negotiated_settings->wildcard_subscriptions_available = *connack_data->wildcard_subscriptions_available;
+ }
+
+ if (connack_data->subscription_identifiers_available != NULL) {
+ negotiated_settings->subscription_identifiers_available = *connack_data->subscription_identifiers_available;
+ }
+
+ if (connack_data->shared_subscriptions_available != NULL) {
+ negotiated_settings->shared_subscriptions_available = *connack_data->shared_subscriptions_available;
+ }
+
+ if (connack_data->server_keep_alive != NULL) {
+ negotiated_settings->server_keep_alive = *connack_data->server_keep_alive;
+ }
+
+ if (connack_data->assigned_client_identifier != NULL) {
+ aws_mqtt5_negotiated_settings_apply_client_id(negotiated_settings, connack_data->assigned_client_identifier);
+ }
+
+ negotiated_settings->rejoined_session = connack_data->session_present;
+}
+
+const char *aws_mqtt5_client_session_behavior_type_to_c_string(
+ enum aws_mqtt5_client_session_behavior_type session_behavior) {
+ switch (aws_mqtt5_client_session_behavior_type_to_non_default(session_behavior)) {
+ case AWS_MQTT5_CSBT_CLEAN:
+ return "Clean session always";
+ case AWS_MQTT5_CSBT_REJOIN_POST_SUCCESS:
+ return "Attempt to resume a session after initial connection success";
+ case AWS_MQTT5_CSBT_REJOIN_ALWAYS:
+ return "Always attempt to resume a session";
+ default:
+ return "Unknown session behavior";
+ }
+}
+
+enum aws_mqtt5_client_session_behavior_type aws_mqtt5_client_session_behavior_type_to_non_default(
+ enum aws_mqtt5_client_session_behavior_type session_behavior) {
+ if (session_behavior == AWS_MQTT5_CSBT_DEFAULT) {
+ return AWS_MQTT5_CSBT_CLEAN;
+ }
+
+ return session_behavior;
+}
+
+const char *aws_mqtt5_outbound_topic_alias_behavior_type_to_c_string(
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior) {
+ switch (aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default(outbound_aliasing_behavior)) {
+ case AWS_MQTT5_COTABT_USER:
+ return "User-controlled outbound topic aliasing behavior";
+ case AWS_MQTT5_COTABT_LRU:
+ return "LRU caching outbound topic aliasing behavior";
+ case AWS_MQTT5_COTABT_DISABLED:
+ return "Outbound topic aliasing disabled";
+
+ default:
+ return "Unknown outbound topic aliasing behavior";
+ }
+}
+
+enum aws_mqtt5_client_outbound_topic_alias_behavior_type aws_mqtt5_outbound_topic_alias_behavior_type_to_non_default(
+ enum aws_mqtt5_client_outbound_topic_alias_behavior_type outbound_aliasing_behavior) {
+ if (outbound_aliasing_behavior == AWS_MQTT5_COTABT_DEFAULT) {
+ return AWS_MQTT5_COTABT_DISABLED;
+ }
+
+ return outbound_aliasing_behavior;
+}
+
+const char *aws_mqtt5_inbound_topic_alias_behavior_type_to_c_string(
+ enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior) {
+ switch (aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default(inbound_aliasing_behavior)) {
+ case AWS_MQTT5_CITABT_ENABLED:
+ return "Inbound topic aliasing behavior enabled";
+ case AWS_MQTT5_CITABT_DISABLED:
+ return "Inbound topic aliasing behavior disabled";
+ default:
+ return "Unknown inbound topic aliasing behavior";
+ }
+}
+
+enum aws_mqtt5_client_inbound_topic_alias_behavior_type aws_mqtt5_inbound_topic_alias_behavior_type_to_non_default(
+ enum aws_mqtt5_client_inbound_topic_alias_behavior_type inbound_aliasing_behavior) {
+ if (inbound_aliasing_behavior == AWS_MQTT5_CITABT_DEFAULT) {
+ return AWS_MQTT5_CITABT_DISABLED;
+ }
+
+ return inbound_aliasing_behavior;
+}
+
+const char *aws_mqtt5_extended_validation_and_flow_control_options_to_c_string(
+ enum aws_mqtt5_extended_validation_and_flow_control_options extended_validation_behavior) {
+ switch (extended_validation_behavior) {
+ case AWS_MQTT5_EVAFCO_NONE:
+ return "No additional flow control or packet validation";
+ case AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS:
+ return "AWS IoT Core flow control and packet validation";
+ default:
+ return "Unknown extended validation behavior";
+ }
+}
+
+const char *aws_mqtt5_client_operation_queue_behavior_type_to_c_string(
+ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior) {
+ switch (aws_mqtt5_client_operation_queue_behavior_type_to_non_default(offline_queue_behavior)) {
+ case AWS_MQTT5_COQBT_FAIL_NON_QOS1_PUBLISH_ON_DISCONNECT:
+ return "Fail all incomplete operations except QoS 1 publishes";
+ case AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT:
+ return "Fail incomplete QoS 0 publishes";
+ case AWS_MQTT5_COQBT_FAIL_ALL_ON_DISCONNECT:
+ return "Fail all incomplete operations";
+ default:
+ return "Unknown operation queue behavior type";
+ }
+}
+
+enum aws_mqtt5_client_operation_queue_behavior_type aws_mqtt5_client_operation_queue_behavior_type_to_non_default(
+ enum aws_mqtt5_client_operation_queue_behavior_type offline_queue_behavior) {
+ if (offline_queue_behavior == AWS_MQTT5_COQBT_DEFAULT) {
+ return AWS_MQTT5_COQBT_FAIL_QOS0_PUBLISH_ON_DISCONNECT;
+ }
+
+ return offline_queue_behavior;
+}
+
+const char *aws_mqtt5_client_lifecycle_event_type_to_c_string(
+ enum aws_mqtt5_client_lifecycle_event_type lifecycle_event) {
+ switch (lifecycle_event) {
+ case AWS_MQTT5_CLET_ATTEMPTING_CONNECT:
+ return "Connection establishment attempt";
+ case AWS_MQTT5_CLET_CONNECTION_SUCCESS:
+ return "Connection establishment success";
+ case AWS_MQTT5_CLET_CONNECTION_FAILURE:
+ return "Connection establishment failure";
+ case AWS_MQTT5_CLET_DISCONNECTION:
+ return "Disconnection";
+ case AWS_MQTT5_CLET_STOPPED:
+ return "Client stopped";
+ }
+
+ return "Unknown lifecycle event";
+}
+
+uint64_t aws_mqtt5_client_random_in_range(uint64_t from, uint64_t to) {
+ uint64_t max = aws_max_u64(from, to);
+ uint64_t min = aws_min_u64(from, to);
+
+ /* Note: this contains several changes to the corresponding function in aws-c-io. Don't throw them away.
+ *
+ * 1. random range is now inclusive/closed: [from, to] rather than half-open [from, to)
+ * 2. as a corollary, diff == 0 => return min, not 0
+ */
+ uint64_t diff = max - min;
+ if (!diff) {
+ return min;
+ }
+
+ uint64_t random_value = 0;
+ if (aws_device_random_u64(&random_value)) {
+ return min;
+ }
+
+ if (diff == UINT64_MAX) {
+ return random_value;
+ }
+
+ return min + random_value % (diff + 1); /* + 1 is safe due to previous check */
+}
+
+static uint8_t s_aws_iot_core_rules_prefix[] = "$aws/rules/";
+
+struct aws_byte_cursor aws_mqtt5_topic_skip_aws_iot_rules_prefix(struct aws_byte_cursor topic_cursor) {
+ size_t prefix_length = AWS_ARRAY_SIZE(s_aws_iot_core_rules_prefix) - 1; /* skip 0-terminator */
+
+ struct aws_byte_cursor rules_prefix = {
+ .ptr = s_aws_iot_core_rules_prefix,
+ .len = prefix_length,
+ };
+
+ if (topic_cursor.len < rules_prefix.len) {
+ return topic_cursor;
+ }
+
+ struct aws_byte_cursor topic_cursor_copy = topic_cursor;
+ struct aws_byte_cursor topic_prefix = topic_cursor;
+ topic_prefix.len = rules_prefix.len;
+
+ if (!aws_byte_cursor_eq_ignore_case(&rules_prefix, &topic_prefix)) {
+ return topic_cursor;
+ }
+
+ aws_byte_cursor_advance(&topic_cursor_copy, prefix_length);
+ if (topic_cursor_copy.len == 0) {
+ return topic_cursor;
+ }
+
+ struct aws_byte_cursor rule_name_segment_cursor;
+ AWS_ZERO_STRUCT(rule_name_segment_cursor);
+
+ if (!aws_byte_cursor_next_split(&topic_cursor_copy, '/', &rule_name_segment_cursor)) {
+ return topic_cursor;
+ }
+
+ if (topic_cursor_copy.len < rule_name_segment_cursor.len + 1) {
+ return topic_cursor;
+ }
+
+ aws_byte_cursor_advance(&topic_cursor_copy, rule_name_segment_cursor.len + 1);
+
+ return topic_cursor_copy;
+}
+
+size_t aws_mqtt5_topic_get_segment_count(struct aws_byte_cursor topic_cursor) {
+ size_t segment_count = 0;
+
+ struct aws_byte_cursor segment_cursor;
+ AWS_ZERO_STRUCT(segment_cursor);
+
+ while (aws_byte_cursor_next_split(&topic_cursor, '/', &segment_cursor)) {
+ ++segment_count;
+ }
+
+ return segment_count;
+}
+
+bool aws_mqtt_is_valid_topic_filter_for_iot_core(struct aws_byte_cursor topic_filter_cursor) {
+ struct aws_byte_cursor post_rule_suffix = aws_mqtt5_topic_skip_aws_iot_rules_prefix(topic_filter_cursor);
+ return aws_mqtt5_topic_get_segment_count(post_rule_suffix) <= AWS_IOT_CORE_MAXIMUM_TOPIC_SEGMENTS;
+}
+
+bool aws_mqtt_is_valid_topic_for_iot_core(struct aws_byte_cursor topic_cursor) {
+ struct aws_byte_cursor post_rule_suffix = aws_mqtt5_topic_skip_aws_iot_rules_prefix(topic_cursor);
+ if (aws_mqtt5_topic_get_segment_count(post_rule_suffix) > AWS_IOT_CORE_MAXIMUM_TOPIC_SEGMENTS) {
+ return false;
+ }
+
+ return post_rule_suffix.len <= AWS_IOT_CORE_MAXIMUM_TOPIC_LENGTH;
+}
+
+static uint8_t s_shared_subscription_prefix[] = "$share";
+
+static bool s_is_not_hash_or_plus(uint8_t byte) {
+ return byte != '+' && byte != '#';
+}
+
+/* $share/{ShareName}/{filter} */
+bool aws_mqtt_is_topic_filter_shared_subscription(struct aws_byte_cursor topic_cursor) {
+
+ /* shared subscription filters must have an initial segment of "$share" */
+ struct aws_byte_cursor first_segment_cursor;
+ AWS_ZERO_STRUCT(first_segment_cursor);
+ if (!aws_byte_cursor_next_split(&topic_cursor, '/', &first_segment_cursor)) {
+ return false;
+ }
+
+ struct aws_byte_cursor share_prefix_cursor = {
+ .ptr = s_shared_subscription_prefix,
+ .len = AWS_ARRAY_SIZE(s_shared_subscription_prefix) - 1, /* skip null terminator */
+ };
+
+ if (!aws_byte_cursor_eq_ignore_case(&share_prefix_cursor, &first_segment_cursor)) {
+ return false;
+ }
+
+ /*
+ * The next segment must be non-empty and cannot include '#', '/', or '+'. In this case we know it already
+ * does not include '/'
+ */
+ struct aws_byte_cursor second_segment_cursor = first_segment_cursor;
+ if (!aws_byte_cursor_next_split(&topic_cursor, '/', &second_segment_cursor)) {
+ return false;
+ }
+
+ if (second_segment_cursor.len == 0 ||
+ !aws_byte_cursor_satisfies_pred(&second_segment_cursor, s_is_not_hash_or_plus)) {
+ return false;
+ }
+
+ /*
+ * Everything afterwards must form a normal, valid topic filter.
+ */
+ struct aws_byte_cursor remaining_cursor = topic_cursor;
+ size_t remaining_length =
+ topic_cursor.ptr + topic_cursor.len - (second_segment_cursor.len + second_segment_cursor.ptr);
+ if (remaining_length == 0) {
+ return false;
+ }
+
+ aws_byte_cursor_advance(&remaining_cursor, topic_cursor.len - remaining_length + 1);
+
+ if (!aws_mqtt_is_valid_topic_filter(&remaining_cursor)) {
+ return false;
+ }
+
+ return true;
+}
+
+/* UTF-8 encoded string validation with respect to [MQTT-1.5.3-2]. */
+static int aws_mqtt5_utf8_decoder(uint32_t codepoint, void *user_data) {
+ (void)user_data;
+ /* U+0000 - A UTF-8 Encoded String MUST NOT include an encoding of the null character U+0000. [MQTT-1.5.4-2]
+ * U+0001..U+001F control characters are not valid
+ */
+ if (AWS_UNLIKELY(codepoint <= 0x001F)) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING);
+ }
+
+ /* U+007F..U+009F control characters are not valid */
+ if (AWS_UNLIKELY((codepoint >= 0x007F) && (codepoint <= 0x009F))) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING);
+ }
+
+ /* Unicode non-characters are not valid: https://www.unicode.org/faq/private_use.html#nonchar1 */
+ if (AWS_UNLIKELY((codepoint & 0x00FFFF) >= 0x00FFFE)) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING);
+ }
+ if (AWS_UNLIKELY(codepoint >= 0xFDD0 && codepoint <= 0xFDEF)) {
+ return aws_raise_error(AWS_ERROR_MQTT5_INVALID_UTF8_STRING);
+ }
+
+ return AWS_ERROR_SUCCESS;
+}
+
+struct aws_utf8_decoder_options g_aws_mqtt5_utf8_decoder_options = {
+ .on_codepoint = aws_mqtt5_utf8_decoder,
+};
+
+int aws_mqtt5_validate_utf8_text(struct aws_byte_cursor text) {
+ return aws_decode_utf8(text, &g_aws_mqtt5_utf8_decoder_options);
+}
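
A minimal usage sketch of the helpers this file introduces. The function names and the aws/mqtt/private/v5/mqtt5_utils.h header are the ones defined above; the topic strings, the demo_* function name and the printf reporting are illustrative assumptions, not a prescribed usage pattern.

    #include <aws/common/byte_buf.h>
    #include <aws/mqtt/private/v5/mqtt5_utils.h>
    #include <stdio.h>

    /* Sketch: validate a UTF-8 string and classify a couple of topic filters. */
    static void demo_mqtt5_utils(void) {
        struct aws_byte_cursor text = aws_byte_cursor_from_c_str("sensors/room1/temperature");
        if (aws_mqtt5_validate_utf8_text(text) != AWS_OP_SUCCESS) {
            printf("string contains code points forbidden by [MQTT-1.5.4-2]\n");
        }

        /* The "$aws/rules/<rule-name>/" prefix is skipped before IoT Core segment and length limits apply. */
        struct aws_byte_cursor topic = aws_byte_cursor_from_c_str("$aws/rules/my_rule/devices/thing1/state");
        printf("valid IoT Core topic: %s\n", aws_mqtt_is_valid_topic_for_iot_core(topic) ? "yes" : "no");

        /* "$share/{ShareName}/{filter}" is recognized as a shared subscription filter. */
        struct aws_byte_cursor filter = aws_byte_cursor_from_c_str("$share/group1/sensors/+/temperature");
        printf("shared subscription: %s\n", aws_mqtt_is_topic_filter_shared_subscription(filter) ? "yes" : "no");
    }
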
diff --git a/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c b/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
new file mode 100644
index 0000000000..03e2790630
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/source/v5/rate_limiters.c
@@ -0,0 +1,217 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/mqtt/private/v5/rate_limiters.h>
+
+#include <aws/common/clock.h>
+
+static int s_rate_limit_time_fn(const struct aws_rate_limiter_token_bucket_options *options, uint64_t *current_time) {
+ if (options->clock_fn != NULL) {
+ return (*options->clock_fn)(current_time);
+ }
+
+ return aws_high_res_clock_get_ticks(current_time);
+}
+
+int aws_rate_limiter_token_bucket_init(
+ struct aws_rate_limiter_token_bucket *limiter,
+ const struct aws_rate_limiter_token_bucket_options *options) {
+ AWS_ZERO_STRUCT(*limiter);
+
+ if (options->tokens_per_second == 0 || options->maximum_token_count == 0) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ limiter->config = *options;
+
+ aws_rate_limiter_token_bucket_reset(limiter);
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_rate_limiter_token_bucket_reset(struct aws_rate_limiter_token_bucket *limiter) {
+
+ limiter->current_token_count =
+ aws_min_u64(limiter->config.initial_token_count, limiter->config.maximum_token_count);
+ limiter->fractional_nanos = 0;
+ limiter->fractional_nano_tokens = 0;
+
+ uint64_t now = 0;
+ AWS_FATAL_ASSERT(s_rate_limit_time_fn(&limiter->config, &now) == AWS_OP_SUCCESS);
+
+ limiter->last_service_time = now;
+}
+
+static void s_regenerate_tokens(struct aws_rate_limiter_token_bucket *limiter) {
+ uint64_t now = 0;
+ AWS_FATAL_ASSERT(s_rate_limit_time_fn(&limiter->config, &now) == AWS_OP_SUCCESS);
+
+ if (now <= limiter->last_service_time) {
+ return;
+ }
+
+ uint64_t nanos_elapsed = now - limiter->last_service_time;
+
+ /*
+ * We break the regeneration calculation into two distinct steps:
+ * (1) Perform regeneration based on whole seconds elapsed (nice and easy just multiply times the regen rate)
+ * (2) Perform regeneration based on the remaining fraction of a second elapsed
+ *
+ * We do this to minimize the chances of multiplication saturation before the divide necessary to normalize to
+ * nanos.
+ *
+ * In particular, by doing this, we won't see saturation unless a regeneration rate in the multi-billions is used
+ * or elapsed_seconds is in the billions. This is similar reasoning to what we do in aws_timestamp_convert_u64.
+ *
+ * Additionally, we use a (sub-second) fractional counter/accumulator (fractional_nanos, fractional_nano_tokens)
+ * in order to prevent error accumulation due to integer division rounding.
+ */
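+
+ /*
+ * Worked example with illustrative numbers: tokens_per_second = 3 and nanos_elapsed = 2,500,000,000
+ * give elapsed_seconds = 2, so step (1) credits 2 * 3 = 6 tokens; the remaining 500,000,000 nanos
+ * accumulate in fractional_nanos, and step (2) credits 500,000,000 * 3 / 1,000,000,000 = 1 more token
+ * via fractional_nano_tokens, for 7 tokens in total.
+ */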
+
+ /* break elapsed time into seconds and remainder nanos */
+ uint64_t remainder_nanos = 0;
+ uint64_t elapsed_seconds =
+ aws_timestamp_convert(nanos_elapsed, AWS_TIMESTAMP_NANOS, AWS_TIMESTAMP_SECS, &remainder_nanos);
+
+ /* apply seconds-based regeneration */
+ uint64_t tokens_regenerated = aws_mul_u64_saturating(elapsed_seconds, limiter->config.tokens_per_second);
+
+ /* apply fractional remainder regeneration */
+ limiter->fractional_nanos += remainder_nanos;
+
+ /* fractional overflow check */
+ if (limiter->fractional_nanos < AWS_TIMESTAMP_NANOS) {
+ /*
+ * no overflow, just do the division to figure out how many tokens are represented by the updated
+ * fractional nanos
+ */
+ uint64_t new_fractional_tokens =
+ aws_mul_u64_saturating(limiter->fractional_nanos, limiter->config.tokens_per_second) / AWS_TIMESTAMP_NANOS;
+
+ /*
+ * update token count by how much fractional tokens changed
+ */
+ tokens_regenerated += new_fractional_tokens - limiter->fractional_nano_tokens;
+ limiter->fractional_nano_tokens = new_fractional_tokens;
+ } else {
+ /*
+ * overflow. In this case, update token count by the remaining tokens left to regenerate to make the
+ * original fractional nano amount equal to one second. This is the key part (a pseudo-reset) that lets us
+ * avoid error accumulation due to integer division rounding over time.
+ */
+ tokens_regenerated += limiter->config.tokens_per_second - limiter->fractional_nano_tokens;
+
+ /*
+ * subtract off a second from the fractional part. Guaranteed to be less than a second afterwards.
+ */
+ limiter->fractional_nanos -= AWS_TIMESTAMP_NANOS;
+
+ /*
+ * Calculate the new fractional nano token amount, and add them in.
+ */
+ limiter->fractional_nano_tokens =
+ aws_mul_u64_saturating(limiter->fractional_nanos, limiter->config.tokens_per_second) / AWS_TIMESTAMP_NANOS;
+ tokens_regenerated += limiter->fractional_nano_tokens;
+ }
+
+ limiter->current_token_count = aws_add_u64_saturating(tokens_regenerated, limiter->current_token_count);
+ if (limiter->current_token_count > limiter->config.maximum_token_count) {
+ limiter->current_token_count = limiter->config.maximum_token_count;
+ }
+
+ limiter->last_service_time = now;
+}
+
+bool aws_rate_limiter_token_bucket_can_take_tokens(
+ struct aws_rate_limiter_token_bucket *limiter,
+ uint64_t token_count) {
+ s_regenerate_tokens(limiter);
+
+ return limiter->current_token_count >= token_count;
+}
+
+int aws_rate_limiter_token_bucket_take_tokens(struct aws_rate_limiter_token_bucket *limiter, uint64_t token_count) {
+ s_regenerate_tokens(limiter);
+
+ if (limiter->current_token_count < token_count) {
+ /* TODO: correct error once seated in aws-c-common */
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ limiter->current_token_count -= token_count;
+ return AWS_OP_SUCCESS;
+}
+
+uint64_t aws_rate_limiter_token_bucket_compute_wait_for_tokens(
+ struct aws_rate_limiter_token_bucket *limiter,
+ uint64_t token_count) {
+ s_regenerate_tokens(limiter);
+
+ if (limiter->current_token_count >= token_count) {
+ return 0;
+ }
+
+ uint64_t token_rate = limiter->config.tokens_per_second;
+ AWS_FATAL_ASSERT(limiter->fractional_nanos < AWS_TIMESTAMP_NANOS);
+ AWS_FATAL_ASSERT(limiter->fractional_nano_tokens <= token_rate);
+
+ uint64_t expected_wait = 0;
+
+ uint64_t deficit = token_count - limiter->current_token_count;
+ uint64_t remaining_fractional_tokens = token_rate - limiter->fractional_nano_tokens;
+
+ if (deficit < remaining_fractional_tokens) {
+ /*
+ * case 1:
+ * The token deficit is less than what will be regenerated by waiting for the fractional nanos accumulator
+ * to reach one second's worth of time.
+ *
+ * In this case, base the calculation off of just a wait from fractional nanos.
+ */
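+ /*
+ * Worked example with illustrative numbers: token_rate = 100, deficit = 2, fractional_nano_tokens = 5,
+ * fractional_nanos = 50,000,000. Then target_fractional_tokens = 7, remainder_wait_unnormalized = 7 * 10^9,
+ * and expected_wait = 7 * 10^9 / 100 - 50,000,000 = 20,000,000 ns, exactly the time needed to regenerate
+ * the 2 missing tokens at 100 tokens per second.
+ */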
+ uint64_t target_fractional_tokens = aws_add_u64_saturating(deficit, limiter->fractional_nano_tokens);
+ uint64_t remainder_wait_unnormalized = aws_mul_u64_saturating(target_fractional_tokens, AWS_TIMESTAMP_NANOS);
+
+ expected_wait = remainder_wait_unnormalized / token_rate - limiter->fractional_nanos;
+
+ /* If the fractional wait is itself fractional, then add one more nanosecond to push us over the edge */
+ if (remainder_wait_unnormalized % token_rate) {
+ ++expected_wait;
+ }
+ } else {
+ /*
+ * case 2:
+ * The token deficit requires regeneration for a time interval at least as large as what is needed
+ * to overflow the fractional nanos accumulator.
+ */
+
+ /* First account for making the fractional nano accumulator exactly one second */
+ expected_wait = AWS_TIMESTAMP_NANOS - limiter->fractional_nanos;
+ deficit -= remaining_fractional_tokens;
+
+ /*
+ * Now, for the remaining tokens, split into tokens from whole seconds worth of regeneration as well
+ * as a remainder requiring a fractional regeneration
+ */
+ uint64_t expected_wait_seconds = deficit / token_rate;
+ uint64_t deficit_remainder = deficit % token_rate;
+
+ /*
+ * Account for seconds worth of waiting
+ */
+ expected_wait += aws_mul_u64_saturating(expected_wait_seconds, AWS_TIMESTAMP_NANOS);
+
+ /*
+ * And finally, calculate the fractional wait to give us the last few tokens
+ */
+ uint64_t remainder_wait_unnormalized = aws_mul_u64_saturating(deficit_remainder, AWS_TIMESTAMP_NANOS);
+ expected_wait += remainder_wait_unnormalized / token_rate;
+
+ /* If the fractional wait is itself fractional, then add one more nanosecond to push us over the edge */
+ if (remainder_wait_unnormalized % token_rate) {
+ ++expected_wait;
+ }
+ }
+
+ return expected_wait;
+}
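
A compact sketch of driving the token-bucket limiter implemented above. The option fields and function signatures are the ones used in this file (declared in aws/mqtt/private/v5/rate_limiters.h); the 100-tokens-per-second budget and the demo function itself are illustrative assumptions.

    #include <aws/mqtt/private/v5/rate_limiters.h>
    #include <stdint.h>

    /* Sketch: pace work to roughly 100 operations per second. */
    static int demo_rate_limiter(void) {
        struct aws_rate_limiter_token_bucket_options options = {
            .tokens_per_second = 100,
            .initial_token_count = 100,
            .maximum_token_count = 100,
            /* .clock_fn left NULL so aws_high_res_clock_get_ticks() is used */
        };

        struct aws_rate_limiter_token_bucket limiter;
        if (aws_rate_limiter_token_bucket_init(&limiter, &options)) {
            return AWS_OP_ERR;
        }

        if (aws_rate_limiter_token_bucket_can_take_tokens(&limiter, 1)) {
            return aws_rate_limiter_token_bucket_take_tokens(&limiter, 1);
        }

        /* Not enough tokens yet: nanoseconds to wait (or to schedule a task for) before one is available. */
        uint64_t wait_ns = aws_rate_limiter_token_bucket_compute_wait_for_tokens(&limiter, 1);
        (void)wait_ns;
        return AWS_OP_SUCCESS;
    }
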
diff --git a/contrib/restricted/aws/aws-c-mqtt/ya.make b/contrib/restricted/aws/aws-c-mqtt/ya.make
new file mode 100644
index 0000000000..e6397a5367
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-mqtt/ya.make
@@ -0,0 +1,71 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.8.8)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-mqtt/archive/v0.8.8.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-common
+ contrib/restricted/aws/aws-c-http
+ contrib/restricted/aws/aws-c-io
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-mqtt/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_USE_EPOLL
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+)
+
+SRCS(
+ source/client.c
+ source/client_channel_handler.c
+ source/fixed_header.c
+ source/mqtt.c
+ source/packets.c
+ source/shared_constants.c
+ source/topic_tree.c
+ source/v5/mqtt5_callbacks.c
+ source/v5/mqtt5_client.c
+ source/v5/mqtt5_decoder.c
+ source/v5/mqtt5_encoder.c
+ source/v5/mqtt5_listener.c
+ source/v5/mqtt5_options_storage.c
+ source/v5/mqtt5_topic_alias.c
+ source/v5/mqtt5_types.c
+ source/v5/mqtt5_utils.c
+ source/v5/rate_limiters.c
+)
+
+END()
diff --git a/contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..afd70c3f76
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-s3)
+target_compile_options(restricted-aws-aws-c-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-s3 PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/include
+)
+target_link_libraries(restricted-aws-aws-c-s3 PUBLIC
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+)
+target_sources(restricted-aws-aws-c-s3 PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_util.c
+)
diff --git a/contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..afd70c3f76
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-s3)
+target_compile_options(restricted-aws-aws-c-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-s3 PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/include
+)
+target_link_libraries(restricted-aws-aws-c-s3 PUBLIC
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+)
+target_sources(restricted-aws-aws-c-s3 PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_util.c
+)
diff --git a/contrib/restricted/aws/aws-c-s3/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-s3/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..0969b7de23
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,69 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-s3)
+target_compile_options(restricted-aws-aws-c-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-s3 PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/include
+)
+target_link_libraries(restricted-aws-aws-c-s3 PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+)
+target_sources(restricted-aws-aws-c-s3 PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_util.c
+)
diff --git a/contrib/restricted/aws/aws-c-s3/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-s3/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..0969b7de23
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,69 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-s3)
+target_compile_options(restricted-aws-aws-c-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-s3 PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/include
+)
+target_link_libraries(restricted-aws-aws-c-s3 PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+)
+target_sources(restricted-aws-aws-c-s3 PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_util.c
+)
diff --git a/contrib/restricted/aws/aws-c-s3/CMakeLists.txt b/contrib/restricted/aws/aws-c-s3/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-s3/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-s3/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..afd70c3f76
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-s3)
+target_compile_options(restricted-aws-aws-c-s3 PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-s3 PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/include
+)
+target_link_libraries(restricted-aws-aws-c-s3 PUBLIC
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-sdkutils
+ restricted-aws-aws-checksums
+)
+target_sources(restricted-aws-aws-c-s3 PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_client.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-s3/source/s3_util.c
+)
diff --git a/contrib/restricted/aws/aws-c-s3/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-s3/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..5b627cfa60
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-s3/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-s3/CONTRIBUTING.md
new file mode 100644
index 0000000000..762ffe81b1
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/CONTRIBUTING.md
@@ -0,0 +1,61 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-c-s3/issues), or [recently closed](https://github.com/awslabs/aws-c-s3/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional document on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-c-s3/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-c-s3/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-c-s3/LICENSE b/contrib/restricted/aws/aws-c-s3/LICENSE
new file mode 100644
index 0000000000..67db858821
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/contrib/restricted/aws/aws-c-s3/NOTICE b/contrib/restricted/aws/aws-c-s3/NOTICE
new file mode 100644
index 0000000000..616fc58894
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/contrib/restricted/aws/aws-c-s3/README.md b/contrib/restricted/aws/aws-c-s3/README.md
new file mode 100644
index 0000000000..bc46111ec5
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/README.md
@@ -0,0 +1,74 @@
+## AWS C S3
+
+C99 library implementation for communicating with the S3 service, designed for maximizing throughput on high bandwidth EC2 instances.
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.1+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+#### Linux-Only Dependencies
+
+If you are building on Linux, you will need to build aws-lc and s2n-tls first.
+
+```
+git clone git@github.com:awslabs/aws-lc.git
+cmake -S aws-lc -B aws-lc/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-lc/build --target install
+
+git clone git@github.com:aws/s2n-tls.git
+cmake -S s2n-tls -B s2n-tls/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build s2n-tls/build --target install
+```
+
+#### Building aws-c-s3 and Remaining Dependencies
+
+```
+git clone git@github.com:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone git@github.com:awslabs/aws-checksums.git
+cmake -S aws-checksums -B aws-checksums/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-checksums/build --target install
+
+git clone git@github.com:awslabs/aws-c-cal.git
+cmake -S aws-c-cal -B aws-c-cal/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-cal/build --target install
+
+git clone git@github.com:awslabs/aws-c-io.git
+cmake -S aws-c-io -B aws-c-io/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-io/build --target install
+
+git clone git@github.com:awslabs/aws-c-compression.git
+cmake -S aws-c-compression -B aws-c-compression/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-compression/build --target install
+
+git clone git@github.com:awslabs/aws-c-http.git
+cmake -S aws-c-http -B aws-c-http/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-http/build --target install
+
+git clone git@github.com:awslabs/aws-c-sdkutils.git
+cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-sdkutils/build --target install
+
+git clone git@github.com:awslabs/aws-c-auth.git
+cmake -S aws-c-auth -B aws-c-auth/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-auth/build --target install
+
+git clone git@github.com:awslabs/aws-c-s3.git
+cmake -S aws-c-s3 -B aws-c-s3/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-s3/build --target install
+```
+
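+A minimal consumer sketch (editor's addition, not part of the upstream instructions): after installing the libraries above, a program initializes the library once before creating any S3 client and cleans it up on shutdown. The entry points `aws_s3_library_init()` / `aws_s3_library_clean_up()` from `aws/s3/s3.h` and `aws_default_allocator()` from aws-c-common are assumed here; client configuration is omitted.
+
+```c
+#include <aws/common/allocator.h>
+#include <aws/s3/s3.h>
+
+int main(void) {
+    struct aws_allocator *allocator = aws_default_allocator();
+
+    /* Bring up aws-c-s3 (this is assumed to also initialize the dependency libraries it relies on). */
+    aws_s3_library_init(allocator);
+
+    /* ... create an aws_s3_client and issue meta requests here ... */
+
+    aws_s3_library_clean_up();
+    return 0;
+}
+```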
+## Testing
+
+The unit tests require an AWS account with S3 buckets set up in a particular way.
+Use the [test_helper script](./tests/test_helper/) to set this up.
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/exports.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/exports.h
new file mode 100644
index 0000000000..c736317348
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/exports.h
@@ -0,0 +1,29 @@
+#ifndef AWS_S3_EXPORTS_H
+#define AWS_S3_EXPORTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_S3_USE_IMPORT_EXPORT
+# ifdef AWS_S3_EXPORTS
+# define AWS_S3_API __declspec(dllexport)
+# else
+# define AWS_S3_API __declspec(dllimport)
+# endif /* AWS_S3_EXPORTS */
+# else
+# define AWS_S3_API
+# endif /* AWS_S3_USE_IMPORT_EXPORT */
+
+#else
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_S3_USE_IMPORT_EXPORT) && defined(AWS_S3_EXPORTS)
+# define AWS_S3_API __attribute__((visibility("default")))
+# else
+# define AWS_S3_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */
+
+#endif /* AWS_S3_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h
new file mode 100644
index 0000000000..3845b96292
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_get.h
@@ -0,0 +1,64 @@
+#ifndef AWS_S3_AUTO_RANGED_GET_H
+#define AWS_S3_AUTO_RANGED_GET_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_meta_request_impl.h"
+
+enum aws_s3_auto_ranged_get_request_type {
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT,
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART,
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE,
+};
+
+struct aws_s3_auto_ranged_get {
+ struct aws_s3_meta_request base;
+
+ enum aws_s3_checksum_algorithm validation_algorithm;
+ /* Members to only be used when the mutex in the base type is locked. */
+ struct {
+ /* The starting byte of the data that will be retrieved from the object. */
+ uint64_t object_range_start;
+
+ /* The last byte of the data that will be retrieved from the object.*/
+ uint64_t object_range_end;
+
+ /* The total number of parts that are being used in downloading the object range. Note that "part" here
+ * currently refers to a range-get, and does not require a "part" on the service side. */
+ uint32_t total_num_parts;
+
+ uint32_t num_parts_requested;
+ uint32_t num_parts_completed;
+ uint32_t num_parts_successful;
+ uint32_t num_parts_failed;
+ uint32_t num_parts_checksum_validated;
+
+ uint32_t object_range_known : 1;
+ uint32_t head_object_sent : 1;
+ uint32_t head_object_completed : 1;
+ uint32_t get_without_range_sent : 1;
+ uint32_t get_without_range_completed : 1;
+ uint32_t read_window_warning_issued : 1;
+ } synced_data;
+
+ uint32_t initial_message_has_range_header : 1;
+ uint32_t initial_message_has_if_match_header : 1;
+
+ struct aws_string *etag;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Creates a new auto-ranged get meta request. This will do multiple parallel ranged-gets when appropriate. */
+AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_get_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ size_t part_size,
+ const struct aws_s3_meta_request_options *options);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_AUTO_RANGED_GET_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h
new file mode 100644
index 0000000000..42b5a02f9e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_auto_ranged_put.h
@@ -0,0 +1,117 @@
+#ifndef AWS_S3_AUTO_RANGED_PUT_H
+#define AWS_S3_AUTO_RANGED_PUT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "s3_paginator.h"
+
+enum aws_s3_auto_ranged_put_request_tag {
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD,
+
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_MAX,
+};
+
+struct aws_s3_auto_ranged_put {
+ struct aws_s3_meta_request base;
+
+ /* Initialized either during creation in the resume flow, or as a result of the CreateMultipartUpload request in the normal flow. */
+ struct aws_string *upload_id;
+
+ /* Resume token used to resume the operation */
+ struct aws_s3_meta_request_resume_token *resume_token;
+
+ uint64_t content_length;
+
+ /* Only meant for use in the update function, which is never called concurrently. */
+ struct {
+ /*
+ * Next part number to send.
+ * Note: this follows the S3 part-number convention, where counting starts at 1.
+ * Throughout the codebase, 0-based part numbers are usually referred to as part indices.
+ */
+ uint32_t next_part_number;
+ } threaded_update_data;
+
+ /*
+ * Should only be used during prepare requests. Note: stream reads must be sequential,
+ * so prepare currently never runs concurrently with another prepare
+ */
+ struct {
+ /*
+ * How many parts have been read from the input stream.
+ * Since reads are always sequential, this is essentially the number of parts read from the start of the
+ * stream.
+ */
+ uint32_t num_parts_read_from_stream;
+ } prepare_data;
+
+ /*
+ * Very similar to the etag_list used in complete_multipart_upload to create the XML payload. Each part sets the
+ * entry at its index to its checksum result, so although the list is shared across threads, each entry is written
+ * only once by the corresponding part and then read again only during the complete multipart upload request, which
+ * is invoked after all other parts/threads have completed.
+ */
+ struct aws_byte_buf *encoded_checksum_list;
+
+ /* Members to only be used when the mutex in the base type is locked. */
+ struct {
+ /* Array list of `struct aws_string *`. */
+ struct aws_array_list etag_list;
+
+ struct aws_s3_paginated_operation *list_parts_operation;
+ struct aws_string *list_parts_continuation_token;
+
+ uint32_t total_num_parts;
+ uint32_t num_parts_sent;
+ uint32_t num_parts_completed;
+ uint32_t num_parts_successful;
+ uint32_t num_parts_failed;
+
+ struct aws_http_headers *needed_response_headers;
+
+ int list_parts_error_code;
+ int create_multipart_upload_error_code;
+ int complete_multipart_upload_error_code;
+ int abort_multipart_upload_error_code;
+
+ struct {
+ /* Mark a single ListParts request has started or not */
+ uint32_t started : 1;
+ /* Mark ListParts need to continue or not */
+ uint32_t continues : 1;
+ /* Mark ListParts has completed all the pages or not */
+ uint32_t completed : 1;
+ } list_parts_state;
+ uint32_t create_multipart_upload_sent : 1;
+ uint32_t create_multipart_upload_completed : 1;
+ uint32_t complete_multipart_upload_sent : 1;
+ uint32_t complete_multipart_upload_completed : 1;
+ uint32_t abort_multipart_upload_sent : 1;
+ uint32_t abort_multipart_upload_completed : 1;
+
+ } synced_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Creates a new auto-ranged put meta request. This will do a multipart upload in parallel when appropriate. */
+
+AWS_S3_API struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ size_t part_size,
+ uint64_t content_length,
+ uint32_t num_parts,
+ const struct aws_s3_meta_request_options *options);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_AUTO_RANGED_PUT_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_checksums.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_checksums.h
new file mode 100644
index 0000000000..62155614eb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_checksums.h
@@ -0,0 +1,149 @@
+#ifndef AWS_S3_CHECKSUMS_H
+#define AWS_S3_CHECKSUMS_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include "aws/s3/s3_client.h"
+
+/* TODO: consider moving the aws_checksum_stream to aws-c-checksum, and the rest of the checksum header and trailer
+ * handling to aws-c-sdkutils. */
+
+struct aws_s3_checksum;
+
+struct aws_checksum_vtable {
+ void (*destroy)(struct aws_s3_checksum *checksum);
+ int (*update)(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf);
+ int (*finalize)(struct aws_s3_checksum *checksum, struct aws_byte_buf *out, size_t truncate_to);
+};
+
+struct aws_s3_checksum {
+ struct aws_allocator *allocator;
+ struct aws_checksum_vtable *vtable;
+ void *impl;
+ size_t digest_size;
+ enum aws_s3_checksum_algorithm algorithm;
+ bool good;
+};
+
+struct checksum_config {
+ enum aws_s3_checksum_location location;
+ enum aws_s3_checksum_algorithm checksum_algorithm;
+ bool validate_response_checksum;
+ struct {
+ bool crc32c;
+ bool crc32;
+ bool sha1;
+ bool sha256;
+ } response_checksum_algorithms;
+};
+
+/**
+ * A stream that wraps an existing stream, computes a running checksum as it is read, and outputs the checksum when
+ * the stream is destroyed.
+ * Note: seeking this stream will immediately fail, as it would prevent an accurate calculation of the
+ * checksum.
+ *
+ * @param allocator
+ * @param existing_stream The real content to read from. The existing stream is kept alive by the checksum stream
+ * and is destroyed along with it; at that point the checksum of the existing stream is
+ * written to checksum_output.
+ * @param algorithm Checksum algorithm to use.
+ * @param checksum_output Checksum of the `existing_stream`, owned by caller, which will be calculated when this stream
+ * is destroyed.
+ */
+AWS_S3_API
+struct aws_input_stream *aws_checksum_stream_new(
+ struct aws_allocator *allocator,
+ struct aws_input_stream *existing_stream,
+ enum aws_s3_checksum_algorithm algorithm,
+ struct aws_byte_buf *checksum_output);
+
+/**
+ * TODO: properly support chunked encoding.
+ *
+ * A stream that wraps an existing stream and encodes it as aws_chunked content. It computes a running checksum as
+ * the data is read and adds the checksum as a trailer at the end of the stream. All of the added bytes are counted
+ * in the length of the stream.
+ * Note: seeking this stream will immediately fail, as it would prevent an accurate calculation of the
+ * checksum.
+ *
+ * @param allocator
+ * @param existing_stream The data to be chunkified. The resulting stream consists of the stream-length information,
+ * the chunked data, a final chunk, and a trailing chunk containing the checksum of the
+ * existing stream. Destroying the chunk stream destroys the existing stream.
+ * @param checksum_output Optional. If provided, the buffer is initialized to the appropriate size and filled with
+ * the checksum result once it is calculated. It is the caller's responsibility to clean it up.
+ */
+AWS_S3_API
+struct aws_input_stream *aws_chunk_stream_new(
+ struct aws_allocator *allocator,
+ struct aws_input_stream *existing_stream,
+ enum aws_s3_checksum_algorithm algorithm,
+ struct aws_byte_buf *checksum_output);
+
+/**
+ * Get the size of the checksum output corresponding to the aws_s3_checksum_algorithm enum value.
+ */
+AWS_S3_API
+size_t aws_get_digest_size_from_algorithm(enum aws_s3_checksum_algorithm algorithm);
+
+/**
+ * Get the header name corresponding to the aws_s3_checksum_algorithm enum value.
+ */
+AWS_S3_API
+const struct aws_byte_cursor *aws_get_http_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm);
+
+/**
+ * Get the multipart upload header name corresponding to the aws_s3_checksum_algorithm enum value.
+ */
+AWS_S3_API
+const struct aws_byte_cursor *aws_get_create_mpu_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm);
+
+/**
+ * Get the complete multipart upload name corresponding to the aws_s3_checksum_algorithm enum value.
+ */
+AWS_S3_API
+const struct aws_byte_cursor *aws_get_complete_mpu_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm);
+
+/**
+ * Create a new aws_s3_checksum corresponding to the aws_s3_checksum_algorithm enum value.
+ */
+AWS_S3_API
+struct aws_s3_checksum *aws_checksum_new(struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm);
+
+/**
+ * Compute a checksum over the input in one shot, using the algorithm given by the provided enum value. Passing a
+ * function pointer around instead of using a conditional would be slightly faster, but the improvement would be
+ * negligible compared to the cost of processing the data twice (the only situation in which this function is used),
+ * and it would be harder to follow.
+ */
+AWS_S3_API
+int aws_checksum_compute(
+ struct aws_allocator *allocator,
+ enum aws_s3_checksum_algorithm algorithm,
+ const struct aws_byte_cursor *input,
+ struct aws_byte_buf *output,
+ size_t truncate_to);
+
+/**
+ * Cleans up and deallocates checksum.
+ */
+AWS_S3_API
+void aws_checksum_destroy(struct aws_s3_checksum *checksum);
+
+/**
+ * Updates the running checksum with to_checksum. This can be called multiple times.
+ */
+AWS_S3_API
+int aws_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum);
+
+/**
+ * Completes the checksum computation and writes the final digest to output.
+ * Allocation of output is the caller's responsibility.
+ */
+AWS_S3_API
+int aws_checksum_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output, size_t truncate_to);
+
+AWS_S3_API
+void checksum_config_init(struct checksum_config *internal_config, const struct aws_s3_checksum_config *config);
+
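+/*
+ * Example (editor's sketch, not upstream documentation): computing a digest incrementally with the helpers above.
+ * `allocator`, `algorithm` and the input cursors are placeholders; passing 0 as truncate_to is assumed to request
+ * the full, untruncated digest.
+ *
+ *     struct aws_s3_checksum *checksum = aws_checksum_new(allocator, algorithm);
+ *     aws_checksum_update(checksum, &first_chunk_cursor);
+ *     aws_checksum_update(checksum, &second_chunk_cursor);
+ *
+ *     struct aws_byte_buf digest;
+ *     aws_byte_buf_init(&digest, allocator, checksum->digest_size);
+ *     aws_checksum_finalize(checksum, &digest, 0);
+ *
+ *     aws_byte_buf_clean_up(&digest);
+ *     aws_checksum_destroy(checksum);
+ */
+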
+#endif /* AWS_S3_CHECKSUMS_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h
new file mode 100644
index 0000000000..ec0ff66c9e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_client_impl.h
@@ -0,0 +1,439 @@
+#ifndef AWS_S3_CLIENT_IMPL_H
+#define AWS_S3_CLIENT_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/s3_client.h"
+
+#include <aws/common/atomics.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/task_scheduler.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/proxy.h>
+
+/* TODO automate this value in the future to prevent it from becoming out-of-sync. */
+#define AWS_S3_CLIENT_VERSION "0.1.x"
+
+struct aws_http_connection;
+struct aws_http_connection_manager;
+struct aws_host_resolver;
+struct aws_s3_endpoint;
+
+enum aws_s3_connection_finish_code {
+ AWS_S3_CONNECTION_FINISH_CODE_SUCCESS,
+ AWS_S3_CONNECTION_FINISH_CODE_FAILED,
+ AWS_S3_CONNECTION_FINISH_CODE_RETRY,
+};
+
+/* Callback for the owner of the endpoint when the endpoint has completely cleaned up. */
+typedef void(aws_s3_endpoint_shutdown_fn)(void *user_data);
+
+struct aws_s3_endpoint_options {
+ /* URL of the host that this endpoint refers to. */
+ struct aws_string *host_name;
+
+ /* Callback for when this endpoint completely shuts down. */
+ aws_s3_endpoint_shutdown_fn *shutdown_callback;
+
+ /* Bootstrap of the client to be used for spawning a connection manager. */
+ struct aws_client_bootstrap *client_bootstrap;
+
+ /* TLS connection options to be used for the connection manager. */
+ const struct aws_tls_connection_options *tls_connection_options;
+
+ /* DNS TTL to use for addresses for this endpoint. */
+ size_t dns_host_address_ttl_seconds;
+
+ /* Client that owns this endpoint */
+ struct aws_s3_client *client;
+
+ /* Maximum number of connections that can be spawned for this endpoint. */
+ uint32_t max_connections;
+
+ /* HTTP port override. If zero, determine port based on TLS context */
+ uint16_t port;
+
+ /**
+ * Optional.
+ * Proxy configuration for http connection.
+ */
+ struct aws_http_proxy_config *proxy_config;
+
+ /**
+ * Optional.
+ * Configuration for fetching proxy configuration from environment.
+ * By default, proxy_ev_settings.aws_http_proxy_env_var_type is set to AWS_HPEV_ENABLE, which means the proxy
+ * configuration is read from the environment.
+ * This only takes effect when proxy_config is not set. If both are set, the configuration from proxy_config is used.
+ */
+ struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Optional.
+ * If set to 0, default value is used.
+ */
+ uint32_t connect_timeout_ms;
+
+ /**
+ * Optional.
+ * Set keepalive to periodically transmit messages for detecting a disconnected peer.
+ */
+ struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options;
+
+ /**
+ * Optional.
+ * Configuration options for connection monitoring.
+ * If the transfer speed falls below the specified minimum_throughput_bytes_per_second, the operation is aborted.
+ */
+ struct aws_http_connection_monitoring_options *monitoring_options;
+};
+
+/* global vtable, only used when mocking for tests */
+struct aws_s3_endpoint_system_vtable {
+ void (*acquire)(struct aws_s3_endpoint *endpoint, bool already_holding_lock);
+ void (*release)(struct aws_s3_endpoint *endpoint);
+};
+
+struct aws_s3_endpoint {
+ struct {
+ /* This is NOT an atomic ref-count.
+ * The endpoint lives in hashtable: `aws_s3_client.synced_data.endpoints`
+ * This ref-count can only be touched while holding client's lock */
+ size_t ref_count;
+ } client_synced_data;
+
+ /* What allocator was used to create this endpoint. */
+ struct aws_allocator *allocator;
+
+ /* URL of the host that this endpoint refers to. */
+ struct aws_string *host_name;
+
+ /* Connection manager that manages all connections to this endpoint. */
+ struct aws_http_connection_manager *http_connection_manager;
+
+ /* Client that owns this endpoint */
+ struct aws_s3_client *client;
+};
+
+/* Represents one connection on a particular VIP. */
+struct aws_s3_connection {
+ /* Endpoint that this connection is connected to. */
+ struct aws_s3_endpoint *endpoint;
+
+ /* The underlying, currently in-use HTTP connection. */
+ struct aws_http_connection *http_connection;
+
+ /* Request currently being processed on this connection. */
+ struct aws_s3_request *request;
+
+ /* Current retry token for the request. If it has never been retried, this will be NULL. */
+ struct aws_retry_token *retry_token;
+};
+
+struct aws_s3_client_vtable {
+
+ struct aws_s3_meta_request *(
+ *meta_request_factory)(struct aws_s3_client *client, const struct aws_s3_meta_request_options *options);
+
+ void (*create_connection_for_request)(struct aws_s3_client *client, struct aws_s3_request *request);
+
+ void (*acquire_http_connection)(
+ struct aws_http_connection_manager *conn_manager,
+ aws_http_connection_manager_on_connection_setup_fn *on_connection_acquired_callback,
+ void *user_data);
+
+ size_t (*get_host_address_count)(
+ struct aws_host_resolver *host_resolver,
+ const struct aws_string *host_name,
+ uint32_t flags);
+
+ void (*schedule_process_work_synced)(struct aws_s3_client *client);
+
+ void (*process_work)(struct aws_s3_client *client);
+
+ void (*endpoint_shutdown_callback)(struct aws_s3_client *client);
+
+ void (*finish_destroy)(struct aws_s3_client *client);
+};
+
+/* Represents the state of the S3 client. */
+struct aws_s3_client {
+ struct aws_allocator *allocator;
+
+ struct aws_s3_client_vtable *vtable;
+
+ struct aws_ref_count ref_count;
+
+ /* Client bootstrap for setting up connection managers. */
+ struct aws_client_bootstrap *client_bootstrap;
+
+ /* Event loop on the client bootstrap ELG for processing work/dispatching requests. */
+ struct aws_event_loop *process_work_event_loop;
+
+ /* Event loop group for streaming request bodies back to the user. */
+ struct aws_event_loop_group *body_streaming_elg;
+
+ /* Region of the S3 bucket. */
+ struct aws_string *region;
+
+ /* Size of parts for files when doing gets or puts. This exists on the client as a configurable option that is
+ * passed to meta requests for use. */
+ const size_t part_size;
+
+ /* Maximum size of parts for files when doing gets or puts. Like part_size, this exists on the client as a
+ * configurable option that is passed to meta requests for use. */
+ const size_t max_part_size;
+
+ /* TLS Options to be used for each connection. */
+ struct aws_tls_connection_options *tls_connection_options;
+
+ /* Cached signing config. Can be NULL if no signing config was specified. */
+ struct aws_cached_signing_config_aws *cached_signing_config;
+
+ /* Throughput target in Gbps that we are trying to reach. */
+ const double throughput_target_gbps;
+
+ /* The calculated ideal number of VIPs, based on the throughput target and the throughput per VIP. */
+ const uint32_t ideal_vip_count;
+
+ /**
+ * For multi-part upload, content-md5 will be calculated if the AWS_MR_CONTENT_MD5_ENABLED is specified
+ * or initial request has content-md5 header.
+ * For single-part upload, if the content-md5 header is specified, it will remain unchanged. If the header is not
+ * specified, and this is set to AWS_MR_CONTENT_MD5_ENABLED, it will be calculated. */
+ const enum aws_s3_meta_request_compute_content_md5 compute_content_md5;
+
+ /* Hard limit on max connections set through the client config. */
+ const uint32_t max_active_connections_override;
+
+ struct aws_atomic_var max_allowed_connections;
+
+ /* Retry strategy used for scheduling request retries. */
+ struct aws_retry_strategy *retry_strategy;
+
+ /**
+ * Optional.
+ * Proxy configuration for http connection.
+ */
+ struct aws_http_proxy_config *proxy_config;
+
+ /**
+ * Optional.
+ * Configuration for fetching proxy configuration from environment.
+ * By default, proxy_ev_settings.aws_http_proxy_env_var_type is set to AWS_HPEV_ENABLE, which means the proxy
+ * configuration is read from the environment.
+ * This only takes effect when proxy_config is not set. If both are set, the configuration from proxy_config is used.
+ */
+ struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Optional.
+ * If set to 0, default value is used.
+ */
+ uint32_t connect_timeout_ms;
+
+ /**
+ * Optional.
+ * Set keepalive to periodically transmit messages for detecting a disconnected peer.
+ */
+ struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options;
+
+ /**
+ * Configuration options for connection monitoring.
+ * If the transfer speed falls below the specified minimum_throughput_bytes_per_second, the operation is aborted.
+ * If user passes in NULL, default values are used.
+ */
+ struct aws_http_connection_monitoring_options monitoring_options;
+
+ /* tls options from proxy environment settings. */
+ struct aws_tls_connection_options *proxy_ev_tls_options;
+
+ /* Shutdown callbacks to notify when the client is completely cleaned up. */
+ aws_s3_client_shutdown_complete_callback_fn *shutdown_callback;
+ void *shutdown_callback_user_data;
+
+ /* Whether read backpressure (aka flow-control window) is being applied. */
+ const bool enable_read_backpressure;
+
+ /* The starting size of each meta request's flow-control window, in bytes.
+ * Ignored unless `enable_read_backpressure` is true. */
+ const size_t initial_read_window;
+
+ struct {
+ /* Number of overall requests currently being processed by the client. */
+ struct aws_atomic_var num_requests_in_flight;
+
+ /* Number of requests being sent/received over network. */
+ struct aws_atomic_var num_requests_network_io[AWS_S3_META_REQUEST_TYPE_MAX];
+
+ /* Number of requests sitting in their meta request priority queue, waiting to be streamed. */
+ struct aws_atomic_var num_requests_stream_queued_waiting;
+
+ /* Number of requests currently scheduled to be streamed or are actively being streamed. */
+ struct aws_atomic_var num_requests_streaming;
+ } stats;
+
+ struct {
+ struct aws_mutex lock;
+
+ /* Hash table of endpoints that are in-use by the client.
+ * Key: aws_string of endpoint hostname. Value: aws_s3_endpoint */
+ struct aws_hash_table endpoints;
+
+ /* How many requests failed to be prepared. */
+ uint32_t num_failed_prepare_requests;
+
+ /* Meta requests that need to be added in the work event loop.
+ * The list contains aws_s3_meta_request_work entries. */
+ struct aws_linked_list pending_meta_request_work;
+
+ /* aws_s3_request that are prepared and ready to be put in the threaded_data request queue. */
+ struct aws_linked_list prepared_requests;
+
+ /* Task for processing requests from meta requests on connections. */
+ struct aws_task process_work_task;
+
+ /* Number of endpoints currently allocated. Used during clean up to know how many endpoints are still in
+ * memory.*/
+ uint32_t num_endpoints_allocated;
+
+ /* Whether or not the client has started cleaning up all of its resources */
+ uint32_t active : 1;
+
+ /* True if the start_destroy function is still executing, which blocks shutdown from completing. */
+ uint32_t start_destroy_executing : 1;
+
+ /* Whether or not work processing is currently scheduled. */
+ uint32_t process_work_task_scheduled : 1;
+
+ /* Whether or not work process is currently in progress. */
+ uint32_t process_work_task_in_progress : 1;
+
+ /* Whether or not the body streaming ELG is allocated. If the body streaming ELG is NULL, but this is true, the
+ * shutdown callback has not yet been called.*/
+ uint32_t body_streaming_elg_allocated : 1;
+
+ /* True if client has been flagged to finish destroying itself. Used to catch double-destroy bugs.*/
+ uint32_t finish_destroy : 1;
+
+ } synced_data;
+
+ struct {
+ /* Queue of prepared aws_s3_request that are waiting to be assigned to connections. */
+ struct aws_linked_list request_queue;
+
+ /* Client list of ongoing aws_s3_meta_requests. */
+ struct aws_linked_list meta_requests;
+
+ /* Number of requests in the request_queue linked_list. */
+ uint32_t request_queue_size;
+
+ /* Number of requests currently being prepared. */
+ uint32_t num_requests_being_prepared;
+
+ } threaded_data;
+};
+
+struct aws_s3_meta_request_resume_token {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ enum aws_s3_meta_request_type type;
+
+ /* Note: since pause currently only supports upload, this structure only has
+ upload-specific fields. Extending it to support other types is left as
+ an exercise for the future. */
+ struct aws_string *multipart_upload_id;
+ size_t part_size;
+ size_t total_num_parts;
+
+ /* Note: this field is used only when S3 tells us that the upload id no longer
+ exists; if this indicates that all parts have already been uploaded, the
+ request is completed instead of failed. */
+ size_t num_parts_completed;
+};
+
+void aws_s3_client_notify_connection_finished(
+ struct aws_s3_client *client,
+ struct aws_s3_connection *connection,
+ int error_code,
+ enum aws_s3_connection_finish_code finish_code);
+
+AWS_EXTERN_C_BEGIN
+
+AWS_S3_API
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new(struct aws_allocator *allocator);
+
+AWS_S3_API
+void aws_s3_set_dns_ttl(size_t ttl);
+
+AWS_S3_API
+uint32_t aws_s3_client_get_max_requests_prepare(struct aws_s3_client *client);
+
+AWS_S3_API
+uint32_t aws_s3_client_get_max_active_connections(
+ struct aws_s3_client *client,
+ struct aws_s3_meta_request *meta_request);
+
+AWS_S3_API
+uint32_t aws_s3_client_get_max_requests_in_flight(struct aws_s3_client *client);
+
+AWS_S3_API
+uint32_t aws_s3_client_queue_requests_threaded(
+ struct aws_s3_client *client,
+ struct aws_linked_list *request_list,
+ bool queue_front);
+
+AWS_S3_API
+struct aws_s3_request *aws_s3_client_dequeue_request_threaded(struct aws_s3_client *client);
+
+AWS_S3_API
+void aws_s3_client_schedule_process_work(struct aws_s3_client *client);
+
+AWS_S3_API
+void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client);
+
+AWS_S3_API
+void aws_s3_client_update_connections_threaded(struct aws_s3_client *client);
+
+AWS_S3_API
+struct aws_s3_endpoint *aws_s3_endpoint_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_endpoint_options *options);
+
+AWS_S3_API void aws_s3_client_lock_synced_data(struct aws_s3_client *client);
+
+AWS_S3_API
+void aws_s3_client_unlock_synced_data(struct aws_s3_client *client);
+
+/* Used for mocking */
+AWS_S3_API
+void aws_s3_endpoint_set_system_vtable(const struct aws_s3_endpoint_system_vtable *vtable);
+
+/* Increment the endpoint's ref-count.
+ * If `already_holding_lock` is false, then this call will briefly take hold of the client's lock */
+struct aws_s3_endpoint *aws_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock);
+
+/* Decrement the endpoint's ref-count.
+ * You MUST NOT call this while the client's lock is held.
+ * (this call briefly holds the client's lock and may remove the endpoint
+ * from the client's hashtable) */
+void aws_s3_endpoint_release(struct aws_s3_endpoint *endpoint);
+
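+/*
+ * Example (editor's sketch) of the locking discipline described above: the ref-count may be incremented while the
+ * client's lock is held (pass already_holding_lock = true), but the release must happen only after the lock has
+ * been dropped, because aws_s3_endpoint_release() briefly takes the lock itself.
+ *
+ *     aws_s3_client_lock_synced_data(client);
+ *     struct aws_s3_endpoint *endpoint = aws_s3_endpoint_acquire(existing_endpoint, true);
+ *     aws_s3_client_unlock_synced_data(client);
+ *
+ *     // ... use the endpoint ...
+ *
+ *     aws_s3_endpoint_release(endpoint);
+ */
+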
+AWS_S3_API
+extern const uint32_t g_max_num_connections_per_vip;
+
+AWS_S3_API
+extern const uint32_t g_num_conns_per_vip_meta_request_look_up[];
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_CLIENT_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h
new file mode 100644
index 0000000000..a839e1fcc4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_copy_object.h
@@ -0,0 +1,76 @@
+#ifndef AWS_S3_COPY_OBJECT_H
+#define AWS_S3_COPY_OBJECT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_meta_request_impl.h"
+
+enum aws_s3_copy_object_request_tag {
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD,
+
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_MAX,
+};
+
+struct aws_s3_copy_object {
+ struct aws_s3_meta_request base;
+
+ /* Usable after the CreateMultipartUpload request succeeds. */
+ struct aws_string *upload_id;
+
+ /* Only meant for use in the update function, which is never called concurrently. */
+ struct {
+ uint32_t next_part_number;
+ } threaded_update_data;
+
+ /* Members to only be used when the mutex in the base type is locked. */
+ struct {
+ struct aws_array_list etag_list;
+
+ /* obtained through a HEAD request against the source object */
+ uint64_t content_length;
+ size_t part_size;
+
+ uint32_t total_num_parts;
+ uint32_t num_parts_sent;
+ uint32_t num_parts_completed;
+ uint32_t num_parts_successful;
+ uint32_t num_parts_failed;
+
+ struct aws_http_headers *needed_response_headers;
+
+ int create_multipart_upload_error_code;
+ int complete_multipart_upload_error_code;
+ int abort_multipart_upload_error_code;
+
+ uint32_t head_object_sent : 1;
+ uint32_t head_object_completed : 1;
+ uint32_t copy_request_bypass_sent : 1;
+ uint32_t copy_request_bypass_completed : 1;
+ uint32_t create_multipart_upload_sent : 1;
+ uint32_t create_multipart_upload_completed : 1;
+ uint32_t complete_multipart_upload_sent : 1;
+ uint32_t complete_multipart_upload_completed : 1;
+ uint32_t abort_multipart_upload_sent : 1;
+ uint32_t abort_multipart_upload_completed : 1;
+
+ } synced_data;
+};
+
+/* Creates a new CopyObject meta request. This will perform either
+ * 1) A CopyObject S3 API call if the source object length is < 1 GB or
+ * 2) a multipart copy in parallel otherwise.
+ */
+struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ const struct aws_s3_meta_request_options *options);
+
+#endif
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h
new file mode 100644
index 0000000000..123c963b59
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_default_meta_request.h
@@ -0,0 +1,37 @@
+#ifndef AWS_S3_DEFAULT_META_REQUEST_H
+#define AWS_S3_DEFAULT_META_REQUEST_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_meta_request_impl.h"
+
+struct aws_s3_client;
+
+struct aws_s3_meta_request_default {
+ struct aws_s3_meta_request base;
+
+ size_t content_length;
+
+ /* Members to only be used when the mutex in the base type is locked. */
+ struct {
+ int cached_response_status;
+ int request_error_code;
+
+ uint32_t request_sent : 1;
+ uint32_t request_completed : 1;
+
+ } synced_data;
+};
+
+/* Creates a new default meta request. This will send the request as is and pass back the response. */
+struct aws_s3_meta_request *aws_s3_meta_request_default_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ uint64_t content_length,
+ bool should_compute_content_md5,
+ const struct aws_s3_meta_request_options *options);
+
+#endif
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h
new file mode 100644
index 0000000000..e0b5d0cd0f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_objects.h
@@ -0,0 +1,121 @@
+#ifndef AWS_S3_LIST_OBJECTS_H
+#define AWS_S3_LIST_OBJECTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/s3_client.h>
+
+#include <aws/common/date_time.h>
+#include <aws/common/string.h>
+
+#include <aws/s3/private/s3_paginator.h>
+
+/** Struct representing the file system relevant data for an object returned from a ListObjectsV2 API call. */
+struct aws_s3_object_info {
+ /**
+ * When a delimiter is specified in the request, S3 groups the common prefixes that contain the delimiter.
+ * This member is set to the prefix substring ending at the first occurrence of the specified delimiter,
+ * analogous to a directory entry of a file system.
+ */
+ struct aws_byte_cursor prefix;
+ /**
+ * Prefix is not included. This is the object name for use with prefix for a call to GetObject()
+ */
+ struct aws_byte_cursor key;
+ /**
+ * Size of the object in bytes.
+ */
+ uint64_t size;
+ /**
+ * Timestamp from S3 of the latest modification. If you have a reliable clock on your machine, you COULD use this
+ * to implement caching.
+ */
+ struct aws_date_time last_modified;
+ /**
+ * Etag for the object, usually an MD5 hash. You COULD also use this to implement caching.
+ */
+ struct aws_byte_cursor e_tag;
+};
+
+/**
+ * Invoked when an object or prefix is encountered during a ListObjectsV2 API call. Return false to immediately
+ * terminate the list operation. Returning true will continue until at least the current page is iterated.
+ */
+typedef bool(aws_s3_on_object_fn)(const struct aws_s3_object_info *info, void *user_data);
+
+/**
+ * Invoked upon the complete fetch and parsing of a page. If error_code is AWS_OP_SUCCESS and
+ * aws_s3_paginator_has_more_results() returns true, you may want to call
+ * aws_s3_paginator_continue() from here to fetch the rest of the bucket contents.
+ */
+typedef void(aws_s3_on_object_list_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data);
+
+/**
+ * Parameters for calling aws_s3_initiate_list_objects(). All values are copied out or re-seated and reference counted.
+ */
+struct aws_s3_list_objects_params {
+ /**
+ * Must not be NULL. The internal call will increment the reference count on client.
+ */
+ struct aws_s3_client *client;
+ /**
+ * Must not be empty. Name of the bucket to list.
+ */
+ struct aws_byte_cursor bucket_name;
+ /**
+ * Optional. The prefix to list. By default, this will be the root of the bucket. If you would like to start the
+ * list operation at a prefix (similar to a file system directory), specify that here.
+ */
+ struct aws_byte_cursor prefix;
+ /**
+ * Optional. The prefix delimiter. By default, this is the '/' character.
+ */
+ struct aws_byte_cursor delimiter;
+ /**
+ * Optional. The continuation token for fetching the next page for ListObjectsV2. You likely shouldn't set this
+ * unless you have a special use case.
+ */
+ struct aws_byte_cursor continuation_token;
+ /**
+ * Must not be empty. The endpoint for the S3 bucket to hit. Can be virtual or path style.
+ */
+ struct aws_byte_cursor endpoint;
+ /**
+ * Callback to invoke on each object that's listed.
+ */
+ aws_s3_on_object_fn *on_object;
+ /**
+ * Callback to invoke when each page of the bucket listing completes.
+ */
+ aws_s3_on_object_list_finished_fn *on_list_finished;
+ void *user_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initiates a list objects command (without executing it), and returns a paginator object to iterate the bucket with if
+ * successful.
+ *
+ * Returns NULL on failure. Check aws_last_error() for details on the error that occurred.
+ *
+ * This is a reference-counted object. It is returned with a reference count of 1. You must call
+ * aws_s3_paginator_release() on this object when you are finished with it.
+ *
+ * This does not start the actual list operation. You need to call aws_s3_paginator_continue() to start
+ * the operation.
+ */
+AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_list_objects(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_objects_params *params);
+
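+/*
+ * Example (editor's sketch) of driving a listing with the parameters above. The callback bodies, `client`,
+ * `allocator`, and the bucket/endpoint values are placeholders, and aws_s3_paginator_continue() (declared in
+ * s3_paginator.h) still has to be called to actually start the operation, as noted above.
+ *
+ *     static bool s_on_object(const struct aws_s3_object_info *info, void *user_data) {
+ *         (void)user_data;
+ *         // inspect info->key, info->size, ...
+ *         return true; // keep iterating the current page
+ *     }
+ *
+ *     static void s_on_list_finished(struct aws_s3_paginator *paginator, int error_code, void *user_data) {
+ *         (void)paginator; (void)error_code; (void)user_data;
+ *     }
+ *
+ *     struct aws_s3_list_objects_params params = {
+ *         .client = client,
+ *         .bucket_name = aws_byte_cursor_from_c_str("my-bucket"),
+ *         .endpoint = aws_byte_cursor_from_c_str("s3.us-west-2.amazonaws.com"),
+ *         .on_object = s_on_object,
+ *         .on_list_finished = s_on_list_finished,
+ *     };
+ *
+ *     struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(allocator, &params);
+ *     // ... call aws_s3_paginator_continue() and wait for on_list_finished ...
+ *     aws_s3_paginator_release(paginator);
+ */
+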
+AWS_S3_API struct aws_s3_paginated_operation *aws_s3_list_objects_operation_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_objects_params *params);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_LIST_OBJECTS_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h
new file mode 100644
index 0000000000..30af99f3c4
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_list_parts.h
@@ -0,0 +1,124 @@
+#ifndef AWS_S3_LIST_PARTS_H
+#define AWS_S3_LIST_PARTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/private/s3_paginator.h>
+#include <aws/s3/s3_client.h>
+
+#include <aws/common/date_time.h>
+#include <aws/common/string.h>
+
+/** Struct representing part info as returned from ListParts call. */
+struct aws_s3_part_info {
+ /**
+ * Size of the object in bytes.
+ */
+ uint64_t size;
+ /**
+ * Part number of the given part.
+ */
+ uint32_t part_number;
+ /**
+ * Timestamp from S3 of the latest modification. If you have a reliable clock on your machine, you COULD use this
+ * to implement caching.
+ */
+ struct aws_date_time last_modified;
+ /**
+ * Etag for the object, usually an MD5 hash. You COULD also use this to implement caching.
+ */
+ struct aws_byte_cursor e_tag;
+
+ /**
+ * CRC32 checksum for the part. Optional.
+ */
+ struct aws_byte_cursor checksumCRC32;
+
+ /**
+ * CRC32C checksum for the part. Optional.
+ */
+ struct aws_byte_cursor checksumCRC32C;
+
+ /**
+ * SHA1 checksum for the part. Optional.
+ */
+ struct aws_byte_cursor checksumSHA1;
+
+ /**
+ * SHA256 checksum for the part. Optional.
+ */
+ struct aws_byte_cursor checksumSHA256;
+};
+
+/**
+ * Invoked when a part is encountered during a ListParts call. Return false to immediately
+ * terminate the list operation. Returning true will continue until at least the current page is iterated.
+ */
+typedef bool(aws_s3_on_part_fn)(const struct aws_s3_part_info *info, void *user_data);
+
+/**
+ * Parameters for calling aws_s3_initiate_list_parts(). All values are copied out or re-seated and reference counted.
+ */
+struct aws_s3_list_parts_params {
+ /**
+ * Must not be NULL. The internal call will increment the reference count on client.
+ */
+ struct aws_s3_client *client;
+ /**
+ * Must not be empty. Name of the bucket to list.
+ */
+ struct aws_byte_cursor bucket_name;
+ /**
+ * Must not be empty. Key with which multipart upload was initiated.
+ */
+ struct aws_byte_cursor key;
+ /**
+ * Must not be empty. Id identifying multipart upload.
+ */
+ struct aws_byte_cursor upload_id;
+ /**
+ * Must not be empty. The endpoint for the S3 bucket to hit. Can be virtual or path style.
+ */
+ struct aws_byte_cursor endpoint;
+ /**
+ * Callback to invoke on each part that's listed.
+ */
+ aws_s3_on_part_fn *on_part;
+ /**
+ * Callback to invoke when each page of the bucket listing completes.
+ */
+ aws_s3_on_page_finished_fn *on_list_finished;
+ /**
+ * Associated user data.
+ */
+ void *user_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initiates a ListParts command (without executing it), and returns a paginator object to iterate the parts of the
+ * multipart upload with if successful.
+ *
+ * Returns NULL on failure. Check aws_last_error() for details on the error that occurred.
+ *
+ * This is a reference-counted object. It is returned with a reference count of 1. You must call
+ * aws_s3_paginator_release() on this object when you are finished with it.
+ *
+ * This does not start the actual list operation. You need to call aws_s3_paginator_continue() to start
+ * the operation.
+ */
+AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_list_parts(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_parts_params *params);
+
+AWS_S3_API struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_parts_params *params);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_LIST_PARTS_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h
new file mode 100644
index 0000000000..1f61641921
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_meta_request_impl.h
@@ -0,0 +1,352 @@
+#ifndef AWS_S3_META_REQUEST_IMPL_H
+#define AWS_S3_META_REQUEST_IMPL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signing.h>
+#include <aws/common/atomics.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/task_scheduler.h>
+#include <aws/http/request_response.h>
+
+#include "aws/s3/private/s3_checksums.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_request.h"
+
+struct aws_s3_client;
+struct aws_s3_connection;
+struct aws_s3_meta_request;
+struct aws_s3_request;
+struct aws_s3_request_options;
+struct aws_http_headers;
+struct aws_http_make_request_options;
+struct aws_retry_strategy;
+
+enum aws_s3_meta_request_state {
+ AWS_S3_META_REQUEST_STATE_ACTIVE,
+ AWS_S3_META_REQUEST_STATE_FINISHED,
+};
+
+enum aws_s3_meta_request_update_flags {
+ /* The client potentially has multiple meta requests that it can spread across connections, and the given meta
+ request can selectively not return a request if there is a performance reason to do so.*/
+ AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE = 0x00000002,
+};
+
+typedef void(aws_s3_meta_request_prepare_request_callback_fn)(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code,
+ void *user_data);
+
+struct aws_s3_prepare_request_payload {
+ struct aws_s3_request *request;
+ aws_s3_meta_request_prepare_request_callback_fn *callback;
+ void *user_data;
+ struct aws_task task;
+};
+
+struct aws_s3_meta_request_vtable {
+ /* Update the meta request. out_request is required to be non-null. Returns true if there is any work in
+ * progress, false if there is not. */
+ bool (*update)(struct aws_s3_meta_request *meta_request, uint32_t flags, struct aws_s3_request **out_request);
+
+ void (*schedule_prepare_request)(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_s3_meta_request_prepare_request_callback_fn *callback,
+ void *user_data);
+
+ /* Given a request, prepare it for sending (ie: creating the correct HTTP message, reading from a stream (if
+ * necessary), signing it, computing hashes, etc.) */
+ int (*prepare_request)(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request);
+
+ void (*init_signing_date_time)(struct aws_s3_meta_request *meta_request, struct aws_date_time *date_time);
+
+ /* Sign the given request. */
+ void (*sign_request)(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_signing_complete_fn *on_signing_complete,
+ void *user_data);
+
+ /* Called when any sending of the request is finished, including for each retry. */
+ void (*send_request_finish)(struct aws_s3_connection *connection, struct aws_http_stream *stream, int error_code);
+
+ /* Called when the request is done being sent, and will not be retried/sent again. */
+ void (*finished_request)(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request, int error_code);
+
+ /* Called by the derived meta request when the meta request is completely finished. */
+ void (*finish)(struct aws_s3_meta_request *meta_request);
+
+ /* Handle de-allocation of the meta request. */
+ void (*destroy)(struct aws_s3_meta_request *);
+
+ /* Pause the given request */
+ int (*pause)(struct aws_s3_meta_request *meta_request, struct aws_s3_meta_request_resume_token **resume_token);
+};
+
+/**
+ * This represents one meta request, ie, one accelerated file transfer. One S3 meta request can represent multiple S3
+ * requests.
+ */
+struct aws_s3_meta_request {
+ struct aws_allocator *allocator;
+
+ struct aws_ref_count ref_count;
+
+ void *impl;
+
+ struct aws_s3_meta_request_vtable *vtable;
+
+ /* Initial HTTP Message that this meta request is based on. */
+ struct aws_http_message *initial_request_message;
+
+ /* Part size to use for uploads and downloads. Passed down by the creating client. */
+ const size_t part_size;
+
+ struct aws_cached_signing_config_aws *cached_signing_config;
+
+ /* Client that created this meta request which also processes this request. After the meta request is finished, this
+ * reference is removed.*/
+ struct aws_s3_client *client;
+
+ struct aws_s3_endpoint *endpoint;
+
+ /* Event loop on which to schedule IO-related work, i.e., reading from streams, streaming parts back to the caller,
+ * etc. After the meta request is finished, this will be reset along with the client reference. */
+ struct aws_event_loop *io_event_loop;
+
+ /* User data to be passed to each customer specified callback.*/
+ void *user_data;
+
+ /* Customer specified callbacks. */
+ aws_s3_meta_request_headers_callback_fn *headers_callback;
+ aws_s3_meta_request_receive_body_callback_fn *body_callback;
+ aws_s3_meta_request_finish_fn *finish_callback;
+ aws_s3_meta_request_shutdown_fn *shutdown_callback;
+ aws_s3_meta_request_progress_fn *progress_callback;
+
+ /* Customer specified callbacks to be called by our specialized callback to calculate the response checksum. */
+ aws_s3_meta_request_headers_callback_fn *headers_user_callback_after_checksum;
+ aws_s3_meta_request_receive_body_callback_fn *body_user_callback_after_checksum;
+ aws_s3_meta_request_finish_fn *finish_user_callback_after_checksum;
+
+ enum aws_s3_meta_request_type type;
+
+ struct {
+ struct aws_mutex lock;
+
+ /* Priority queue for pending streaming requests. We use a priority queue to keep parts in order so that we
+ * can stream them to the caller in order. */
+ struct aws_priority_queue pending_body_streaming_requests;
+
+ /* Current state of the meta request. */
+ enum aws_s3_meta_request_state state;
+
+ /* The sum of initial_read_window, plus all window_increment() calls. This number never goes down. */
+ uint64_t read_window_running_total;
+
+ /* The next expected streaming part number needed to continue streaming part bodies. (For example, this will
+ * initially be 1 for part 1, and after that part is received, it will be 2, then 3, etc.) */
+ uint32_t next_streaming_part;
+
+ /* Number of parts scheduled for delivery. */
+ uint32_t num_parts_delivery_sent;
+
+ /* Total number of parts that have been attempted to be delivered. (Will equal the sum of succeeded and
+ * failed.)*/
+ uint32_t num_parts_delivery_completed;
+
+ /* Number of parts that have been successfully delivered to the caller. */
+ uint32_t num_parts_delivery_succeeded;
+
+ /* Number of parts that have failed while trying to be delivered to the caller. */
+ uint32_t num_parts_delivery_failed;
+
+ /* The end finish result of the meta request. */
+ struct aws_s3_meta_request_result finish_result;
+
+ /* True if the finish result has been set. */
+ uint32_t finish_result_set : 1;
+
+ } synced_data;
+
+ /* Anything in this structure should only ever be accessed by the client on its process work event loop task. */
+ struct {
+
+ /* Linked list node for the meta requests linked list in the client. */
+ /* Note: this needs to be first for using AWS_CONTAINER_OF with the nested structure. */
+ struct aws_linked_list_node node;
+
+ /* True if this meta request is currently in the client's list. */
+ bool scheduled;
+
+ } client_process_work_threaded_data;
+
+ const bool should_compute_content_md5;
+
+ /* deep copy of the checksum config. */
+ struct checksum_config checksum_config;
+
+ /* checksum found in either a default get request, or in the initial head request of a multipart get */
+ struct aws_byte_buf meta_request_level_response_header_checksum;
+
+ /* running checksum of all of the parts of a default get, or ranged get meta request*/
+ struct aws_s3_checksum *meta_request_level_running_response_sum;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Initialize the base meta request structure. */
+AWS_S3_API
+int aws_s3_meta_request_init_base(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ size_t part_size,
+ bool should_compute_content_md5,
+ const struct aws_s3_meta_request_options *options,
+ void *impl,
+ struct aws_s3_meta_request_vtable *vtable,
+ struct aws_s3_meta_request *base_type);
+
+/* Returns true if the meta request is still in the "active" state. */
+AWS_S3_API
+bool aws_s3_meta_request_is_active(struct aws_s3_meta_request *meta_request);
+
+/* Returns true if the meta request is in the "finished" state. */
+AWS_S3_API
+bool aws_s3_meta_request_is_finished(struct aws_s3_meta_request *meta_request);
+
+/* Returns true if the meta request has a finish result, which indicates that the meta request is trying to finish or
+ * has already finished. */
+AWS_S3_API
+bool aws_s3_meta_request_has_finish_result(struct aws_s3_meta_request *meta_request);
+
+AWS_S3_API
+void aws_s3_meta_request_lock_synced_data(struct aws_s3_meta_request *meta_request);
+
+AWS_S3_API
+void aws_s3_meta_request_unlock_synced_data(struct aws_s3_meta_request *meta_request);
+
+/* Called by the client to retrieve the next request and update the meta request's internal state. out_request is
+ * optional, and can be NULL if just desiring to update internal state. */
+AWS_S3_API
+bool aws_s3_meta_request_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request);
+
+AWS_S3_API
+void aws_s3_meta_request_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_s3_meta_request_prepare_request_callback_fn *callback,
+ void *user_data);
+
+AWS_S3_API
+void aws_s3_meta_request_send_request(struct aws_s3_meta_request *meta_request, struct aws_s3_connection *connection);
+
+AWS_S3_API
+void aws_s3_meta_request_init_signing_date_time_default(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_date_time *date_time);
+
+AWS_S3_API
+void aws_s3_meta_request_sign_request_default(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_signing_complete_fn *on_signing_complete,
+ void *user_data);
+
+/* Default implementation for when a request finishes a particular send. */
+AWS_S3_API
+void aws_s3_meta_request_send_request_finish_default(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code);
+
+/* Implementation for when a request finishes a particular send to handle possible async error from S3. */
+AWS_S3_API
+void aws_s3_meta_request_send_request_finish_handle_async_error(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code);
+
+/* Called by the client when a request is completely finished and not doing any further retries. */
+AWS_S3_API
+void aws_s3_meta_request_finished_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code);
+
+/* Called to place the request in the meta request's priority queue for streaming back to the caller. Once all requests
+ * with a part number less than the given request's have been received, the given request and the previous requests
+ * will be scheduled for streaming. */
+AWS_S3_API
+void aws_s3_meta_request_stream_response_body_synced(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request);
+
+/* Read from the meta request's input stream. Should always be done outside of any mutex, as reading from the stream
+ * could cause user code to call back into aws-c-s3.*/
+AWS_S3_API
+int aws_s3_meta_request_read_body(struct aws_s3_meta_request *meta_request, struct aws_byte_buf *buffer);
+
+/* Set the meta request finish result as failed. This is meant to be called sometime before aws_s3_meta_request_finish.
+ * Subsequent calls to this function or to aws_s3_meta_request_set_success_synced will not overwrite the end result of
+ * the meta request. */
+AWS_S3_API
+void aws_s3_meta_request_set_fail_synced(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *failed_request,
+ int error_code);
+
+/* Set the meta request finish result as successful. This is meant to be called sometime before
+ * aws_s3_meta_request_finish. Subsequent calls to this function or to aws_s3_meta_request_set_fail_synced will not
+ * overwrite the end result of the meta request. */
+AWS_S3_API
+void aws_s3_meta_request_set_success_synced(struct aws_s3_meta_request *meta_request, int response_status);
+
+/* Returns true if the finish result has been set (ie: either aws_s3_meta_request_set_fail_synced or
+ * aws_s3_meta_request_set_success_synced have been called.) */
+AWS_S3_API
+bool aws_s3_meta_request_has_finish_result_synced(struct aws_s3_meta_request *meta_request);
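+
+/* Editor's note: illustrative sketch (not part of the upstream header) of the locking pattern that the *_synced
+ * functions above rely on: the synced_data lock must be held around any *_synced call. Passing NULL for the
+ * failed request is an assumption made for illustration only. */
+#if 0
+static void s_example_fail_meta_request(struct aws_s3_meta_request *meta_request, int error_code) {
+    aws_s3_meta_request_lock_synced_data(meta_request);
+
+    /* Record the failure; later set_fail/set_success calls will not overwrite this result. */
+    aws_s3_meta_request_set_fail_synced(meta_request, NULL /* failed_request */, error_code);
+
+    aws_s3_meta_request_unlock_synced_data(meta_request);
+}
+#endif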
+
+/* Virtual function called by the meta request derived type when it's completely finished and there is no other work to
+ * be done. */
+AWS_S3_API
+void aws_s3_meta_request_finish(struct aws_s3_meta_request *meta_request);
+
+/* Default implementation of the meta request finish function. */
+AWS_S3_API
+void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request);
+
+/* Sets up a meta request result structure. */
+AWS_S3_API
+void aws_s3_meta_request_result_setup(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_result *result,
+ struct aws_s3_request *request,
+ int response_status,
+ int error_code);
+
+/* Cleans up a meta request result structure. */
+AWS_S3_API
+void aws_s3_meta_request_result_clean_up(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_result *result);
+
+AWS_S3_API
+bool aws_s3_meta_request_checksum_config_has_algorithm(
+ struct aws_s3_meta_request *meta_request,
+ enum aws_s3_checksum_algorithm algorithm);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_META_REQUEST_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h
new file mode 100644
index 0000000000..f51e9b8f66
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_paginator.h
@@ -0,0 +1,169 @@
+#ifndef AWS_S3_PAGINATOR_H
+#define AWS_S3_PAGINATOR_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signing_config.h>
+#include <aws/s3/exports.h>
+#include <aws/s3/s3_client.h>
+
+#include <aws/common/common.h>
+#include <aws/common/xml_parser.h>
+
+/**
+ * Wrapper for a generic paginated operation.
+ * Provides implementations for how to construct the next paginated request and how to read its result.
+ * Can be used with either paginator or plugged into request loop.
+ */
+struct aws_s3_paginated_operation;
+
+/**
+ * Generic driver for paginated operations.
+ * Provides functionality to send requests to iterate over pages of the operation.
+ */
+struct aws_s3_paginator;
+
+typedef int(aws_s3_next_http_message_fn)(
+ struct aws_byte_cursor *continuation_token,
+ void *user_data,
+ struct aws_http_message **out_message);
+
+typedef bool(
+ aws_s3_on_result_node_encountered_fn)(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data);
+
+typedef void(aws_s3_on_page_finished_fn)(struct aws_s3_paginator *paginator, int error_code, void *user_data);
+
+typedef void(aws_s3_on_paginated_operation_cleanup_fn)(void *user_data);
+
+/**
+ * Parameters for initiating paginator. All values are copied out or re-seated and reference counted.
+ */
+struct aws_s3_paginator_params {
+ /**
+ * Must not be NULL. The internal call will increment the reference count on client.
+ */
+ struct aws_s3_client *client;
+
+ /**
+ * Underlying paginated operation. Must not be NULL.
+ */
+ struct aws_s3_paginated_operation *operation;
+
+ /**
+ * Optional. The continuation token for fetching the next page. You likely shouldn't set this
+ * unless you have a special use case.
+ */
+ struct aws_byte_cursor continuation_token;
+
+ /**
+ * Must not be empty. Name of the bucket to list.
+ */
+ struct aws_byte_cursor bucket_name;
+
+ /**
+ * Must not be empty. Endpoint to send the paginated requests to.
+ */
+ struct aws_byte_cursor endpoint;
+
+ /**
+ * Callback to invoke when each page of results has finished.
+ */
+ aws_s3_on_page_finished_fn *on_page_finished_fn;
+
+ /**
+ * User data passed back into callbacks.
+ */
+ void *user_data;
+};
+
+/**
+ * Parameters for initiating paginated operation. All values are copied out or re-seated and reference counted.
+ */
+struct aws_s3_paginated_operation_params {
+ /**
+ * Name of the top level result node. Must not be NULL.
+ */
+ const struct aws_byte_cursor *result_xml_node_name;
+
+ /**
+ * Name of the continuation token node. Must not be NULL.
+ */
+ const struct aws_byte_cursor *continuation_token_node_name;
+
+ /**
+ * Function to generate next message.
+ */
+ aws_s3_next_http_message_fn *next_message;
+
+ /**
+ * Function to parse result node.
+ */
+ aws_s3_on_result_node_encountered_fn *on_result_node_encountered_fn;
+
+ /**
+ * Callback for when operation is cleaned.
+ */
+ aws_s3_on_paginated_operation_cleanup_fn *on_paginated_operation_cleanup;
+
+ /**
+ * Associated user data.
+ */
+ void *user_data;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_S3_API struct aws_s3_paginator *aws_s3_initiate_paginator(
+ struct aws_allocator *allocator,
+ const struct aws_s3_paginator_params *params);
+
+AWS_S3_API void aws_s3_paginator_acquire(struct aws_s3_paginator *paginator);
+AWS_S3_API void aws_s3_paginator_release(struct aws_s3_paginator *paginator);
+
+AWS_S3_API struct aws_s3_paginated_operation *aws_s3_paginated_operation_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_paginated_operation_params *params);
+
+AWS_S3_API void aws_s3_paginated_operation_acquire(struct aws_s3_paginated_operation *operation);
+AWS_S3_API void aws_s3_paginated_operation_release(struct aws_s3_paginated_operation *operation);
+
+/**
+ * Start the paginated operation. If there are more results to fetch, it will begin that work.
+ *
+ * Signing_config contains information for SigV4 signing for the operation. It must not be NULL. It will be copied.
+ *
+ * Returns AWS_OP_SUCCESS on successful start of the operation, and AWS_OP_ERR otherwise. Check aws_last_error() for
+ * more information on the error that occurred.
+ */
+AWS_S3_API int aws_s3_paginator_continue(
+ struct aws_s3_paginator *paginator,
+ const struct aws_signing_config_aws *signing_config);
+
+/**
+ * If the paginator has more results to fetch, returns true.
+ */
+AWS_S3_API bool aws_s3_paginator_has_more_results(const struct aws_s3_paginator *paginator);
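+
+/* Editor's note: illustrative sketch (not part of the upstream header) of the page-driving pattern: from the
+ * on_page_finished callback, fetch the next page while results remain, otherwise release the paginator. The
+ * callback name and the use of user_data to carry the signing config are assumptions for illustration. */
+#if 0
+static void s_example_on_page_finished(struct aws_s3_paginator *paginator, int error_code, void *user_data) {
+    const struct aws_signing_config_aws *signing_config = user_data;
+
+    if (error_code == AWS_ERROR_SUCCESS && aws_s3_paginator_has_more_results(paginator)) {
+        /* Kick off the request for the next page; this callback fires again when that page finishes. */
+        aws_s3_paginator_continue(paginator, signing_config);
+    } else {
+        aws_s3_paginator_release(paginator);
+    }
+}
+#endif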
+
+/**
+ * Construct next message for the given operation.
+ */
+AWS_S3_API int aws_s3_construct_next_paginated_request_http_message(
+ struct aws_s3_paginated_operation *operation,
+ struct aws_byte_cursor *continuation_token,
+ struct aws_http_message **out_message);
+
+/**
+ * Parse received response for operation.
+ */
+AWS_S3_API int aws_s3_paginated_operation_on_response(
+ struct aws_s3_paginated_operation *operation,
+ struct aws_byte_cursor *response_body,
+ struct aws_string **continuation_token_out,
+ bool *has_more_results_out);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_PAGINATOR_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h
new file mode 100644
index 0000000000..aed5b1b395
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request.h
@@ -0,0 +1,152 @@
+#ifndef AWS_S3_REQUEST_H
+#define AWS_S3_REQUEST_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/linked_list.h>
+#include <aws/common/ref_count.h>
+#include <aws/s3/s3.h>
+
+#include <aws/s3/private/s3_checksums.h>
+
+struct aws_http_message;
+struct aws_signable;
+struct aws_s3_meta_request;
+
+enum aws_s3_request_flags {
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS = 0x00000001,
+ AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY = 0x00000002,
+ AWS_S3_REQUEST_FLAG_ALWAYS_SEND = 0x00000004,
+};
+
+/* Represents a single request made to S3. */
+struct aws_s3_request {
+
+ /* Linked list node used for queuing. */
+ struct aws_linked_list_node node;
+
+ /* TODO Ref count on the request is no longer needed--only one part of code should ever be holding onto a request,
+ * and we can just transfer ownership.*/
+ struct aws_ref_count ref_count;
+
+ struct aws_allocator *allocator;
+
+ /* Owning meta request. */
+ struct aws_s3_meta_request *meta_request;
+
+ /* Request body to use when sending the request. The contents of this body will be re-used if a request is
+ * retried.*/
+ struct aws_byte_buf request_body;
+
+ /* Beginning range of this part. */
+ /* TODO currently only used by auto_range_get, could be hooked up to auto_range_put as well. */
+ uint64_t part_range_start;
+
+ /* Last byte of this part.*/
+ /* TODO currently only used by auto_range_get, could be hooked up to auto_range_put as well. */
+ uint64_t part_range_end;
+
+ /* Part number that this request refers to. If this is not a part, this can be 0. (S3 Part Numbers start at 1.)
+ * However, must currently be a valid part number (ie: greater than 0) if the response body is to be streamed to the
+ * caller.
+ */
+ uint32_t part_number;
+
+ /* Number of times aws_s3_meta_request_prepare has been called for a request. During the first call to the virtual
+ * prepare function, this will be 0.*/
+ uint32_t num_times_prepared;
+
+ /* checksum found in the header of an individual get part http request */
+ struct aws_byte_buf request_level_response_header_checksum;
+
+ /* running checksum of the response to an individual get part http request */
+ struct aws_s3_checksum *request_level_running_response_sum;
+ /* The algorithm used to validate the checksum */
+ enum aws_s3_checksum_algorithm validation_algorithm;
+
+ /* Get request only, was there a checksum to validate */
+ bool did_validate;
+
+ /* Get request only, if there was an attached checksum to validate did it match the computed checksum */
+ bool checksum_match;
+
+ /* Tag that defines what the built request will actually consist of. This is meant to be space for an enum defined
+ * by the derived type. Request tags do not necessarily map 1:1 with actual S3 API requests. (For example, they can
+ * be more contextual, like "first part" instead of just "part".) */
+
+ /* TODO: this should be a union type to make it clear that this could be one of two enums for puts, and gets. */
+ int request_tag;
+
+ /* Members of this structure will be repopulated each time the request is sent. If the request fails, and needs to
+ * be retried, then the members of this structure will be cleaned up and re-populated on the next send.
+ */
+ /* TODO rename this anonymous structure to something more intuitive. (Maybe "attempt_data")*/
+ struct {
+
+ /* The HTTP message to send for this request. */
+ struct aws_http_message *message;
+
+ /* Signable created for the above message. */
+ struct aws_signable *signable;
+
+ /* Recorded response headers for the request. Set only when the request desc has record_response_headers set to
+ * true or when this response indicates an error. */
+ struct aws_http_headers *response_headers;
+
+ /* Recorded response body of the request. */
+ struct aws_byte_buf response_body;
+
+ /* Returned response status of this request. */
+ int response_status;
+
+ } send_data;
+
+ /* When true, response headers from the request will be stored in the request's response_headers variable. */
+ uint32_t record_response_headers : 1;
+
+ /* When true, the response body buffer will be allocated in the size of a part. */
+ uint32_t part_size_response_body : 1;
+
+ /* When true, this request is being tracked by the client for limiting the amount of in-flight-requests/stats. */
+ uint32_t tracked_by_client : 1;
+
+ /* When true, even when the meta request has a finish result set, this request will be sent. */
+ uint32_t always_send : 1;
+
+ /* When true, this request is intended to find out the object size. This is currently only used by auto_range_get.
+ */
+ uint32_t discovers_object_size : 1;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/* Create a new s3 request structure with the given options. */
+AWS_S3_API
+struct aws_s3_request *aws_s3_request_new(
+ struct aws_s3_meta_request *meta_request,
+ int request_tag,
+ uint32_t part_number,
+ uint32_t flags);
+
+/* Set up the request to be sent. Called each time before the request is sent. Will initially call
+ * aws_s3_request_clean_up_send_data to clear out anything previously existing in send_data. */
+AWS_S3_API
+void aws_s3_request_setup_send_data(struct aws_s3_request *request, struct aws_http_message *message);
+
+/* Clear out send_data members so that they can be repopulated before the next send. */
+AWS_S3_API
+void aws_s3_request_clean_up_send_data(struct aws_s3_request *request);
+
+AWS_S3_API
+void aws_s3_request_acquire(struct aws_s3_request *request);
+
+AWS_S3_API
+void aws_s3_request_release(struct aws_s3_request *request);
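+
+/* Editor's note: illustrative sketch (not part of the upstream header) of the per-attempt lifecycle described
+ * above: send_data is (re)populated before each send and cleaned up again so a retry can repopulate it. The
+ * helper names and the will_retry flag are hypothetical. */
+#if 0
+static void s_example_prepare_attempt(struct aws_s3_request *request, struct aws_http_message *message) {
+    /* Clears any previous send_data, then records the message for this attempt. */
+    aws_s3_request_setup_send_data(request, message);
+}
+
+static void s_example_finish_attempt(struct aws_s3_request *request, bool will_retry) {
+    /* Release per-attempt state; it will be repopulated if the request is retried. */
+    aws_s3_request_clean_up_send_data(request);
+
+    if (!will_retry) {
+        aws_s3_request_release(request);
+    }
+}
+#endif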
+
+AWS_EXTERN_C_END
+
+#endif
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h
new file mode 100644
index 0000000000..5903ed75ef
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_request_messages.h
@@ -0,0 +1,197 @@
+#ifndef AWS_S3_REQUEST_MESSAGES_H
+#define AWS_S3_REQUEST_MESSAGES_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include "aws/s3/s3.h"
+#include "aws/s3/s3_client.h"
+#include <inttypes.h>
+#include <stdbool.h>
+#include <stddef.h>
+
+struct aws_allocator;
+struct aws_http_message;
+struct aws_byte_buf;
+struct aws_byte_cursor;
+struct aws_string;
+struct aws_array_list;
+struct checksum_config;
+
+AWS_EXTERN_C_BEGIN
+
+/* Copy message (but not the body) and retain all headers */
+AWS_S3_API
+struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_all_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_message *message);
+
+/* Copy message (but not the body) and exclude specific headers.
+ * exclude_x_amz_meta controls whether S3 user metadata headers (prefixed with "x-amz-meta") are excluded. */
+AWS_S3_API
+struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_message *message,
+ const struct aws_byte_cursor *excluded_headers_arrays,
+ size_t excluded_headers_size,
+ bool exclude_x_amz_meta);
+
+/* Copy message and retain all headers, but replace body with one that reads directly from a filepath. */
+AWS_S3_API
+struct aws_http_message *aws_s3_message_util_copy_http_message_filepath_body_all_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_message *message,
+ struct aws_byte_cursor filepath);
+
+/* Copy headers from one message to the other and exclude specific headers.
+ * exclude_x_amz_meta controls whether S3 user metadata headers (prefixed with "x-amz-meta") are excluded. */
+AWS_S3_API
+void aws_s3_message_util_copy_headers(
+ struct aws_http_message *source_message,
+ struct aws_http_message *dest_message,
+ const struct aws_byte_cursor *excluded_headers_arrays,
+ size_t excluded_headers_size,
+ bool exclude_x_amz_meta);
+
+AWS_S3_API
+struct aws_input_stream *aws_s3_message_util_assign_body(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *byte_buf,
+ struct aws_http_message *out_message,
+ const struct checksum_config *checksum_config,
+ struct aws_byte_buf *out_checksum);
+
+/* Returns true if a checksum header has been set on the message. */
+AWS_S3_API
+bool aws_s3_message_util_check_checksum_header(struct aws_http_message *message);
+
+/* Create an HTTP request for an S3 Ranged Get Object Request, using the given request as a basis */
+AWS_S3_API
+struct aws_http_message *aws_s3_ranged_get_object_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ uint64_t range_start,
+ uint64_t range_end);
+
+AWS_S3_API
+int aws_s3_message_util_set_multipart_request_path(
+ struct aws_allocator *allocator,
+ const struct aws_string *upload_id,
+ uint32_t part_number,
+ bool append_uploads_suffix,
+ struct aws_http_message *message);
+
+/* Create an HTTP request for an S3 Create-Multipart-Upload request. */
+AWS_S3_API
+struct aws_http_message *aws_s3_create_multipart_upload_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ enum aws_s3_checksum_algorithm algorithm);
+
+/* Create an HTTP request for an S3 Put Object request, using the original request as a basis. Creates and assigns a
+ * body stream using the passed in buffer. If multipart is not needed, part number and upload_id can be 0 and NULL,
+ * respectively. */
+AWS_S3_API
+struct aws_http_message *aws_s3_upload_part_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_buf *buffer,
+ uint32_t part_number,
+ const struct aws_string *upload_id,
+ bool should_compute_content_md5,
+ const struct checksum_config *checksum_config,
+ struct aws_byte_buf *encoded_checksum_output);
+
+/* Create an HTTP request for an S3 UploadPartCopy request, using the original request as a basis.
+ * If multipart is not needed, part number and upload_id can be 0 and NULL,
+ * respectively. */
+AWS_S3_API
+struct aws_http_message *aws_s3_upload_part_copy_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_buf *buffer,
+ uint32_t part_number,
+ uint64_t range_start,
+ uint64_t range_end,
+ const struct aws_string *upload_id,
+ bool should_compute_content_md5);
+
+/* Create an HTTP request for an S3 Complete-Multipart-Upload request. Creates the necessary XML payload using the
+ * passed-in array list of ETags. (Each ETag is assumed to be an aws_string *.) The passed-in buffer will be used to
+ * store that XML payload, which will be used as the request body. */
+AWS_S3_API
+struct aws_http_message *aws_s3_complete_multipart_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_buf *body_buffer,
+ const struct aws_string *upload_id,
+ const struct aws_array_list *etags,
+ struct aws_byte_buf *checksums,
+ enum aws_s3_checksum_algorithm algorithm);
+
+AWS_S3_API
+struct aws_http_message *aws_s3_abort_multipart_upload_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ const struct aws_string *upload_id);
+
+/* Creates a HEAD GetObject request to get the size of the specified object. */
+AWS_S3_API
+struct aws_http_message *aws_s3_get_object_size_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_cursor source_bucket,
+ struct aws_byte_cursor source_key);
+
+/* Creates a HEAD GetObject sub-request to get the size of the source object of a Copy meta request. */
+AWS_S3_API
+struct aws_http_message *aws_s3_get_source_object_size_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message);
+
+/* Add content-md5 header to the http message passed in. The MD5 will be computed from the input_buf */
+AWS_S3_API
+int aws_s3_message_util_add_content_md5_header(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *input_buf,
+ struct aws_http_message *message);
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_create_multipart_upload_excluded_headers[];
+
+AWS_S3_API
+extern const size_t g_s3_create_multipart_upload_excluded_headers_count;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_upload_part_excluded_headers[];
+
+AWS_S3_API
+extern const size_t g_s3_upload_part_excluded_headers_count;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_complete_multipart_upload_excluded_headers[];
+
+AWS_S3_API
+extern const size_t g_s3_complete_multipart_upload_excluded_headers_count;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_abort_multipart_upload_excluded_headers[];
+
+AWS_S3_API
+extern const size_t g_s3_abort_multipart_upload_excluded_headers_count;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_list_parts_excluded_headers[];
+
+AWS_S3_API extern const size_t g_s3_list_parts_excluded_headers_count;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_list_parts_with_checksum_excluded_headers[];
+
+AWS_S3_API
+extern const size_t g_s3_list_parts_with_checksum_excluded_headers_count;
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_REQUEST_MESSAGES_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h
new file mode 100644
index 0000000000..5fe22ff740
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/private/s3_util.h
@@ -0,0 +1,255 @@
+#ifndef AWS_S3_UTIL_H
+#define AWS_S3_UTIL_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/* This file provides access to useful constants and simple utility functions. */
+
+#include <aws/auth/signing_config.h>
+#include <aws/common/byte_buf.h>
+#include <aws/s3/s3.h>
+
+#if ASSERT_LOCK_HELD
+# define ASSERT_SYNCED_DATA_LOCK_HELD(object) \
+ { \
+ int cached_error = aws_last_error(); \
+ AWS_ASSERT(aws_mutex_try_lock(&(object)->synced_data.lock) == AWS_OP_ERR); \
+ aws_raise_error(cached_error); \
+ }
+#else
+# define ASSERT_SYNCED_DATA_LOCK_HELD(object)
+#endif
+#define KB_TO_BYTES(kb) ((kb)*1024)
+#define MB_TO_BYTES(mb) ((mb)*1024 * 1024)
+
+struct aws_allocator;
+struct aws_http_stream;
+struct aws_http_headers;
+struct aws_http_message;
+struct aws_event_loop;
+
+enum aws_s3_response_status {
+ AWS_S3_RESPONSE_STATUS_SUCCESS = 200,
+ AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS = 204,
+ AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS = 206,
+ AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR = 500,
+ AWS_S3_RESPONSE_STATUS_SLOW_DOWN = 503,
+};
+
+struct aws_cached_signing_config_aws {
+ struct aws_allocator *allocator;
+ struct aws_string *service;
+ struct aws_string *region;
+ struct aws_string *signed_body_value;
+
+ struct aws_signing_config_aws config;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_content_md5_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_trailer_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_request_validation_mode;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_enabled;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_create_mpu_checksum_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_crc32c_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_crc32_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_sha1_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_sha256_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_crc32c_create_mpu_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_crc32_create_mpu_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_sha1_create_mpu_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_sha256_create_mpu_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_crc32c_complete_mpu_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_crc32_complete_mpu_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_sha1_complete_mpu_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_sha256_complete_mpu_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_client_version;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_user_agent_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_user_agent_header_product_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_acl_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_host_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_content_type_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_content_encoding_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_content_encoding_header_aws_chunked;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_content_length_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_decoded_content_length_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_etag_header_name;
+
+AWS_S3_API
+extern const size_t g_s3_min_upload_part_size;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_s3_service_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_range_header_name;
+
+extern const struct aws_byte_cursor g_if_match_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_content_range_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_accept_ranges_header_name;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_post_method;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_head_method;
+
+AWS_S3_API
+extern const struct aws_byte_cursor g_delete_method;
+
+extern const struct aws_byte_cursor g_error_body_xml_name;
+
+extern const struct aws_byte_cursor g_code_body_xml_name;
+
+extern const struct aws_byte_cursor g_s3_internal_error_code;
+
+AWS_S3_API
+extern const uint32_t g_s3_max_num_upload_parts;
+
+struct aws_cached_signing_config_aws *aws_cached_signing_config_new(
+ struct aws_allocator *allocator,
+ const struct aws_signing_config_aws *signing_config);
+
+void aws_cached_signing_config_destroy(struct aws_cached_signing_config_aws *cached_signing_config);
+
+/* Sets all headers specified for src on dest */
+AWS_S3_API
+void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest);
+
+/* Get a top-level (exists directly under the root tag) tag value. */
+AWS_S3_API
+struct aws_string *aws_xml_get_top_level_tag(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *tag_name,
+ struct aws_byte_cursor *xml_body);
+
+/* Get a top-level (exists directly under the root tag) tag value with expected root name. */
+AWS_S3_API
+struct aws_string *aws_xml_get_top_level_tag_with_root_name(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *tag_name,
+ const struct aws_byte_cursor *expected_root_name,
+ bool *out_root_name_mismatch,
+ struct aws_byte_cursor *xml_body);
+
+/* Replace "&quot;" entities with an escaped quote character (\"). */
+AWS_S3_API
+void replace_quote_entities(struct aws_allocator *allocator, struct aws_string *str, struct aws_byte_buf *out_buf);
+
+/* Strip quotes if the string is enclosed in quotes. Quotes are not removed if they appear on only one side of the
+ * string. */
+AWS_S3_API
+struct aws_string *aws_strip_quotes(struct aws_allocator *allocator, struct aws_byte_cursor in_cur);
+
+/* TODO could be moved to aws-c-common. */
+AWS_S3_API
+int aws_last_error_or_unknown(void);
+
+AWS_S3_API
+void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_http_message *message);
+
+/* Given the response headers list, finds the Content-Range header and parses the range-start, range-end and
+ * object-size. All output arguments are optional.*/
+AWS_S3_API
+int aws_s3_parse_content_range_response_header(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *response_headers,
+ uint64_t *out_range_start,
+ uint64_t *out_range_end,
+ uint64_t *out_object_size);
+
+/* Given response headers, parses the content-length from a content-length response header.*/
+AWS_S3_API
+int aws_s3_parse_content_length_response_header(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *response_headers,
+ uint64_t *out_content_length);
+
+/* Calculate the number of parts based on overall object-range and part_size. This takes into account aligning
+ * part-ranges on part_size. (ie: if object_range_start is not evenly divisible by part_size, it is considered in the
+ * middle of a contiguous part, and that first part will be smaller than part_size.) */
+AWS_S3_API
+uint32_t aws_s3_get_num_parts(size_t part_size, uint64_t object_range_start, uint64_t object_range_end);
+
+/* Calculates the part range for a part given overall object range, size of each part, and the part's number. Note: part
+ * numbers begin at one. This takes into account aligning part-ranges on part_size. Intended to be used in conjunction
+ * with aws_s3_get_num_parts. part_number should be less than or equal to the result of aws_s3_get_num_parts. */
+AWS_S3_API
+void aws_s3_get_part_range(
+ uint64_t object_range_start,
+ uint64_t object_range_end,
+ size_t part_size,
+ uint32_t part_number,
+ uint64_t *out_part_range_start,
+ uint64_t *out_part_range_end);
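+
+/* Editor's note: illustrative sketch (not part of the upstream header) showing how the two helpers above are
+ * intended to be combined: compute the part count for an object range, then derive each part's byte range. */
+#if 0
+static void s_example_iterate_part_ranges(uint64_t object_range_start, uint64_t object_range_end, size_t part_size) {
+    uint32_t num_parts = aws_s3_get_num_parts(part_size, object_range_start, object_range_end);
+
+    for (uint32_t part_number = 1; part_number <= num_parts; ++part_number) {
+        uint64_t part_range_start = 0;
+        uint64_t part_range_end = 0;
+
+        /* Part numbers begin at 1; ranges are aligned on part_size boundaries. */
+        aws_s3_get_part_range(
+            object_range_start, object_range_end, part_size, part_number, &part_range_start, &part_range_end);
+
+        /* ... schedule a ranged GET for [part_range_start, part_range_end] ... */
+    }
+}
+#endif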
+
+/* Match the S3 error code to CRT error code, return AWS_ERROR_UNKNOWN when not matched */
+AWS_S3_API
+int aws_s3_crt_error_code_from_server_error_code_string(const struct aws_string *error_code_string);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_UTIL_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h
new file mode 100644
index 0000000000..1d942cd49b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3.h
@@ -0,0 +1,95 @@
+#ifndef AWS_S3_H
+#define AWS_S3_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+#include <aws/io/logging.h>
+#include <aws/s3/exports.h>
+
+#define AWS_C_S3_PACKAGE_ID 14
+
+enum aws_s3_errors {
+ AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_S3_PACKAGE_ID),
+ AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER,
+ AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER,
+ AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER,
+ AWS_ERROR_S3_MISSING_ETAG,
+ AWS_ERROR_S3_INTERNAL_ERROR,
+ AWS_ERROR_S3_SLOW_DOWN,
+ AWS_ERROR_S3_INVALID_RESPONSE_STATUS,
+ AWS_ERROR_S3_MISSING_UPLOAD_ID,
+ AWS_ERROR_S3_PROXY_PARSE_FAILED,
+ AWS_ERROR_S3_UNSUPPORTED_PROXY_SCHEME,
+ AWS_ERROR_S3_CANCELED,
+ AWS_ERROR_S3_INVALID_RANGE_HEADER,
+ AWS_ERROR_S3_MULTIRANGE_HEADER_UNSUPPORTED,
+ AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH,
+ AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED,
+ AWS_ERROR_S3_PAUSED,
+ AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED,
+ AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH,
+ AWS_ERROR_S3_RESUME_FAILED,
+ AWS_ERROR_S3_OBJECT_MODIFIED,
+ AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR,
+ AWS_ERROR_S3_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_S3_PACKAGE_ID)
+};
+
+enum aws_s3_subject {
+ AWS_LS_S3_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_S3_PACKAGE_ID),
+ AWS_LS_S3_CLIENT,
+ AWS_LS_S3_CLIENT_STATS,
+ AWS_LS_S3_REQUEST,
+ AWS_LS_S3_META_REQUEST,
+ AWS_LS_S3_ENDPOINT,
+ AWS_LS_S3_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_S3_PACKAGE_ID)
+};
+
+struct aws_s3_cpu_group_info {
+ /* group index, this usually refers to a particular numa node */
+ uint16_t cpu_group;
+ /* array of network devices on this node */
+ const struct aws_byte_cursor *nic_name_array;
+ /* length of network devices array */
+ size_t nic_name_array_length;
+};
+
+struct aws_s3_compute_platform_info {
+ /* name of the instance-type: example c5n.18xlarge */
+ const struct aws_byte_cursor instance_type;
+ /* max throughput for this instance type */
+ uint16_t max_throughput_gbps;
+ /* array of cpu group info. This will always have at least one entry. */
+ const struct aws_s3_cpu_group_info *cpu_group_info_array;
+ /* length of cpu group info array */
+ size_t cpu_group_info_array_length;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ * Initializes internal datastructures used by aws-c-s3.
+ * Must be called before using any functionality in aws-c-s3.
+ */
+AWS_S3_API
+void aws_s3_library_init(struct aws_allocator *allocator);
+
+/**
+ * Retrieves the pre-configured metadata for an ec2 instance type. If no such pre-configuration exists, returns NULL.
+ */
+AWS_S3_API
+struct aws_s3_compute_platform_info *aws_s3_get_compute_platform_info_for_instance_type(
+ const struct aws_byte_cursor instance_type_name);
+
+/**
+ * Shuts down the internal datastructures used by aws-c-s3.
+ */
+AWS_S3_API
+void aws_s3_library_clean_up(void);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_H */
diff --git a/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h
new file mode 100644
index 0000000000..6be3b9d669
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/include/aws/s3/s3_client.h
@@ -0,0 +1,664 @@
+#ifndef AWS_S3_CLIENT_H
+#define AWS_S3_CLIENT_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/signing_config.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/s3/s3.h>
+
+struct aws_allocator;
+
+struct aws_http_stream;
+struct aws_http_message;
+struct aws_http_headers;
+struct aws_tls_connection_options;
+struct aws_input_stream;
+
+struct aws_s3_client;
+struct aws_s3_request;
+struct aws_s3_meta_request;
+struct aws_s3_meta_request_result;
+struct aws_s3_meta_request_resume_token;
+struct aws_uri;
+struct aws_string;
+
+/**
+ * A Meta Request represents a group of generated requests that are being done on behalf of the
+ * original request. For example, one large GetObject request can be transformed into a series
+ * of ranged GetObject requests that are executed in parallel to improve throughput.
+ *
+ * The aws_s3_meta_request_type is a hint of transformation to be applied.
+ */
+enum aws_s3_meta_request_type {
+
+ /**
+ * The Default meta request type sends any request to S3 as-is (with no transformation). For example,
+ * it can be used to pass a CreateBucket request.
+ */
+ AWS_S3_META_REQUEST_TYPE_DEFAULT,
+
+ /**
+ * The GetObject request will be split into a series of ranged GetObject requests that are
+ * executed in parallel to improve throughput, when possible.
+ */
+ AWS_S3_META_REQUEST_TYPE_GET_OBJECT,
+
+ /**
+ * The PutObject request will be split into MultiPart uploads that are executed in parallel
+ * to improve throughput, when possible.
+ */
+ AWS_S3_META_REQUEST_TYPE_PUT_OBJECT,
+
+ /**
+ * The CopyObject meta request performs a multi-part copy
+ * using multiple S3 UploadPartCopy requests in parallel, or bypasses
+ * a CopyObject request to S3 if the object size is not large enough for
+ * a multipart upload.
+ */
+ AWS_S3_META_REQUEST_TYPE_COPY_OBJECT,
+
+ AWS_S3_META_REQUEST_TYPE_MAX,
+};
+
+/**
+ * Invoked to provide response headers received during execution of the meta request, both for
+ * success and error HTTP status codes.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the request.
+ * Return AWS_OP_ERR to indicate failure and cancel the request.
+ */
+typedef int(aws_s3_meta_request_headers_callback_fn)(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_http_headers *headers,
+ int response_status,
+ void *user_data);
+
+/**
+ * Invoked to provide the response body as it is received.
+ *
+ * Note: If you set `enable_read_backpressure` true on the S3 client,
+ * you must maintain the flow-control window.
+ * The flow-control window shrinks as you receive body data via this callback.
+ * Whenever the flow-control window reaches 0 you will stop downloading data.
+ * Use aws_s3_meta_request_increment_read_window() to increment the window and keep data flowing.
+ * Maintain a larger window to keep up a high download throughput;
+ * parts cannot download in parallel unless the window is large enough to hold multiple parts.
+ * Maintain a smaller window to limit the amount of data buffered in memory.
+ *
+ * If `manual_window_management` is false, you do not need to maintain the flow-control window.
+ * No back-pressure is applied and data arrives as fast as possible.
+ *
+ * Return AWS_OP_SUCCESS to continue processing the request.
+ * Return AWS_OP_ERR to indicate failure and cancel the request.
+ */
+typedef int(aws_s3_meta_request_receive_body_callback_fn)(
+
+ /* The meta request that the callback is being issued for. */
+ struct aws_s3_meta_request *meta_request,
+
+ /* The body data for this chunk of the object. */
+ const struct aws_byte_cursor *body,
+
+ /* The byte index of the object that this refers to. For example, for an HTTP message that has a range header, the
+ first chunk received will have a range_start that matches the range header's range-start.*/
+ uint64_t range_start,
+
+ /* User data specified by aws_s3_meta_request_options.*/
+ void *user_data);
+
+/**
+ * Invoked when the entire meta request execution is complete.
+ */
+typedef void(aws_s3_meta_request_finish_fn)(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_s3_meta_request_result *meta_request_result,
+ void *user_data);
+
+/**
+ * Information sent in the meta_request progress callback.
+ */
+struct aws_s3_meta_request_progress {
+
+ /* Bytes transferred since the previous progress update */
+ uint64_t bytes_transferred;
+
+ /* Length of the entire meta request operation */
+ uint64_t content_length;
+};
+
+/**
+ * Invoked to report progress of multi-part upload and copy object requests.
+ */
+typedef void(aws_s3_meta_request_progress_fn)(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_s3_meta_request_progress *progress,
+ void *user_data);
+
+typedef void(aws_s3_meta_request_shutdown_fn)(void *user_data);
+
+typedef void(aws_s3_client_shutdown_complete_callback_fn)(void *user_data);
+
+enum aws_s3_meta_request_tls_mode {
+ AWS_MR_TLS_ENABLED,
+ AWS_MR_TLS_DISABLED,
+};
+
+enum aws_s3_meta_request_compute_content_md5 {
+ AWS_MR_CONTENT_MD5_DISABLED,
+ AWS_MR_CONTENT_MD5_ENABLED,
+};
+
+enum aws_s3_checksum_algorithm {
+ AWS_SCA_NONE = 0,
+ AWS_SCA_INIT,
+ AWS_SCA_CRC32C = AWS_SCA_INIT,
+ AWS_SCA_CRC32,
+ AWS_SCA_SHA1,
+ AWS_SCA_SHA256,
+ AWS_SCA_END = AWS_SCA_SHA256,
+};
+
+enum aws_s3_checksum_location {
+ AWS_SCL_NONE = 0,
+ AWS_SCL_HEADER,
+ AWS_SCL_TRAILER,
+};
+
+/* Keepalive properties are TCP only.
+ * If interval or timeout are zero, then default values are used.
+ */
+struct aws_s3_tcp_keep_alive_options {
+
+ uint16_t keep_alive_interval_sec;
+ uint16_t keep_alive_timeout_sec;
+
+ /* If set, sets the number of keep alive probes allowed to fail before the connection is considered
+ * lost. If zero, OS defaults are used. On Windows, this option is meaningless until Windows 10 1703. */
+ uint16_t keep_alive_max_failed_probes;
+};
+
+/* Options for a new client. */
+struct aws_s3_client_config {
+
+ /* When set, this will cap the number of active connections. When 0, the client will determine this value based on
+ * throughput_target_gbps. (Recommended) */
+ uint32_t max_active_connections_override;
+
+ /* Region that the S3 bucket lives in. */
+ struct aws_byte_cursor region;
+
+ /* Client bootstrap used for common staples such as the event loop group, host resolver, etc. */
+ struct aws_client_bootstrap *client_bootstrap;
+
+ /* How tls should be used while performing the request
+ * If this is ENABLED:
+ * If tls_connection_options is not-null, then those tls options will be used
+ * If tls_connection_options is NULL, then default tls options will be used
+ * If this is DISABLED:
+ * No tls options will be used, regardless of tls_connection_options value.
+ */
+ enum aws_s3_meta_request_tls_mode tls_mode;
+
+ /* TLS Options to be used for each connection, if tls_mode is ENABLED. When compiling with BYO_CRYPTO, and tls_mode
+ * is ENABLED, this is required. Otherwise, this is optional. */
+ struct aws_tls_connection_options *tls_connection_options;
+
+ /* Signing options to be used for each request. Specify NULL to not sign requests. */
+ struct aws_signing_config_aws *signing_config;
+
+ /* Size of parts the files will be downloaded or uploaded in. */
+ size_t part_size;
+
+ /* If the part size needs to be adjusted for service limits, this is the maximum size it will be adjusted to. */
+ size_t max_part_size;
+
+ /* Throughput target in Gbps that we are trying to reach. */
+ double throughput_target_gbps;
+
+ /* Retry strategy to use. If NULL, a default retry strategy will be used. */
+ struct aws_retry_strategy *retry_strategy;
+
+ /**
+ * TODO: move MD5 config to checksum config.
+ * For multi-part upload, content-md5 will be calculated if the AWS_MR_CONTENT_MD5_ENABLED is specified
+ * or initial request has content-md5 header.
+ * For single-part upload, keep the content-md5 in the initial request unchanged. */
+ enum aws_s3_meta_request_compute_content_md5 compute_content_md5;
+
+ /* Callback and associated user data for when the client has completed its shutdown process. */
+ aws_s3_client_shutdown_complete_callback_fn *shutdown_callback;
+ void *shutdown_callback_user_data;
+
+ /**
+ * Optional.
+ * Proxy configuration for http connection.
+ * If the connection_type is AWS_HPCT_HTTP_LEGACY, it will be converted to AWS_HPCT_HTTP_TUNNEL if tls_mode is
+ * ENABLED. Otherwise, it will be converted to AWS_HPCT_HTTP_FORWARD.
+ */
+ struct aws_http_proxy_options *proxy_options;
+
+ /**
+ * Optional.
+ * Configuration for fetching proxy configuration from environment.
+ * By default, proxy_ev_settings.aws_http_proxy_env_var_type is set to AWS_HPEV_ENABLE, which means the proxy
+ * configuration is read from the environment.
+ * Only works when proxy_options is not set. If both are set, configuration from proxy_options is used.
+ */
+ struct proxy_env_var_settings *proxy_ev_settings;
+
+ /**
+ * Optional.
+ * If set to 0, default value is used.
+ */
+ uint32_t connect_timeout_ms;
+
+ /**
+ * Optional.
+ * Set keepalive to periodically transmit messages for detecting a disconnected peer.
+ */
+ struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options;
+
+ /**
+ * Optional.
+ * Configuration options for connection monitoring.
+ * If the transfer speed falls below the specified minimum_throughput_bytes_per_second, the operation is aborted.
+ * If set to NULL, default values are used.
+ */
+ struct aws_http_connection_monitoring_options *monitoring_options;
+
+ /**
+ * Enable backpressure and prevent response data from downloading faster than you can handle it.
+ *
+ * If false (default), no backpressure is applied and data will download as fast as possible.
+ *
+ * If true, each meta request has a flow-control window that shrinks as
+ * response body data is downloaded (headers do not affect the window).
+ * `initial_read_window` determines the starting size of each meta request's window.
+ * You will stop downloading data whenever the flow-control window reaches 0.
+ * You must call aws_s3_meta_request_increment_read_window() to keep data flowing.
+ *
+ * WARNING: This feature is experimental.
+ * Currently, backpressure is only applied to GetObject requests which are split into multiple parts,
+ * and you may still receive some data after the window reaches 0.
+ */
+ bool enable_read_backpressure;
+
+ /**
+ * The starting size of each meta request's flow-control window, in bytes.
+ * Ignored unless `enable_read_backpressure` is true.
+ */
+ size_t initial_read_window;
+};
+
+struct aws_s3_checksum_config {
+
+ /**
+ * The location of the client-added checksum header.
+ *
+ * If AWS_SCL_NONE, no request payload checksum will be calculated or added.
+ *
+ * If AWS_SCL_HEADER, the checksum will be calculated by the client and the related header added to the request sent.
+ *
+ * If AWS_SCL_TRAILER, the payload will be aws_chunked encoded; the checksum will be calculated by the client while
+ * reading the payload, and the related header will be added to the trailer part of the encoded payload. Note that the
+ * payload of the original request cannot already be aws-chunked encoded; otherwise, an error will be raised.
+ */
+ enum aws_s3_checksum_location location;
+
+ /**
+ * The checksum algorithm used.
+ * Must be set if location is not AWS_SCL_NONE. Must be AWS_SCA_NONE if location is AWS_SCL_NONE.
+ */
+ enum aws_s3_checksum_algorithm checksum_algorithm;
+
+ /**
+ * If true, an "enable checksum mode" header will be attached to GET requests, telling S3 to send back checksum
+ * headers if they exist, and the corresponding checksum will be calculated on the response bodies. The meta request
+ * will finish with the did_validate field set, and with the error code set to
+ * AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH if the calculated checksum and the checksum found in the response header
+ * do not match.
+ */
+ bool validate_response_checksum;
+
+ /**
+ * Optional array of `enum aws_s3_checksum_algorithm`.
+ *
+ * Ignored when validate_response_checksum is not set.
+ * If not set, all the algorithms will be selected by default.
+ * Owned by the caller.
+ *
+ * The list of algorithms the client may use when validating the checksum. The client will pick an algorithm from the
+ * list based on the algorithm sent by the server and on performance, where the performance-based priority order is
+ * [CRC32C, CRC32, SHA1, SHA256].
+ *
+ * If the response checksum was validated by the client, the result will indicate which algorithm was picked.
+ */
+ struct aws_array_list *validate_checksum_algorithms;
+};
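+
+/* Editor's note: illustrative sketch (not part of the upstream header) of populating the checksum config for a
+ * trailer-based CRC32C upload; the field choices here are just one possible combination. */
+#if 0
+static struct aws_s3_checksum_config s_example_crc32c_trailer_config(void) {
+    struct aws_s3_checksum_config checksum_config = {
+        /* Compute the checksum client-side and send it as an aws-chunked trailer header. */
+        .location = AWS_SCL_TRAILER,
+        .checksum_algorithm = AWS_SCA_CRC32C,
+        /* Also ask S3 for checksum headers on GETs and validate response bodies against them. */
+        .validate_response_checksum = true,
+    };
+    return checksum_config;
+}
+#endif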
+
+/* Options for a new meta request, ie, file transfer that will be handled by the high performance client. */
+struct aws_s3_meta_request_options {
+ /* TODO: The meta request options cannot control the request to be split or not. Should consider to add one */
+
+ /* The type of meta request we will be trying to accelerate. */
+ enum aws_s3_meta_request_type type;
+
+ /* Signing options to be used for each request created for this meta request. If NULL, options in the client will
+ * be used. If not NULL, these options will override the client options. */
+ const struct aws_signing_config_aws *signing_config;
+
+ /* Initial HTTP message that defines what operation we are doing.
+ * When uploading a file, you should set `send_filepath` (instead of the message's body-stream)
+ * for better performance. */
+ struct aws_http_message *message;
+
+ /**
+ * Optional.
+ * If set, this file is sent as the request body, and the `message` body-stream is ignored.
+ * This can give better performance than sending data using the body-stream.
+ */
+ struct aws_byte_cursor send_filepath;
+
+ /**
+ * Optional.
+ * if set, the flexible checksum will be performed by client based on the config.
+ */
+ const struct aws_s3_checksum_config *checksum_config;
+
+ /* User data for all callbacks. */
+ void *user_data;
+
+ /**
+ * Optional.
+ * Invoked to provide response headers received during execution of the meta request.
+ * Note: this callback will not be fired for cases when resuming an
+ * operation that was already completed (ex. pausing put object after it
+ * uploaded all data and then resuming it)
+ * See `aws_s3_meta_request_headers_callback_fn`.
+ */
+ aws_s3_meta_request_headers_callback_fn *headers_callback;
+
+ /**
+ * Invoked to provide the response body as it is received.
+ * See `aws_s3_meta_request_receive_body_callback_fn`.
+ */
+ aws_s3_meta_request_receive_body_callback_fn *body_callback;
+
+ /**
+ * Invoked when the entire meta request execution is complete.
+ * See `aws_s3_meta_request_finish_fn`.
+ */
+ aws_s3_meta_request_finish_fn *finish_callback;
+
+ /* Callback for when the meta request has completely cleaned up. */
+ aws_s3_meta_request_shutdown_fn *shutdown_callback;
+
+ /**
+ * Invoked to report progress of the meta request execution.
+ * Currently, the progress callback is invoked only for the CopyObject meta request type.
+ * TODO: support this callback for all the types of meta requests
+ * See `aws_s3_meta_request_progress_fn`
+ */
+ aws_s3_meta_request_progress_fn *progress_callback;
+
+ /**
+ * Optional.
+ * Endpoint override for request. Can be used to override scheme and port of
+ * the endpoint.
+ * There is some overlap between Host header and Endpoint and corner cases
+ * are handled as follows:
+ * - Only Host header is set - Host is used to construct endpoint. https is
+ * default with corresponding port
+ * - Only endpoint is set - Host header is created from endpoint. Port and
+ * Scheme from the endpoint are used.
+ * - Both Host and Endpoint are set - Host header must match the Authority of
+ * the Endpoint uri. Port and Scheme from the endpoint are used.
+ */
+ struct aws_uri *endpoint;
+
+ /**
+ * Optional.
+ * For meta requests that support pause/resume (e.g. PutObject), serialized resume token returned by
+ * aws_s3_meta_request_pause() can be provided here.
+ * Note: If PutObject request specifies a checksum algorithm, client will calculate checksums while skipping parts
+ * from the buffer and compare them to the previously uploaded part checksums.
+ */
+ struct aws_s3_meta_request_resume_token *resume_token;
+};
+
+/* Result details of a meta request.
+ *
+ * If error_code is AWS_ERROR_SUCCESS, then response_status will match the response_status passed earlier by the header
+ * callback and error_response_headers and error_response_body will be NULL.
+ *
+ * If error_code is equal to AWS_ERROR_S3_INVALID_RESPONSE_STATUS, then error_response_headers, error_response_body, and
+ * response_status will be populated by the failed request.
+ *
+ * For all other error codes, response_status will be 0, and the error_response variables will be NULL.
+ */
+struct aws_s3_meta_request_result {
+
+ /* HTTP Headers for the failed request that triggered finish of the meta request. NULL if no request failed. */
+ struct aws_http_headers *error_response_headers;
+
+ /* Response body for the failed request that triggered finishing of the meta request. NULL if no request failed. */
+ struct aws_byte_buf *error_response_body;
+
+ /* Response status of the failed request or of the entire meta request. */
+ int response_status;
+
+ /* Only set for GET request.
+ * Was the server side checksum compared against a calculated checksum of the response body. This may be false
+ * even if validate_get_response_checksum was set because the object was uploaded without a checksum, or was
+ * uploaded as a multipart object.
+ *
+ * If the object to get is a multipart object, the part checksums MAY be validated if the part size to get matches the
+ * part size uploaded. In that case, if any part mismatches the checksum received, the meta request will fail with a
+ * checksum mismatch. However, even if the part checksums were validated, this will NOT be set to true, as the
+ * checksum for the whole meta request was NOT validated.
+ **/
+ bool did_validate;
+
+ /* algorithm used to validate checksum */
+ enum aws_s3_checksum_algorithm validation_algorithm;
+
+ /* Final error code of the meta request. */
+ int error_code;
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_S3_API
+struct aws_s3_client *aws_s3_client_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_client_config *client_config);
+
+/**
+ * Add a reference, keeping this object alive.
+ * The reference must be released when you are done with it, or its memory will never be cleaned up.
+ * You must not pass in NULL.
+ * Always returns the same pointer that was passed in.
+ */
+AWS_S3_API
+struct aws_s3_client *aws_s3_client_acquire(struct aws_s3_client *client);
+
+/**
+ * Release a reference.
+ * When the reference count drops to 0, this object will be cleaned up.
+ * It's OK to pass in NULL (nothing happens).
+ * Always returns NULL.
+ */
+AWS_S3_API
+struct aws_s3_client *aws_s3_client_release(struct aws_s3_client *client);
+
+AWS_S3_API
+struct aws_s3_meta_request *aws_s3_client_make_meta_request(
+ struct aws_s3_client *client,
+ const struct aws_s3_meta_request_options *options);
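+
+/* Editor's note: illustrative sketch (not part of the upstream header) of issuing a GetObject meta request.
+ * It assumes the caller already built an HTTP GET message (for example with aws-c-http) and that s_on_body and
+ * s_on_finish are caller-defined callbacks matching the typedefs above; both names are hypothetical. */
+#if 0
+static struct aws_s3_meta_request *s_example_start_get_object(
+    struct aws_s3_client *client,
+    struct aws_http_message *get_object_message) {
+
+    struct aws_s3_meta_request_options options = {
+        .type = AWS_S3_META_REQUEST_TYPE_GET_OBJECT,
+        .message = get_object_message,
+        .body_callback = s_on_body,     /* aws_s3_meta_request_receive_body_callback_fn */
+        .finish_callback = s_on_finish, /* aws_s3_meta_request_finish_fn */
+    };
+
+    /* Returns a referenced meta request; release it with aws_s3_meta_request_release() when done. */
+    return aws_s3_client_make_meta_request(client, &options);
+}
+#endif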
+
+/**
+ * Increment the flow-control window, so that response data continues downloading.
+ *
+ * If the client was created with `enable_read_backpressure` set true,
+ * each meta request has a flow-control window that shrinks as response
+ * body data is downloaded (headers do not affect the size of the window).
+ * The client's `initial_read_window` determines the starting size of each meta request's window.
+ * If a meta request's flow-control window reaches 0, no further data will be downloaded.
+ * If the `initial_read_window` is 0, the request will not start until the window is incremented.
+ * Maintain a larger window to keep up a high download throughput;
+ * parts cannot download in parallel unless the window is large enough to hold multiple parts.
+ * Maintain a smaller window to limit the amount of data buffered in memory.
+ *
+ * If `enable_read_backpressure` is false this call will have no effect,
+ * no backpressure is being applied and data is being downloaded as fast as possible.
+ *
+ * WARNING: This feature is experimental.
+ * Currently, backpressure is only applied to GetObject requests which are split into multiple parts,
+ * and you may still receive some data after the window reaches 0.
+ */
+AWS_S3_API
+void aws_s3_meta_request_increment_read_window(struct aws_s3_meta_request *meta_request, uint64_t bytes);
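+
+/* Editor's note: illustrative sketch (not part of the upstream header) of a body callback cooperating with
+ * backpressure as described above: after the downloaded chunk has been consumed, the flow-control window is grown
+ * by the same number of bytes so the download keeps flowing. The "consume" step is left as a placeholder. */
+#if 0
+static int s_example_on_body(
+    struct aws_s3_meta_request *meta_request,
+    const struct aws_byte_cursor *body,
+    uint64_t range_start,
+    void *user_data) {
+
+    (void)range_start;
+    (void)user_data;
+
+    /* ... hand body->ptr / body->len to the application here ... */
+
+    /* Re-open the flow-control window by the amount just consumed. */
+    aws_s3_meta_request_increment_read_window(meta_request, (uint64_t)body->len);
+
+    return AWS_OP_SUCCESS;
+}
+#endif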
+
+AWS_S3_API
+void aws_s3_meta_request_cancel(struct aws_s3_meta_request *meta_request);
+
+/**
+ * Note: pause is currently only supported on upload requests.
+ * In order to pause an ongoing upload, call aws_s3_meta_request_pause(), which
+ * will return a resume token. The token can be used to query the state of the
+ * operation at the time of pausing.
+ * To resume an upload that was paused, supply resume token in the meta
+ * request options structure member aws_s3_meta_request_options.resume_token.
+ * The upload can be resumed either from the same client or a different one.
+ * Corner cases for resume upload are as follows:
+ * - upload is not MPU - fail with AWS_ERROR_UNSUPPORTED_OPERATION
+ * - pausing before MPU is created - NULL resume token returned. NULL resume
+ * token is equivalent to restarting upload
+ * - pausing in the middle of part transfer - return resume token. scheduling of
+ * new part uploads stops.
+ * - pausing after completeMPU started - return resume token. if s3 cannot
+ * find the associated MPU id when resuming with that token and the number of
+ * parts uploaded equals the total number of parts, then the operation is a
+ * no-op. Otherwise the operation fails.
+ * Note: for no op case the call will succeed and finish/shutdown request callbacks will
+ * fire, but on headers callback will not fire.
+ * Note: similar to cancel, pause does not cancel requests already in flight,
+ * and parts might complete after pause is requested.
+ * @param meta_request pointer to the aws_s3_meta_request of the upload to be paused
+ * @param resume_token resume token
+ * @return either AWS_OP_ERR or AWS_OP_SUCCESS
+ */
+AWS_S3_API
+int aws_s3_meta_request_pause(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_resume_token **out_resume_token);
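
A hedged sketch of the pause flow documented above, using only aws_s3_meta_request_pause and the resume-token getters declared later in this header; the helper name s_pause_and_capture and the persistence step are illustrative.

static int s_pause_and_capture(struct aws_s3_meta_request *upload) {
    struct aws_s3_meta_request_resume_token *token = NULL;

    if (aws_s3_meta_request_pause(upload, &token)) {
        return AWS_OP_ERR;
    }

    if (token == NULL) {
        /* Paused before the MPU was created: resuming is equivalent to restarting. */
        return AWS_OP_SUCCESS;
    }

    struct aws_byte_cursor upload_id = aws_s3_meta_request_resume_token_upload_id(token);
    size_t part_size = aws_s3_meta_request_resume_token_part_size(token);
    size_t total_num_parts = aws_s3_meta_request_resume_token_total_num_parts(token);
    size_t num_parts_completed = aws_s3_meta_request_resume_token_num_parts_completed(token);

    /* ... persist upload_id, part_size, total_num_parts and num_parts_completed
     * somewhere durable (illustrative step, not shown) ... */
    (void)upload_id;
    (void)part_size;
    (void)total_num_parts;
    (void)num_parts_completed;

    aws_s3_meta_request_resume_token_release(token);
    return AWS_OP_SUCCESS;
}
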
+
+/*
+ * Options to construct an upload resume token.
+ * Note: the fields correspond to the getters on the token below, and it is up to the
+ * caller to persist those in whichever way they choose.
+ */
+struct aws_s3_upload_resume_token_options {
+ struct aws_byte_cursor upload_id; /* Required */
+ size_t part_size; /* Required */
+ size_t total_num_parts; /* Required */
+
+ /**
+ * Optional.
+ *
+ * Note: during resume, num_parts_completed is used for sanity checking against
+ * the upload state on the S3 side.
+ * In cases where the upload id no longer exists (already resumed using this token,
+ * pause called after the upload completed, etc.) and num_parts_completed
+ * equals total_num_parts, resume becomes a no-op.
+ */
+ size_t num_parts_completed;
+};
+
+/**
+ * Create an upload resume token from persisted data.
+ * Note: the data required for a resume token varies per operation.
+ */
+AWS_S3_API
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new_upload(
+ struct aws_allocator *allocator,
+ const struct aws_s3_upload_resume_token_options *options);
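
A hedged sketch of rebuilding a token from persisted values and resuming the upload, assuming the caller already has an aws_s3_meta_request_options with message, type and callbacks filled in; the helper name s_resume_upload is illustrative, and the resume_token member is the one referenced in the pause documentation above.

static struct aws_s3_meta_request *s_resume_upload(
    struct aws_allocator *allocator,
    struct aws_s3_client *client,
    struct aws_byte_cursor persisted_upload_id,
    size_t persisted_part_size,
    size_t persisted_total_num_parts,
    size_t persisted_num_parts_completed,
    const struct aws_s3_meta_request_options *base_options /* message, type, callbacks already set */) {

    struct aws_s3_upload_resume_token_options token_options = {
        .upload_id = persisted_upload_id,
        .part_size = persisted_part_size,
        .total_num_parts = persisted_total_num_parts,
        .num_parts_completed = persisted_num_parts_completed,
    };

    struct aws_s3_meta_request_resume_token *token =
        aws_s3_meta_request_resume_token_new_upload(allocator, &token_options);

    struct aws_s3_meta_request_options options = *base_options;
    options.resume_token = token;

    struct aws_s3_meta_request *resumed = aws_s3_client_make_meta_request(client, &options);

    /* The resumed meta request acquires its own reference on the token (see the
     * acquire in aws_s3_meta_request_auto_ranged_put_new later in this patch),
     * so drop ours regardless of success. */
    aws_s3_meta_request_resume_token_release(token);

    return resumed;
}
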
+
+/*
+ * Increment resume token ref count.
+ */
+AWS_S3_API
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_acquire(
+ struct aws_s3_meta_request_resume_token *resume_token);
+
+/*
+ * Decrement resume token ref count.
+ */
+AWS_S3_API
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_release(
+ struct aws_s3_meta_request_resume_token *resume_token);
+
+/*
+ * Type of resume token.
+ */
+AWS_S3_API
+enum aws_s3_meta_request_type aws_s3_meta_request_resume_token_type(
+ struct aws_s3_meta_request_resume_token *resume_token);
+
+/*
+ * Part size associated with operation.
+ */
+AWS_S3_API
+size_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token);
+
+/*
+ * Total num parts associated with operation.
+ */
+AWS_S3_API
+size_t aws_s3_meta_request_resume_token_total_num_parts(struct aws_s3_meta_request_resume_token *resume_token);
+
+/*
+ * Num parts completed.
+ */
+AWS_S3_API
+size_t aws_s3_meta_request_resume_token_num_parts_completed(struct aws_s3_meta_request_resume_token *resume_token);
+
+/*
+ * Upload id associated with operation.
+ * Only valid for tokens returned from an upload operation. For all other operations
+ * this returns an empty cursor.
+ */
+AWS_S3_API
+struct aws_byte_cursor aws_s3_meta_request_resume_token_upload_id(
+ struct aws_s3_meta_request_resume_token *resume_token);
+
+/**
+ * Add a reference, keeping this object alive.
+ * The reference must be released when you are done with it, or its memory will never be cleaned up.
+ * You must not pass in NULL.
+ * Always returns the same pointer that was passed in.
+ */
+AWS_S3_API
+struct aws_s3_meta_request *aws_s3_meta_request_acquire(struct aws_s3_meta_request *meta_request);
+
+/**
+ * Release a reference.
+ * When the reference count drops to 0, this object will be cleaned up.
+ * It's OK to pass in NULL (nothing happens).
+ * Always returns NULL.
+ */
+AWS_S3_API
+struct aws_s3_meta_request *aws_s3_meta_request_release(struct aws_s3_meta_request *meta_request);
+
+AWS_S3_API
+void aws_s3_init_default_signing_config(
+ struct aws_signing_config_aws *signing_config,
+ const struct aws_byte_cursor region,
+ struct aws_credentials_provider *credentials_provider);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_S3_CLIENT_H */
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3.c b/contrib/restricted/aws/aws-c-s3/source/s3.c
new file mode 100644
index 0000000000..a3a7116607
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3.c
@@ -0,0 +1,174 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/s3.h>
+
+#include <aws/auth/auth.h>
+#include <aws/common/error.h>
+#include <aws/common/hash_table.h>
+#include <aws/http/http.h>
+
+#define AWS_DEFINE_ERROR_INFO_S3(CODE, STR) AWS_DEFINE_ERROR_INFO(CODE, STR, "aws-c-s3")
+
+/* clang-format off */
+static struct aws_error_info s_errors[] = {
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER, "Response missing required Content-Range header."),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER, "Response contains invalid Content-Range header."),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER, "Response missing required Content-Length header."),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER, "Response contains invalid Content-Length header."),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_ETAG, "Response missing required ETag header."),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INTERNAL_ERROR, "Response code indicates internal server error"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_SLOW_DOWN, "Response code indicates throttling"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_RESPONSE_STATUS, "Invalid response status from request"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MISSING_UPLOAD_ID, "Upload Id not found in create-multipart-upload response"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_PROXY_PARSE_FAILED, "Could not parse proxy URI"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_UNSUPPORTED_PROXY_SCHEME, "Given Proxy URI has an unsupported scheme"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_CANCELED, "Request successfully cancelled"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_INVALID_RANGE_HEADER, "Range header has invalid syntax"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_MULTIRANGE_HEADER_UNSUPPORTED, "Range header specifies multiple ranges which is unsupported"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH, "response checksum header does not match calculated checksum"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED, "failed to calculate a checksum for the provided stream"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_PAUSED, "Request successfully paused"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED, "Failed to parse result from list parts"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH, "Checksum does not match previously uploaded part"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_RESUME_FAILED, "Resuming request failed"),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_OBJECT_MODIFIED, "The object modified during download."),
+ AWS_DEFINE_ERROR_INFO_S3(AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR, "Async error received from S3 and not recoverable from retry.")
+};
+/* clang-format on */
+
+static struct aws_error_info_list s_error_list = {
+ .error_list = s_errors,
+ .count = AWS_ARRAY_SIZE(s_errors),
+};
+
+static struct aws_log_subject_info s_s3_log_subject_infos[] = {
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_GENERAL, "S3General", "Subject for aws-c-s3 logging that defies categorization."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_CLIENT, "S3Client", "Subject for aws-c-s3 logging from an aws_s3_client."),
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_S3_CLIENT_STATS,
+ "S3ClientStats",
+ "Subject for aws-c-s3 logging for stats tracked by an aws_s3_client."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_REQUEST, "S3Request", "Subject for aws-c-s3 logging from an aws_s3_request."),
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_S3_META_REQUEST,
+ "S3MetaRequest",
+ "Subject for aws-c-s3 logging from an aws_s3_meta_request."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_S3_ENDPOINT, "S3Endpoint", "Subject for aws-c-s3 logging from an aws_s3_endpoint."),
+};
+
+static struct aws_log_subject_info_list s_s3_log_subject_list = {
+ .subject_list = s_s3_log_subject_infos,
+ .count = AWS_ARRAY_SIZE(s_s3_log_subject_infos),
+};
+
+/**** Configuration info for the c5n.18xlarge *****/
+static struct aws_byte_cursor s_c5n_18xlarge_nic_array[] = {AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("eth0")};
+
+static struct aws_s3_cpu_group_info s_c5n_18xlarge_cpu_group_info_array[] = {
+ {
+ .cpu_group = 0u,
+ .nic_name_array = s_c5n_18xlarge_nic_array,
+ .nic_name_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_nic_array),
+ },
+ {
+ .cpu_group = 1u,
+ .nic_name_array = NULL,
+ .nic_name_array_length = 0u,
+ },
+};
+
+static struct aws_s3_compute_platform_info s_c5n_18xlarge_platform_info = {
+ .instance_type = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("c5n.18xlarge"),
+ .max_throughput_gbps = 100u,
+ .cpu_group_info_array = s_c5n_18xlarge_cpu_group_info_array,
+ .cpu_group_info_array_length = AWS_ARRAY_SIZE(s_c5n_18xlarge_cpu_group_info_array),
+};
+/****** End c5n.18xlarge *****/
+
+static struct aws_hash_table s_compute_platform_info_table;
+
+static bool s_library_initialized = false;
+static struct aws_allocator *s_library_allocator = NULL;
+
+void aws_s3_library_init(struct aws_allocator *allocator) {
+ if (s_library_initialized) {
+ return;
+ }
+
+ if (allocator) {
+ s_library_allocator = allocator;
+ } else {
+ s_library_allocator = aws_default_allocator();
+ }
+
+ aws_auth_library_init(s_library_allocator);
+ aws_http_library_init(s_library_allocator);
+
+ aws_register_error_info(&s_error_list);
+ aws_register_log_subject_info_list(&s_s3_log_subject_list);
+
+ AWS_FATAL_ASSERT(
+ !aws_hash_table_init(
+ &s_compute_platform_info_table,
+ allocator,
+ 32,
+ aws_hash_byte_cursor_ptr_ignore_case,
+ (bool (*)(const void *, const void *))aws_byte_cursor_eq_ignore_case,
+ NULL,
+ NULL) &&
+ "Hash table init failed!");
+
+ AWS_FATAL_ASSERT(
+ !aws_hash_table_put(
+ &s_compute_platform_info_table,
+ &s_c5n_18xlarge_platform_info.instance_type,
+ &s_c5n_18xlarge_platform_info,
+ NULL) &&
+ "hash table put failed!");
+
+ s_library_initialized = true;
+}
+
+void aws_s3_library_clean_up(void) {
+ if (!s_library_initialized) {
+ return;
+ }
+
+ s_library_initialized = false;
+ aws_thread_join_all_managed();
+
+ aws_hash_table_clean_up(&s_compute_platform_info_table);
+ aws_unregister_log_subject_info_list(&s_s3_log_subject_list);
+ aws_unregister_error_info(&s_error_list);
+ aws_http_library_clean_up();
+ aws_auth_library_clean_up();
+ s_library_allocator = NULL;
+}
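
A minimal lifetime sketch for the init/clean-up pair above; it assumes <aws/s3/s3.h> transitively declares aws_default_allocator (as the includes of this file suggest), and the application's S3 work is elided.

#include <aws/s3/s3.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();

    /* Also initializes aws-c-auth and aws-c-http, and registers the error and
     * log-subject tables above. */
    aws_s3_library_init(allocator);

    /* ... create a client, run meta requests, release them ... */

    /* Joins managed threads and unregisters errors/log subjects. */
    aws_s3_library_clean_up();
    return 0;
}
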
+
+struct aws_s3_compute_platform_info *aws_s3_get_compute_platform_info_for_instance_type(
+ const struct aws_byte_cursor instance_type_name) {
+ AWS_LOGF_TRACE(
+ AWS_LS_S3_GENERAL,
+ "static: looking up compute platform info for instance type " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(instance_type_name));
+
+ struct aws_hash_element *platform_info_element = NULL;
+ aws_hash_table_find(&s_compute_platform_info_table, &instance_type_name, &platform_info_element);
+
+ if (platform_info_element) {
+ AWS_LOGF_INFO(
+ AWS_LS_S3_GENERAL,
+ "static: found compute platform info for instance type " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(instance_type_name));
+ return platform_info_element->value;
+ }
+
+ AWS_LOGF_INFO(
+ AWS_LS_S3_GENERAL,
+ "static: compute platform info for instance type " PRInSTR " not found",
+ AWS_BYTE_CURSOR_PRI(instance_type_name));
+ return NULL;
+}
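
A small hedged sketch of the lookup above; aws_byte_cursor_from_c_str comes from aws-c-common, and the helper name s_lookup_platform_hint is illustrative.

static void s_lookup_platform_hint(void) {
    struct aws_s3_compute_platform_info *info = aws_s3_get_compute_platform_info_for_instance_type(
        aws_byte_cursor_from_c_str("c5n.18xlarge"));

    if (info != NULL) {
        /* For c5n.18xlarge the table above reports max_throughput_gbps = 100 and a
         * single NIC ("eth0") on CPU group 0; callers could use this as a throughput hint. */
    }
}
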
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
new file mode 100644
index 0000000000..75689aaa42
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_get.c
@@ -0,0 +1,722 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_auto_ranged_get.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/common/string.h>
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+/* sscanf warning (not currently scanning for strings) */
+# pragma warning(disable : 4996)
+#endif
+
+const uint32_t s_conservative_max_requests_in_flight = 8;
+const struct aws_byte_cursor g_application_xml_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("application/xml");
+const struct aws_byte_cursor g_object_size_value = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ActualObjectSize");
+
+static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request *meta_request);
+
+static bool s_s3_auto_ranged_get_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request);
+
+static int s_s3_auto_ranged_get_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request);
+
+static void s_s3_auto_ranged_get_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code);
+
+static struct aws_s3_meta_request_vtable s_s3_auto_ranged_get_vtable = {
+ .update = s_s3_auto_ranged_get_update,
+ .send_request_finish = aws_s3_meta_request_send_request_finish_default,
+ .prepare_request = s_s3_auto_ranged_get_prepare_request,
+ .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default,
+ .sign_request = aws_s3_meta_request_sign_request_default,
+ .finished_request = s_s3_auto_ranged_get_request_finished,
+ .destroy = s_s3_meta_request_auto_ranged_get_destroy,
+ .finish = aws_s3_meta_request_finish_default,
+};
+
+static int s_s3_auto_ranged_get_success_status(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl;
+ AWS_PRECONDITION(auto_ranged_get);
+
+ if (auto_ranged_get->initial_message_has_range_header) {
+ return AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS;
+ }
+
+ return AWS_S3_RESPONSE_STATUS_SUCCESS;
+}
+
+/* Allocate a new auto-ranged-get meta request. */
+struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_get_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ size_t part_size,
+ const struct aws_s3_meta_request_options *options) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->message);
+
+ struct aws_s3_auto_ranged_get *auto_ranged_get =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_auto_ranged_get));
+
+ /* Try to initialize the base type. */
+ if (aws_s3_meta_request_init_base(
+ allocator,
+ client,
+ part_size,
+ false,
+ options,
+ auto_ranged_get,
+ &s_s3_auto_ranged_get_vtable,
+ &auto_ranged_get->base)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not initialize base type for Auto-Ranged-Get Meta Request.",
+ (void *)auto_ranged_get);
+ aws_mem_release(allocator, auto_ranged_get);
+ return NULL;
+ }
+
+ struct aws_http_headers *headers = aws_http_message_get_headers(auto_ranged_get->base.initial_request_message);
+ AWS_ASSERT(headers != NULL);
+
+ auto_ranged_get->initial_message_has_range_header = aws_http_headers_has(headers, g_range_header_name);
+ auto_ranged_get->initial_message_has_if_match_header = aws_http_headers_has(headers, g_if_match_header_name);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST, "id=%p Created new Auto-Ranged Get Meta Request.", (void *)&auto_ranged_get->base);
+
+ return &auto_ranged_get->base;
+}
+
+static void s_s3_meta_request_auto_ranged_get_destroy(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+
+ struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl;
+ aws_string_destroy(auto_ranged_get->etag);
+ aws_mem_release(meta_request->allocator, auto_ranged_get);
+}
+
+static bool s_s3_auto_ranged_get_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(out_request);
+
+ struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl;
+ struct aws_s3_request *request = NULL;
+ bool work_remaining = false;
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ /* If nothing has set the "finish result" then this meta request is still in progress, and we can potentially
+ * send additional requests. */
+ if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) {
+
+ if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) {
+ uint32_t num_requests_in_flight =
+ (auto_ranged_get->synced_data.num_parts_requested -
+ auto_ranged_get->synced_data.num_parts_completed) +
+ (uint32_t)aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests);
+
+ /* auto-ranged-gets make use of body streaming, which will hold onto response bodies if parts earlier in
+ * the file haven't arrived yet. This can potentially create a lot of backed up requests, causing us to
+ * hit our global request limit. To help mitigate this, when the "conservative" flag is passed in, we
+ * only allow the total amount of requests being sent/streamed to be inside of a set limit. */
+ if (num_requests_in_flight > s_conservative_max_requests_in_flight) {
+ goto has_work_remaining;
+ }
+ }
+
+ /* If the overall range of the object that we are trying to retrieve isn't known yet, then we need to send a
+ * request to figure that out. */
+ if (!auto_ranged_get->synced_data.object_range_known) {
+
+ /* If there is a range header or we require validation of the response checksum, we currently always
+ * do a head request first.
+ * S3 returns the checksum of the entire object from the HEAD response.
+ *
+ * While the range header value could be parsed client-side, doing so presents a number of
+ * complications. For example, the given range could be an unsatisfiable range, and might not even
+ * specify a complete range. To keep things simple, we are currently relying on the service to handle
+ * turning the Range header into a Content-Range response header. */
+ bool head_object_required = auto_ranged_get->initial_message_has_range_header != 0 ||
+ meta_request->checksum_config.validate_response_checksum;
+
+ if (head_object_required) {
+ /* If the head object request hasn't been sent yet, then send it now. */
+ if (!auto_ranged_get->synced_data.head_object_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY);
+
+ request->discovers_object_size = true;
+
+ auto_ranged_get->synced_data.head_object_sent = true;
+ }
+ } else if (auto_ranged_get->synced_data.num_parts_requested == 0) {
+ /* If we aren't using a head object, then discover the size of the object while trying to get the
+ * first part. */
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART,
+ 1,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY);
+
+ request->part_range_start = 0;
+ request->part_range_end = meta_request->part_size - 1;
+ request->discovers_object_size = true;
+
+ ++auto_ranged_get->synced_data.num_parts_requested;
+ }
+
+ goto has_work_remaining;
+ }
+
+ /* If the object range is known and that range is empty, then we have an empty file to request. */
+ if (auto_ranged_get->synced_data.object_range_start == 0 &&
+ auto_ranged_get->synced_data.object_range_end == 0) {
+ if (auto_ranged_get->synced_data.get_without_range_sent) {
+ if (auto_ranged_get->synced_data.get_without_range_completed) {
+ goto no_work_remaining;
+ } else {
+ goto has_work_remaining;
+ }
+ }
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ auto_ranged_get->synced_data.get_without_range_sent = true;
+ goto has_work_remaining;
+ }
+
+ /* If there are still more parts to be requested */
+ if (auto_ranged_get->synced_data.num_parts_requested < auto_ranged_get->synced_data.total_num_parts) {
+
+ if (meta_request->client->enable_read_backpressure) {
+ /* Don't start a part until we have enough window to send bytes to the user.
+ *
+ * Note that we start a part once we have enough window to deliver ANY of its bytes.
+ * If we waited until the window was large enough for the WHOLE part,
+ * we could end up stuck in a situation where the user is
+ * waiting for more bytes before they'll open the window,
+ * and this implementation is waiting for more window before it will send more parts. */
+ uint64_t read_data_requested =
+ auto_ranged_get->synced_data.num_parts_requested * meta_request->part_size;
+ if (read_data_requested >= meta_request->synced_data.read_window_running_total) {
+
+ /* Avoid spamming users with this DEBUG message */
+ if (auto_ranged_get->synced_data.read_window_warning_issued == 0) {
+ auto_ranged_get->synced_data.read_window_warning_issued = 1;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Download paused because read window is zero. "
+ "You must increment to window to continue.",
+ (void *)meta_request);
+ }
+
+ goto has_work_remaining;
+ }
+
+ auto_ranged_get->synced_data.read_window_warning_issued = 0;
+ }
+
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART,
+ auto_ranged_get->synced_data.num_parts_requested + 1,
+ AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY);
+
+ aws_s3_get_part_range(
+ auto_ranged_get->synced_data.object_range_start,
+ auto_ranged_get->synced_data.object_range_end,
+ meta_request->part_size,
+ request->part_number,
+ &request->part_range_start,
+ &request->part_range_end);
+
+ ++auto_ranged_get->synced_data.num_parts_requested;
+ goto has_work_remaining;
+ }
+
+ /* If there are parts whose delivery to the caller has not yet been attempted, then there is still work
+ * being done. */
+ if (meta_request->synced_data.num_parts_delivery_completed < auto_ranged_get->synced_data.total_num_parts) {
+ goto has_work_remaining;
+ }
+ } else {
+ /* Else, if there is a finish result set, make sure that all work-in-progress winds down before the meta
+ * request completely exits. */
+
+ if (auto_ranged_get->synced_data.head_object_sent && !auto_ranged_get->synced_data.head_object_completed) {
+ goto has_work_remaining;
+ }
+
+ /* Wait for all requests to complete (successfully or unsuccessfully) before finishing.*/
+ if (auto_ranged_get->synced_data.num_parts_completed < auto_ranged_get->synced_data.num_parts_requested) {
+ goto has_work_remaining;
+ }
+
+ if (auto_ranged_get->synced_data.get_without_range_sent &&
+ !auto_ranged_get->synced_data.get_without_range_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If some parts are still being delivered to the caller, then wait for those to finish. */
+ if (meta_request->synced_data.num_parts_delivery_completed <
+ meta_request->synced_data.num_parts_delivery_sent) {
+ goto has_work_remaining;
+ }
+ }
+
+ goto no_work_remaining;
+
+ has_work_remaining:
+ work_remaining = true;
+
+ if (request != NULL) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Returning request %p for part %d of %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number,
+ auto_ranged_get->synced_data.total_num_parts);
+ }
+
+ no_work_remaining:
+
+ if (!work_remaining) {
+ aws_s3_meta_request_set_success_synced(meta_request, s_s3_auto_ranged_get_success_status(meta_request));
+ if (auto_ranged_get->synced_data.num_parts_checksum_validated ==
+ auto_ranged_get->synced_data.num_parts_requested) {
+ /* If we have validated the checksum for every part, we set the meta request level checksum validation
+ * result. */
+ meta_request->synced_data.finish_result.did_validate = true;
+ meta_request->synced_data.finish_result.validation_algorithm = auto_ranged_get->validation_algorithm;
+ }
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ if (work_remaining) {
+ *out_request = request;
+ } else {
+ AWS_ASSERT(request == NULL);
+ aws_s3_meta_request_finish(meta_request);
+ }
+
+ return work_remaining;
+}
+
+/* Given a request, prepare it for sending based on its description. */
+static int s_s3_auto_ranged_get_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(request);
+
+ /* Generate a new ranged get request based on the original message. */
+ struct aws_http_message *message = NULL;
+ struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl;
+
+ switch (request->request_tag) {
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT:
+ /* A head object will be a copy of the original headers but with a HEAD request method. */
+ message = aws_s3_message_util_copy_http_message_no_body_all_headers(
+ meta_request->allocator, meta_request->initial_request_message);
+ if (message) {
+ aws_http_message_set_request_method(message, g_head_method);
+ }
+ break;
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART:
+ message = aws_s3_ranged_get_object_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ request->part_range_start,
+ request->part_range_end);
+ break;
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE:
+ message = aws_s3_message_util_copy_http_message_no_body_all_headers(
+ meta_request->allocator, meta_request->initial_request_message);
+ break;
+ }
+
+ if (message == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not create message for request with tag %d for auto-ranged-get meta request.",
+ (void *)meta_request,
+ request->request_tag);
+ goto message_alloc_failed;
+ }
+ if (meta_request->checksum_config.validate_response_checksum) {
+ aws_http_headers_set(aws_http_message_get_headers(message), g_request_validation_mode, g_enabled);
+ }
+ if (!auto_ranged_get->initial_message_has_if_match_header && auto_ranged_get->etag) {
+ /* Add the if_match to the request */
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Added the If-Match header to request %p for part %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number);
+ aws_http_headers_set(
+ aws_http_message_get_headers(message),
+ g_if_match_header_name,
+ aws_byte_cursor_from_string(auto_ranged_get->etag));
+ }
+
+ aws_s3_request_setup_send_data(request, message);
+ aws_http_message_release(message);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Created request %p for part %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number);
+
+ return AWS_OP_SUCCESS;
+
+message_alloc_failed:
+
+ return AWS_OP_ERR;
+}
+
+/* Check the finish result of the meta request, in case the request failed because of downloading an empty file */
+static bool s_check_empty_file_download_error(struct aws_s3_request *failed_request) {
+ struct aws_http_headers *failed_headers = failed_request->send_data.response_headers;
+ struct aws_byte_buf failed_body = failed_request->send_data.response_body;
+ if (failed_headers && failed_body.capacity > 0) {
+ struct aws_byte_cursor content_type;
+ AWS_ZERO_STRUCT(content_type);
+ if (!aws_http_headers_get(failed_headers, g_content_type_header_name, &content_type)) {
+ /* Content type found */
+ if (aws_byte_cursor_eq_ignore_case(&content_type, &g_application_xml_value)) {
+ /* XML response */
+ struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&failed_body);
+ struct aws_string *size =
+ aws_xml_get_top_level_tag(failed_request->allocator, &g_object_size_value, &body_cursor);
+ bool check_size = aws_string_eq_c_str(size, "0");
+ aws_string_destroy(size);
+ if (check_size) {
+ return true;
+ }
+ }
+ }
+ }
+ return false;
+}
+
+static int s_discover_object_range_and_content_length(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code,
+ uint64_t *out_total_content_length,
+ uint64_t *out_object_range_start,
+ uint64_t *out_object_range_end) {
+ AWS_PRECONDITION(out_total_content_length);
+ AWS_PRECONDITION(out_object_range_start);
+ AWS_PRECONDITION(out_object_range_end);
+
+ int result = AWS_OP_ERR;
+
+ uint64_t total_content_length = 0;
+ uint64_t object_range_start = 0;
+ uint64_t object_range_end = 0;
+
+ AWS_ASSERT(request->discovers_object_size);
+ struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl;
+ switch (request->request_tag) {
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT:
+ if (error_code != AWS_ERROR_SUCCESS) {
+ /* If the head request failed, there's nothing we can do, so resurface the error code. */
+ aws_raise_error(error_code);
+ break;
+ }
+
+ /* There should be a Content-Length header that indicates the total size of the range.*/
+ if (aws_s3_parse_content_length_response_header(
+ meta_request->allocator, request->send_data.response_headers, &total_content_length)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not find content-length header for request %p",
+ (void *)meta_request,
+ (void *)request);
+ break;
+ }
+
+ /* If the initial message had a range header, there should also be a Content-Range header that specifies the
+ * object range and total object size. Otherwise the size and range should be equal to the
+ * total_content_length. */
+ if (!auto_ranged_get->initial_message_has_range_header) {
+ object_range_end = total_content_length - 1;
+ } else if (aws_s3_parse_content_range_response_header(
+ meta_request->allocator,
+ request->send_data.response_headers,
+ &object_range_start,
+ &object_range_end,
+ NULL)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not find content-range header for request %p",
+ (void *)meta_request,
+ (void *)request);
+ break;
+ }
+
+ result = AWS_OP_SUCCESS;
+ break;
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART:
+ AWS_ASSERT(request->part_number == 1);
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ /* If we hit an empty file while trying to discover the object-size via part, then this request
+ * failure is as designed. */
+ if (s_check_empty_file_download_error(request)) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Detected empty file with request %p. Sending new request without range header.",
+ (void *)meta_request,
+ (void *)request);
+
+ total_content_length = 0ULL;
+
+ result = AWS_OP_SUCCESS;
+ } else {
+ /* Otherwise, resurface the error code. */
+ aws_raise_error(error_code);
+ }
+ break;
+ }
+
+ AWS_ASSERT(request->send_data.response_headers != NULL);
+
+ /* Parse the object size from the part response. */
+ if (aws_s3_parse_content_range_response_header(
+ meta_request->allocator, request->send_data.response_headers, NULL, NULL, &total_content_length)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not find content-range header for request %p",
+ (void *)meta_request,
+ (void *)request);
+
+ break;
+ }
+
+ /* When discovering the object size via first-part, the object range is the entire object. */
+ object_range_start = 0;
+ object_range_end = total_content_length - 1;
+
+ result = AWS_OP_SUCCESS;
+ break;
+ default:
+ AWS_ASSERT(false);
+ break;
+ }
+
+ if (result == AWS_OP_SUCCESS) {
+ *out_total_content_length = total_content_length;
+ *out_object_range_start = object_range_start;
+ *out_object_range_end = object_range_end;
+ }
+
+ return result;
+}
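
As a concrete illustration of the discovery path above: if the first 8 MiB part of a 26214400-byte (25 MiB) object comes back with `Content-Range: bytes 0-8388607/26214400`, the total content length parsed from the trailing field is 26214400 and the object range becomes [0, 26214399]; with an 8 MiB part size that works out to 4 parts, the last of which is 1048576 bytes.
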
+
+static void s_s3_auto_ranged_get_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_auto_ranged_get *auto_ranged_get = meta_request->impl;
+ AWS_PRECONDITION(auto_ranged_get);
+
+ uint64_t total_content_length = 0ULL;
+ uint64_t object_range_start = 0ULL;
+ uint64_t object_range_end = 0ULL;
+
+ bool found_object_size = false;
+ bool request_failed = error_code != AWS_ERROR_SUCCESS;
+
+ if (request->discovers_object_size) {
+
+ /* Try to discover the object-range and content length.*/
+ if (s_discover_object_range_and_content_length(
+ meta_request, request, error_code, &total_content_length, &object_range_start, &object_range_end)) {
+
+ error_code = aws_last_error_or_unknown();
+
+ goto update_synced_data;
+ }
+
+ if (!request_failed && !auto_ranged_get->initial_message_has_if_match_header) {
+ AWS_ASSERT(auto_ranged_get->etag == NULL);
+ struct aws_byte_cursor etag_header_value;
+
+ if (aws_http_headers_get(request->send_data.response_headers, g_etag_header_name, &etag_header_value)) {
+ aws_raise_error(AWS_ERROR_S3_MISSING_ETAG);
+ error_code = AWS_ERROR_S3_MISSING_ETAG;
+ goto update_synced_data;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Etag received for the meta request. value is: " PRInSTR "",
+ (void *)meta_request,
+ AWS_BYTE_CURSOR_PRI(etag_header_value));
+ auto_ranged_get->etag = aws_string_new_from_cursor(auto_ranged_get->base.allocator, &etag_header_value);
+ }
+
+ /* If we were able to discover the object-range/content length successfully, then any error code that was passed
+ * into this function is being handled and does not indicate an overall failure.*/
+ error_code = AWS_ERROR_SUCCESS;
+ found_object_size = true;
+
+ if (meta_request->headers_callback != NULL) {
+ struct aws_http_headers *response_headers = aws_http_headers_new(meta_request->allocator);
+
+ copy_http_headers(request->send_data.response_headers, response_headers);
+
+ /* If this request is a part, then the content range isn't applicable. */
+ if (request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART) {
+ /* For now, we can assume that discovery of size via the first part of the object does not apply to
+ * breaking up a ranged request. If it ever does, then we will need to repopulate this header. */
+ AWS_ASSERT(!auto_ranged_get->initial_message_has_range_header);
+
+ aws_http_headers_erase(response_headers, g_content_range_header_name);
+ }
+
+ char content_length_buffer[64] = "";
+ snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64, total_content_length);
+ aws_http_headers_set(
+ response_headers, g_content_length_header_name, aws_byte_cursor_from_c_str(content_length_buffer));
+
+ if (meta_request->headers_callback(
+ meta_request,
+ response_headers,
+ s_s3_auto_ranged_get_success_status(meta_request),
+ meta_request->user_data)) {
+
+ error_code = aws_last_error_or_unknown();
+ }
+ meta_request->headers_callback = NULL;
+
+ aws_http_headers_release(response_headers);
+ }
+ }
+
+update_synced_data:
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ /* If the object range was found, then record it. */
+ if (found_object_size) {
+ AWS_ASSERT(!auto_ranged_get->synced_data.object_range_known);
+
+ auto_ranged_get->synced_data.object_range_known = true;
+ auto_ranged_get->synced_data.object_range_start = object_range_start;
+ auto_ranged_get->synced_data.object_range_end = object_range_end;
+ auto_ranged_get->synced_data.total_num_parts =
+ aws_s3_get_num_parts(meta_request->part_size, object_range_start, object_range_end);
+ }
+
+ switch (request->request_tag) {
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_HEAD_OBJECT:
+ auto_ranged_get->synced_data.head_object_completed = true;
+ AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Head object completed.", (void *)meta_request);
+ break;
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART:
+ ++auto_ranged_get->synced_data.num_parts_completed;
+
+ if (!request_failed) {
+
+ /* Record the number of parts whose checksum has been validated */
+ if (request->did_validate) {
+ if (auto_ranged_get->validation_algorithm == AWS_SCA_NONE) {
+ auto_ranged_get->validation_algorithm = request->validation_algorithm;
+ }
+ /* They should be the same. */
+ AWS_ASSERT(auto_ranged_get->validation_algorithm == request->validation_algorithm);
+ ++auto_ranged_get->synced_data.num_parts_checksum_validated;
+ }
+ ++auto_ranged_get->synced_data.num_parts_successful;
+
+ aws_s3_meta_request_stream_response_body_synced(meta_request, request);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: %d out of %d parts have completed.",
+ (void *)meta_request,
+ (auto_ranged_get->synced_data.num_parts_successful +
+ auto_ranged_get->synced_data.num_parts_failed),
+ auto_ranged_get->synced_data.total_num_parts);
+ } else {
+ ++auto_ranged_get->synced_data.num_parts_failed;
+ }
+ break;
+ case AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_INITIAL_MESSAGE:
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST, "id=%p Get of file using initial message completed.", (void *)meta_request);
+ auto_ranged_get->synced_data.get_without_range_completed = true;
+ break;
+ }
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS &&
+ request->send_data.response_status == AWS_HTTP_STATUS_CODE_412_PRECONDITION_FAILED &&
+ !auto_ranged_get->initial_message_has_if_match_header) {
+ /* Use a clearer error code, since we added the If-Match header under the hood. */
+ error_code = AWS_ERROR_S3_OBJECT_MODIFIED;
+ }
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ if (error_code == AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH) {
+ /* The checksum mismatched; tell the user that we validated the checksum and which
+ * algorithm was used. */
+ meta_request->synced_data.finish_result.did_validate = true;
+ meta_request->synced_data.finish_result.validation_algorithm = request->validation_algorithm;
+ }
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
new file mode 100644
index 0000000000..0386f86b04
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_auto_ranged_put.c
@@ -0,0 +1,1305 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_auto_ranged_put.h"
+#include "aws/s3/private/s3_checksums.h"
+#include "aws/s3/private/s3_list_parts.h"
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/io/stream.h>
+
+static const struct aws_byte_cursor s_upload_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("UploadId");
+static const size_t s_complete_multipart_upload_init_body_size_bytes = 512;
+static const size_t s_abort_multipart_upload_init_body_size_bytes = 512;
+
+static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+};
+
+static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request);
+
+static bool s_s3_auto_ranged_put_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request);
+
+static int s_s3_auto_ranged_put_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request);
+
+static void s_s3_auto_ranged_put_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code);
+
+static void s_s3_auto_ranged_put_send_request_finish(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code);
+
+static int s_s3_auto_ranged_put_pause(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_resume_token **resume_token);
+
+static bool s_process_part_info(const struct aws_s3_part_info *info, void *user_data) {
+ struct aws_s3_auto_ranged_put *auto_ranged_put = user_data;
+
+ struct aws_string *etag = aws_strip_quotes(auto_ranged_put->base.allocator, info->e_tag);
+
+ const struct aws_byte_cursor *checksum_cur = NULL;
+ switch (auto_ranged_put->base.checksum_config.checksum_algorithm) {
+ case AWS_SCA_CRC32:
+ checksum_cur = &info->checksumCRC32;
+ break;
+ case AWS_SCA_CRC32C:
+ checksum_cur = &info->checksumCRC32C;
+ break;
+ case AWS_SCA_SHA1:
+ checksum_cur = &info->checksumSHA1;
+ break;
+ case AWS_SCA_SHA256:
+ checksum_cur = &info->checksumSHA256;
+ break;
+ case AWS_SCA_NONE:
+ break;
+ default:
+ AWS_ASSERT(false);
+ break;
+ }
+
+ if (checksum_cur) {
+ aws_byte_buf_init_copy_from_cursor(
+ &auto_ranged_put->encoded_checksum_list[info->part_number - 1],
+ auto_ranged_put->base.allocator,
+ *checksum_cur);
+ }
+
+ aws_array_list_set_at(&auto_ranged_put->synced_data.etag_list, &etag, info->part_number - 1);
+
+ return true;
+}
+
+/*
+ * Validates token and updates part variables. Noop if token is null.
+ */
+static int s_try_update_part_info_from_resume_token(
+ uint64_t content_length,
+ const struct aws_s3_meta_request_resume_token *resume_token,
+ size_t *out_part_size,
+ uint32_t *out_total_num_parts) {
+
+ if (!resume_token) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (resume_token->type != AWS_S3_META_REQUEST_TYPE_PUT_OBJECT) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Invalid token type.");
+ goto invalid_argument_cleanup;
+ }
+
+ if (resume_token->multipart_upload_id == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Multipart upload id missing.");
+ goto invalid_argument_cleanup;
+ }
+
+ if (resume_token->part_size < g_s3_min_upload_part_size) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create resume auto-ranged-put meta request; part size of %" PRIu64
+ " specified in the token is below minimum threshold for multi-part.",
+ (uint64_t)resume_token->part_size);
+
+ goto invalid_argument_cleanup;
+ }
+
+ if ((uint32_t)resume_token->total_num_parts > g_s3_max_num_upload_parts) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create resume auto-ranged-put meta request; total number of parts %" PRIu32
+ " specified in the token is too large for platform.",
+ (uint32_t)resume_token->total_num_parts);
+
+ goto invalid_argument_cleanup;
+ }
+
+ uint32_t num_parts = (uint32_t)(content_length / resume_token->part_size);
+
+ if ((content_length % resume_token->part_size) > 0) {
+ ++num_parts;
+ }
+
+ if (resume_token->total_num_parts != num_parts) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create auto-ranged-put meta request; persisted number of parts %zu"
+ " does not match expected number of parts based on length of the body.",
+ resume_token->total_num_parts);
+
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ *out_part_size = resume_token->part_size;
+ *out_total_num_parts = (uint32_t)resume_token->total_num_parts;
+
+ return AWS_OP_SUCCESS;
+
+invalid_argument_cleanup:
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+}
+
+/**
+ * Initializes state necessary to resume upload. Noop if token is null.
+ */
+static int s_try_init_resume_state_from_persisted_data(
+ struct aws_allocator *allocator,
+ struct aws_s3_auto_ranged_put *auto_ranged_put,
+ const struct aws_s3_meta_request_resume_token *resume_token) {
+
+ if (resume_token == NULL) {
+ auto_ranged_put->synced_data.list_parts_operation = NULL;
+ auto_ranged_put->synced_data.list_parts_state.completed = true;
+ auto_ranged_put->synced_data.list_parts_state.started = true;
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_cursor request_path;
+ if (aws_http_message_get_request_path(auto_ranged_put->base.initial_request_message, &request_path)) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Could not load persisted state. Request path could not be read.");
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ auto_ranged_put->synced_data.num_parts_sent = 0;
+ auto_ranged_put->synced_data.num_parts_completed = 0;
+ auto_ranged_put->synced_data.create_multipart_upload_sent = true;
+ auto_ranged_put->synced_data.create_multipart_upload_completed = true;
+ auto_ranged_put->upload_id = aws_string_clone_or_reuse(allocator, resume_token->multipart_upload_id);
+
+ struct aws_s3_list_parts_params list_parts_params = {
+ .key = request_path,
+ .upload_id = aws_byte_cursor_from_string(auto_ranged_put->upload_id),
+ .on_part = s_process_part_info,
+ .user_data = auto_ranged_put,
+ };
+
+ auto_ranged_put->synced_data.list_parts_operation = aws_s3_list_parts_operation_new(allocator, &list_parts_params);
+
+ struct aws_http_headers *needed_response_headers = aws_http_headers_new(allocator);
+ const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers);
+ struct aws_http_headers *initial_headers =
+ aws_http_message_get_headers(auto_ranged_put->base.initial_request_message);
+
+ /* Copy the headers that would have been used for create-multipart-upload from the initial message, since create
+ * will never be called in this flow. */
+ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) {
+ const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index];
+ struct aws_byte_cursor header_value;
+ AWS_ZERO_STRUCT(header_value);
+
+ if (aws_http_headers_get(initial_headers, *header_name, &header_value) == AWS_OP_SUCCESS) {
+ aws_http_headers_set(needed_response_headers, *header_name, header_value);
+ }
+ }
+
+ auto_ranged_put->synced_data.needed_response_headers = needed_response_headers;
+
+ return AWS_OP_SUCCESS;
+}
+
+static struct aws_s3_meta_request_vtable s_s3_auto_ranged_put_vtable = {
+ .update = s_s3_auto_ranged_put_update,
+ .send_request_finish = s_s3_auto_ranged_put_send_request_finish,
+ .prepare_request = s_s3_auto_ranged_put_prepare_request,
+ .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default,
+ .sign_request = aws_s3_meta_request_sign_request_default,
+ .finished_request = s_s3_auto_ranged_put_request_finished,
+ .destroy = s_s3_meta_request_auto_ranged_put_destroy,
+ .finish = aws_s3_meta_request_finish_default,
+ .pause = s_s3_auto_ranged_put_pause,
+};
+
+/* Allocate a new auto-ranged put meta request */
+struct aws_s3_meta_request *aws_s3_meta_request_auto_ranged_put_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ size_t part_size,
+ uint64_t content_length,
+ uint32_t num_parts,
+ const struct aws_s3_meta_request_options *options) {
+
+ /* These should already have been validated by the caller. */
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->message);
+ AWS_PRECONDITION(aws_http_message_get_body_stream(options->message));
+
+ if (s_try_update_part_info_from_resume_token(content_length, options->resume_token, &part_size, &num_parts)) {
+ return NULL;
+ }
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_auto_ranged_put));
+
+ if (aws_s3_meta_request_init_base(
+ allocator,
+ client,
+ part_size,
+ client->compute_content_md5 == AWS_MR_CONTENT_MD5_ENABLED ||
+ aws_http_headers_has(aws_http_message_get_headers(options->message), g_content_md5_header_name),
+ options,
+ auto_ranged_put,
+ &s_s3_auto_ranged_put_vtable,
+ &auto_ranged_put->base)) {
+ aws_mem_release(allocator, auto_ranged_put);
+ return NULL;
+ }
+
+ auto_ranged_put->content_length = content_length;
+ auto_ranged_put->synced_data.total_num_parts = num_parts;
+ auto_ranged_put->upload_id = NULL;
+ auto_ranged_put->resume_token = options->resume_token;
+
+ aws_s3_meta_request_resume_token_acquire(auto_ranged_put->resume_token);
+
+ auto_ranged_put->threaded_update_data.next_part_number = 1;
+ auto_ranged_put->prepare_data.num_parts_read_from_stream = 0;
+
+ struct aws_string **etag_c_array = aws_mem_calloc(allocator, sizeof(struct aws_string *), num_parts);
+ aws_array_list_init_static(
+ &auto_ranged_put->synced_data.etag_list, etag_c_array, num_parts, sizeof(struct aws_string *));
+ auto_ranged_put->encoded_checksum_list = aws_mem_calloc(allocator, sizeof(struct aws_byte_buf), num_parts);
+
+ if (s_try_init_resume_state_from_persisted_data(allocator, auto_ranged_put, options->resume_token)) {
+ goto error_clean_up;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST, "id=%p Created new Auto-Ranged Put Meta Request.", (void *)&auto_ranged_put->base);
+
+ return &auto_ranged_put->base;
+
+error_clean_up:
+ aws_s3_meta_request_release(&auto_ranged_put->base);
+ return NULL;
+}
+
+/* Destroy our auto-ranged put meta request */
+static void s_s3_meta_request_auto_ranged_put_destroy(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+
+ aws_string_destroy(auto_ranged_put->upload_id);
+ auto_ranged_put->upload_id = NULL;
+
+ auto_ranged_put->resume_token = aws_s3_meta_request_resume_token_release(auto_ranged_put->resume_token);
+
+ aws_s3_paginated_operation_release(auto_ranged_put->synced_data.list_parts_operation);
+
+ for (size_t etag_index = 0; etag_index < auto_ranged_put->synced_data.total_num_parts; ++etag_index) {
+ struct aws_string *etag = NULL;
+
+ aws_array_list_get_at(&auto_ranged_put->synced_data.etag_list, &etag, etag_index);
+ aws_string_destroy(etag);
+ }
+
+ aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token);
+
+ for (size_t checksum_index = 0; checksum_index < auto_ranged_put->synced_data.total_num_parts; ++checksum_index) {
+ aws_byte_buf_clean_up(&auto_ranged_put->encoded_checksum_list[checksum_index]);
+ }
+ aws_mem_release(meta_request->allocator, auto_ranged_put->synced_data.etag_list.data);
+ aws_mem_release(meta_request->allocator, auto_ranged_put->encoded_checksum_list);
+ aws_array_list_clean_up(&auto_ranged_put->synced_data.etag_list);
+ aws_http_headers_release(auto_ranged_put->synced_data.needed_response_headers);
+ aws_mem_release(meta_request->allocator, auto_ranged_put);
+}
+
+static bool s_s3_auto_ranged_put_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(out_request);
+
+ struct aws_s3_request *request = NULL;
+ bool work_remaining = false;
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) {
+ /* If resuming and the list-parts request has not been sent, do it now. */
+ if (!auto_ranged_put->synced_data.list_parts_state.started) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ auto_ranged_put->synced_data.list_parts_state.started = true;
+
+ goto has_work_remaining;
+ }
+
+ if (auto_ranged_put->synced_data.list_parts_state.continues) {
+ /* If the list-parts operation needs to continue, send another list-parts request. */
+ AWS_ASSERT(auto_ranged_put->synced_data.list_parts_continuation_token != NULL);
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+ auto_ranged_put->synced_data.list_parts_state.continues = false;
+ goto has_work_remaining;
+ }
+
+ if (!auto_ranged_put->synced_data.list_parts_state.completed) {
+ /* waiting on list parts to finish. */
+ goto has_work_remaining;
+ }
+
+ /* If we haven't already sent a create-multipart-upload message, do so now. */
+ if (!auto_ranged_put->synced_data.create_multipart_upload_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ auto_ranged_put->synced_data.create_multipart_upload_sent = true;
+
+ goto has_work_remaining;
+ }
+
+ /* If the create-multipart-upload message hasn't been completed, then there is still additional work to do,
+ * but it can't be done yet. */
+ if (!auto_ranged_put->synced_data.create_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If we haven't sent all of the parts yet, then set up to send a new part now. */
+ if (auto_ranged_put->synced_data.num_parts_sent < auto_ranged_put->synced_data.total_num_parts) {
+
+ /* Check if the etag/checksum list has the result already */
+ int part_index = auto_ranged_put->threaded_update_data.next_part_number - 1;
+ for (size_t etag_index = part_index;
+ etag_index < aws_array_list_length(&auto_ranged_put->synced_data.etag_list);
+ ++etag_index) {
+ struct aws_string *etag = NULL;
+
+ if (!aws_array_list_get_at(&auto_ranged_put->synced_data.etag_list, &etag, etag_index) && etag) {
+ /* Part already uploaded; skip it here, and prepare will take care of adjusting the buffer. */
+ ++auto_ranged_put->threaded_update_data.next_part_number;
+
+ } else {
+ // Incomplete part found. Break out and create a request for it.
+ break;
+ }
+ }
+
+ // Something went really wrong if we still have parts to send but already have etags for all parts.
+ AWS_FATAL_ASSERT(
+ auto_ranged_put->threaded_update_data.next_part_number <=
+ auto_ranged_put->synced_data.total_num_parts);
+
+ if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) {
+ uint32_t num_parts_in_flight =
+ (auto_ranged_put->synced_data.num_parts_sent -
+ auto_ranged_put->synced_data.num_parts_completed);
+
+ /* Because uploads must read from their streams serially, we try to limit the number of in-flight
+ * requests for a given multipart upload if we can. */
+ if (num_parts_in_flight > 0) {
+ goto has_work_remaining;
+ }
+ }
+
+ /* Allocate a request for another part. */
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ request->part_number = auto_ranged_put->threaded_update_data.next_part_number;
+
+ ++auto_ranged_put->threaded_update_data.next_part_number;
+ ++auto_ranged_put->synced_data.num_parts_sent;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Returning request %p for part %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number);
+
+ goto has_work_remaining;
+ }
+
+ /* There is one more request to send after all of the parts (the complete-multipart-upload) but it can't be
+ * done until all of the parts have been completed.*/
+ if (auto_ranged_put->synced_data.num_parts_completed != auto_ranged_put->synced_data.total_num_parts) {
+ goto has_work_remaining;
+ }
+
+ /* If the complete-multipart-upload request hasn't been sent yet, then send it now. */
+ if (!auto_ranged_put->synced_data.complete_multipart_upload_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ auto_ranged_put->synced_data.complete_multipart_upload_sent = true;
+
+ goto has_work_remaining;
+ }
+
+ /* Wait for the complete-multipart-upload request to finish. */
+ if (!auto_ranged_put->synced_data.complete_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ goto no_work_remaining;
+ } else {
+
+ /* If the create multipart upload hasn't been sent, then there is nothing left to do when canceling. */
+ if (!auto_ranged_put->synced_data.create_multipart_upload_sent) {
+ goto no_work_remaining;
+ }
+
+ /* If the create-multipart-upload request is still in flight, wait for it to finish. */
+ if (!auto_ranged_put->synced_data.create_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If the number of parts completed is less than the number of parts sent, then we need to wait until all of
+ * those parts are done sending before aborting. */
+ if (auto_ranged_put->synced_data.num_parts_completed < auto_ranged_put->synced_data.num_parts_sent) {
+ goto has_work_remaining;
+ }
+
+ /* If the complete-multipart-upload is already in flight, then we can't necessarily send an abort. */
+ if (auto_ranged_put->synced_data.complete_multipart_upload_sent &&
+ !auto_ranged_put->synced_data.complete_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If the upload was paused or resume failed, we don't abort the multipart upload. */
+ if (meta_request->synced_data.finish_result.error_code == AWS_ERROR_S3_PAUSED ||
+ meta_request->synced_data.finish_result.error_code == AWS_ERROR_S3_RESUME_FAILED) {
+ goto no_work_remaining;
+ }
+
+ /* If the complete-multipart-upload completed successfully, then there is nothing to abort since the
+ * transfer has already finished. */
+ if (auto_ranged_put->synced_data.complete_multipart_upload_completed &&
+ auto_ranged_put->synced_data.complete_multipart_upload_error_code == AWS_ERROR_SUCCESS) {
+ goto no_work_remaining;
+ }
+
+ /* If we made it here, and the abort-multipart-upload message hasn't been sent yet, then do so now. */
+ if (!auto_ranged_put->synced_data.abort_multipart_upload_sent) {
+ if (auto_ranged_put->upload_id == NULL) {
+ goto no_work_remaining;
+ }
+ if (auto_ranged_put->base.synced_data.finish_result.error_code == AWS_ERROR_SUCCESS) {
+ /* Do not send an abort on success, even if we haven't sent complete-MPU, in case we are resuming after
+ * the MPU already completed. */
+ goto no_work_remaining;
+ }
+
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND);
+
+ auto_ranged_put->synced_data.abort_multipart_upload_sent = true;
+
+ goto has_work_remaining;
+ }
+
+ /* Wait for the abort-multipart-upload request to finish. */
+ if (!auto_ranged_put->synced_data.abort_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ goto no_work_remaining;
+ }
+
+ has_work_remaining:
+ work_remaining = true;
+
+ no_work_remaining:
+
+ if (!work_remaining) {
+ aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ if (work_remaining) {
+ *out_request = request;
+ } else {
+ AWS_ASSERT(request == NULL);
+
+ aws_s3_meta_request_finish(meta_request);
+ }
+
+ return work_remaining;
+}
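+
+/* Summary of the ordering enforced above: part requests are handed out until every part has been sent,
+ * the complete-multipart-upload request is only produced once all parts have completed, and on
+ * cancellation an abort-multipart-upload is produced unless the upload was paused, resume failed, or
+ * the complete-multipart-upload already succeeded. */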
+
+/**
+ * Helper to compute the request body size for a given part.
+ * Returns the part size, or, when the content length is not evenly divisible by the part size, the
+ * smaller size of the final part.
+ */
+static size_t s_compute_request_body_size(struct aws_s3_meta_request *meta_request, uint32_t part_number) {
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+
+ size_t request_body_size = meta_request->part_size;
+ /* Last part--adjust size to match remaining content length. */
+ if (part_number == auto_ranged_put->synced_data.total_num_parts) {
+ size_t content_remainder = (size_t)(auto_ranged_put->content_length % (uint64_t)meta_request->part_size);
+
+ if (content_remainder > 0) {
+ request_body_size = content_remainder;
+ }
+ }
+
+ return request_body_size;
+}
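+
+/* For example: with content_length = 20 MiB and part_size = 8 MiB there are 3 parts; parts 1 and 2
+ * return 8 MiB and part 3 returns the 4 MiB remainder. When the content length divides evenly, the
+ * last part simply returns part_size as well. */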
+
+static int s_verify_part_matches_checksum(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf part_body,
+ enum aws_s3_checksum_algorithm algorithm,
+ struct aws_byte_buf part_checksum) {
+ AWS_PRECONDITION(allocator);
+
+ if (algorithm == AWS_SCA_NONE) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_buf checksum;
+ if (aws_byte_buf_init(&checksum, allocator, aws_get_digest_size_from_algorithm(algorithm))) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_byte_buf encoded_checksum = {0};
+
+ int return_status = AWS_OP_SUCCESS;
+ struct aws_byte_cursor body_cur = aws_byte_cursor_from_buf(&part_body);
+
+ size_t encoded_len = 0;
+ if (aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(algorithm), &encoded_len)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to determine length of encoded checksum.");
+ return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
+ goto on_done;
+ }
+
+ if (aws_checksum_compute(allocator, algorithm, &body_cur, &checksum, 0)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to compute checksum for the skipped part.");
+ return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_init(&encoded_checksum, allocator, encoded_len)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to allocate buffer for encoded checksum.");
+ return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor checksum_cur = aws_byte_cursor_from_buf(&checksum);
+ if (aws_base64_encode(&checksum_cur, &encoded_checksum)) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to resume upload. Unable to encode checksum.");
+ return_status = aws_raise_error(AWS_ERROR_S3_RESUME_FAILED);
+ goto on_done;
+ }
+
+ if (!aws_byte_buf_eq(&encoded_checksum, &part_checksum)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "Failed to resume upload. Checksum for previously uploaded part does not match");
+ return_status = aws_raise_error(AWS_ERROR_S3_RESUMED_PART_CHECKSUM_MISMATCH);
+ goto on_done;
+ }
+
+on_done:
+ aws_byte_buf_clean_up(&checksum);
+ aws_byte_buf_clean_up(&encoded_checksum);
+ return return_status;
+}
+
+/**
+ * Skips parts of the input stream that were previously uploaded.
+ * Assumes the input stream is currently positioned at part num_parts_read_from_stream and reads into
+ * a temporary buffer until it reaches skip_until_part_number (i.e. skipping does include that part).
+ * If a checksum is configured on the request and checksums were recorded for the previously uploaded
+ * parts, each skipped part is verified against its recorded checksum.
+ */
+static int s_skip_parts_from_stream(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t num_parts_read_from_stream,
+ uint32_t skip_until_part_number) {
+
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(num_parts_read_from_stream <= skip_until_part_number);
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+
+ AWS_PRECONDITION(skip_until_part_number <= auto_ranged_put->synced_data.total_num_parts);
+
+ if (num_parts_read_from_stream == skip_until_part_number) {
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_buf temp_body_buf;
+ if (aws_byte_buf_init(&temp_body_buf, meta_request->allocator, 0)) {
+ return AWS_OP_ERR;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Skipping parts %d through %d",
+ (void *)meta_request,
+ num_parts_read_from_stream,
+ skip_until_part_number);
+
+ int return_status = AWS_OP_SUCCESS;
+ for (uint32_t part_index = num_parts_read_from_stream; part_index < skip_until_part_number; ++part_index) {
+
+ size_t request_body_size = s_compute_request_body_size(meta_request, part_index + 1);
+
+ if (temp_body_buf.capacity != request_body_size) {
+ // reinit with correct size
+ aws_byte_buf_clean_up(&temp_body_buf);
+ if (aws_byte_buf_init(&temp_body_buf, meta_request->allocator, request_body_size)) {
+ return AWS_OP_ERR;
+ }
+ } else {
+ // reuse buffer
+ aws_byte_buf_reset(&temp_body_buf, false);
+ }
+
+ if (aws_s3_meta_request_read_body(meta_request, &temp_body_buf)) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to resume upload. Input steam cannot be read.");
+ return_status = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ // compare the checksum of the skipped part to the checksum of the previously uploaded part
+ if (auto_ranged_put->encoded_checksum_list[part_index].len > 0 &&
+ s_verify_part_matches_checksum(
+ meta_request->allocator,
+ temp_body_buf,
+ meta_request->checksum_config.checksum_algorithm,
+ auto_ranged_put->encoded_checksum_list[part_index])) {
+ return_status = AWS_OP_ERR;
+ goto on_done;
+ }
+ }
+
+on_done:
+ aws_byte_buf_clean_up(&temp_body_buf);
+ return return_status;
+}
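+
+/* For example, if parts 1-5 were already uploaded before a pause and the first part prepared after the
+ * resume is part 6 (with nothing read from the stream yet), the call is
+ * s_skip_parts_from_stream(meta_request, 0, 5): parts 1 through 5 are read into the scratch buffer and,
+ * where checksums are available, verified, leaving the stream positioned at the start of part 6. */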
+
+/* Given a request, prepare it for sending based on its description. */
+static int s_s3_auto_ranged_put_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request) {
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+ AWS_PRECONDITION(auto_ranged_put);
+
+ struct aws_http_message *message = NULL;
+
+ switch (request->request_tag) {
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: {
+
+ int message_creation_result = AWS_OP_ERR;
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ if (auto_ranged_put->synced_data.list_parts_continuation_token) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p ListParts for Multi-part Upload, with ID:%s, continues with token:%s.",
+ (void *)meta_request,
+ aws_string_c_str(auto_ranged_put->upload_id),
+ aws_string_c_str(auto_ranged_put->synced_data.list_parts_continuation_token));
+ struct aws_byte_cursor continuation_cur =
+ aws_byte_cursor_from_string(auto_ranged_put->synced_data.list_parts_continuation_token);
+ message_creation_result = aws_s3_construct_next_paginated_request_http_message(
+ auto_ranged_put->synced_data.list_parts_operation, &continuation_cur, &message);
+ } else {
+ message_creation_result = aws_s3_construct_next_paginated_request_http_message(
+ auto_ranged_put->synced_data.list_parts_operation, NULL, &message);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ if (message_creation_result) {
+ goto message_create_failed;
+ }
+ if (meta_request->checksum_config.checksum_algorithm == AWS_SCA_NONE) {
+ /* We don't need to worry about a user-supplied pre-calculated checksum; for a multipart upload
+ * the checksum can only be calculated on the client. */
+ aws_s3_message_util_copy_headers(
+ meta_request->initial_request_message,
+ message,
+ g_s3_list_parts_excluded_headers,
+ g_s3_list_parts_excluded_headers_count,
+ true);
+ } else {
+ aws_s3_message_util_copy_headers(
+ meta_request->initial_request_message,
+ message,
+ g_s3_list_parts_with_checksum_excluded_headers,
+ g_s3_list_parts_with_checksum_excluded_headers_count,
+ true);
+ }
+
+ break;
+ }
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: {
+
+ /* Create the message to create a new multipart upload. */
+ message = aws_s3_create_multipart_upload_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ meta_request->checksum_config.checksum_algorithm);
+
+ break;
+ }
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: {
+
+ size_t request_body_size = s_compute_request_body_size(meta_request, request->part_number);
+
+ if (request->num_times_prepared == 0) {
+ if (s_skip_parts_from_stream(
+ meta_request,
+ auto_ranged_put->prepare_data.num_parts_read_from_stream,
+ request->part_number - 1)) {
+ goto message_create_failed;
+ }
+ auto_ranged_put->prepare_data.num_parts_read_from_stream = request->part_number - 1;
+
+ aws_byte_buf_init(&request->request_body, meta_request->allocator, request_body_size);
+
+ if (aws_s3_meta_request_read_body(meta_request, &request->request_body)) {
+ goto message_create_failed;
+ }
+ ++auto_ranged_put->prepare_data.num_parts_read_from_stream;
+ }
+ /* Create a new put-object message to upload a part. */
+ message = aws_s3_upload_part_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ &request->request_body,
+ request->part_number,
+ auto_ranged_put->upload_id,
+ meta_request->should_compute_content_md5,
+ &meta_request->checksum_config,
+ &auto_ranged_put->encoded_checksum_list[request->part_number - 1]);
+ break;
+ }
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {
+
+ if (request->num_times_prepared == 0) {
+
+ /* Corner case: the last part(s) were previously uploaded before a pause. Read them from the
+ * input stream and verify their checksums where available. */
+ if (s_skip_parts_from_stream(
+ meta_request,
+ auto_ranged_put->prepare_data.num_parts_read_from_stream,
+ auto_ranged_put->synced_data.total_num_parts)) {
+ goto message_create_failed;
+ }
+ auto_ranged_put->prepare_data.num_parts_read_from_stream = auto_ranged_put->synced_data.total_num_parts;
+
+ aws_byte_buf_init(
+ &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes);
+ } else {
+ aws_byte_buf_reset(&request->request_body, false);
+ }
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ AWS_FATAL_ASSERT(auto_ranged_put->upload_id);
+ AWS_ASSERT(request->request_body.capacity > 0);
+ aws_byte_buf_reset(&request->request_body, false);
+
+ /* Build the message to complete our multipart upload, which includes a payload describing all of
+ * our completed parts. */
+ message = aws_s3_complete_multipart_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ &request->request_body,
+ auto_ranged_put->upload_id,
+ &auto_ranged_put->synced_data.etag_list,
+ auto_ranged_put->encoded_checksum_list,
+ meta_request->checksum_config.checksum_algorithm);
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ break;
+ }
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: {
+ AWS_FATAL_ASSERT(auto_ranged_put->upload_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Abort multipart upload request for upload id %s.",
+ (void *)meta_request,
+ aws_string_c_str(auto_ranged_put->upload_id));
+
+ if (request->num_times_prepared == 0) {
+ aws_byte_buf_init(
+ &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes);
+ } else {
+ aws_byte_buf_reset(&request->request_body, false);
+ }
+
+ /* Build the message to abort our multipart upload */
+ message = aws_s3_abort_multipart_upload_message_new(
+ meta_request->allocator, meta_request->initial_request_message, auto_ranged_put->upload_id);
+
+ break;
+ }
+ }
+
+ if (message == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not allocate message for request with tag %d for auto-ranged-put meta request.",
+ (void *)meta_request,
+ request->request_tag);
+ goto message_create_failed;
+ }
+
+ aws_s3_request_setup_send_data(request, message);
+
+ aws_http_message_release(message);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Prepared request %p for part %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number);
+
+ return AWS_OP_SUCCESS;
+
+message_create_failed:
+
+ return AWS_OP_ERR;
+}
+
+/* Invoked before retry */
+static void s_s3_auto_ranged_put_send_request_finish(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code) {
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ /* Request tags differ between the different types of meta requests. */
+ switch (request->request_tag) {
+
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {
+ /* For complete multipart upload, the server may return an async error. */
+ aws_s3_meta_request_send_request_finish_handle_async_error(connection, stream, error_code);
+ break;
+ }
+
+ default:
+ aws_s3_meta_request_send_request_finish_default(connection, stream, error_code);
+ break;
+ }
+}
+
+/* Invoked when no retry will happen. */
+static void s_s3_auto_ranged_put_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code) {
+
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+
+ switch (request->request_tag) {
+
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_LIST_PARTS: {
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ bool has_more_results = false;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+
+ struct aws_byte_cursor body_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body);
+ /* Clear the previous token before parsing the new response. */
+ aws_string_destroy(auto_ranged_put->synced_data.list_parts_continuation_token);
+ auto_ranged_put->synced_data.list_parts_continuation_token = NULL;
+ if (aws_s3_paginated_operation_on_response(
+ auto_ranged_put->synced_data.list_parts_operation,
+ &body_cursor,
+ &auto_ranged_put->synced_data.list_parts_continuation_token,
+ &has_more_results)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "id=%p Failed to parse list parts response.", (void *)meta_request);
+ error_code = AWS_ERROR_S3_LIST_PARTS_PARSE_FAILED;
+ } else if (!has_more_results) {
+ for (size_t etag_index = 0;
+ etag_index < aws_array_list_length(&auto_ranged_put->synced_data.etag_list);
+ etag_index++) {
+ struct aws_string *etag = NULL;
+ aws_array_list_get_at(&auto_ranged_put->synced_data.etag_list, &etag, etag_index);
+ if (etag != NULL) {
+ /* Update the number of parts sent/completed previously */
+ ++auto_ranged_put->synced_data.num_parts_sent;
+ ++auto_ranged_put->synced_data.num_parts_completed;
+ }
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Resuming PutObject. %d out of %d parts have completed during previous request.",
+ (void *)meta_request,
+ auto_ranged_put->synced_data.num_parts_completed,
+ auto_ranged_put->synced_data.total_num_parts);
+ }
+ }
+
+ if (has_more_results) {
+ /* If list parts has more results, make sure list parts continues. */
+ auto_ranged_put->synced_data.list_parts_state.continues = true;
+ auto_ranged_put->synced_data.list_parts_state.completed = false;
+ } else {
+ /* No more results; the list parts operation is complete. */
+ auto_ranged_put->synced_data.list_parts_state.continues = false;
+ auto_ranged_put->synced_data.list_parts_state.completed = true;
+ }
+ auto_ranged_put->synced_data.list_parts_error_code = error_code;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ if (request->send_data.response_status == AWS_HTTP_STATUS_CODE_404_NOT_FOUND &&
+ auto_ranged_put->resume_token->num_parts_completed ==
+ auto_ranged_put->resume_token->total_num_parts) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Resuming PutObject ended early, since there is nothing to resume"
+ "(request finished prior to being paused?)",
+ (void *)meta_request);
+
+ aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
+ } else {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+ break;
+ }
+
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: {
+ struct aws_http_headers *needed_response_headers = NULL;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ needed_response_headers = aws_http_headers_new(meta_request->allocator);
+ const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers);
+
+ /* Copy any headers that we'll need later for the final, transformed headers. */
+ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) {
+ const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index];
+ struct aws_byte_cursor header_value;
+ AWS_ZERO_STRUCT(header_value);
+
+ if (aws_http_headers_get(request->send_data.response_headers, *header_name, &header_value) ==
+ AWS_OP_SUCCESS) {
+ aws_http_headers_set(needed_response_headers, *header_name, header_value);
+ }
+ }
+
+ struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body);
+
+ /* Find the upload id for this multipart upload. */
+ struct aws_string *upload_id =
+ aws_xml_get_top_level_tag(meta_request->allocator, &s_upload_id, &buffer_byte_cursor);
+
+ if (upload_id == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not find upload-id in create-multipart-upload response",
+ (void *)meta_request);
+
+ aws_raise_error(AWS_ERROR_S3_MISSING_UPLOAD_ID);
+ error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID;
+ } else {
+ /* Store the multipart upload id. */
+ auto_ranged_put->upload_id = upload_id;
+ }
+ }
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ AWS_ASSERT(auto_ranged_put->synced_data.needed_response_headers == NULL);
+ auto_ranged_put->synced_data.needed_response_headers = needed_response_headers;
+
+ auto_ranged_put->synced_data.create_multipart_upload_completed = true;
+ auto_ranged_put->synced_data.list_parts_error_code = error_code;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+ break;
+ }
+
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_PART: {
+ size_t part_number = request->part_number;
+ AWS_FATAL_ASSERT(part_number > 0);
+ size_t part_index = part_number - 1;
+ struct aws_string *etag = NULL;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ /* Find the ETag header if it exists and cache it. */
+ struct aws_byte_cursor etag_within_quotes;
+
+ AWS_ASSERT(request->send_data.response_headers);
+
+ if (aws_http_headers_get(
+ request->send_data.response_headers, g_etag_header_name, &etag_within_quotes) !=
+ AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not find ETag header for request %p",
+ (void *)meta_request,
+ (void *)request);
+
+ error_code = AWS_ERROR_S3_MISSING_ETAG;
+ } else {
+ /* The ETag value arrives in quotes, but we don't want it in quotes when we send it back up
+ * later, so just get rid of the quotes now. */
+ etag = aws_strip_quotes(meta_request->allocator, etag_within_quotes);
+ }
+ }
+ if (error_code == AWS_ERROR_SUCCESS && meta_request->progress_callback != NULL) {
+ struct aws_s3_meta_request_progress progress = {
+ .bytes_transferred = meta_request->part_size,
+ .content_length = auto_ranged_put->content_length,
+ };
+ meta_request->progress_callback(meta_request, &progress, meta_request->user_data);
+ }
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ ++auto_ranged_put->synced_data.num_parts_completed;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: %d out of %d parts have completed.",
+ (void *)meta_request,
+ auto_ranged_put->synced_data.num_parts_completed,
+ auto_ranged_put->synced_data.total_num_parts);
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ AWS_ASSERT(etag != NULL);
+
+ ++auto_ranged_put->synced_data.num_parts_successful;
+
+ /* ETags need to be associated with their part number, so we keep the etag indices consistent with
+ * part numbers. This means we may have to add padding to the list in the case that parts finish out
+ * of order. */
+ aws_array_list_set_at(&auto_ranged_put->synced_data.etag_list, &etag, part_index);
+ } else {
+ ++auto_ranged_put->synced_data.num_parts_failed;
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ break;
+ }
+
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {
+ if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL) {
+ struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator);
+
+ /* Copy all the response headers from this request. */
+ copy_http_headers(request->send_data.response_headers, final_response_headers);
+
+ /* Copy over any response headers that we've previously determined are needed for this final
+ * response.
+ */
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ copy_http_headers(auto_ranged_put->synced_data.needed_response_headers, final_response_headers);
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ struct aws_byte_cursor response_body_cursor =
+ aws_byte_cursor_from_buf(&request->send_data.response_body);
+
+ /**
+ * TODO: The response body can contain an Error element even on a 200 response; see the Error member of
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/API_CompleteMultipartUpload.html#AmazonS3-CompleteMultipartUpload-response-CompleteMultipartUploadOutput
+ * We need to handle this case.
+ * TODO: should the checksum returned within the complete-multipart-upload response be exposed?
+ */
+
+ /* Grab the ETag for the entire object, and set it as a header. */
+ struct aws_string *etag_header_value =
+ aws_xml_get_top_level_tag(meta_request->allocator, &g_etag_header_name, &response_body_cursor);
+
+ if (etag_header_value != NULL) {
+ struct aws_byte_buf etag_header_value_byte_buf;
+ AWS_ZERO_STRUCT(etag_header_value_byte_buf);
+
+ replace_quote_entities(meta_request->allocator, etag_header_value, &etag_header_value_byte_buf);
+
+ aws_http_headers_set(
+ final_response_headers,
+ g_etag_header_name,
+ aws_byte_cursor_from_buf(&etag_header_value_byte_buf));
+
+ aws_string_destroy(etag_header_value);
+ aws_byte_buf_clean_up(&etag_header_value_byte_buf);
+ }
+
+ /* Notify the user of the headers. */
+ if (meta_request->headers_callback(
+ meta_request,
+ final_response_headers,
+ request->send_data.response_status,
+ meta_request->user_data)) {
+
+ error_code = aws_last_error_or_unknown();
+ }
+ meta_request->headers_callback = NULL;
+
+ aws_http_headers_release(final_response_headers);
+ }
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ auto_ranged_put->synced_data.complete_multipart_upload_completed = true;
+ auto_ranged_put->synced_data.complete_multipart_upload_error_code = error_code;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ break;
+ }
+ case AWS_S3_AUTO_RANGED_PUT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: {
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ auto_ranged_put->synced_data.abort_multipart_upload_error_code = error_code;
+ auto_ranged_put->synced_data.abort_multipart_upload_completed = true;
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+ break;
+ }
+ }
+}
+
+static int s_s3_auto_ranged_put_pause(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_resume_token **out_resume_token) {
+
+ *out_resume_token = NULL;
+
+ /* lock */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ struct aws_s3_auto_ranged_put *auto_ranged_put = meta_request->impl;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Pausing request with %u out of %u parts have completed.",
+ (void *)meta_request,
+ auto_ranged_put->synced_data.num_parts_completed,
+ auto_ranged_put->synced_data.total_num_parts);
+
+ /* The upload can be in one of several states:
+ * - not started, i.e. we haven't even called create-MPU yet - return success, the token is NULL,
+ * and cancel the upload
+ * - in the middle of the upload - return success, create a token, and cancel the upload
+ * - complete-MPU started - return success, generate a token, and try to cancel the complete-MPU
+ */
+ if (auto_ranged_put->synced_data.create_multipart_upload_completed) {
+
+ *out_resume_token = aws_s3_meta_request_resume_token_new(meta_request->allocator);
+
+ (*out_resume_token)->type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT;
+ (*out_resume_token)->multipart_upload_id =
+ aws_string_clone_or_reuse(meta_request->allocator, auto_ranged_put->upload_id);
+ (*out_resume_token)->part_size = meta_request->part_size;
+ (*out_resume_token)->total_num_parts = auto_ranged_put->synced_data.total_num_parts;
+ (*out_resume_token)->num_parts_completed = auto_ranged_put->synced_data.num_parts_completed;
+ }
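+
+ /* For example, pausing after 3 of 10 parts completed produces a token whose part_size,
+ * total_num_parts (10), num_parts_completed (3), and multipart_upload_id fields are filled in; the
+ * list-parts handling in s_s3_auto_ranged_put_request_finished consults these same fields through
+ * auto_ranged_put->resume_token when the upload is later resumed. */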
+
+ /**
+ * Cancel the meta request with the AWS_ERROR_S3_PAUSED error code so that the uploaded parts are not deleted.
+ * This allows the client to resume the upload later by supplying the persisted state in the meta request options.
+ */
+ aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_S3_PAUSED);
+
+ /* unlock */
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+
+ return AWS_OP_SUCCESS;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c b/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
new file mode 100644
index 0000000000..71c2b746aa
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_checksum_stream.c
@@ -0,0 +1,116 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_checksums.h"
+#include <aws/common/encoding.h>
+#include <aws/io/stream.h>
+
+struct aws_checksum_stream {
+ struct aws_input_stream base;
+ struct aws_allocator *allocator;
+
+ struct aws_input_stream *old_stream;
+ struct aws_s3_checksum *checksum;
+ struct aws_byte_buf checksum_result;
+ /* base64 encoded checksum of the stream, updated on destruction of stream */
+ struct aws_byte_buf *encoded_checksum_output;
+};
+
+static int s_aws_input_checksum_stream_seek(
+ struct aws_input_stream *stream,
+ int64_t offset,
+ enum aws_stream_seek_basis basis) {
+ (void)stream;
+ (void)offset;
+ (void)basis;
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "Cannot seek on checksum stream, as it will cause the checksum output to mismatch the checksum of the stream "
+ "contents");
+ AWS_ASSERT(false);
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+}
+
+static int s_aws_input_checksum_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) {
+ struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base);
+
+ size_t original_len = dest->len;
+ if (aws_input_stream_read(impl->old_stream, dest)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_cursor to_sum = aws_byte_cursor_from_buf(dest);
+ /* Move the cursor to the part to calculate the checksum */
+ aws_byte_cursor_advance(&to_sum, original_len);
+ /* If the read had failed, `aws_input_stream_read` would already have restored dest, so there is no
+ * extra error handling to do before updating the running checksum. */
+ return aws_checksum_update(impl->checksum, &to_sum);
+}
+
+static int s_aws_input_checksum_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) {
+ struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base);
+ return aws_input_stream_get_status(impl->old_stream, status);
+}
+
+static int s_aws_input_checksum_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) {
+ struct aws_checksum_stream *impl = AWS_CONTAINER_OF(stream, struct aws_checksum_stream, base);
+ return aws_input_stream_get_length(impl->old_stream, out_length);
+}
+
+/* We take ownership of the old input stream and destroy it along with this input stream. We want to be
+ * able to substitute the chunk_stream for the cursor stream currently used in
+ * s_s3_meta_request_default_prepare_request, which returns the new stream. To avoid tracking two input
+ * streams, we consume the cursor stream and destroy it together with this one. */
+static void s_aws_input_checksum_stream_destroy(struct aws_checksum_stream *impl) {
+ if (!impl) {
+ return;
+ }
+
+ int result = aws_checksum_finalize(impl->checksum, &impl->checksum_result, 0);
+ if (result != AWS_OP_SUCCESS) {
+ aws_byte_buf_reset(&impl->checksum_result, true);
+ }
+ AWS_ASSERT(result == AWS_OP_SUCCESS);
+ struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&impl->checksum_result);
+ AWS_FATAL_ASSERT(aws_base64_encode(&checksum_result_cursor, impl->encoded_checksum_output) == AWS_OP_SUCCESS);
+ aws_checksum_destroy(impl->checksum);
+ aws_input_stream_release(impl->old_stream);
+ aws_byte_buf_clean_up(&impl->checksum_result);
+ aws_mem_release(impl->allocator, impl);
+}
+
+static struct aws_input_stream_vtable s_aws_input_checksum_stream_vtable = {
+ .seek = s_aws_input_checksum_stream_seek,
+ .read = s_aws_input_checksum_stream_read,
+ .get_status = s_aws_input_checksum_stream_get_status,
+ .get_length = s_aws_input_checksum_stream_get_length,
+};
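+
+/* Illustrative usage, mirroring how aws_chunk_stream_new (s3_chunk_stream.c) drives this stream:
+ * size the output buffer for the base64-encoded digest, wrap the body stream, and read it to the end;
+ * the encoded checksum is written into the output buffer when the wrapper is released.
+ *
+ *   size_t encoded_len = 0;
+ *   aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(algorithm), &encoded_len);
+ *   struct aws_byte_buf encoded_checksum;
+ *   aws_byte_buf_init(&encoded_checksum, allocator, encoded_len);
+ *   struct aws_input_stream *wrapped =
+ *       aws_checksum_stream_new(allocator, body_stream, algorithm, &encoded_checksum);
+ *   ... read `wrapped` until end-of-stream, then aws_input_stream_release(wrapped) ...
+ */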
+
+struct aws_input_stream *aws_checksum_stream_new(
+ struct aws_allocator *allocator,
+ struct aws_input_stream *existing_stream,
+ enum aws_s3_checksum_algorithm algorithm,
+ struct aws_byte_buf *checksum_output) {
+ AWS_PRECONDITION(existing_stream);
+
+ struct aws_checksum_stream *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_checksum_stream));
+
+ impl->allocator = allocator;
+ impl->base.vtable = &s_aws_input_checksum_stream_vtable;
+
+ impl->checksum = aws_checksum_new(allocator, algorithm);
+ if (impl->checksum == NULL) {
+ goto on_error;
+ }
+ aws_byte_buf_init(&impl->checksum_result, allocator, impl->checksum->digest_size);
+ impl->old_stream = aws_input_stream_acquire(existing_stream);
+ impl->encoded_checksum_output = checksum_output;
+ aws_ref_count_init(
+ &impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_checksum_stream_destroy);
+
+ return &impl->base;
+on_error:
+ aws_mem_release(impl->allocator, impl);
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c b/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
new file mode 100644
index 0000000000..c16288535e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_checksums.c
@@ -0,0 +1,310 @@
+#include "aws/s3/private/s3_checksums.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/cal/hash.h>
+#include <aws/checksums/crc.h>
+#include <aws/io/stream.h>
+
+#define AWS_CRC32_LEN 4
+#define AWS_CRC32C_LEN 4
+
+size_t aws_get_digest_size_from_algorithm(enum aws_s3_checksum_algorithm algorithm) {
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ return AWS_CRC32C_LEN;
+ case AWS_SCA_CRC32:
+ return AWS_CRC32_LEN;
+ case AWS_SCA_SHA1:
+ return AWS_SHA1_LEN;
+ case AWS_SCA_SHA256:
+ return AWS_SHA256_LEN;
+ default:
+ return 0;
+ }
+}
+
+const struct aws_byte_cursor *aws_get_http_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm) {
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ return &g_crc32c_header_name;
+ case AWS_SCA_CRC32:
+ return &g_crc32_header_name;
+ case AWS_SCA_SHA1:
+ return &g_sha1_header_name;
+ case AWS_SCA_SHA256:
+ return &g_sha256_header_name;
+ default:
+ return NULL;
+ }
+}
+const struct aws_byte_cursor *aws_get_create_mpu_header_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm) {
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ return &g_crc32c_create_mpu_header_name;
+ case AWS_SCA_CRC32:
+ return &g_crc32_create_mpu_header_name;
+ case AWS_SCA_SHA1:
+ return &g_sha1_create_mpu_header_name;
+ case AWS_SCA_SHA256:
+ return &g_sha256_create_mpu_header_name;
+ default:
+ return NULL;
+ }
+}
+
+const struct aws_byte_cursor *aws_get_complete_mpu_name_from_algorithm(enum aws_s3_checksum_algorithm algorithm) {
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ return &g_crc32c_complete_mpu_name;
+ case AWS_SCA_CRC32:
+ return &g_crc32_complete_mpu_name;
+ case AWS_SCA_SHA1:
+ return &g_sha1_complete_mpu_name;
+ case AWS_SCA_SHA256:
+ return &g_sha256_complete_mpu_name;
+ default:
+ return NULL;
+ }
+}
+
+void s3_hash_destroy(struct aws_s3_checksum *checksum) {
+ struct aws_hash *hash = (struct aws_hash *)checksum->impl;
+ aws_hash_destroy(hash);
+ aws_mem_release(checksum->allocator, checksum);
+}
+
+int s3_hash_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum) {
+ struct aws_hash *hash = (struct aws_hash *)checksum->impl;
+ return aws_hash_update(hash, to_checksum);
+}
+
+int s3_hash_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output, size_t truncate_to) {
+ struct aws_hash *hash = (struct aws_hash *)checksum->impl;
+ checksum->good = false;
+ return aws_hash_finalize(hash, output, truncate_to);
+}
+
+typedef uint32_t (*crc_fn)(const uint8_t *, int, uint32_t);
+
+uint32_t aws_crc32_common(uint32_t previous, const struct aws_byte_cursor *buf, crc_fn checksum_fn) {
+
+ size_t length = buf->len;
+ uint8_t *buffer = buf->ptr;
+ uint32_t val = previous;
+ while (length > INT_MAX) {
+ val = checksum_fn(buffer, INT_MAX, val);
+ buffer += (size_t)INT_MAX;
+ length -= (size_t)INT_MAX;
+ }
+ return checksum_fn(buffer, (int)length, val);
+}
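+
+/* The INT_MAX chunking above only matters for very large cursors: e.g. a 3 GiB buffer is fed to the
+ * underlying CRC function as one INT_MAX-byte call followed by the remainder, with each call seeded by
+ * the previous running value, so the result matches a single pass over the whole buffer. */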
+
+int aws_crc_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *out, size_t truncate_to) {
+ if (!checksum->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ checksum->good = false;
+ size_t available_buffer = out->capacity - out->len;
+ size_t len = checksum->digest_size;
+ if (truncate_to && truncate_to < len) {
+ len = truncate_to;
+ }
+ if (available_buffer < len) {
+ return aws_raise_error(AWS_ERROR_SHORT_BUFFER);
+ }
+ AWS_PRECONDITION(aws_byte_buf_is_valid(out));
+ uint32_t tmp = aws_hton32(*(uint32_t *)checksum->impl);
+ if (aws_byte_buf_write(out, (uint8_t *)&tmp, len)) {
+ return AWS_OP_SUCCESS;
+ }
+ return aws_raise_error(AWS_ERROR_INVALID_BUFFER_SIZE);
+}
+
+int aws_crc32_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf) {
+ if (!checksum->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ *(uint32_t *)checksum->impl = aws_crc32_common(*(uint32_t *)checksum->impl, buf, aws_checksums_crc32);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_crc32c_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *buf) {
+ if (!checksum->good) {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ *(uint32_t *)checksum->impl = aws_crc32_common(*(uint32_t *)checksum->impl, buf, aws_checksums_crc32c);
+ return AWS_OP_SUCCESS;
+}
+
+void aws_crc_destroy(struct aws_s3_checksum *checksum) {
+ aws_mem_release(checksum->allocator, checksum->impl);
+ aws_mem_release(checksum->allocator, checksum);
+}
+
+static struct aws_checksum_vtable hash_vtable = {
+ .update = s3_hash_update,
+ .finalize = s3_hash_finalize,
+ .destroy = s3_hash_destroy,
+};
+
+static struct aws_checksum_vtable crc32_vtable = {
+ .update = aws_crc32_checksum_update,
+ .finalize = aws_crc_finalize,
+ .destroy = aws_crc_destroy,
+};
+static struct aws_checksum_vtable crc32c_vtable = {
+ .update = aws_crc32c_checksum_update,
+ .finalize = aws_crc_finalize,
+ .destroy = aws_crc_destroy,
+};
+
+struct aws_s3_checksum *aws_hash_new(struct aws_allocator *allocator, aws_hash_new_fn hash_fn) {
+ struct aws_s3_checksum *checksum = aws_mem_acquire(allocator, sizeof(struct aws_s3_checksum));
+ struct aws_hash *hash = hash_fn(allocator);
+ checksum->impl = (void *)hash;
+ checksum->allocator = allocator;
+ checksum->vtable = &hash_vtable;
+ checksum->good = true;
+ checksum->digest_size = hash->digest_size;
+ return checksum;
+}
+
+struct aws_s3_checksum *aws_crc32_checksum_new(struct aws_allocator *allocator) {
+ struct aws_s3_checksum *checksum = aws_mem_acquire(allocator, sizeof(struct aws_s3_checksum));
+ uint32_t *crc_val = aws_mem_acquire(allocator, sizeof(uint32_t));
+ *crc_val = 0;
+ checksum->vtable = &crc32_vtable;
+ checksum->allocator = allocator;
+ checksum->impl = crc_val;
+ checksum->good = true;
+ checksum->digest_size = AWS_CRC32_LEN;
+
+ return checksum;
+}
+
+struct aws_s3_checksum *aws_crc32c_checksum_new(struct aws_allocator *allocator) {
+ struct aws_s3_checksum *checksum = aws_mem_acquire(allocator, sizeof(struct aws_s3_checksum));
+ uint32_t *crc_val = aws_mem_acquire(allocator, sizeof(uint32_t));
+ *crc_val = 0;
+ checksum->vtable = &crc32c_vtable;
+ checksum->allocator = allocator;
+ checksum->impl = crc_val;
+ checksum->good = true;
+ checksum->digest_size = AWS_CRC32_LEN;
+ return checksum;
+}
+
+struct aws_s3_checksum *aws_checksum_new(struct aws_allocator *allocator, enum aws_s3_checksum_algorithm algorithm) {
+ struct aws_s3_checksum *checksum = NULL;
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ checksum = aws_crc32c_checksum_new(allocator);
+ break;
+ case AWS_SCA_CRC32:
+ checksum = aws_crc32_checksum_new(allocator);
+ break;
+ case AWS_SCA_SHA1:
+ checksum = aws_hash_new(allocator, aws_sha1_new);
+ break;
+ case AWS_SCA_SHA256:
+ checksum = aws_hash_new(allocator, aws_sha256_new);
+ break;
+ default:
+ return NULL;
+ }
+ checksum->algorithm = algorithm;
+ return checksum;
+}
+
+int aws_checksum_compute_fn(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *input,
+ struct aws_byte_buf *output,
+ struct aws_s3_checksum *(*aws_crc_new)(struct aws_allocator *),
+ size_t truncate_to) {
+ struct aws_s3_checksum *checksum = aws_crc_new(allocator);
+ if (aws_checksum_update(checksum, input)) {
+ aws_checksum_destroy(checksum);
+ return AWS_OP_ERR;
+ }
+ if (aws_checksum_finalize(checksum, output, truncate_to)) {
+ aws_checksum_destroy(checksum);
+ return AWS_OP_ERR;
+ }
+ aws_checksum_destroy(checksum);
+ return AWS_OP_SUCCESS;
+}
+
+void aws_checksum_destroy(struct aws_s3_checksum *checksum) {
+ if (checksum != NULL) {
+ checksum->vtable->destroy(checksum);
+ }
+}
+
+int aws_checksum_update(struct aws_s3_checksum *checksum, const struct aws_byte_cursor *to_checksum) {
+ return checksum->vtable->update(checksum, to_checksum);
+}
+
+int aws_checksum_finalize(struct aws_s3_checksum *checksum, struct aws_byte_buf *output, size_t truncate_to) {
+ return checksum->vtable->finalize(checksum, output, truncate_to);
+}
+
+int aws_checksum_compute(
+ struct aws_allocator *allocator,
+ enum aws_s3_checksum_algorithm algorithm,
+ const struct aws_byte_cursor *input,
+ struct aws_byte_buf *output,
+ size_t truncate_to) {
+
+ switch (algorithm) {
+ case AWS_SCA_SHA1:
+ return aws_sha1_compute(allocator, input, output, truncate_to);
+ case AWS_SCA_SHA256:
+ return aws_sha256_compute(allocator, input, output, truncate_to);
+ case AWS_SCA_CRC32:
+ return aws_checksum_compute_fn(allocator, input, output, aws_crc32_checksum_new, truncate_to);
+ case AWS_SCA_CRC32C:
+ return aws_checksum_compute_fn(allocator, input, output, aws_crc32c_checksum_new, truncate_to);
+ default:
+ return AWS_OP_ERR;
+ }
+}
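+
+/* For example, a config with validate_response_checksum = true and no validate_checksum_algorithms list
+ * enables response validation for all four supported algorithms, while supplying a list containing only
+ * AWS_SCA_CRC32C enables just crc32c. */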
+
+void checksum_config_init(struct checksum_config *internal_config, const struct aws_s3_checksum_config *config) {
+ AWS_ZERO_STRUCT(*internal_config);
+ if (!config) {
+ return;
+ }
+ internal_config->checksum_algorithm = config->checksum_algorithm;
+ internal_config->location = config->location;
+ internal_config->validate_response_checksum = config->validate_response_checksum;
+
+ if (config->validate_checksum_algorithms) {
+ const size_t count = aws_array_list_length(config->validate_checksum_algorithms);
+ for (size_t i = 0; i < count; ++i) {
+ enum aws_s3_checksum_algorithm algorithm;
+ aws_array_list_get_at(config->validate_checksum_algorithms, &algorithm, i);
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ internal_config->response_checksum_algorithms.crc32c = true;
+ break;
+ case AWS_SCA_CRC32:
+ internal_config->response_checksum_algorithms.crc32 = true;
+ break;
+ case AWS_SCA_SHA1:
+ internal_config->response_checksum_algorithms.sha1 = true;
+ break;
+ case AWS_SCA_SHA256:
+ internal_config->response_checksum_algorithms.sha256 = true;
+ break;
+ default:
+ break;
+ }
+ }
+
+ } else if (config->validate_response_checksum) {
+ internal_config->response_checksum_algorithms.crc32 = true;
+ internal_config->response_checksum_algorithms.crc32c = true;
+ internal_config->response_checksum_algorithms.sha1 = true;
+ internal_config->response_checksum_algorithms.sha256 = true;
+ }
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c b/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
new file mode 100644
index 0000000000..14dea36643
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_chunk_stream.c
@@ -0,0 +1,278 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_checksums.h"
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/io/stream.h>
+#include <inttypes.h>
+
+AWS_STATIC_STRING_FROM_LITERAL(s_carriage_return, "\r\n");
+AWS_STATIC_STRING_FROM_LITERAL(s_empty_chunk, "0\r\n");
+AWS_STATIC_STRING_FROM_LITERAL(s_final_chunk, "\r\n0\r\n");
+AWS_STATIC_STRING_FROM_LITERAL(s_colon, ":");
+AWS_STATIC_STRING_FROM_LITERAL(s_post_trailer, "\r\n\r\n");
+
+struct aws_chunk_stream;
+
+typedef int(set_stream_fn)(struct aws_chunk_stream *parent_stream);
+
+struct aws_chunk_stream {
+ struct aws_input_stream base;
+ struct aws_allocator *allocator;
+
+ /* The stream we are currently reading from; an aws_input_stream_byte_cursor provides the actual
+ * functionality for the pre- and post-chunk stages. */
+ struct aws_input_stream *current_stream;
+
+ struct aws_input_stream *checksum_stream;
+ struct aws_byte_buf checksum_result;
+ struct aws_byte_buf *checksum_result_output;
+ struct aws_byte_buf pre_chunk_buffer;
+ struct aws_byte_buf post_chunk_buffer;
+ const struct aws_byte_cursor *checksum_header_name;
+ int64_t length;
+ set_stream_fn *set_current_stream_fn;
+};
+
+static int s_set_null_stream(struct aws_chunk_stream *parent_stream) {
+ aws_input_stream_release(parent_stream->current_stream);
+ parent_stream->current_stream = NULL;
+ parent_stream->set_current_stream_fn = NULL;
+ aws_byte_buf_clean_up(&parent_stream->post_chunk_buffer);
+ return AWS_OP_SUCCESS;
+}
+
+static int s_set_post_chunk_stream(struct aws_chunk_stream *parent_stream) {
+ int64_t current_stream_length;
+ if (aws_input_stream_get_length(parent_stream->current_stream, &current_stream_length)) {
+ aws_input_stream_release(parent_stream->current_stream);
+ return AWS_OP_ERR;
+ }
+ aws_input_stream_release(parent_stream->current_stream);
+
+ struct aws_byte_cursor final_chunk_cursor;
+
+ if (current_stream_length > 0) {
+ final_chunk_cursor = aws_byte_cursor_from_string(s_final_chunk);
+ } else {
+ final_chunk_cursor = aws_byte_cursor_from_string(s_empty_chunk);
+ }
+ struct aws_byte_cursor post_trailer_cursor = aws_byte_cursor_from_string(s_post_trailer);
+ struct aws_byte_cursor colon_cursor = aws_byte_cursor_from_string(s_colon);
+
+ if (parent_stream->checksum_result.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "Failed to extract base64 encoded checksum of stream");
+ return aws_raise_error(AWS_ERROR_S3_CHECKSUM_CALCULATION_FAILED);
+ }
+ struct aws_byte_cursor checksum_result_cursor = aws_byte_cursor_from_buf(&parent_stream->checksum_result);
+ if (parent_stream->checksum_result_output &&
+ aws_byte_buf_init_copy_from_cursor(
+ parent_stream->checksum_result_output, aws_default_allocator(), checksum_result_cursor)) {
+ return AWS_OP_ERR;
+ }
+ if (aws_byte_buf_init(
+ &parent_stream->post_chunk_buffer,
+ aws_default_allocator(),
+ final_chunk_cursor.len + parent_stream->checksum_header_name->len + colon_cursor.len +
+ checksum_result_cursor.len + post_trailer_cursor.len)) {
+ goto error;
+ }
+ if (aws_byte_buf_append(&parent_stream->post_chunk_buffer, &final_chunk_cursor) ||
+ aws_byte_buf_append(&parent_stream->post_chunk_buffer, parent_stream->checksum_header_name) ||
+ aws_byte_buf_append(&parent_stream->post_chunk_buffer, &colon_cursor) ||
+ aws_byte_buf_append(&parent_stream->post_chunk_buffer, &checksum_result_cursor) ||
+ aws_byte_buf_append(&parent_stream->post_chunk_buffer, &post_trailer_cursor)) {
+ goto error;
+ }
+ struct aws_byte_cursor post_chunk_cursor = aws_byte_cursor_from_buf(&parent_stream->post_chunk_buffer);
+ parent_stream->current_stream = aws_input_stream_new_from_cursor(aws_default_allocator(), &post_chunk_cursor);
+ parent_stream->set_current_stream_fn = s_set_null_stream;
+ return AWS_OP_SUCCESS;
+error:
+ aws_byte_buf_clean_up(parent_stream->checksum_result_output);
+ aws_byte_buf_clean_up(&parent_stream->post_chunk_buffer);
+ return AWS_OP_ERR;
+}
+
+static int s_set_chunk_stream(struct aws_chunk_stream *parent_stream) {
+ aws_input_stream_release(parent_stream->current_stream);
+ parent_stream->current_stream = parent_stream->checksum_stream;
+ aws_byte_buf_clean_up(&parent_stream->pre_chunk_buffer);
+ parent_stream->checksum_stream = NULL;
+ parent_stream->set_current_stream_fn = s_set_post_chunk_stream;
+ return AWS_OP_SUCCESS;
+}
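+
+/* Taken together, the stages above and aws_chunk_stream_new below produce an aws-chunked body with a
+ * trailing checksum header. For a non-empty body the bytes read out are, in order:
+ *
+ *   <hex body length>\r\n <body bytes> \r\n0\r\n <checksum header name>:<base64 checksum> \r\n\r\n
+ *
+ * For an empty body the pre-chunk and body stages collapse and the final chunk marker is "0\r\n". */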
+
+static int s_aws_input_chunk_stream_seek(
+ struct aws_input_stream *stream,
+ int64_t offset,
+ enum aws_stream_seek_basis basis) {
+ (void)stream;
+ (void)offset;
+ (void)basis;
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "Cannot seek on chunk stream, as it will cause the checksum output to mismatch the checksum of the stream"
+ "contents");
+ AWS_ASSERT(false);
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+}
+
+static int s_aws_input_chunk_stream_read(struct aws_input_stream *stream, struct aws_byte_buf *dest) {
+ struct aws_chunk_stream *impl = AWS_CONTAINER_OF(stream, struct aws_chunk_stream, base);
+
+ struct aws_stream_status status;
+ AWS_ZERO_STRUCT(status);
+ while (impl->current_stream != NULL && dest->len < dest->capacity) {
+ int err = aws_input_stream_read(impl->current_stream, dest);
+ if (err) {
+ return err;
+ }
+ if (aws_input_stream_get_status(impl->current_stream, &status)) {
+ return AWS_OP_ERR;
+ }
+ if (status.is_end_of_stream && impl->set_current_stream_fn(impl)) {
+ return AWS_OP_ERR;
+ }
+ }
+ return AWS_OP_SUCCESS;
+}
+
+static int s_aws_input_chunk_stream_get_status(struct aws_input_stream *stream, struct aws_stream_status *status) {
+ struct aws_chunk_stream *impl = AWS_CONTAINER_OF(stream, struct aws_chunk_stream, base);
+
+ if (impl->current_stream == NULL) {
+ status->is_end_of_stream = true;
+ status->is_valid = true;
+ return AWS_OP_SUCCESS;
+ }
+ int res = aws_input_stream_get_status(impl->current_stream, status);
+ if (res != AWS_OP_SUCCESS) {
+ /* We are only at end-of-stream when current_stream is NULL; otherwise current_stream will be swapped
+ * out to feed more data. */
+ status->is_end_of_stream = false;
+ }
+ return res;
+}
+
+static int s_aws_input_chunk_stream_get_length(struct aws_input_stream *stream, int64_t *out_length) {
+ struct aws_chunk_stream *impl = AWS_CONTAINER_OF(stream, struct aws_chunk_stream, base);
+ *out_length = impl->length;
+ return AWS_OP_SUCCESS;
+}
+
+static void s_aws_input_chunk_stream_destroy(struct aws_chunk_stream *impl) {
+ if (impl) {
+ if (impl->current_stream) {
+ aws_input_stream_release(impl->current_stream);
+ }
+ if (impl->checksum_stream) {
+ aws_input_stream_release(impl->checksum_stream);
+ }
+ aws_byte_buf_clean_up(&impl->pre_chunk_buffer);
+ aws_byte_buf_clean_up(&impl->checksum_result);
+ aws_byte_buf_clean_up(&impl->post_chunk_buffer);
+ aws_mem_release(impl->allocator, impl);
+ }
+}
+
+static struct aws_input_stream_vtable s_aws_input_chunk_stream_vtable = {
+ .seek = s_aws_input_chunk_stream_seek,
+ .read = s_aws_input_chunk_stream_read,
+ .get_status = s_aws_input_chunk_stream_get_status,
+ .get_length = s_aws_input_chunk_stream_get_length,
+};
+
+struct aws_input_stream *aws_chunk_stream_new(
+ struct aws_allocator *allocator,
+ struct aws_input_stream *existing_stream,
+ enum aws_s3_checksum_algorithm algorithm,
+ struct aws_byte_buf *checksum_output) {
+
+ struct aws_chunk_stream *impl = aws_mem_calloc(allocator, 1, sizeof(struct aws_chunk_stream));
+
+ impl->allocator = allocator;
+ impl->base.vtable = &s_aws_input_chunk_stream_vtable;
+ impl->checksum_result_output = checksum_output;
+ int64_t stream_length = 0;
+ int64_t final_chunk_len = 0;
+ if (aws_input_stream_get_length(existing_stream, &stream_length)) {
+ goto error;
+ }
+ struct aws_byte_cursor pre_chunk_cursor = aws_byte_cursor_from_string(s_carriage_return);
+ char stream_length_string[32];
+ AWS_ZERO_ARRAY(stream_length_string);
+ snprintf(stream_length_string, AWS_ARRAY_SIZE(stream_length_string), "%" PRIX64, stream_length);
+ struct aws_string *stream_length_aws_string = aws_string_new_from_c_str(allocator, stream_length_string);
+ struct aws_byte_cursor stream_length_cursor = aws_byte_cursor_from_string(stream_length_aws_string);
+ if (aws_byte_buf_init(&impl->pre_chunk_buffer, allocator, stream_length_cursor.len + pre_chunk_cursor.len)) {
+ goto error;
+ }
+ if (aws_byte_buf_append(&impl->pre_chunk_buffer, &stream_length_cursor)) {
+ goto error;
+ }
+ aws_string_destroy(stream_length_aws_string);
+ if (aws_byte_buf_append(&impl->pre_chunk_buffer, &pre_chunk_cursor)) {
+ goto error;
+ }
+
+ size_t checksum_len = aws_get_digest_size_from_algorithm(algorithm);
+
+ size_t encoded_checksum_len = 0;
+ if (aws_base64_compute_encoded_len(checksum_len, &encoded_checksum_len)) {
+ goto error;
+ }
+ if (aws_byte_buf_init(&impl->checksum_result, allocator, encoded_checksum_len)) {
+ goto error;
+ }
+
+ impl->checksum_stream = aws_checksum_stream_new(allocator, existing_stream, algorithm, &impl->checksum_result);
+ if (impl->checksum_stream == NULL) {
+ goto error;
+ }
+
+ int64_t prechunk_stream_len = 0;
+ int64_t colon_len = s_colon->len;
+ int64_t post_trailer_len = s_post_trailer->len;
+
+ struct aws_byte_cursor complete_pre_chunk_cursor = aws_byte_cursor_from_buf(&impl->pre_chunk_buffer);
+
+ if (stream_length > 0) {
+ impl->current_stream = aws_input_stream_new_from_cursor(allocator, &complete_pre_chunk_cursor);
+ final_chunk_len = s_final_chunk->len;
+ if (impl->current_stream == NULL) {
+ goto error;
+ }
+ impl->set_current_stream_fn = s_set_chunk_stream;
+ } else {
+ impl->current_stream = impl->checksum_stream;
+ final_chunk_len = s_empty_chunk->len;
+ impl->checksum_stream = NULL;
+ impl->set_current_stream_fn = s_set_post_chunk_stream;
+ }
+
+ impl->checksum_header_name = aws_get_http_header_name_from_algorithm(algorithm);
+
+ if (aws_input_stream_get_length(impl->current_stream, &prechunk_stream_len)) {
+ goto error;
+ }
+ /* we subtract one since aws_base64_compute_encoded_len accounts for the null terminator which won't show up in our
+ * stream */
+ impl->length = prechunk_stream_len + stream_length + final_chunk_len + impl->checksum_header_name->len + colon_len +
+ encoded_checksum_len + post_trailer_len - 1;
+
+ AWS_ASSERT(impl->current_stream);
+ aws_ref_count_init(&impl->base.ref_count, impl, (aws_simple_completion_callback *)s_aws_input_chunk_stream_destroy);
+ return &impl->base;
+
+error:
+ aws_input_stream_release(impl->checksum_stream);
+ aws_input_stream_release(impl->current_stream);
+ aws_byte_buf_clean_up(&impl->pre_chunk_buffer);
+ aws_byte_buf_clean_up(&impl->checksum_result);
+ aws_mem_release(impl->allocator, impl);
+ return NULL;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_client.c b/contrib/restricted/aws/aws-c-s3/source/s3_client.c
new file mode 100644
index 0000000000..4dd80bd4f0
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_client.c
@@ -0,0 +1,2038 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_auto_ranged_get.h"
+#include "aws/s3/private/s3_auto_ranged_put.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_default_meta_request.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_util.h"
+
+#include <aws/auth/credentials.h>
+#include <aws/common/assert.h>
+#include <aws/common/atomics.h>
+#include <aws/common/clock.h>
+#include <aws/common/device_random.h>
+#include <aws/common/environment.h>
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/common/system_info.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/proxy.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/host_resolver.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/io/socket.h>
+#include <aws/io/stream.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <aws/s3/private/s3_copy_object.h>
+#include <inttypes.h>
+#include <math.h>
+
+#ifdef _MSC_VER
+# pragma warning(disable : 4232) /* function pointer to dll symbol */
+#endif /* _MSC_VER */
+
+struct aws_s3_meta_request_work {
+ struct aws_linked_list_node node;
+ struct aws_s3_meta_request *meta_request;
+};
+
+static const enum aws_log_level s_log_level_client_stats = AWS_LL_INFO;
+
+static const uint32_t s_max_requests_multiplier = 4;
+
+/* TODO Provide analysis on origins of this value. */
+static const double s_throughput_per_vip_gbps = 4.0;
+
+/* Preferred amount of active connections per meta request type. */
+const uint32_t g_num_conns_per_vip_meta_request_look_up[AWS_S3_META_REQUEST_TYPE_MAX] = {
+ 10, /* AWS_S3_META_REQUEST_TYPE_DEFAULT */
+ 10, /* AWS_S3_META_REQUEST_TYPE_GET_OBJECT */
+ 10, /* AWS_S3_META_REQUEST_TYPE_PUT_OBJECT */
+ 10 /* AWS_S3_META_REQUEST_TYPE_COPY_OBJECT */
+};
+
+/* Should be the max value in g_num_conns_per_vip_meta_request_look_up */
+const uint32_t g_max_num_connections_per_vip = 10;
+
+/**
+ * The default part size is 8 MiB, which gave the best performance in our experiments.
+ * The default max part size is SIZE_MAX on 32-bit systems (around 4 GiB) and 5 GiB on 64-bit systems.
+ * The server-side part size limit is 5 GiB and the object size limit is currently 5 TiB, so this should cover all cases.
+ * The max number of upload parts is 10000, which limits the object size to about 39 TiB on 32-bit and 49 TiB on 64-bit.
+ * TODO Provide more information on other values.
+ */
+static const size_t s_default_part_size = 8 * 1024 * 1024;
+static const uint64_t s_default_max_part_size = SIZE_MAX < 5368709120ULL ? SIZE_MAX : 5368709120ULL;
+static const double s_default_throughput_target_gbps = 10.0;
+static const uint32_t s_default_max_retries = 5;
+static size_t s_dns_host_address_ttl_seconds = 5 * 60;
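+
+/* With the 8 MiB default part size, the 10000-part cap covers objects up to roughly 78 GiB; larger
+ * objects need a proportionally larger part size (up to the max part size above). */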
+
+/* Default time until a connection is declared dead, while handling a request but seeing no activity.
+ * 30 seconds mirrors the value currently used by the Java SDK. */
+static const uint32_t s_default_throughput_failure_interval_seconds = 30;
+
+/* Called when ref count is 0. */
+static void s_s3_client_start_destroy(void *user_data);
+
+/* Called by s_s3_client_process_work_default when all shutdown criteria have been met. */
+static void s_s3_client_finish_destroy_default(struct aws_s3_client *client);
+
+/* Called when the body streaming elg shutdown has completed. */
+static void s_s3_client_body_streaming_elg_shutdown(void *user_data);
+
+static void s_s3_client_create_connection_for_request(struct aws_s3_client *client, struct aws_s3_request *request);
+
+/* Callback which handles the HTTP connection retrieved by acquire_http_connection. */
+static void s_s3_client_on_acquire_http_connection(
+ struct aws_http_connection *http_connection,
+ int error_code,
+ void *user_data);
+
+static void s_s3_client_push_meta_request_synced(
+ struct aws_s3_client *client,
+ struct aws_s3_meta_request *meta_request);
+
+/* Schedule task for processing work. (Calls the corresponding vtable function.) */
+static void s_s3_client_schedule_process_work_synced(struct aws_s3_client *client);
+
+/* Default implementation for scheduling processing of work. */
+static void s_s3_client_schedule_process_work_synced_default(struct aws_s3_client *client);
+
+/* Actual task function that processes work. */
+static void s_s3_client_process_work_task(struct aws_task *task, void *arg, enum aws_task_status task_status);
+
+static void s_s3_client_process_work_default(struct aws_s3_client *client);
+
+static void s_s3_client_endpoint_shutdown_callback(struct aws_s3_client *client);
+
+/* Default factory function for creating a meta request. */
+static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default(
+ struct aws_s3_client *client,
+ const struct aws_s3_meta_request_options *options);
+
+static struct aws_s3_client_vtable s_s3_client_default_vtable = {
+ .meta_request_factory = s_s3_client_meta_request_factory_default,
+ .acquire_http_connection = aws_http_connection_manager_acquire_connection,
+ .get_host_address_count = aws_host_resolver_get_host_address_count,
+ .schedule_process_work_synced = s_s3_client_schedule_process_work_synced_default,
+ .process_work = s_s3_client_process_work_default,
+ .endpoint_shutdown_callback = s_s3_client_endpoint_shutdown_callback,
+ .finish_destroy = s_s3_client_finish_destroy_default,
+};
+
+void aws_s3_set_dns_ttl(size_t ttl) {
+ s_dns_host_address_ttl_seconds = ttl;
+}
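+
+/* Illustrative use: calling aws_s3_set_dns_ttl(60) before creating meta requests shortens the DNS TTL handed to
+ * endpoints created afterwards; the value is read each time an endpoint is set up in aws_s3_client_make_meta_request
+ * below, so endpoints that already exist keep the TTL they were created with. */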
+
+/* Returns the max number of connections allowed.
+ *
+ * When meta_request is NULL, this returns the overall number of connections allowed for the client.
+ *
+ * When meta_request is not NULL, this returns the max number of connections allowed for that meta request's type on
+ * its endpoint.
+ */
+uint32_t aws_s3_client_get_max_active_connections(
+ struct aws_s3_client *client,
+ struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(client);
+
+ uint32_t num_connections_per_vip = g_max_num_connections_per_vip;
+ uint32_t num_vips = client->ideal_vip_count;
+
+ if (meta_request != NULL) {
+ num_connections_per_vip = g_num_conns_per_vip_meta_request_look_up[meta_request->type];
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_ASSERT(endpoint != NULL);
+
+ AWS_ASSERT(client->vtable->get_host_address_count);
+ size_t num_known_vips = client->vtable->get_host_address_count(
+ client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A);
+
+ /* If the number of known VIPs is less than our ideal VIP count, clamp num_vips down to the known count. */
+ if (num_known_vips < (size_t)num_vips) {
+ num_vips = (uint32_t)num_known_vips;
+ }
+ }
+
+ /* We always want to allow for at least one VIP worth of connections. */
+ if (num_vips == 0) {
+ num_vips = 1;
+ }
+
+ uint32_t max_active_connections = num_vips * num_connections_per_vip;
+
+ if (client->max_active_connections_override > 0 &&
+ client->max_active_connections_override < max_active_connections) {
+ max_active_connections = client->max_active_connections_override;
+ }
+
+ return max_active_connections;
+}
+
+/* Returns the max number of requests allowed to be in memory */
+uint32_t aws_s3_client_get_max_requests_in_flight(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+ return aws_s3_client_get_max_active_connections(client, NULL) * s_max_requests_multiplier;
+}
+
+/* Returns the max number of requests that should be in the preparation stage (i.e., reading from a stream, being
+ * signed, etc.) */
+uint32_t aws_s3_client_get_max_requests_prepare(struct aws_s3_client *client) {
+ return aws_s3_client_get_max_active_connections(client, NULL);
+}
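+
+/* Illustrative arithmetic, assuming the default configuration (10 Gbps throughput target, 4 Gbps per VIP,
+ * 10 connections per VIP, no overrides):
+ *   ideal_vip_count             = ceil(10 / 4)            = 3
+ *   max active connections      = 3 * 10                  = 30
+ *   max requests in flight      = 30 * 4 (the multiplier) = 120
+ *   max requests in preparation = 30
+ * For a specific meta request, the VIP count is additionally clamped to the number of host addresses currently
+ * known for its endpoint. */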
+
+static uint32_t s_s3_client_get_num_requests_network_io(
+ struct aws_s3_client *client,
+ enum aws_s3_meta_request_type meta_request_type) {
+ AWS_PRECONDITION(client);
+
+ uint32_t num_requests_network_io = 0;
+
+ if (meta_request_type == AWS_S3_META_REQUEST_TYPE_MAX) {
+ for (uint32_t i = 0; i < AWS_S3_META_REQUEST_TYPE_MAX; ++i) {
+ num_requests_network_io += (uint32_t)aws_atomic_load_int(&client->stats.num_requests_network_io[i]);
+ }
+ } else {
+ num_requests_network_io =
+ (uint32_t)aws_atomic_load_int(&client->stats.num_requests_network_io[meta_request_type]);
+ }
+
+ return num_requests_network_io;
+}
+
+void aws_s3_client_lock_synced_data(struct aws_s3_client *client) {
+ aws_mutex_lock(&client->synced_data.lock);
+}
+
+void aws_s3_client_unlock_synced_data(struct aws_s3_client *client) {
+ aws_mutex_unlock(&client->synced_data.lock);
+}
+
+struct aws_s3_client *aws_s3_client_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_client_config *client_config) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(client_config);
+
+ if (client_config->client_bootstrap == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "Cannot create client from client_config; client_bootstrap provided in options is invalid.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ /* Cannot be less than zero. If zero, use default. */
+ if (client_config->throughput_target_gbps < 0.0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "Cannot create client from client_config; throughput_target_gbps cannot less than or equal to 0.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+#ifdef BYO_CRYPTO
+ if (client_config->tls_mode == AWS_MR_TLS_ENABLED && client_config->tls_connection_options == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "Cannot create client from client_config; when using BYO_CRYPTO, tls_connection_options can not be "
+ "NULL when TLS is enabled.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+#endif
+
+ struct aws_s3_client *client = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_client));
+
+ client->allocator = allocator;
+ client->vtable = &s_s3_client_default_vtable;
+
+ aws_ref_count_init(&client->ref_count, client, (aws_simple_completion_callback *)s_s3_client_start_destroy);
+
+ if (aws_mutex_init(&client->synced_data.lock) != AWS_OP_SUCCESS) {
+ goto lock_init_fail;
+ }
+
+ aws_linked_list_init(&client->synced_data.pending_meta_request_work);
+ aws_linked_list_init(&client->synced_data.prepared_requests);
+
+ aws_linked_list_init(&client->threaded_data.meta_requests);
+ aws_linked_list_init(&client->threaded_data.request_queue);
+
+ aws_atomic_init_int(&client->stats.num_requests_in_flight, 0);
+
+ for (uint32_t i = 0; i < (uint32_t)AWS_S3_META_REQUEST_TYPE_MAX; ++i) {
+ aws_atomic_init_int(&client->stats.num_requests_network_io[i], 0);
+ }
+
+ aws_atomic_init_int(&client->stats.num_requests_stream_queued_waiting, 0);
+ aws_atomic_init_int(&client->stats.num_requests_streaming, 0);
+
+ *((uint32_t *)&client->max_active_connections_override) = client_config->max_active_connections_override;
+
+ /* Store our client bootstrap. */
+ client->client_bootstrap = aws_client_bootstrap_acquire(client_config->client_bootstrap);
+
+ struct aws_event_loop_group *event_loop_group = client_config->client_bootstrap->event_loop_group;
+ aws_event_loop_group_acquire(event_loop_group);
+
+ client->process_work_event_loop = aws_event_loop_group_get_next_loop(event_loop_group);
+
+ /* Make a copy of the region string. */
+ client->region = aws_string_new_from_array(allocator, client_config->region.ptr, client_config->region.len);
+
+ if (client_config->part_size != 0) {
+ *((size_t *)&client->part_size) = client_config->part_size;
+ } else {
+ *((size_t *)&client->part_size) = s_default_part_size;
+ }
+
+ if (client_config->max_part_size != 0) {
+ *((size_t *)&client->max_part_size) = client_config->max_part_size;
+ } else {
+ *((size_t *)&client->max_part_size) = (size_t)s_default_max_part_size;
+ }
+
+ /* Ensure the resolved max part size is never smaller than the resolved part size. */
+ if (client->max_part_size < client->part_size) {
+ *((size_t *)&client->max_part_size) = client->part_size;
+ }
+
+ client->connect_timeout_ms = client_config->connect_timeout_ms;
+ if (client_config->proxy_ev_settings) {
+ client->proxy_ev_settings = aws_mem_calloc(allocator, 1, sizeof(struct proxy_env_var_settings));
+ *client->proxy_ev_settings = *client_config->proxy_ev_settings;
+
+ if (client_config->proxy_ev_settings->tls_options) {
+ client->proxy_ev_tls_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_tls_connection_options));
+ if (aws_tls_connection_options_copy(client->proxy_ev_tls_options, client->proxy_ev_settings->tls_options)) {
+ goto on_error;
+ }
+ client->proxy_ev_settings->tls_options = client->proxy_ev_tls_options;
+ }
+ }
+
+ if (client_config->tcp_keep_alive_options) {
+ client->tcp_keep_alive_options = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_tcp_keep_alive_options));
+ *client->tcp_keep_alive_options = *client_config->tcp_keep_alive_options;
+ }
+
+ if (client_config->monitoring_options) {
+ client->monitoring_options = *client_config->monitoring_options;
+ } else {
+ client->monitoring_options.minimum_throughput_bytes_per_second = 1;
+ client->monitoring_options.allowable_throughput_failure_interval_seconds =
+ s_default_throughput_failure_interval_seconds;
+ }
+
+ if (client_config->tls_mode == AWS_MR_TLS_ENABLED) {
+ client->tls_connection_options =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct aws_tls_connection_options));
+
+ if (client_config->tls_connection_options != NULL) {
+ aws_tls_connection_options_copy(client->tls_connection_options, client_config->tls_connection_options);
+ } else {
+#ifdef BYO_CRYPTO
+ AWS_FATAL_ASSERT(false);
+ goto on_error;
+#else
+ struct aws_tls_ctx_options default_tls_ctx_options;
+ AWS_ZERO_STRUCT(default_tls_ctx_options);
+
+ aws_tls_ctx_options_init_default_client(&default_tls_ctx_options, allocator);
+
+ struct aws_tls_ctx *default_tls_ctx = aws_tls_client_ctx_new(allocator, &default_tls_ctx_options);
+ if (default_tls_ctx == NULL) {
+ goto on_error;
+ }
+
+ aws_tls_connection_options_init_from_ctx(client->tls_connection_options, default_tls_ctx);
+
+ aws_tls_ctx_release(default_tls_ctx);
+ aws_tls_ctx_options_clean_up(&default_tls_ctx_options);
+#endif
+ }
+ }
+
+ if (client_config->proxy_options) {
+ client->proxy_config = aws_http_proxy_config_new_from_proxy_options_with_tls_info(
+ allocator, client_config->proxy_options, client_config->tls_mode == AWS_MR_TLS_ENABLED);
+ if (client->proxy_config == NULL) {
+ goto on_error;
+ }
+ }
+
+ /* Set up body streaming ELG */
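+ /* A separate event loop group is used for streaming response bodies back to callers. It is sized to the number
+ * of event loops in the bootstrap's group and, when more than one CPU group is present, pinned to CPU group 1;
+ * keeping body delivery off the client's networking event loops is presumably the motivation. */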
+ {
+ uint16_t num_event_loops =
+ (uint16_t)aws_array_list_length(&client->client_bootstrap->event_loop_group->event_loops);
+ uint16_t num_streaming_threads = num_event_loops;
+
+ if (num_streaming_threads < 1) {
+ num_streaming_threads = 1;
+ }
+
+ struct aws_shutdown_callback_options body_streaming_elg_shutdown_options = {
+ .shutdown_callback_fn = s_s3_client_body_streaming_elg_shutdown,
+ .shutdown_callback_user_data = client,
+ };
+
+ if (aws_get_cpu_group_count() > 1) {
+ client->body_streaming_elg = aws_event_loop_group_new_default_pinned_to_cpu_group(
+ client->allocator, num_streaming_threads, 1, &body_streaming_elg_shutdown_options);
+ } else {
+ client->body_streaming_elg = aws_event_loop_group_new_default(
+ client->allocator, num_streaming_threads, &body_streaming_elg_shutdown_options);
+ }
+ if (!client->body_streaming_elg) {
+ /* Failed to create the event loop group; fail the call. */
+ goto on_error;
+ }
+ client->synced_data.body_streaming_elg_allocated = true;
+ }
+ /* Setup cannot fail after this point. */
+
+ if (client_config->throughput_target_gbps != 0.0) {
+ *((double *)&client->throughput_target_gbps) = client_config->throughput_target_gbps;
+ } else {
+ *((double *)&client->throughput_target_gbps) = s_default_throughput_target_gbps;
+ }
+
+ *((enum aws_s3_meta_request_compute_content_md5 *)&client->compute_content_md5) =
+ client_config->compute_content_md5;
+
+ /* Determine how many vips are ideal by dividing target-throughput by throughput-per-vip. */
+ {
+ double ideal_vip_count_double = client->throughput_target_gbps / s_throughput_per_vip_gbps;
+ *((uint32_t *)&client->ideal_vip_count) = (uint32_t)ceil(ideal_vip_count_double);
+ }
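+ /* With the 10 Gbps default target and 4 Gbps per VIP, for example, this works out to ceil(2.5) = 3 VIPs. */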
+
+ if (client_config->signing_config) {
+ client->cached_signing_config = aws_cached_signing_config_new(client->allocator, client_config->signing_config);
+ }
+
+ client->synced_data.active = true;
+
+ if (client_config->retry_strategy != NULL) {
+ aws_retry_strategy_acquire(client_config->retry_strategy);
+ client->retry_strategy = client_config->retry_strategy;
+ } else {
+ struct aws_exponential_backoff_retry_options backoff_retry_options = {
+ .el_group = client_config->client_bootstrap->event_loop_group,
+ .max_retries = s_default_max_retries,
+ };
+
+ struct aws_standard_retry_options retry_options = {
+ .backoff_retry_options = backoff_retry_options,
+ };
+
+ client->retry_strategy = aws_retry_strategy_new_standard(allocator, &retry_options);
+ }
+
+ aws_hash_table_init(
+ &client->synced_data.endpoints,
+ client->allocator,
+ 10,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ NULL);
+
+ /* Initialize shutdown options and tracking. */
+ client->shutdown_callback = client_config->shutdown_callback;
+ client->shutdown_callback_user_data = client_config->shutdown_callback_user_data;
+
+ *((bool *)&client->enable_read_backpressure) = client_config->enable_read_backpressure;
+ *((size_t *)&client->initial_read_window) = client_config->initial_read_window;
+
+ return client;
+
+on_error:
+ aws_string_destroy(client->region);
+
+ if (client->tls_connection_options) {
+ aws_tls_connection_options_clean_up(client->tls_connection_options);
+ aws_mem_release(client->allocator, client->tls_connection_options);
+ client->tls_connection_options = NULL;
+ }
+ if (client->proxy_config) {
+ aws_http_proxy_config_destroy(client->proxy_config);
+ }
+ if (client->proxy_ev_tls_options) {
+ aws_tls_connection_options_clean_up(client->proxy_ev_tls_options);
+ aws_mem_release(client->allocator, client->proxy_ev_tls_options);
+ client->proxy_ev_settings->tls_options = NULL;
+ }
+ aws_mem_release(client->allocator, client->proxy_ev_settings);
+ aws_mem_release(client->allocator, client->tcp_keep_alive_options);
+
+ aws_event_loop_group_release(client->client_bootstrap->event_loop_group);
+ aws_client_bootstrap_release(client->client_bootstrap);
+ aws_mutex_clean_up(&client->synced_data.lock);
+lock_init_fail:
+ aws_mem_release(client->allocator, client);
+ return NULL;
+}
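+
+/* Illustrative usage sketch, assuming an allocator and a client bootstrap named `allocator` and `bootstrap` already
+ * exist; the signing configuration and error handling are omitted for brevity (callers typically also set
+ * client_config.signing_config):
+ *
+ *     struct aws_s3_client_config config = {
+ *         .client_bootstrap = bootstrap,
+ *         .region = aws_byte_cursor_from_c_str("us-east-1"),
+ *         .part_size = 8 * 1024 * 1024,
+ *     };
+ *     struct aws_s3_client *client = aws_s3_client_new(allocator, &config);
+ *     ...
+ *     client = aws_s3_client_release(client);
+ */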
+
+struct aws_s3_client *aws_s3_client_acquire(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+
+ aws_ref_count_acquire(&client->ref_count);
+ return client;
+}
+
+struct aws_s3_client *aws_s3_client_release(struct aws_s3_client *client) {
+ if (client != NULL) {
+ aws_ref_count_release(&client->ref_count);
+ }
+
+ return NULL;
+}
+
+static void s_s3_client_start_destroy(void *user_data) {
+ struct aws_s3_client *client = user_data;
+ AWS_PRECONDITION(client);
+
+ AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client starting destruction.", (void *)client);
+
+ struct aws_linked_list local_vip_list;
+ aws_linked_list_init(&local_vip_list);
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+
+ client->synced_data.active = false;
+
+ /* Prevent the client from cleaning up in between the mutex unlock/re-lock below. */
+ client->synced_data.start_destroy_executing = true;
+
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+
+ aws_event_loop_group_release(client->body_streaming_elg);
+ client->body_streaming_elg = NULL;
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+ client->synced_data.start_destroy_executing = false;
+
+ /* Schedule the work task to clean up outstanding connections and to call the s_s3_client_finish_destroy function
+ * once all asynchronous cleanup has finished. */
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+}
+
+static void s_s3_client_finish_destroy_default(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+
+ AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client finishing destruction.", (void *)client);
+
+ aws_string_destroy(client->region);
+ client->region = NULL;
+
+ if (client->tls_connection_options) {
+ aws_tls_connection_options_clean_up(client->tls_connection_options);
+ aws_mem_release(client->allocator, client->tls_connection_options);
+ client->tls_connection_options = NULL;
+ }
+
+ if (client->proxy_config) {
+ aws_http_proxy_config_destroy(client->proxy_config);
+ }
+
+ if (client->proxy_ev_tls_options) {
+ aws_tls_connection_options_clean_up(client->proxy_ev_tls_options);
+ aws_mem_release(client->allocator, client->proxy_ev_tls_options);
+ client->proxy_ev_settings->tls_options = NULL;
+ }
+ aws_mem_release(client->allocator, client->proxy_ev_settings);
+ aws_mem_release(client->allocator, client->tcp_keep_alive_options);
+
+ aws_mutex_clean_up(&client->synced_data.lock);
+
+ AWS_ASSERT(aws_linked_list_empty(&client->synced_data.pending_meta_request_work));
+ AWS_ASSERT(aws_linked_list_empty(&client->threaded_data.meta_requests));
+ aws_hash_table_clean_up(&client->synced_data.endpoints);
+
+ aws_retry_strategy_release(client->retry_strategy);
+
+ aws_event_loop_group_release(client->client_bootstrap->event_loop_group);
+
+ aws_client_bootstrap_release(client->client_bootstrap);
+ aws_cached_signing_config_destroy(client->cached_signing_config);
+
+ aws_s3_client_shutdown_complete_callback_fn *shutdown_callback = client->shutdown_callback;
+ void *shutdown_user_data = client->shutdown_callback_user_data;
+
+ aws_mem_release(client->allocator, client);
+ client = NULL;
+
+ if (shutdown_callback != NULL) {
+ shutdown_callback(shutdown_user_data);
+ }
+}
+
+static void s_s3_client_body_streaming_elg_shutdown(void *user_data) {
+ struct aws_s3_client *client = user_data;
+ AWS_PRECONDITION(client);
+
+ AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Client body streaming ELG shutdown.", (void *)client);
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+ client->synced_data.body_streaming_elg_allocated = false;
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+}
+
+uint32_t aws_s3_client_queue_requests_threaded(
+ struct aws_s3_client *client,
+ struct aws_linked_list *request_list,
+ bool queue_front) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(request_list);
+
+ uint32_t request_list_size = 0;
+
+ for (struct aws_linked_list_node *node = aws_linked_list_begin(request_list);
+ node != aws_linked_list_end(request_list);
+ node = aws_linked_list_next(node)) {
+ ++request_list_size;
+ }
+
+ if (queue_front) {
+ aws_linked_list_move_all_front(&client->threaded_data.request_queue, request_list);
+ } else {
+ aws_linked_list_move_all_back(&client->threaded_data.request_queue, request_list);
+ }
+
+ client->threaded_data.request_queue_size += request_list_size;
+ return request_list_size;
+}
+
+struct aws_s3_request *aws_s3_client_dequeue_request_threaded(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+
+ if (aws_linked_list_empty(&client->threaded_data.request_queue)) {
+ return NULL;
+ }
+
+ struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&client->threaded_data.request_queue);
+ struct aws_s3_request *request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node);
+
+ --client->threaded_data.request_queue_size;
+
+ return request;
+}
+
+/*
+ * There is currently some overlap between the user-provided Host header and the endpoint
+ * override. This function handles the corner cases where either or both are provided.
+ */
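+/*
+ * In short, the cases handled below are:
+ *   - Neither a Host header nor an endpoint override is provided: error.
+ *   - Only an endpoint override is provided: the Host header is filled in from the override's authority.
+ *   - Only a Host header is provided: it is used as-is.
+ *   - Both are provided: the Host header must match the override's authority, otherwise it is an error.
+ */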
+static int s_apply_endpoint_override(
+ const struct aws_s3_client *client,
+ struct aws_http_headers *message_headers,
+ const struct aws_uri *endpoint) {
+ AWS_PRECONDITION(message_headers);
+
+ const struct aws_byte_cursor *endpoint_authority = endpoint == NULL ? NULL : aws_uri_authority(endpoint);
+
+ if (!aws_http_headers_has(message_headers, g_host_header_name)) {
+ if (endpoint_authority == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; message provided in options does not have either 'Host' header "
+ "set or endpoint override.",
+ (void *)client);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ if (aws_http_headers_set(message_headers, g_host_header_name, *endpoint_authority)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; failed to set 'Host' header based on endpoint override.",
+ (void *)client);
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ }
+
+ struct aws_byte_cursor host_value;
+ AWS_FATAL_ASSERT(aws_http_headers_get(message_headers, g_host_header_name, &host_value) == AWS_OP_SUCCESS);
+
+ if (endpoint_authority != NULL && !aws_byte_cursor_eq(&host_value, endpoint_authority)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; host header value " PRInSTR
+ " does not match endpoint override " PRInSTR,
+ (void *)client,
+ AWS_BYTE_CURSOR_PRI(host_value),
+ AWS_BYTE_CURSOR_PRI(*endpoint_authority));
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Public facing make-meta-request function. */
+struct aws_s3_meta_request *aws_s3_client_make_meta_request(
+ struct aws_s3_client *client,
+ const struct aws_s3_meta_request_options *options) {
+
+ AWS_LOGF_INFO(AWS_LS_S3_CLIENT, "id=%p Initiating making of meta request", (void *)client);
+
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(client->vtable);
+ AWS_PRECONDITION(client->vtable->meta_request_factory);
+ AWS_PRECONDITION(options);
+
+ if (options->type >= AWS_S3_META_REQUEST_TYPE_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; invalid meta request type specified.",
+ (void *)client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (options->message == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; message provided in options is invalid.",
+ (void *)client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_http_headers *message_headers = aws_http_message_get_headers(options->message);
+
+ if (message_headers == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; message provided in options does not contain headers.",
+ (void *)client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (options->checksum_config) {
+ if (options->checksum_config->location == AWS_SCL_TRAILER) {
+ struct aws_http_headers *headers = aws_http_message_get_headers(options->message);
+ struct aws_byte_cursor existing_encoding;
+ AWS_ZERO_STRUCT(existing_encoding);
+ if (aws_http_headers_get(headers, g_content_encoding_header_name, &existing_encoding) == AWS_OP_SUCCESS) {
+ if (aws_byte_cursor_find_exact(&existing_encoding, &g_content_encoding_header_aws_chunked, NULL) ==
+ AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; for trailer checksum, the original request cannot be "
+ "aws-chunked encoding. The client will encode the request instead.",
+ (void *)client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+ }
+ }
+
+ if (options->checksum_config->location == AWS_SCL_HEADER) {
+ /* TODO: support calculate checksum to add to header */
+ aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+ return NULL;
+ }
+
+ if (options->checksum_config->location != AWS_SCL_NONE &&
+ options->checksum_config->checksum_algorithm == AWS_SCA_NONE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; checksum algorithm must be set to calculate checksum.",
+ (void *)client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+ if (options->checksum_config->checksum_algorithm != AWS_SCA_NONE &&
+ options->checksum_config->location == AWS_SCL_NONE) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; checksum algorithm cannot be set if not calculate checksum from "
+ "client.",
+ (void *)client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+ }
+
+ if (s_apply_endpoint_override(client, message_headers, options->endpoint)) {
+ return NULL;
+ }
+
+ struct aws_byte_cursor host_header_value;
+ /* The Host header must have been set by s_apply_endpoint_override if it did not error out. */
+ AWS_FATAL_ASSERT(aws_http_headers_get(message_headers, g_host_header_name, &host_header_value) == AWS_OP_SUCCESS);
+
+ bool is_https = true;
+ uint16_t port = 0;
+
+ if (options->endpoint != NULL) {
+ struct aws_byte_cursor https_scheme = aws_byte_cursor_from_c_str("https");
+ struct aws_byte_cursor http_scheme = aws_byte_cursor_from_c_str("http");
+
+ const struct aws_byte_cursor *scheme = aws_uri_scheme(options->endpoint);
+
+ is_https = aws_byte_cursor_eq_ignore_case(scheme, &https_scheme);
+
+ if (!is_https && !aws_byte_cursor_eq_ignore_case(scheme, &http_scheme)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Cannot create meta s3 request; unexpected scheme '" PRInSTR "' in endpoint override.",
+ (void *)client,
+ AWS_BYTE_CURSOR_PRI(*scheme));
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ port = aws_uri_port(options->endpoint);
+ }
+
+ struct aws_s3_meta_request *meta_request = client->vtable->meta_request_factory(client, options);
+
+ if (meta_request == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "id=%p: Could not create new meta request.", (void *)client);
+ return NULL;
+ }
+
+ bool error_occurred = false;
+
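+ /* Endpoints are cached in client->synced_data.endpoints, keyed by host name, so meta requests that target the
+ * same host share one aws_s3_endpoint (and, through it, one HTTP connection manager). An entry is created on
+ * first use and reference-counted; s_s3_client_endpoint_shutdown_callback decrements num_endpoints_allocated
+ * once an endpoint is finally torn down. */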
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+
+ struct aws_string *endpoint_host_name = NULL;
+
+ if (options->endpoint != NULL) {
+ endpoint_host_name = aws_string_new_from_cursor(client->allocator, aws_uri_host_name(options->endpoint));
+ } else {
+ struct aws_uri host_uri;
+ if (aws_uri_init_parse(&host_uri, client->allocator, &host_header_value)) {
+ error_occurred = true;
+ goto unlock;
+ }
+
+ endpoint_host_name = aws_string_new_from_cursor(client->allocator, aws_uri_host_name(&host_uri));
+ aws_uri_clean_up(&host_uri);
+ }
+
+ struct aws_s3_endpoint *endpoint = NULL;
+ struct aws_hash_element *endpoint_hash_element = NULL;
+
+ int was_created = 0;
+ if (aws_hash_table_create(
+ &client->synced_data.endpoints, endpoint_host_name, &endpoint_hash_element, &was_created)) {
+ aws_string_destroy(endpoint_host_name);
+ error_occurred = true;
+ goto unlock;
+ }
+
+ if (was_created) {
+ struct aws_s3_endpoint_options endpoint_options = {
+ .host_name = endpoint_host_name,
+ .client_bootstrap = client->client_bootstrap,
+ .tls_connection_options = is_https ? client->tls_connection_options : NULL,
+ .dns_host_address_ttl_seconds = s_dns_host_address_ttl_seconds,
+ .client = client,
+ .max_connections = aws_s3_client_get_max_active_connections(client, NULL),
+ .port = port,
+ .proxy_config = client->proxy_config,
+ .proxy_ev_settings = client->proxy_ev_settings,
+ .connect_timeout_ms = client->connect_timeout_ms,
+ .tcp_keep_alive_options = client->tcp_keep_alive_options,
+ .monitoring_options = &client->monitoring_options,
+ };
+
+ endpoint = aws_s3_endpoint_new(client->allocator, &endpoint_options);
+
+ if (endpoint == NULL) {
+ aws_hash_table_remove(&client->synced_data.endpoints, endpoint_host_name, NULL, NULL);
+ aws_string_destroy(endpoint_host_name);
+ error_occurred = true;
+ goto unlock;
+ }
+ endpoint_hash_element->value = endpoint;
+ ++client->synced_data.num_endpoints_allocated;
+ } else {
+ endpoint = endpoint_hash_element->value;
+
+ aws_s3_endpoint_acquire(endpoint, true /*already_holding_lock*/);
+
+ aws_string_destroy(endpoint_host_name);
+ endpoint_host_name = NULL;
+ }
+
+ meta_request->endpoint = endpoint;
+
+ s_s3_client_push_meta_request_synced(client, meta_request);
+ s_s3_client_schedule_process_work_synced(client);
+
+ unlock:
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+
+ if (error_occurred) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Could not create meta request due to error %d (%s)",
+ (void *)client,
+ aws_last_error(),
+ aws_error_str(aws_last_error()));
+
+ meta_request = aws_s3_meta_request_release(meta_request);
+ } else {
+ AWS_LOGF_INFO(AWS_LS_S3_CLIENT, "id=%p: Created meta request %p", (void *)client, (void *)meta_request);
+ }
+
+ return meta_request;
+}
+
+static void s_s3_client_endpoint_shutdown_callback(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+ --client->synced_data.num_endpoints_allocated;
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+}
+
+static struct aws_s3_meta_request *s_s3_client_meta_request_factory_default(
+ struct aws_s3_client *client,
+ const struct aws_s3_meta_request_options *options) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(options);
+
+ struct aws_http_headers *initial_message_headers = aws_http_message_get_headers(options->message);
+ AWS_ASSERT(initial_message_headers);
+
+ uint64_t content_length = 0;
+ struct aws_byte_cursor content_length_cursor;
+ bool content_length_header_found = false;
+
+ if (!aws_http_headers_get(initial_message_headers, g_content_length_header_name, &content_length_cursor)) {
+ if (aws_byte_cursor_utf8_parse_u64(content_length_cursor, &content_length)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not parse Content-Length header. header value is:" PRInSTR "",
+ AWS_BYTE_CURSOR_PRI(content_length_cursor));
+ aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER);
+ return NULL;
+ }
+ content_length_header_found = true;
+ }
+
+ /* Call the appropriate meta-request new function. */
+ switch (options->type) {
+ case AWS_S3_META_REQUEST_TYPE_GET_OBJECT: {
+ /* If the initial request already has a partNumber, it presumably cannot be split further, so treat it
+ * as a Default request.
+ * TODO: Tests are still needed to verify whether a request for a single part can be split. */
+ if (aws_http_headers_has(initial_message_headers, aws_byte_cursor_from_c_str("partNumber"))) {
+ return aws_s3_meta_request_default_new(client->allocator, client, content_length, false, options);
+ }
+
+ return aws_s3_meta_request_auto_ranged_get_new(client->allocator, client, client->part_size, options);
+ }
+ case AWS_S3_META_REQUEST_TYPE_PUT_OBJECT: {
+
+ if (!content_length_header_found) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create auto-ranged-put meta request; there is no Content-Length header present.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_input_stream *input_stream = aws_http_message_get_body_stream(options->message);
+
+ if ((input_stream == NULL) && (options->send_filepath.len == 0)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create auto-ranged-put meta request; filepath or body stream must be set.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (options->resume_token == NULL) {
+
+ size_t client_part_size = client->part_size;
+ size_t client_max_part_size = client->max_part_size;
+
+ if (client_part_size < g_s3_min_upload_part_size) {
+ AWS_LOGF_WARN(
+ AWS_LS_S3_META_REQUEST,
+ "Client config part size of %" PRIu64 " is less than the minimum upload part size of %" PRIu64
+ ". Using to the minimum part-size for upload.",
+ (uint64_t)client_part_size,
+ (uint64_t)g_s3_min_upload_part_size);
+
+ client_part_size = g_s3_min_upload_part_size;
+ }
+
+ if (client_max_part_size < g_s3_min_upload_part_size) {
+ AWS_LOGF_WARN(
+ AWS_LS_S3_META_REQUEST,
+ "Client config max part size of %" PRIu64
+ " is less than the minimum upload part size of %" PRIu64
+ ". Clamping to the minimum part-size for upload.",
+ (uint64_t)client_max_part_size,
+ (uint64_t)g_s3_min_upload_part_size);
+
+ client_max_part_size = g_s3_min_upload_part_size;
+ }
+ if (content_length <= client_part_size) {
+ return aws_s3_meta_request_default_new(
+ client->allocator,
+ client,
+ content_length,
+ client->compute_content_md5 == AWS_MR_CONTENT_MD5_ENABLED &&
+ !aws_http_headers_has(initial_message_headers, g_content_md5_header_name),
+ options);
+ } else {
+ if (aws_s3_message_util_check_checksum_header(options->message)) {
+ /* A checksum header has been set, but the request will be split. Fail the request. */
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create auto-ranged-put meta request; checksum headers has been set for "
+ "auto-ranged-put that will be split. Pre-calculated checksums are only supported for "
+ "single "
+ "part upload.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+ }
+
+ uint64_t part_size_uint64 = content_length / (uint64_t)g_s3_max_num_upload_parts;
+
+ if (part_size_uint64 > SIZE_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create auto-ranged-put meta request; required part size of %" PRIu64
+ " bytes is too large for platform.",
+ part_size_uint64);
+
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ size_t part_size = (size_t)part_size_uint64;
+
+ if (part_size > client_max_part_size) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create auto-ranged-put meta request; required part size for put request is %" PRIu64
+ ", but current maximum part size is %" PRIu64,
+ (uint64_t)part_size,
+ (uint64_t)client_max_part_size);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (part_size < client_part_size) {
+ part_size = client_part_size;
+ }
+
+ uint32_t num_parts = (uint32_t)(content_length / part_size);
+
+ if ((content_length % part_size) > 0) {
+ ++num_parts;
+ }
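+
+ /* Worked example with the 8 MiB default part size: a 1 GiB upload gives
+ * part_size_uint64 = 1073741824 / 10000 = 107374, which is below the client part size, so part_size is
+ * raised to 8 MiB and num_parts = 1073741824 / 8388608 = 128, with no remainder. */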
+
+ return aws_s3_meta_request_auto_ranged_put_new(
+ client->allocator, client, part_size, content_length, num_parts, options);
+ } else {
+ /* Don't pass the part size or total number of parts; the constructor will pick them up from the resume token. */
+ return aws_s3_meta_request_auto_ranged_put_new(
+ client->allocator, client, 0, content_length, 0, options);
+ }
+ }
+ case AWS_S3_META_REQUEST_TYPE_COPY_OBJECT: {
+ /* TODO: support copy object correctly. */
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "CopyObject is not currently supported");
+ aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ return NULL;
+ }
+ case AWS_S3_META_REQUEST_TYPE_DEFAULT:
+ return aws_s3_meta_request_default_new(client->allocator, client, content_length, false, options);
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ return NULL;
+}
+
+static void s_s3_client_push_meta_request_synced(
+ struct aws_s3_client *client,
+ struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(meta_request);
+ ASSERT_SYNCED_DATA_LOCK_HELD(client);
+
+ struct aws_s3_meta_request_work *meta_request_work =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct aws_s3_meta_request_work));
+
+ aws_s3_meta_request_acquire(meta_request);
+ meta_request_work->meta_request = meta_request;
+ aws_linked_list_push_back(&client->synced_data.pending_meta_request_work, &meta_request_work->node);
+}
+
+static void s_s3_client_schedule_process_work_synced(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(client->vtable);
+ AWS_PRECONDITION(client->vtable->schedule_process_work_synced);
+
+ ASSERT_SYNCED_DATA_LOCK_HELD(client);
+
+ client->vtable->schedule_process_work_synced(client);
+}
+
+static void s_s3_client_schedule_process_work_synced_default(struct aws_s3_client *client) {
+ ASSERT_SYNCED_DATA_LOCK_HELD(client);
+
+ if (client->synced_data.process_work_task_scheduled) {
+ return;
+ }
+
+ aws_task_init(
+ &client->synced_data.process_work_task, s_s3_client_process_work_task, client, "s3_client_process_work_task");
+
+ aws_event_loop_schedule_task_now(client->process_work_event_loop, &client->synced_data.process_work_task);
+
+ client->synced_data.process_work_task_scheduled = true;
+}
+
+void aws_s3_client_schedule_process_work(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+}
+
+static void s_s3_client_remove_meta_request_threaded(
+ struct aws_s3_client *client,
+ struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(meta_request);
+ (void)client;
+
+ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node);
+ meta_request->client_process_work_threaded_data.scheduled = false;
+ aws_s3_meta_request_release(meta_request);
+}
+
+/* Task function for trying to find a request that can be processed. */
+static void s_s3_client_process_work_task(struct aws_task *task, void *arg, enum aws_task_status task_status) {
+ AWS_PRECONDITION(task);
+ (void)task;
+ (void)task_status;
+
+ /* Client keeps a reference to the event loop group; a 'canceled' status should not happen. */
+ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY);
+
+ struct aws_s3_client *client = arg;
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(client->vtable);
+ AWS_PRECONDITION(client->vtable->process_work);
+
+ client->vtable->process_work(client);
+}
+
+static void s_s3_client_process_work_default(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(client->vtable);
+ AWS_PRECONDITION(client->vtable->finish_destroy);
+
+ struct aws_linked_list meta_request_work_list;
+ aws_linked_list_init(&meta_request_work_list);
+
+ /*******************/
+ /* Step 1: Move relevant data into thread local memory. */
+ /*******************/
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_CLIENT,
+ "id=%p s_s3_client_process_work_default - Moving relevant synced_data into threaded_data.",
+ (void *)client);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_client_lock_synced_data(client);
+ /* Once we exit this mutex, someone can reschedule this task. */
+ client->synced_data.process_work_task_scheduled = false;
+ client->synced_data.process_work_task_in_progress = true;
+
+ aws_linked_list_swap_contents(&meta_request_work_list, &client->synced_data.pending_meta_request_work);
+
+ uint32_t num_requests_queued =
+ aws_s3_client_queue_requests_threaded(client, &client->synced_data.prepared_requests, false);
+
+ {
+ int sub_result = aws_sub_u32_checked(
+ client->threaded_data.num_requests_being_prepared,
+ num_requests_queued,
+ &client->threaded_data.num_requests_being_prepared);
+
+ AWS_ASSERT(sub_result == AWS_OP_SUCCESS);
+ (void)sub_result;
+ }
+
+ {
+ int sub_result = aws_sub_u32_checked(
+ client->threaded_data.num_requests_being_prepared,
+ client->synced_data.num_failed_prepare_requests,
+ &client->threaded_data.num_requests_being_prepared);
+
+ client->synced_data.num_failed_prepare_requests = 0;
+
+ AWS_ASSERT(sub_result == AWS_OP_SUCCESS);
+ (void)sub_result;
+ }
+
+ uint32_t num_endpoints_in_table = (uint32_t)aws_hash_table_get_entry_count(&client->synced_data.endpoints);
+ uint32_t num_endpoints_allocated = client->synced_data.num_endpoints_allocated;
+
+ aws_s3_client_unlock_synced_data(client);
+ /* END CRITICAL SECTION */
+
+ /*******************/
+ /* Step 2: Push meta requests into the thread local list if they haven't already been scheduled. */
+ /*******************/
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_CLIENT, "id=%p s_s3_client_process_work_default - Processing any new meta requests.", (void *)client);
+
+ while (!aws_linked_list_empty(&meta_request_work_list)) {
+ struct aws_linked_list_node *node = aws_linked_list_pop_back(&meta_request_work_list);
+ struct aws_s3_meta_request_work *meta_request_work =
+ AWS_CONTAINER_OF(node, struct aws_s3_meta_request_work, node);
+
+ AWS_FATAL_ASSERT(meta_request_work != NULL);
+ AWS_FATAL_ASSERT(meta_request_work->meta_request != NULL);
+
+ struct aws_s3_meta_request *meta_request = meta_request_work->meta_request;
+
+ if (!meta_request->client_process_work_threaded_data.scheduled) {
+ aws_linked_list_push_back(
+ &client->threaded_data.meta_requests, &meta_request->client_process_work_threaded_data.node);
+
+ meta_request->client_process_work_threaded_data.scheduled = true;
+ } else {
+ meta_request = aws_s3_meta_request_release(meta_request);
+ }
+
+ aws_mem_release(client->allocator, meta_request_work);
+ }
+
+ /*******************/
+ /* Step 3: Update relevant meta requests and connections. */
+ /*******************/
+ {
+ AWS_LOGF_DEBUG(AWS_LS_S3_CLIENT, "id=%p Updating meta requests.", (void *)client);
+ aws_s3_client_update_meta_requests_threaded(client);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_CLIENT, "id=%p Updating connections, assigning requests where possible.", (void *)client);
+ aws_s3_client_update_connections_threaded(client);
+ }
+
+ /*******************/
+ /* Step 4: Log client stats. */
+ /*******************/
+ {
+ uint32_t num_requests_tracked_requests = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight);
+
+ uint32_t num_auto_ranged_get_network_io =
+ s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_GET_OBJECT);
+ uint32_t num_auto_ranged_put_network_io =
+ s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_PUT_OBJECT);
+ uint32_t num_auto_default_network_io =
+ s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_DEFAULT);
+
+ uint32_t num_requests_network_io =
+ s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_MAX);
+
+ uint32_t num_requests_stream_queued_waiting =
+ (uint32_t)aws_atomic_load_int(&client->stats.num_requests_stream_queued_waiting);
+ uint32_t num_requests_streaming = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_streaming);
+
+ uint32_t total_approx_requests = num_requests_network_io + num_requests_stream_queued_waiting +
+ num_requests_streaming + client->threaded_data.num_requests_being_prepared +
+ client->threaded_data.request_queue_size;
+ AWS_LOGF(
+ s_log_level_client_stats,
+ AWS_LS_S3_CLIENT_STATS,
+ "id=%p Requests-in-flight(approx/exact):%d/%d Requests-preparing:%d Requests-queued:%d "
+ "Requests-network(get/put/default/total):%d/%d/%d/%d Requests-streaming-waiting:%d Requests-streaming:%d "
+ " Endpoints(in-table/allocated):%d/%d",
+ (void *)client,
+ total_approx_requests,
+ num_requests_tracked_requests,
+ client->threaded_data.num_requests_being_prepared,
+ client->threaded_data.request_queue_size,
+ num_auto_ranged_get_network_io,
+ num_auto_ranged_put_network_io,
+ num_auto_default_network_io,
+ num_requests_network_io,
+ num_requests_stream_queued_waiting,
+ num_requests_streaming,
+ num_endpoints_in_table,
+ num_endpoints_allocated);
+ }
+
+ /*******************/
+ /* Step 5: Check for client shutdown. */
+ /*******************/
+ {
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_client_lock_synced_data(client);
+ client->synced_data.process_work_task_in_progress = false;
+
+ /* This flag should never be set twice. If it were, a double-free could occur. */
+ AWS_ASSERT(!client->synced_data.finish_destroy);
+
+ bool finish_destroy = client->synced_data.active == false &&
+ client->synced_data.start_destroy_executing == false &&
+ client->synced_data.body_streaming_elg_allocated == false &&
+ client->synced_data.process_work_task_scheduled == false &&
+ client->synced_data.process_work_task_in_progress == false &&
+ client->synced_data.num_endpoints_allocated == 0;
+
+ client->synced_data.finish_destroy = finish_destroy;
+
+ if (!client->synced_data.active) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client shutdown progress: starting_destroy_executing=%d body_streaming_elg_allocated=%d "
+ "process_work_task_scheduled=%d process_work_task_in_progress=%d num_endpoints_allocated=%d "
+ "finish_destroy=%d",
+ (void *)client,
+ (int)client->synced_data.start_destroy_executing,
+ (int)client->synced_data.body_streaming_elg_allocated,
+ (int)client->synced_data.process_work_task_scheduled,
+ (int)client->synced_data.process_work_task_in_progress,
+ (int)client->synced_data.num_endpoints_allocated,
+ (int)client->synced_data.finish_destroy);
+ }
+
+ aws_s3_client_unlock_synced_data(client);
+ /* END CRITICAL SECTION */
+
+ if (finish_destroy) {
+ client->vtable->finish_destroy(client);
+ }
+ }
+}
+
+static void s_s3_client_prepare_callback_queue_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code,
+ void *user_data);
+
+void aws_s3_client_update_meta_requests_threaded(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+
+ const uint32_t max_requests_in_flight = aws_s3_client_get_max_requests_in_flight(client);
+ const uint32_t max_requests_prepare = aws_s3_client_get_max_requests_prepare(client);
+
+ struct aws_linked_list meta_requests_work_remaining;
+ aws_linked_list_init(&meta_requests_work_remaining);
+
+ uint32_t num_requests_in_flight = (uint32_t)aws_atomic_load_int(&client->stats.num_requests_in_flight);
+
+ const uint32_t pass_flags[] = {
+ AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE,
+ 0,
+ };
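+
+ /* Two passes are made over the meta request list: the first updates meta requests with the CONSERVATIVE flag
+ * set, the second with no flags. The intent (suggested by the flag's name) appears to be to hand out work
+ * cautiously first and only then let meta requests fill whatever capacity remains. */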
+
+ const uint32_t num_passes = AWS_ARRAY_SIZE(pass_flags);
+
+ for (uint32_t pass_index = 0; pass_index < num_passes; ++pass_index) {
+
+ /* While:
+ * * Number of being-prepared + already-prepared-and-queued requests is less than the max that can be in the
+ * preparation stage.
+ * * Total number of requests tracked by the client is less than the max tracked ("in flight") requests.
+ * * There are meta requests to get requests from.
+ *
+ * Then update meta requests to get new requests that can then be prepared (reading from any streams, signing,
+ * etc.) for sending.
+ */
+ while ((client->threaded_data.num_requests_being_prepared + client->threaded_data.request_queue_size) <
+ max_requests_prepare &&
+ num_requests_in_flight < max_requests_in_flight &&
+ !aws_linked_list_empty(&client->threaded_data.meta_requests)) {
+
+ struct aws_linked_list_node *meta_request_node =
+ aws_linked_list_begin(&client->threaded_data.meta_requests);
+ struct aws_s3_meta_request *meta_request =
+ AWS_CONTAINER_OF(meta_request_node, struct aws_s3_meta_request, client_process_work_threaded_data);
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_ASSERT(endpoint != NULL);
+
+ AWS_ASSERT(client->vtable->get_host_address_count);
+ size_t num_known_vips = client->vtable->get_host_address_count(
+ client->client_bootstrap->host_resolver, endpoint->host_name, AWS_GET_HOST_ADDRESS_COUNT_RECORD_TYPE_A);
+
+ /* If this particular endpoint doesn't have any known addresses yet, then we don't want to go full speed in
+ * ramping up requests just yet. If there are already enough requests in the queue for one address (even if those
+ * aren't for this particular endpoint), we skip over this meta request for now. */
+ if (num_known_vips == 0 && (client->threaded_data.num_requests_being_prepared +
+ client->threaded_data.request_queue_size) >= g_max_num_connections_per_vip) {
+ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node);
+ aws_linked_list_push_back(
+ &meta_requests_work_remaining, &meta_request->client_process_work_threaded_data.node);
+ continue;
+ }
+
+ struct aws_s3_request *request = NULL;
+
+ /* Try to grab the next request from the meta request. */
+ bool work_remaining = aws_s3_meta_request_update(meta_request, pass_flags[pass_index], &request);
+
+ if (work_remaining) {
+ /* If there is work remaining, but we didn't get a request back, take the meta request out of the
+ * list so that we don't use it again during this function, with the intention of putting it back in
+ * the list before this function ends. */
+ if (request == NULL) {
+ aws_linked_list_remove(&meta_request->client_process_work_threaded_data.node);
+ aws_linked_list_push_back(
+ &meta_requests_work_remaining, &meta_request->client_process_work_threaded_data.node);
+ } else {
+ request->tracked_by_client = true;
+
+ ++client->threaded_data.num_requests_being_prepared;
+
+ num_requests_in_flight =
+ (uint32_t)aws_atomic_fetch_add(&client->stats.num_requests_in_flight, 1) + 1;
+
+ aws_s3_meta_request_prepare_request(
+ meta_request, request, s_s3_client_prepare_callback_queue_request, client);
+ }
+ } else {
+ s_s3_client_remove_meta_request_threaded(client, meta_request);
+ }
+ }
+
+ aws_linked_list_move_all_front(&client->threaded_data.meta_requests, &meta_requests_work_remaining);
+ }
+}
+
+static void s_s3_client_meta_request_finished_request(
+ struct aws_s3_client *client,
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(request);
+
+ if (request->tracked_by_client) {
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_client_lock_synced_data(client);
+ aws_atomic_fetch_sub(&client->stats.num_requests_in_flight, 1);
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ /* END CRITICAL SECTION */
+ }
+ aws_s3_meta_request_finished_request(meta_request, request, error_code);
+}
+
+static void s_s3_client_prepare_callback_queue_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code,
+ void *user_data) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_client *client = user_data;
+ AWS_PRECONDITION(client);
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ s_s3_client_meta_request_finished_request(client, meta_request, request, error_code);
+
+ aws_s3_request_release(request);
+ request = NULL;
+ }
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ aws_linked_list_push_back(&client->synced_data.prepared_requests, &request->node);
+ } else {
+ ++client->synced_data.num_failed_prepare_requests;
+ }
+
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+}
+
+void aws_s3_client_update_connections_threaded(struct aws_s3_client *client) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(client->vtable);
+
+ struct aws_linked_list left_over_requests;
+ aws_linked_list_init(&left_over_requests);
+
+ while (s_s3_client_get_num_requests_network_io(client, AWS_S3_META_REQUEST_TYPE_MAX) <
+ aws_s3_client_get_max_active_connections(client, NULL) &&
+ !aws_linked_list_empty(&client->threaded_data.request_queue)) {
+
+ struct aws_s3_request *request = aws_s3_client_dequeue_request_threaded(client);
+ const uint32_t max_active_connections = aws_s3_client_get_max_active_connections(client, request->meta_request);
+
+ /* Unless the request is marked "always send", if this meta request has a finish result, then finish the request
+ * now and release it. */
+ if (!request->always_send && aws_s3_meta_request_has_finish_result(request->meta_request)) {
+ s_s3_client_meta_request_finished_request(client, request->meta_request, request, AWS_ERROR_S3_CANCELED);
+
+ aws_s3_request_release(request);
+ request = NULL;
+ } else if (
+ s_s3_client_get_num_requests_network_io(client, request->meta_request->type) < max_active_connections) {
+ s_s3_client_create_connection_for_request(client, request);
+ } else {
+ /* Push the request into the left-over list to be used in a future call of this function. */
+ aws_linked_list_push_back(&left_over_requests, &request->node);
+ }
+ }
+
+ aws_s3_client_queue_requests_threaded(client, &left_over_requests, true);
+}
+
+static void s_s3_client_acquired_retry_token(
+ struct aws_retry_strategy *retry_strategy,
+ int error_code,
+ struct aws_retry_token *token,
+ void *user_data);
+
+static void s_s3_client_retry_ready(struct aws_retry_token *token, int error_code, void *user_data);
+
+static void s_s3_client_create_connection_for_request_default(
+ struct aws_s3_client *client,
+ struct aws_s3_request *request);
+
+static void s_s3_client_create_connection_for_request(struct aws_s3_client *client, struct aws_s3_request *request) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(client->vtable);
+
+ if (client->vtable->create_connection_for_request) {
+ client->vtable->create_connection_for_request(client, request);
+ return;
+ }
+
+ s_s3_client_create_connection_for_request_default(client, request);
+}
+
+static void s_s3_client_create_connection_for_request_default(
+ struct aws_s3_client *client,
+ struct aws_s3_request *request) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ aws_atomic_fetch_add(&client->stats.num_requests_network_io[meta_request->type], 1);
+
+ struct aws_s3_connection *connection = aws_mem_calloc(client->allocator, 1, sizeof(struct aws_s3_connection));
+
+ connection->endpoint = aws_s3_endpoint_acquire(meta_request->endpoint, false /*already_holding_lock*/);
+ connection->request = request;
+
+ struct aws_byte_cursor host_header_value;
+ AWS_ZERO_STRUCT(host_header_value);
+
+ struct aws_http_headers *message_headers = aws_http_message_get_headers(meta_request->initial_request_message);
+ AWS_ASSERT(message_headers);
+
+ int get_header_result = aws_http_headers_get(message_headers, g_host_header_name, &host_header_value);
+ AWS_ASSERT(get_header_result == AWS_OP_SUCCESS);
+ (void)get_header_result;
+
+ if (aws_retry_strategy_acquire_retry_token(
+ client->retry_strategy, &host_header_value, s_s3_client_acquired_retry_token, connection, 0)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client could not acquire retry token for request %p due to error %d (%s)",
+ (void *)client,
+ (void *)request,
+ aws_last_error_or_unknown(),
+ aws_error_str(aws_last_error_or_unknown()));
+
+ goto reset_connection;
+ }
+
+ return;
+
+reset_connection:
+
+ aws_s3_client_notify_connection_finished(
+ client, connection, aws_last_error_or_unknown(), AWS_S3_CONNECTION_FINISH_CODE_FAILED);
+}
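+
+/* From here, the flow for a single request is: acquire a retry token for the endpoint's host
+ * (s_s3_client_acquired_retry_token), acquire an HTTP connection from the endpoint's connection manager
+ * (s_s3_client_on_acquire_http_connection), send the request via aws_s3_meta_request_send_request, and report the
+ * outcome through aws_s3_client_notify_connection_finished, which records success on the retry token, schedules a
+ * retry, or releases the connection's resources on failure. */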
+
+static void s_s3_client_acquired_retry_token(
+ struct aws_retry_strategy *retry_strategy,
+ int error_code,
+ struct aws_retry_token *token,
+ void *user_data) {
+
+ AWS_PRECONDITION(retry_strategy);
+ (void)retry_strategy;
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_ASSERT(endpoint != NULL);
+
+ struct aws_s3_client *client = endpoint->client;
+ AWS_ASSERT(client != NULL);
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client could not get retry token for connection %p processing request %p due to error %d (%s)",
+ (void *)client,
+ (void *)connection,
+ (void *)request,
+ error_code,
+ aws_error_str(error_code));
+
+ goto error_clean_up;
+ }
+
+ AWS_ASSERT(token);
+
+ connection->retry_token = token;
+
+ AWS_ASSERT(client->vtable->acquire_http_connection);
+
+ /* client needs to be kept alive until s_s3_client_on_acquire_http_connection completes */
+ /* TODO: not a blocker; consider managing the lifetime of aws_s3_client from aws_s3_endpoint to simplify usage. */
+ aws_s3_client_acquire(client);
+
+ client->vtable->acquire_http_connection(
+ endpoint->http_connection_manager, s_s3_client_on_acquire_http_connection, connection);
+
+ return;
+
+error_clean_up:
+
+ aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED);
+}
+
+static void s_s3_client_on_acquire_http_connection(
+ struct aws_http_connection *incoming_http_connection,
+ int error_code,
+ void *user_data) {
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_ASSERT(endpoint != NULL);
+
+ struct aws_s3_client *client = endpoint->client;
+ AWS_ASSERT(client != NULL);
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_ENDPOINT,
+ "id=%p: Could not acquire connection due to error code %d (%s)",
+ (void *)endpoint,
+ error_code,
+ aws_error_str(error_code));
+
+ if (error_code == AWS_IO_DNS_INVALID_NAME) {
+ goto error_fail;
+ }
+
+ goto error_retry;
+ }
+
+ connection->http_connection = incoming_http_connection;
+ aws_s3_meta_request_send_request(meta_request, connection);
+ aws_s3_client_release(client); /* kept since this callback was registered */
+ return;
+
+error_retry:
+
+ aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_RETRY);
+ aws_s3_client_release(client); /* kept since this callback was registered */
+ return;
+
+error_fail:
+
+ aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED);
+ aws_s3_client_release(client); /* kept since this callback was registered */
+}
+
+/* Called by aws_s3_meta_request when it has finished using this connection for a single request. */
+void aws_s3_client_notify_connection_finished(
+ struct aws_s3_client *client,
+ struct aws_s3_connection *connection,
+ int error_code,
+ enum aws_s3_connection_finish_code finish_code) {
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->initial_request_message);
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_PRECONDITION(endpoint);
+
+ /* If we're trying to set up a retry... */
+ if (finish_code == AWS_S3_CONNECTION_FINISH_CODE_RETRY) {
+
+ if (connection->retry_token == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client could not schedule retry of request %p for meta request %p, as retry token is NULL.",
+ (void *)client,
+ (void *)request,
+ (void *)meta_request);
+
+ goto reset_connection;
+ }
+
+ if (aws_s3_meta_request_is_finished(meta_request)) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client not scheduling retry of request %p for meta request %p with token %p because meta "
+ "request has been flagged as finished.",
+ (void *)client,
+ (void *)request,
+ (void *)meta_request,
+ (void *)connection->retry_token);
+
+ goto reset_connection;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client scheduling retry of request %p for meta request %p with token %p.",
+ (void *)client,
+ (void *)request,
+ (void *)meta_request,
+ (void *)connection->retry_token);
+
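+ /* Classify the failure for the retry strategy: S3 "internal error" responses count as server errors, "slow down"
+ * responses as throttling, and everything else is treated as a transient error. */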
+ enum aws_retry_error_type error_type = AWS_RETRY_ERROR_TYPE_TRANSIENT;
+
+ switch (error_code) {
+ case AWS_ERROR_S3_INTERNAL_ERROR:
+ error_type = AWS_RETRY_ERROR_TYPE_SERVER_ERROR;
+ break;
+
+ case AWS_ERROR_S3_SLOW_DOWN:
+ error_type = AWS_RETRY_ERROR_TYPE_THROTTLING;
+ break;
+ }
+
+ if (connection->http_connection != NULL) {
+ AWS_ASSERT(endpoint->http_connection_manager);
+
+ aws_http_connection_manager_release_connection(
+ endpoint->http_connection_manager, connection->http_connection);
+
+ connection->http_connection = NULL;
+ }
+
+ /* Ask the retry strategy to schedule a retry of the request. */
+ if (aws_retry_strategy_schedule_retry(
+ connection->retry_token, error_type, s_s3_client_retry_ready, connection)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client could not retry request %p for meta request %p with token %p due to error %d (%s)",
+ (void *)client,
+ (void *)request,
+ (void *)meta_request,
+ (void *)connection->retry_token,
+ aws_last_error_or_unknown(),
+ aws_error_str(aws_last_error_or_unknown()));
+
+ goto reset_connection;
+ }
+
+ return;
+ }
+
+reset_connection:
+
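+ /* Common teardown path: record success on the retry token when applicable and release it, close the HTTP
+ * connection on failure, notify the meta request that this request finished, return the connection to the
+ * manager, free the connection object, and schedule more client work. */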
+ if (connection->retry_token != NULL) {
+ /* If we have a retry token and successfully finished, record that success. */
+ if (finish_code == AWS_S3_CONNECTION_FINISH_CODE_SUCCESS) {
+ aws_retry_token_record_success(connection->retry_token);
+ }
+
+ aws_retry_token_release(connection->retry_token);
+ connection->retry_token = NULL;
+ }
+
+ /* If we reach this point without success, this failure is not eligible for a retry, so close our HTTP
+ * connection and finish the request. */
+ if (finish_code != AWS_S3_CONNECTION_FINISH_CODE_SUCCESS) {
+ if (connection->http_connection != NULL) {
+ aws_http_connection_close(connection->http_connection);
+ }
+ }
+
+ aws_atomic_fetch_sub(&client->stats.num_requests_network_io[meta_request->type], 1);
+
+ s_s3_client_meta_request_finished_request(client, meta_request, request, error_code);
+
+ if (connection->http_connection != NULL) {
+ AWS_ASSERT(endpoint->http_connection_manager);
+
+ aws_http_connection_manager_release_connection(endpoint->http_connection_manager, connection->http_connection);
+
+ connection->http_connection = NULL;
+ }
+
+ if (connection->request != NULL) {
+ aws_s3_request_release(connection->request);
+ connection->request = NULL;
+ }
+
+ aws_retry_token_release(connection->retry_token);
+ connection->retry_token = NULL;
+
+ aws_s3_endpoint_release(connection->endpoint);
+ connection->endpoint = NULL;
+
+ aws_mem_release(client->allocator, connection);
+ connection = NULL;
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_client_lock_synced_data(client);
+ s_s3_client_schedule_process_work_synced(client);
+ aws_s3_client_unlock_synced_data(client);
+ }
+ /* END CRITICAL SECTION */
+}
+
+static void s_s3_client_prepare_request_callback_retry_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code,
+ void *user_data);
+
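+/* Invoked by the retry strategy once a scheduled retry is ready to run: the request is re-prepared and then sent
+ * again over this connection. */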
+static void s_s3_client_retry_ready(struct aws_retry_token *token, int error_code, void *user_data) {
+ AWS_PRECONDITION(token);
+ (void)token;
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_PRECONDITION(endpoint);
+
+ struct aws_s3_client *client = endpoint->client;
+ AWS_PRECONDITION(client);
+
+ /* If we couldn't retry this request, then bail on the entire meta request. */
+ if (error_code != AWS_ERROR_SUCCESS) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_CLIENT,
+ "id=%p Client could not retry request %p for meta request %p due to error %d (%s)",
+ (void *)client,
+ (void *)request,
+ (void *)meta_request,
+ error_code,
+ aws_error_str(error_code));
+
+ goto error_clean_up;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Client retrying request %p for meta request %p on connection %p with retry token %p",
+ (void *)client,
+ (void *)request,
+ (void *)meta_request,
+ (void *)connection,
+ (void *)connection->retry_token);
+
+ aws_s3_meta_request_prepare_request(
+ meta_request, request, s_s3_client_prepare_request_callback_retry_request, connection);
+
+ return;
+
+error_clean_up:
+
+ aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED);
+}
+
+static void s_s3_client_prepare_request_callback_retry_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code,
+ void *user_data) {
+ AWS_PRECONDITION(meta_request);
+ (void)meta_request;
+
+ AWS_PRECONDITION(request);
+ (void)request;
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_endpoint *endpoint = meta_request->endpoint;
+ AWS_ASSERT(endpoint != NULL);
+
+ struct aws_s3_client *client = endpoint->client;
+ AWS_ASSERT(client != NULL);
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ AWS_ASSERT(connection->retry_token);
+
+ s_s3_client_acquired_retry_token(
+ client->retry_strategy, AWS_ERROR_SUCCESS, connection->retry_token, connection);
+ } else {
+ aws_s3_client_notify_connection_finished(client, connection, error_code, AWS_S3_CONNECTION_FINISH_CODE_FAILED);
+ }
+}
+
+static void s_resume_token_ref_count_zero_callback(void *arg) {
+ struct aws_s3_meta_request_resume_token *token = arg;
+
+ aws_string_destroy(token->multipart_upload_id);
+
+ aws_mem_release(token->allocator, token);
+}
+
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new(struct aws_allocator *allocator) {
+ struct aws_s3_meta_request_resume_token *token =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_meta_request_resume_token));
+
+ token->allocator = allocator;
+ aws_ref_count_init(&token->ref_count, token, s_resume_token_ref_count_zero_callback);
+
+ return token;
+}
+
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_new_upload(
+ struct aws_allocator *allocator,
+ const struct aws_s3_upload_resume_token_options *options) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(options);
+
+ struct aws_s3_meta_request_resume_token *token = aws_s3_meta_request_resume_token_new(allocator);
+ token->multipart_upload_id = aws_string_new_from_cursor(allocator, &options->upload_id);
+ token->part_size = options->part_size;
+ token->total_num_parts = options->total_num_parts;
+ token->num_parts_completed = options->num_parts_completed;
+ token->type = AWS_S3_META_REQUEST_TYPE_PUT_OBJECT;
+ return token;
+}
+
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_acquire(
+ struct aws_s3_meta_request_resume_token *resume_token) {
+ if (resume_token) {
+ aws_ref_count_acquire(&resume_token->ref_count);
+ }
+ return resume_token;
+}
+
+struct aws_s3_meta_request_resume_token *aws_s3_meta_request_resume_token_release(
+ struct aws_s3_meta_request_resume_token *resume_token) {
+ if (resume_token) {
+ aws_ref_count_release(&resume_token->ref_count);
+ }
+ return NULL;
+}
+
+enum aws_s3_meta_request_type aws_s3_meta_request_resume_token_type(
+ struct aws_s3_meta_request_resume_token *resume_token) {
+ AWS_FATAL_PRECONDITION(resume_token);
+ return resume_token->type;
+}
+
+size_t aws_s3_meta_request_resume_token_part_size(struct aws_s3_meta_request_resume_token *resume_token) {
+ AWS_FATAL_PRECONDITION(resume_token);
+ return resume_token->part_size;
+}
+
+size_t aws_s3_meta_request_resume_token_total_num_parts(struct aws_s3_meta_request_resume_token *resume_token) {
+ AWS_FATAL_PRECONDITION(resume_token);
+ return resume_token->total_num_parts;
+}
+
+size_t aws_s3_meta_request_resume_token_num_parts_completed(struct aws_s3_meta_request_resume_token *resume_token) {
+ AWS_FATAL_PRECONDITION(resume_token);
+ return resume_token->num_parts_completed;
+}
+
+struct aws_byte_cursor aws_s3_meta_request_resume_token_upload_id(
+ struct aws_s3_meta_request_resume_token *resume_token) {
+ AWS_FATAL_PRECONDITION(resume_token);
+ if (resume_token->type == AWS_S3_META_REQUEST_TYPE_PUT_OBJECT && resume_token->multipart_upload_id != NULL) {
+ return aws_byte_cursor_from_string(resume_token->multipart_upload_id);
+ }
+
+ return aws_byte_cursor_from_c_str("");
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c b/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
new file mode 100644
index 0000000000..60c80d4b11
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_copy_object.c
@@ -0,0 +1,787 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_copy_object.h"
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/common/string.h>
+#include <aws/io/stream.h>
+
+/* Objects smaller than this threshold (1 GiB) are copied with a single S3 CopyObject request instead of a multipart copy. */
+static const size_t s_multipart_copy_minimum_object_size = 1L * 1024L * 1024L * 1024L;
+
+static const size_t s_etags_initial_capacity = 16;
+static const struct aws_byte_cursor s_upload_id = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("UploadId");
+static const size_t s_complete_multipart_upload_init_body_size_bytes = 512;
+static const size_t s_abort_multipart_upload_init_body_size_bytes = 512;
+
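+/* CreateMultipartUpload response headers that are captured and replayed on the final response delivered to the
+ * caller (server-side-encryption customer key / encryption context headers). */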
+static const struct aws_byte_cursor s_create_multipart_upload_copy_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+};
+
+static void s_s3_meta_request_copy_object_destroy(struct aws_s3_meta_request *meta_request);
+
+static bool s_s3_copy_object_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request);
+
+static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request);
+
+static void s_s3_copy_object_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code);
+
+static struct aws_s3_meta_request_vtable s_s3_copy_object_vtable = {
+ .update = s_s3_copy_object_update,
+ .send_request_finish = aws_s3_meta_request_send_request_finish_handle_async_error,
+ .prepare_request = s_s3_copy_object_prepare_request,
+ .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default,
+ .sign_request = aws_s3_meta_request_sign_request_default,
+ .finished_request = s_s3_copy_object_request_finished,
+ .destroy = s_s3_meta_request_copy_object_destroy,
+ .finish = aws_s3_meta_request_finish_default,
+};
+
+/* Allocate a new copy object meta request */
+struct aws_s3_meta_request *aws_s3_meta_request_copy_object_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ const struct aws_s3_meta_request_options *options) {
+
+ /* These should already have been validated by the caller. */
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->message);
+
+ struct aws_s3_copy_object *copy_object = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_copy_object));
+
+ /* part size and content length will be fetched later using a HEAD object request */
+ const size_t UNKNOWN_PART_SIZE = 0;
+ const size_t UNKNOWN_CONTENT_LENGTH = 0;
+ const int UNKNOWN_NUM_PARTS = 0;
+
+ /* TODO Handle and test multipart copy */
+ if (aws_s3_meta_request_init_base(
+ allocator,
+ client,
+ UNKNOWN_PART_SIZE,
+ false,
+ options,
+ copy_object,
+ &s_s3_copy_object_vtable,
+ &copy_object->base)) {
+ aws_mem_release(allocator, copy_object);
+ return NULL;
+ }
+
+ aws_array_list_init_dynamic(
+ &copy_object->synced_data.etag_list, allocator, s_etags_initial_capacity, sizeof(struct aws_string *));
+
+ copy_object->synced_data.content_length = UNKNOWN_CONTENT_LENGTH;
+ copy_object->synced_data.total_num_parts = UNKNOWN_NUM_PARTS;
+ copy_object->threaded_update_data.next_part_number = 1;
+
+ AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Created new CopyObject Meta Request.", (void *)&copy_object->base);
+
+ return &copy_object->base;
+}
+
+static void s_s3_meta_request_copy_object_destroy(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+
+ struct aws_s3_copy_object *copy_object = meta_request->impl;
+
+ aws_string_destroy(copy_object->upload_id);
+ copy_object->upload_id = NULL;
+
+ for (size_t etag_index = 0; etag_index < aws_array_list_length(&copy_object->synced_data.etag_list); ++etag_index) {
+ struct aws_string *etag = NULL;
+
+ aws_array_list_get_at(&copy_object->synced_data.etag_list, &etag, etag_index);
+ aws_string_destroy(etag);
+ }
+
+ aws_array_list_clean_up(&copy_object->synced_data.etag_list);
+ aws_http_headers_release(copy_object->synced_data.needed_response_headers);
+ aws_mem_release(meta_request->allocator, copy_object);
+}
+
+static bool s_s3_copy_object_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request) {
+
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(out_request);
+
+ struct aws_s3_request *request = NULL;
+ bool work_remaining = false;
+
+ struct aws_s3_copy_object *copy_object = meta_request->impl;
+
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) {
+
+ /* If we haven't already sent the HEAD request that fetches the source object size, do so now. */
+ if (!copy_object->synced_data.head_object_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ copy_object->synced_data.head_object_sent = true;
+
+ goto has_work_remaining;
+ }
+
+ if (!copy_object->synced_data.head_object_completed) {
+ /* we have not received the object size response yet */
+ goto has_work_remaining;
+ }
+
+ if (copy_object->synced_data.content_length < s_multipart_copy_minimum_object_size) {
+ /* The object is too small for multipart copy: forward the original CopyObject request to S3 as-is. */
+ if (!copy_object->synced_data.copy_request_bypass_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS,
+ 1,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Meta Request CopyObject created bypass request %p",
+ (void *)meta_request,
+ (void *)request);
+
+ copy_object->synced_data.copy_request_bypass_sent = true;
+ goto has_work_remaining;
+ }
+
+ /* If the bypass request hasn't been completed, then wait for it to be completed. */
+ if (!copy_object->synced_data.copy_request_bypass_completed) {
+ goto has_work_remaining;
+ } else {
+ goto no_work_remaining;
+ }
+ }
+
+ /* Object size is large enough to use multipart copy. If we haven't already sent a create-multipart-upload
+ * message, do so now. */
+ if (!copy_object->synced_data.create_multipart_upload_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ copy_object->synced_data.create_multipart_upload_sent = true;
+ goto has_work_remaining;
+ }
+
+ /* If the create-multipart-upload message hasn't been completed, then there is still additional work to do, but
+ * it can't be done yet. */
+ if (!copy_object->synced_data.create_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If we haven't sent all of the parts yet, then set up to send a new part now. */
+ if (copy_object->synced_data.num_parts_sent < copy_object->synced_data.total_num_parts) {
+
+ if ((flags & AWS_S3_META_REQUEST_UPDATE_FLAG_CONSERVATIVE) != 0) {
+ uint32_t num_parts_in_flight =
+ (copy_object->synced_data.num_parts_sent - copy_object->synced_data.num_parts_completed);
+
+ /* TODO: benchmark whether the number of in-flight UploadPartCopy requests needs to be limited */
+ if (num_parts_in_flight > 0) {
+ goto has_work_remaining;
+ }
+ }
+
+ /* Allocate a request for another part. */
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ request->part_number = copy_object->threaded_update_data.next_part_number;
+
+ ++copy_object->threaded_update_data.next_part_number;
+ ++copy_object->synced_data.num_parts_sent;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Returning request %p for part %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number);
+
+ goto has_work_remaining;
+ }
+
+ /* There is one more request to send after all of the parts (the complete-multipart-upload) but it can't be done
+ * until all of the parts have been completed. */
+ if (copy_object->synced_data.num_parts_completed != copy_object->synced_data.total_num_parts) {
+ goto has_work_remaining;
+ }
+
+ /* If the complete-multipart-upload request hasn't been set yet, then send it now. */
+ if (!copy_object->synced_data.complete_multipart_upload_sent) {
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ copy_object->synced_data.complete_multipart_upload_sent = true;
+ goto has_work_remaining;
+ }
+
+ /* Wait for the complete-multipart-upload request to finish. */
+ if (!copy_object->synced_data.complete_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ goto no_work_remaining;
+ } else {
+
+ /* If the create-multipart-upload request hasn't been sent, then there is nothing left to do when canceling. */
+ if (!copy_object->synced_data.create_multipart_upload_sent) {
+ goto no_work_remaining;
+ }
+
+ /* If the create-multipart-upload request is still in flight, wait for it to finish. */
+ if (!copy_object->synced_data.create_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If the number of parts completed is less than the number of parts sent, then we need to wait until all of
+ * those parts are done sending before aborting. */
+ if (copy_object->synced_data.num_parts_completed < copy_object->synced_data.num_parts_sent) {
+ goto has_work_remaining;
+ }
+
+ /* If the complete-multipart-upload is already in flight, then we can't necessarily send an abort. */
+ if (copy_object->synced_data.complete_multipart_upload_sent &&
+ !copy_object->synced_data.complete_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If the complete-multipart-upload completed successfully, then there is nothing to abort since the transfer
+ * has already finished. */
+ if (copy_object->synced_data.complete_multipart_upload_completed &&
+ copy_object->synced_data.complete_multipart_upload_error_code == AWS_ERROR_SUCCESS) {
+ goto no_work_remaining;
+ }
+
+ /* If we made it here, and the abort-multipart-upload message hasn't been sent yet, then do so now. */
+ if (!copy_object->synced_data.abort_multipart_upload_sent) {
+ if (copy_object->upload_id == NULL) {
+ goto no_work_remaining;
+ }
+
+ request = aws_s3_request_new(
+ meta_request,
+ AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD,
+ 0,
+ AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS | AWS_S3_REQUEST_FLAG_ALWAYS_SEND);
+
+ copy_object->synced_data.abort_multipart_upload_sent = true;
+
+ goto has_work_remaining;
+ }
+
+ /* Wait for the multipart upload to be completed. */
+ if (!copy_object->synced_data.abort_multipart_upload_completed) {
+ goto has_work_remaining;
+ }
+
+ goto no_work_remaining;
+ }
+
+has_work_remaining:
+ work_remaining = true;
+
+no_work_remaining:
+
+ if (!work_remaining) {
+ aws_s3_meta_request_set_success_synced(meta_request, AWS_S3_RESPONSE_STATUS_SUCCESS);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+
+ if (work_remaining) {
+ *out_request = request;
+ } else {
+ AWS_ASSERT(request == NULL);
+
+ aws_s3_meta_request_finish(meta_request);
+ }
+
+ return work_remaining;
+}
+
+/* Given a request, prepare it for sending based on its description. */
+static int s_s3_copy_object_prepare_request(struct aws_s3_meta_request *meta_request, struct aws_s3_request *request) {
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_copy_object *copy_object = meta_request->impl;
+ AWS_PRECONDITION(copy_object);
+
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ struct aws_http_message *message = NULL;
+
+ switch (request->request_tag) {
+
+ /* Prepares the HEAD request used to fetch the source object size. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: {
+ message = aws_s3_get_source_object_size_message_new(
+ meta_request->allocator, meta_request->initial_request_message);
+ break;
+ }
+
+ /* The S3 object is not large enough for multipart copy. Forward a copy of the original CopyObject request
+ * directly to S3. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS: {
+ message = aws_s3_message_util_copy_http_message_no_body_all_headers(
+ meta_request->allocator, meta_request->initial_request_message);
+ break;
+ }
+
+ /* Prepares the CreateMultipartUpload sub-request. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: {
+ uint64_t part_size_uint64 = copy_object->synced_data.content_length / (uint64_t)g_s3_max_num_upload_parts;
+
+ if (part_size_uint64 > SIZE_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create multipart copy meta request; required part size of %" PRIu64
+ " bytes is too large for platform.",
+ part_size_uint64);
+
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return AWS_OP_ERR;
+ }
+
+ size_t part_size = (size_t)part_size_uint64;
+
+ const size_t MIN_PART_SIZE = 64L * 1024L * 1024L; /* minimum part size for multipart copy (64 MiB) */
+ if (part_size < MIN_PART_SIZE) {
+ part_size = MIN_PART_SIZE;
+ }
+
+ uint32_t num_parts = (uint32_t)(copy_object->synced_data.content_length / part_size);
+
+ if ((copy_object->synced_data.content_length % part_size) > 0) {
+ ++num_parts;
+ }
+
+ copy_object->synced_data.total_num_parts = num_parts;
+ copy_object->synced_data.part_size = part_size;
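+
+ /* Illustrative example, assuming g_s3_max_num_upload_parts is the usual S3 part-count limit of 10000:
+ * a 100 GiB source yields 107374182400 / 10000 = 10737418 bytes (~10 MiB), which is below the 64 MiB
+ * floor, so part_size becomes 64 MiB and num_parts = 107374182400 / 67108864 = 1600. */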
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "Starting multi-part Copy using part size=%zu, total_num_parts=%zu",
+ part_size,
+ (size_t)num_parts);
+
+ /* Create the message to create a new multipart upload. */
+ message = aws_s3_create_multipart_upload_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ meta_request->checksum_config.checksum_algorithm);
+
+ break;
+ }
+
+ /* Prepares the UploadPartCopy sub-request. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: {
+ /* Create a new uploadPartCopy message to upload a part. */
+ /* compute sub-request range */
+ uint64_t range_start = (request->part_number - 1) * copy_object->synced_data.part_size;
+ uint64_t range_end = range_start + copy_object->synced_data.part_size - 1;
+ if (range_end >= copy_object->synced_data.content_length) {
+ /* adjust size of last part */
+ range_end = copy_object->synced_data.content_length - 1;
+ }
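+
+ /* Illustrative example: with part_size = 64 MiB (67108864) and part_number = 3, range_start = 134217728 and
+ * range_end = 201326591; for the final part, range_end is clamped to content_length - 1 above. */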
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "Starting UploadPartCopy for partition %" PRIu32 ", range_start=%" PRIu64 ", range_end=%" PRIu64
+ ", full object length=%" PRIu64,
+ request->part_number,
+ range_start,
+ range_end,
+ copy_object->synced_data.content_length);
+
+ message = aws_s3_upload_part_copy_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ &request->request_body,
+ request->part_number,
+ range_start,
+ range_end,
+ copy_object->upload_id,
+ meta_request->should_compute_content_md5);
+ break;
+ }
+
+ /* Prepares the CompleteMultiPartUpload sub-request. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {
+
+ if (request->num_times_prepared == 0) {
+ aws_byte_buf_init(
+ &request->request_body, meta_request->allocator, s_complete_multipart_upload_init_body_size_bytes);
+ } else {
+ aws_byte_buf_reset(&request->request_body, false);
+ }
+
+ AWS_FATAL_ASSERT(copy_object->upload_id);
+ AWS_ASSERT(request->request_body.capacity > 0);
+ aws_byte_buf_reset(&request->request_body, false);
+
+ /* Build the message to complete our multipart upload, which includes a payload describing all of our
+ * completed parts. */
+ message = aws_s3_complete_multipart_message_new(
+ meta_request->allocator,
+ meta_request->initial_request_message,
+ &request->request_body,
+ copy_object->upload_id,
+ &copy_object->synced_data.etag_list,
+ NULL,
+ AWS_SCA_NONE);
+
+ break;
+ }
+
+ /* Prepares the AbortMultiPartUpload sub-request. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: {
+ AWS_FATAL_ASSERT(copy_object->upload_id);
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Abort multipart upload request for upload id %s.",
+ (void *)meta_request,
+ aws_string_c_str(copy_object->upload_id));
+
+ if (request->num_times_prepared == 0) {
+ aws_byte_buf_init(
+ &request->request_body, meta_request->allocator, s_abort_multipart_upload_init_body_size_bytes);
+ } else {
+ aws_byte_buf_reset(&request->request_body, false);
+ }
+
+ /* Build the message to abort our multipart upload */
+ message = aws_s3_abort_multipart_upload_message_new(
+ meta_request->allocator, meta_request->initial_request_message, copy_object->upload_id);
+
+ break;
+ }
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+
+ if (message == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not allocate message for request with tag %d for CopyObject meta request.",
+ (void *)meta_request,
+ request->request_tag);
+ goto message_create_failed;
+ }
+
+ aws_s3_request_setup_send_data(request, message);
+
+ aws_http_message_release(message);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Prepared request %p for part %d",
+ (void *)meta_request,
+ (void *)request,
+ request->part_number);
+
+ return AWS_OP_SUCCESS;
+
+message_create_failed:
+
+ return AWS_OP_ERR;
+}
+
+/* For UploadPartCopy requests, the ETag is returned in the response body, with its surrounding quotes encoded as XML entities. */
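+/* For example, the CopyPartResult body typically carries something like
+ * <ETag>&quot;9b2cf535f27731c974343645a3985328&quot;</ETag> (payload shape and digest are illustrative);
+ * replace_quote_entities() turns the entities back into quote characters, and the surrounding quotes are then
+ * stripped below. */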
+static struct aws_string *s_etag_new_from_upload_part_copy_response(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *response_body) {
+ struct aws_string *etag = NULL;
+
+ struct aws_byte_cursor response_body_cursor = aws_byte_cursor_from_buf(response_body);
+
+ struct aws_string *etag_within_xml_quotes =
+ aws_xml_get_top_level_tag(allocator, &g_etag_header_name, &response_body_cursor);
+
+ struct aws_byte_buf etag_within_quotes_byte_buf;
+ AWS_ZERO_STRUCT(etag_within_quotes_byte_buf);
+ replace_quote_entities(allocator, etag_within_xml_quotes, &etag_within_quotes_byte_buf);
+
+ /* Remove the quotes surrounding the etag. */
+ struct aws_byte_cursor etag_within_quotes_byte_cursor = aws_byte_cursor_from_buf(&etag_within_quotes_byte_buf);
+ if (etag_within_quotes_byte_cursor.len >= 2 && etag_within_quotes_byte_cursor.ptr[0] == '"' &&
+ etag_within_quotes_byte_cursor.ptr[etag_within_quotes_byte_cursor.len - 1] == '"') {
+
+ aws_byte_cursor_advance(&etag_within_quotes_byte_cursor, 1);
+ --etag_within_quotes_byte_cursor.len;
+ }
+
+ etag = aws_string_new_from_cursor(allocator, &etag_within_quotes_byte_cursor);
+ aws_byte_buf_clean_up(&etag_within_quotes_byte_buf);
+ aws_string_destroy(etag_within_xml_quotes);
+
+ return etag;
+}
+
+static void s_s3_copy_object_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code) {
+
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_copy_object *copy_object = meta_request->impl;
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ switch (request->request_tag) {
+
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_GET_OBJECT_SIZE: {
+ if (error_code == AWS_ERROR_SUCCESS) {
+ struct aws_byte_cursor content_length_cursor;
+ if (!aws_http_headers_get(
+ request->send_data.response_headers, g_content_length_header_name, &content_length_cursor)) {
+
+ if (!aws_byte_cursor_utf8_parse_u64(
+ content_length_cursor, &copy_object->synced_data.content_length)) {
+ copy_object->synced_data.head_object_completed = true;
+ } else {
+ /* HEAD request returned an invalid content-length */
+ aws_s3_meta_request_set_fail_synced(
+ meta_request, request, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER);
+ }
+ } else {
+ /* HEAD request didn't return content-length header */
+ aws_s3_meta_request_set_fail_synced(
+ meta_request, request, AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER);
+ }
+ } else {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+
+ break;
+ }
+
+ /* The S3 object is not large enough for multipart copy. A copy of the original CopyObject request
+ * was forwarded directly to S3 and has now finished. */
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_BYPASS: {
+
+ /* Invoke headers callback if it was requested for this meta request */
+ if (meta_request->headers_callback != NULL) {
+ struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator);
+
+ /* Copy all the response headers from this request. */
+ copy_http_headers(request->send_data.response_headers, final_response_headers);
+
+ /* Notify the user of the headers. */
+ if (meta_request->headers_callback(
+ meta_request,
+ final_response_headers,
+ request->send_data.response_status,
+ meta_request->user_data)) {
+
+ error_code = aws_last_error_or_unknown();
+ }
+ meta_request->headers_callback = NULL;
+
+ aws_http_headers_release(final_response_headers);
+ }
+
+ /* Record the outcome of the bypassed CopyObject request. */
+ if (error_code == AWS_ERROR_SUCCESS) {
+ copy_object->synced_data.copy_request_bypass_completed = true;
+ } else {
+ /* Bypassed CopyObject request failed */
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+ break;
+ }
+
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_CREATE_MULTIPART_UPLOAD: {
+ struct aws_http_headers *needed_response_headers = NULL;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ needed_response_headers = aws_http_headers_new(meta_request->allocator);
+ const size_t copy_header_count = AWS_ARRAY_SIZE(s_create_multipart_upload_copy_headers);
+
+ /* Copy any headers now that we'll need for the final, transformed headers later. */
+ for (size_t header_index = 0; header_index < copy_header_count; ++header_index) {
+ const struct aws_byte_cursor *header_name = &s_create_multipart_upload_copy_headers[header_index];
+ struct aws_byte_cursor header_value;
+ AWS_ZERO_STRUCT(header_value);
+
+ if (!aws_http_headers_get(request->send_data.response_headers, *header_name, &header_value)) {
+ aws_http_headers_set(needed_response_headers, *header_name, header_value);
+ }
+ }
+
+ struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body);
+
+ /* Find the upload id for this multipart upload. */
+ struct aws_string *upload_id =
+ aws_xml_get_top_level_tag(meta_request->allocator, &s_upload_id, &buffer_byte_cursor);
+
+ if (upload_id == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not find upload-id in create-multipart-upload response",
+ (void *)meta_request);
+
+ aws_raise_error(AWS_ERROR_S3_MISSING_UPLOAD_ID);
+ error_code = AWS_ERROR_S3_MISSING_UPLOAD_ID;
+ } else {
+ /* Store the multipart upload id. */
+ copy_object->upload_id = upload_id;
+ }
+ }
+
+ AWS_ASSERT(copy_object->synced_data.needed_response_headers == NULL);
+ copy_object->synced_data.needed_response_headers = needed_response_headers;
+
+ copy_object->synced_data.create_multipart_upload_completed = true;
+ copy_object->synced_data.create_multipart_upload_error_code = error_code;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+ break;
+ }
+
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_MULTIPART_COPY: {
+ size_t part_number = request->part_number;
+ AWS_FATAL_ASSERT(part_number > 0);
+ size_t part_index = part_number - 1;
+
+ ++copy_object->synced_data.num_parts_completed;
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: %d out of %d parts have completed.",
+ (void *)meta_request,
+ copy_object->synced_data.num_parts_completed,
+ copy_object->synced_data.total_num_parts);
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ struct aws_string *etag = s_etag_new_from_upload_part_copy_response(
+ meta_request->allocator, &request->send_data.response_body);
+
+ AWS_ASSERT(etag != NULL);
+
+ ++copy_object->synced_data.num_parts_successful;
+ if (meta_request->progress_callback != NULL) {
+ struct aws_s3_meta_request_progress progress = {
+ .bytes_transferred = copy_object->synced_data.part_size,
+ .content_length = copy_object->synced_data.content_length};
+ meta_request->progress_callback(meta_request, &progress, meta_request->user_data);
+ }
+
+ struct aws_string *null_etag = NULL;
+ /* ETags need to be associated with their part number, so we keep the etag indices consistent with
+ * part numbers. This means we may have to add padding to the list in the case that parts finish out
+ * of order. */
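+ /* e.g. if part 3 completes before parts 1 and 2, the list is padded to { NULL, NULL, etag_3 }. */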
+ while (aws_array_list_length(&copy_object->synced_data.etag_list) < part_number) {
+ int push_back_result = aws_array_list_push_back(&copy_object->synced_data.etag_list, &null_etag);
+ AWS_FATAL_ASSERT(push_back_result == AWS_OP_SUCCESS);
+ }
+ aws_array_list_set_at(&copy_object->synced_data.etag_list, &etag, part_index);
+ } else {
+ ++copy_object->synced_data.num_parts_failed;
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+
+ break;
+ }
+
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_COMPLETE_MULTIPART_UPLOAD: {
+ if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL) {
+ struct aws_http_headers *final_response_headers = aws_http_headers_new(meta_request->allocator);
+
+ /* Copy all the response headers from this request. */
+ copy_http_headers(request->send_data.response_headers, final_response_headers);
+
+ /* Copy over any response headers that we've previously determined are needed for this final
+ * response.
+ */
+ copy_http_headers(copy_object->synced_data.needed_response_headers, final_response_headers);
+
+ struct aws_byte_cursor response_body_cursor =
+ aws_byte_cursor_from_buf(&request->send_data.response_body);
+
+ /* Grab the ETag for the entire object, and set it as a header. */
+ struct aws_string *etag_header_value =
+ aws_xml_get_top_level_tag(meta_request->allocator, &g_etag_header_name, &response_body_cursor);
+
+ if (etag_header_value != NULL) {
+ struct aws_byte_buf etag_header_value_byte_buf;
+ AWS_ZERO_STRUCT(etag_header_value_byte_buf);
+
+ replace_quote_entities(meta_request->allocator, etag_header_value, &etag_header_value_byte_buf);
+
+ aws_http_headers_set(
+ final_response_headers,
+ g_etag_header_name,
+ aws_byte_cursor_from_buf(&etag_header_value_byte_buf));
+
+ aws_string_destroy(etag_header_value);
+ aws_byte_buf_clean_up(&etag_header_value_byte_buf);
+ }
+
+ /* Notify the user of the headers. */
+ if (meta_request->headers_callback(
+ meta_request,
+ final_response_headers,
+ request->send_data.response_status,
+ meta_request->user_data)) {
+
+ error_code = aws_last_error_or_unknown();
+ }
+ meta_request->headers_callback = NULL;
+
+ aws_http_headers_release(final_response_headers);
+ }
+
+ copy_object->synced_data.complete_multipart_upload_completed = true;
+ copy_object->synced_data.complete_multipart_upload_error_code = error_code;
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+
+ break;
+ }
+ case AWS_S3_COPY_OBJECT_REQUEST_TAG_ABORT_MULTIPART_UPLOAD: {
+ copy_object->synced_data.abort_multipart_upload_error_code = error_code;
+ copy_object->synced_data.abort_multipart_upload_completed = true;
+ break;
+ }
+ }
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c b/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
new file mode 100644
index 0000000000..5bc39a7316
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_default_meta_request.c
@@ -0,0 +1,298 @@
+#include "aws/s3/private/s3_default_meta_request.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/common/string.h>
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+/* sscanf warning (not currently scanning for strings) */
+# pragma warning(disable : 4996)
+#endif
+
+static void s_s3_meta_request_default_destroy(struct aws_s3_meta_request *meta_request);
+
+static bool s_s3_meta_request_default_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request);
+
+static int s_s3_meta_request_default_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request);
+
+static void s_s3_meta_request_default_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code);
+
+static struct aws_s3_meta_request_vtable s_s3_meta_request_default_vtable = {
+ .update = s_s3_meta_request_default_update,
+ .send_request_finish = aws_s3_meta_request_send_request_finish_handle_async_error,
+ .prepare_request = s_s3_meta_request_default_prepare_request,
+ .init_signing_date_time = aws_s3_meta_request_init_signing_date_time_default,
+ .sign_request = aws_s3_meta_request_sign_request_default,
+ .finished_request = s_s3_meta_request_default_request_finished,
+ .destroy = s_s3_meta_request_default_destroy,
+ .finish = aws_s3_meta_request_finish_default,
+};
+
+/* Allocate a new default meta request. */
+struct aws_s3_meta_request *aws_s3_meta_request_default_new(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ uint64_t content_length,
+ bool should_compute_content_md5,
+ const struct aws_s3_meta_request_options *options) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(client);
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->message);
+
+ struct aws_byte_cursor request_method;
+ if (aws_http_message_get_request_method(options->message, &request_method)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create Default Meta Request; could not get request method from message.");
+
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ if (content_length > SIZE_MAX) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "Could not create Default Meta Request; content length of %" PRIu64 " bytes is too large for platform.",
+ content_length);
+
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return NULL;
+ }
+
+ struct aws_s3_meta_request_default *meta_request_default =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_meta_request_default));
+
+ /* Try to initialize the base type. */
+ if (aws_s3_meta_request_init_base(
+ allocator,
+ client,
+ 0,
+ should_compute_content_md5,
+ options,
+ meta_request_default,
+ &s_s3_meta_request_default_vtable,
+ &meta_request_default->base)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not initialize base type for Default Meta Request.",
+ (void *)meta_request_default);
+
+ aws_mem_release(allocator, meta_request_default);
+ return NULL;
+ }
+
+ meta_request_default->content_length = (size_t)content_length;
+
+ AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Created new Default Meta Request.", (void *)meta_request_default);
+
+ return &meta_request_default->base;
+}
+
+static void s_s3_meta_request_default_destroy(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+
+ struct aws_s3_meta_request_default *meta_request_default = meta_request->impl;
+ aws_mem_release(meta_request->allocator, meta_request_default);
+}
+
+/* Try to get the next request that should be processed. */
+static bool s_s3_meta_request_default_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request) {
+ (void)flags;
+
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(out_request);
+
+ struct aws_s3_meta_request_default *meta_request_default = meta_request->impl;
+ struct aws_s3_request *request = NULL;
+ bool work_remaining = false;
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ if (!aws_s3_meta_request_has_finish_result_synced(meta_request)) {
+
+ /* If the request hasn't been sent, then create and send it now. */
+ if (!meta_request_default->synced_data.request_sent) {
+ if (out_request == NULL) {
+ goto has_work_remaining;
+ }
+
+ request = aws_s3_request_new(meta_request, 0, 1, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Meta Request Default created request %p",
+ (void *)meta_request,
+ (void *)request);
+
+ meta_request_default->synced_data.request_sent = true;
+ goto has_work_remaining;
+ }
+
+ /* If the request hasn't been completed, then wait for it to be completed. */
+ if (!meta_request_default->synced_data.request_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If delivery hasn't been attempted yet for the response body, wait for that to happen. */
+ if (meta_request->synced_data.num_parts_delivery_completed < 1) {
+ goto has_work_remaining;
+ }
+
+ goto no_work_remaining;
+
+ } else {
+
+ /* If we are canceling, and the request hasn't been sent yet, then there is nothing to wait for. */
+ if (!meta_request_default->synced_data.request_sent) {
+ goto no_work_remaining;
+ }
+
+ /* If the request hasn't been completed yet, then wait for that to happen. */
+ if (!meta_request_default->synced_data.request_completed) {
+ goto has_work_remaining;
+ }
+
+ /* If some parts are still being delivered to the caller, then wait for those to finish. */
+ if (meta_request->synced_data.num_parts_delivery_completed <
+ meta_request->synced_data.num_parts_delivery_sent) {
+ goto has_work_remaining;
+ }
+
+ goto no_work_remaining;
+ }
+
+ has_work_remaining:
+ work_remaining = true;
+
+ no_work_remaining:
+
+ if (!work_remaining) {
+ aws_s3_meta_request_set_success_synced(
+ meta_request, meta_request_default->synced_data.cached_response_status);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ if (work_remaining) {
+ if (request != NULL) {
+ AWS_ASSERT(out_request != NULL);
+ *out_request = request;
+ }
+ } else {
+ AWS_ASSERT(request == NULL);
+
+ aws_s3_meta_request_finish(meta_request);
+ }
+
+ return work_remaining;
+}
+
+/* Given a request, prepare it for sending based on its description. */
+static int s_s3_meta_request_default_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request) {
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_meta_request_default *meta_request_default = meta_request->impl;
+ AWS_PRECONDITION(meta_request_default);
+
+ if (meta_request_default->content_length > 0 && request->num_times_prepared == 0) {
+ aws_byte_buf_init(&request->request_body, meta_request->allocator, meta_request_default->content_length);
+
+ if (aws_s3_meta_request_read_body(meta_request, &request->request_body)) {
+ return AWS_OP_ERR;
+ }
+ }
+
+ struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_all_headers(
+ meta_request->allocator, meta_request->initial_request_message);
+
+ bool flexible_checksum = meta_request->checksum_config.location != AWS_SCL_NONE;
+ if (!flexible_checksum && meta_request->should_compute_content_md5) {
+ /* If a flexible checksum is used, the client MUST skip the Content-MD5 header computation */
+ aws_s3_message_util_add_content_md5_header(meta_request->allocator, &request->request_body, message);
+ }
+
+ if (meta_request->checksum_config.validate_response_checksum) {
+ struct aws_http_headers *headers = aws_http_message_get_headers(message);
+ aws_http_headers_set(headers, g_request_validation_mode, g_enabled);
+ }
+ aws_s3_message_util_assign_body(
+ meta_request->allocator,
+ &request->request_body,
+ message,
+ &meta_request->checksum_config,
+ NULL /* out_checksum */);
+
+ aws_s3_request_setup_send_data(request, message);
+
+ aws_http_message_release(message);
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST, "id=%p: Meta Request prepared request %p", (void *)meta_request, (void *)request);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_s3_meta_request_default_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->impl);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request_default *meta_request_default = meta_request->impl;
+ AWS_PRECONDITION(meta_request_default);
+
+ if (error_code == AWS_ERROR_SUCCESS && meta_request->headers_callback != NULL &&
+ request->send_data.response_headers != NULL) {
+
+ if (meta_request->headers_callback(
+ meta_request,
+ request->send_data.response_headers,
+ request->send_data.response_status,
+ meta_request->user_data)) {
+ error_code = aws_last_error_or_unknown();
+ }
+
+ meta_request->headers_callback = NULL;
+ }
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ meta_request_default->synced_data.cached_response_status = request->send_data.response_status;
+ meta_request_default->synced_data.request_completed = true;
+ meta_request_default->synced_data.request_error_code = error_code;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ aws_s3_meta_request_stream_response_body_synced(meta_request, request);
+ } else {
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ }
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c b/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
new file mode 100644
index 0000000000..74075ccb8a
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_endpoint.c
@@ -0,0 +1,334 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_auto_ranged_get.h"
+#include "aws/s3/private/s3_auto_ranged_put.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_default_meta_request.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "aws/s3/private/s3_util.h"
+
+#include <aws/auth/credentials.h>
+#include <aws/common/assert.h>
+#include <aws/common/atomics.h>
+#include <aws/common/clock.h>
+#include <aws/common/device_random.h>
+#include <aws/common/environment.h>
+#include <aws/common/string.h>
+#include <aws/common/system_info.h>
+#include <aws/http/connection.h>
+#include <aws/http/connection_manager.h>
+#include <aws/http/request_response.h>
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/host_resolver.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/io/socket.h>
+#include <aws/io/stream.h>
+#include <aws/io/tls_channel_handler.h>
+#include <aws/io/uri.h>
+
+#include <inttypes.h>
+#include <math.h>
+
+static const uint32_t s_connection_timeout_ms = 3000;
+static const uint16_t s_http_port = 80;
+static const uint16_t s_https_port = 443;
+
+static void s_s3_endpoint_on_host_resolver_address_resolved(
+ struct aws_host_resolver *resolver,
+ const struct aws_string *host_name,
+ int err_code,
+ const struct aws_array_list *host_addresses,
+ void *user_data);
+
+static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_manager(
+ struct aws_s3_endpoint *endpoint,
+ const struct aws_string *host_name,
+ struct aws_client_bootstrap *client_bootstrap,
+ const struct aws_tls_connection_options *tls_connection_options,
+ uint32_t max_connections,
+ uint16_t port,
+ const struct aws_http_proxy_config *proxy_config,
+ const struct proxy_env_var_settings *proxy_ev_settings,
+ uint32_t connect_timeout_ms,
+ const struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options,
+ const struct aws_http_connection_monitoring_options *monitoring_options);
+
+static void s_s3_endpoint_http_connection_manager_shutdown_callback(void *user_data);
+
+static void s_s3_endpoint_ref_count_zero(struct aws_s3_endpoint *endpoint);
+
+static void s_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock);
+
+static void s_s3_endpoint_release(struct aws_s3_endpoint *endpoint);
+
+static const struct aws_s3_endpoint_system_vtable s_s3_endpoint_default_system_vtable = {
+ .acquire = s_s3_endpoint_acquire,
+ .release = s_s3_endpoint_release,
+};
+
+static const struct aws_s3_endpoint_system_vtable *s_s3_endpoint_system_vtable = &s_s3_endpoint_default_system_vtable;
+
+void aws_s3_endpoint_set_system_vtable(const struct aws_s3_endpoint_system_vtable *vtable) {
+ s_s3_endpoint_system_vtable = vtable;
+}
+
+struct aws_s3_endpoint *aws_s3_endpoint_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_endpoint_options *options) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->host_name);
+
+ struct aws_s3_endpoint *endpoint = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_endpoint));
+ endpoint->client_synced_data.ref_count = 1;
+
+ endpoint->allocator = allocator;
+ endpoint->host_name = options->host_name;
+
+ struct aws_host_resolution_config host_resolver_config;
+ AWS_ZERO_STRUCT(host_resolver_config);
+ host_resolver_config.impl = aws_default_dns_resolve;
+ host_resolver_config.max_ttl = options->dns_host_address_ttl_seconds;
+ host_resolver_config.impl_data = NULL;
+
+ if (aws_host_resolver_resolve_host(
+ options->client_bootstrap->host_resolver,
+ endpoint->host_name,
+ s_s3_endpoint_on_host_resolver_address_resolved,
+ &host_resolver_config,
+ NULL)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_ENDPOINT,
+ "id=%p: Error trying to resolve host for endpoint %s",
+ (void *)endpoint,
+ (const char *)endpoint->host_name->bytes);
+
+ goto error_cleanup;
+ }
+
+ endpoint->http_connection_manager = s_s3_endpoint_create_http_connection_manager(
+ endpoint,
+ options->host_name,
+ options->client_bootstrap,
+ options->tls_connection_options,
+ options->max_connections,
+ options->port,
+ options->proxy_config,
+ options->proxy_ev_settings,
+ options->connect_timeout_ms,
+ options->tcp_keep_alive_options,
+ options->monitoring_options);
+
+ if (endpoint->http_connection_manager == NULL) {
+ goto error_cleanup;
+ }
+
+ endpoint->client = options->client;
+
+ return endpoint;
+
+error_cleanup:
+
+ aws_string_destroy(options->host_name);
+
+ aws_mem_release(allocator, endpoint);
+
+ return NULL;
+}
+
+static struct aws_http_connection_manager *s_s3_endpoint_create_http_connection_manager(
+ struct aws_s3_endpoint *endpoint,
+ const struct aws_string *host_name,
+ struct aws_client_bootstrap *client_bootstrap,
+ const struct aws_tls_connection_options *tls_connection_options,
+ uint32_t max_connections,
+ uint16_t port,
+ const struct aws_http_proxy_config *proxy_config,
+ const struct proxy_env_var_settings *proxy_ev_settings,
+ uint32_t connect_timeout_ms,
+ const struct aws_s3_tcp_keep_alive_options *tcp_keep_alive_options,
+ const struct aws_http_connection_monitoring_options *monitoring_options) {
+
+ AWS_PRECONDITION(endpoint);
+ AWS_PRECONDITION(client_bootstrap);
+ AWS_PRECONDITION(host_name);
+
+ struct aws_byte_cursor host_name_cursor = aws_byte_cursor_from_string(host_name);
+
+ /* Try to set up an HTTP connection manager. */
+ struct aws_socket_options socket_options;
+ AWS_ZERO_STRUCT(socket_options);
+ socket_options.type = AWS_SOCKET_STREAM;
+ socket_options.domain = AWS_SOCKET_IPV4;
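+ /* A connect_timeout_ms of 0 means "not specified"; fall back to the default s_connection_timeout_ms (3 seconds). */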
+ socket_options.connect_timeout_ms = connect_timeout_ms == 0 ? s_connection_timeout_ms : connect_timeout_ms;
+ if (tcp_keep_alive_options != NULL) {
+ socket_options.keepalive = true;
+ socket_options.keep_alive_interval_sec = tcp_keep_alive_options->keep_alive_interval_sec;
+ socket_options.keep_alive_timeout_sec = tcp_keep_alive_options->keep_alive_timeout_sec;
+ socket_options.keep_alive_max_failed_probes = tcp_keep_alive_options->keep_alive_max_failed_probes;
+ }
+ struct proxy_env_var_settings proxy_ev_settings_default;
+ /* By default, allow proxy settings to be picked up from environment variables */
+ if (proxy_ev_settings == NULL) {
+ AWS_ZERO_STRUCT(proxy_ev_settings_default);
+ proxy_ev_settings_default.env_var_type = AWS_HPEV_ENABLE;
+ proxy_ev_settings = &proxy_ev_settings_default;
+ }
+
+ struct aws_http_connection_manager_options manager_options;
+ AWS_ZERO_STRUCT(manager_options);
+ manager_options.bootstrap = client_bootstrap;
+ manager_options.initial_window_size = SIZE_MAX;
+ manager_options.socket_options = &socket_options;
+ manager_options.host = host_name_cursor;
+ manager_options.max_connections = max_connections;
+ manager_options.shutdown_complete_callback = s_s3_endpoint_http_connection_manager_shutdown_callback;
+ manager_options.shutdown_complete_user_data = endpoint;
+ manager_options.proxy_ev_settings = proxy_ev_settings;
+ if (monitoring_options != NULL) {
+ manager_options.monitoring_options = monitoring_options;
+ }
+
+ struct aws_http_proxy_options proxy_options;
+ if (proxy_config != NULL) {
+ aws_http_proxy_options_init_from_config(&proxy_options, proxy_config);
+ manager_options.proxy_options = &proxy_options;
+ }
+
+ struct aws_tls_connection_options *manager_tls_options = NULL;
+
+ if (tls_connection_options != NULL) {
+ manager_tls_options = aws_mem_calloc(endpoint->allocator, 1, sizeof(struct aws_tls_connection_options));
+ aws_tls_connection_options_copy(manager_tls_options, tls_connection_options);
+
+ /* TODO fix this in the actual aws_tls_connection_options_set_server_name function. */
+ if (manager_tls_options->server_name != NULL) {
+ aws_string_destroy(manager_tls_options->server_name);
+ manager_tls_options->server_name = NULL;
+ }
+
+ aws_tls_connection_options_set_server_name(manager_tls_options, endpoint->allocator, &host_name_cursor);
+
+ manager_options.tls_connection_options = manager_tls_options;
+ manager_options.port = port == 0 ? s_https_port : port;
+ } else {
+ manager_options.port = port == 0 ? s_http_port : port;
+ }
+
+ struct aws_http_connection_manager *http_connection_manager =
+ aws_http_connection_manager_new(endpoint->allocator, &manager_options);
+
+ if (manager_tls_options != NULL) {
+ aws_tls_connection_options_clean_up(manager_tls_options);
+ aws_mem_release(endpoint->allocator, manager_tls_options);
+ manager_tls_options = NULL;
+ }
+
+ if (http_connection_manager == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_S3_ENDPOINT, "id=%p: Could not create http connection manager.", (void *)endpoint);
+ return NULL;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_ENDPOINT,
+ "id=%p: Created connection manager %p for endpoint",
+ (void *)endpoint,
+ (void *)http_connection_manager);
+
+ return http_connection_manager;
+}
+
+struct aws_s3_endpoint *aws_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock) {
+ if (endpoint) {
+ s_s3_endpoint_system_vtable->acquire(endpoint, already_holding_lock);
+ }
+ return endpoint;
+}
+
+static void s_s3_endpoint_acquire(struct aws_s3_endpoint *endpoint, bool already_holding_lock) {
+ AWS_PRECONDITION(endpoint);
+
+ if (!already_holding_lock) {
+ aws_s3_client_lock_synced_data(endpoint->client);
+ }
+
+ AWS_ASSERT(endpoint->client_synced_data.ref_count > 0);
+ ++endpoint->client_synced_data.ref_count;
+
+ if (!already_holding_lock) {
+ aws_s3_client_unlock_synced_data(endpoint->client);
+ }
+}
+
+void aws_s3_endpoint_release(struct aws_s3_endpoint *endpoint) {
+ if (endpoint) {
+ s_s3_endpoint_system_vtable->release(endpoint);
+ }
+}
+
+static void s_s3_endpoint_release(struct aws_s3_endpoint *endpoint) {
+ AWS_PRECONDITION(endpoint);
+ AWS_PRECONDITION(endpoint->client);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_client_lock_synced_data(endpoint->client);
+
+ bool should_destroy = (endpoint->client_synced_data.ref_count == 1);
+ if (should_destroy) {
+ aws_hash_table_remove(&endpoint->client->synced_data.endpoints, endpoint->host_name, NULL, NULL);
+ } else {
+ --endpoint->client_synced_data.ref_count;
+ }
+
+ aws_s3_client_unlock_synced_data(endpoint->client);
+ /* END CRITICAL SECTION */
+
+ if (should_destroy) {
+ /* The endpoint may have async cleanup to do (connection manager).
+ * When that's all done we'll invoke a completion callback.
+ * Since it's a crime to hold a lock while invoking a callback,
+ * we make sure that we've released the client's lock before proceeding... */
+ s_s3_endpoint_ref_count_zero(endpoint);
+ }
+}
+
+static void s_s3_endpoint_ref_count_zero(struct aws_s3_endpoint *endpoint) {
+ AWS_PRECONDITION(endpoint);
+ AWS_PRECONDITION(endpoint->http_connection_manager);
+
+ struct aws_http_connection_manager *http_connection_manager = endpoint->http_connection_manager;
+ endpoint->http_connection_manager = NULL;
+
+ /* Cleanup continues once the manager's shutdown callback is invoked */
+ aws_http_connection_manager_release(http_connection_manager);
+}
+
+static void s_s3_endpoint_http_connection_manager_shutdown_callback(void *user_data) {
+ struct aws_s3_endpoint *endpoint = user_data;
+ AWS_ASSERT(endpoint);
+
+ struct aws_s3_client *client = endpoint->client;
+
+ aws_mem_release(endpoint->allocator, endpoint);
+
+ client->vtable->endpoint_shutdown_callback(client);
+}
+
+static void s_s3_endpoint_on_host_resolver_address_resolved(
+ struct aws_host_resolver *resolver,
+ const struct aws_string *host_name,
+ int err_code,
+ const struct aws_array_list *host_addresses,
+ void *user_data) {
+ (void)resolver;
+ (void)host_name;
+ (void)err_code;
+ (void)host_addresses;
+ (void)user_data;
+ /* DO NOT add any logic here, unless you also ensure the endpoint lives long enough */
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c b/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
new file mode 100644
index 0000000000..117bd98379
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_list_objects.c
@@ -0,0 +1,300 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/private/s3_list_objects.h>
+#include <aws/s3/private/s3_paginator.h>
+#include <aws/s3/private/s3_util.h>
+
+#include <aws/common/ref_count.h>
+#include <aws/common/xml_parser.h>
+
+#include <aws/io/uri.h>
+
+#include <aws/http/http.h>
+#include <aws/http/request_response.h>
+
+struct aws_s3_operation_data {
+ struct aws_allocator *allocator;
+
+ struct aws_string *prefix;
+ struct aws_string *delimiter;
+
+ struct aws_ref_count ref_count;
+
+ aws_s3_on_object_fn *on_object;
+
+ void *user_data;
+};
+
+static void s_ref_count_zero_callback(void *arg) {
+ struct aws_s3_operation_data *operation_data = arg;
+
+ if (operation_data->delimiter) {
+ aws_string_destroy(operation_data->delimiter);
+ }
+
+ if (operation_data->prefix) {
+ aws_string_destroy(operation_data->prefix);
+ }
+
+ aws_mem_release(operation_data->allocator, operation_data);
+}
+
+static void s_on_paginator_cleanup(void *user_data) {
+ struct aws_s3_operation_data *operation_data = user_data;
+
+ aws_ref_count_release(&operation_data->ref_count);
+}
+
+struct fs_parser_wrapper {
+ struct aws_allocator *allocator;
+ struct aws_s3_object_info fs_info;
+};
+
+/* invoked when the ListBucketResult/Contents node is iterated. */
+static bool s_on_contents_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+ struct fs_parser_wrapper *fs_wrapper = user_data;
+ struct aws_s3_object_info *fs_info = &fs_wrapper->fs_info;
+
+ /* for each Contents node, get the info from it and send it off as an object we've encountered */
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ETag")) {
+ return aws_xml_node_as_body(parser, node, &fs_info->e_tag) == AWS_OP_SUCCESS;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Key")) {
+ return aws_xml_node_as_body(parser, node, &fs_info->key) == AWS_OP_SUCCESS;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "LastModified")) {
+ struct aws_byte_cursor date_cur;
+ if (aws_xml_node_as_body(parser, node, &date_cur) == AWS_OP_SUCCESS) {
+ aws_date_time_init_from_str_cursor(&fs_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601);
+ return true;
+ }
+
+ return false;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Size")) {
+ struct aws_byte_cursor size_cur;
+
+ if (aws_xml_node_as_body(parser, node, &size_cur) == AWS_OP_SUCCESS) {
+ if (aws_byte_cursor_utf8_parse_u64(size_cur, &fs_info->size)) {
+ return false;
+ }
+ return true;
+ }
+ }
+
+ return true;
+}
+
+/* invoked when the ListBucketResult/CommonPrefixes node is iterated. */
+static bool s_on_common_prefixes_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+ struct fs_parser_wrapper *fs_wrapper = user_data;
+
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Prefix")) {
+ return aws_xml_node_as_body(parser, node, &fs_wrapper->fs_info.prefix) == AWS_OP_SUCCESS;
+ }
+
+ return true;
+}
+
+static bool s_on_list_bucket_result_node_encountered(
+ struct aws_xml_parser *parser,
+ struct aws_xml_node *node,
+ void *user_data) {
+ struct aws_s3_operation_data *operation_data = user_data;
+
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+
+ struct fs_parser_wrapper fs_wrapper;
+ AWS_ZERO_STRUCT(fs_wrapper);
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Contents")) {
+ fs_wrapper.allocator = operation_data->allocator;
+ /* this will traverse the current Contents node, get the metadata necessary to construct
+ * an instance of fs_info so we can invoke the callback on it. This happens once per object. */
+ bool ret_val = aws_xml_node_traverse(parser, node, s_on_contents_node, &fs_wrapper) == AWS_OP_SUCCESS;
+
+ if (operation_data->prefix && !fs_wrapper.fs_info.prefix.len) {
+ fs_wrapper.fs_info.prefix = aws_byte_cursor_from_string(operation_data->prefix);
+ }
+
+ struct aws_byte_buf trimmed_etag;
+ AWS_ZERO_STRUCT(trimmed_etag);
+
+ if (fs_wrapper.fs_info.e_tag.len) {
+ struct aws_string *quoted_etag_str =
+ aws_string_new_from_cursor(fs_wrapper.allocator, &fs_wrapper.fs_info.e_tag);
+ replace_quote_entities(fs_wrapper.allocator, quoted_etag_str, &trimmed_etag);
+ fs_wrapper.fs_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag);
+ aws_string_destroy(quoted_etag_str);
+ }
+
+ if (ret_val && operation_data->on_object) {
+ ret_val |= operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data);
+ }
+
+ if (trimmed_etag.len) {
+ aws_byte_buf_clean_up(&trimmed_etag);
+ }
+
+ return ret_val;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "CommonPrefixes")) {
+ /* this will traverse the current CommonPrefixes node, get the metadata necessary to construct
+ * an instance of fs_info so we can invoke the callback on it. This happens once per prefix. */
+ bool ret_val = aws_xml_node_traverse(parser, node, s_on_common_prefixes_node, &fs_wrapper) == AWS_OP_SUCCESS;
+
+ if (ret_val && operation_data->on_object) {
+ ret_val |= operation_data->on_object(&fs_wrapper.fs_info, operation_data->user_data);
+ }
+
+ return ret_val;
+ }
+
+ return true;
+}
+
+static int s_construct_next_request_http_message(
+ struct aws_byte_cursor *continuation_token,
+ void *user_data,
+ struct aws_http_message **out_message) {
+ AWS_PRECONDITION(user_data);
+
+ struct aws_s3_operation_data *operation_data = user_data;
+
+ struct aws_byte_cursor s_path_start = aws_byte_cursor_from_c_str("/?list-type=2");
+ struct aws_byte_buf request_path;
+ aws_byte_buf_init_copy_from_cursor(&request_path, operation_data->allocator, s_path_start);
+
+ if (operation_data->prefix) {
+ struct aws_byte_cursor s_prefix = aws_byte_cursor_from_c_str("&prefix=");
+ aws_byte_buf_append_dynamic(&request_path, &s_prefix);
+ struct aws_byte_cursor s_prefix_val = aws_byte_cursor_from_string(operation_data->prefix);
+ aws_byte_buf_append_encoding_uri_param(&request_path, &s_prefix_val);
+ }
+
+ if (operation_data->delimiter) {
+ struct aws_byte_cursor s_delimiter = aws_byte_cursor_from_c_str("&delimiter=");
+ aws_byte_buf_append_dynamic(&request_path, &s_delimiter);
+ struct aws_byte_cursor s_delimiter_val = aws_byte_cursor_from_string(operation_data->delimiter);
+ aws_byte_buf_append_dynamic(&request_path, &s_delimiter_val);
+ }
+
+ if (continuation_token) {
+ struct aws_byte_cursor s_continuation = aws_byte_cursor_from_c_str("&continuation-token=");
+ aws_byte_buf_append_dynamic(&request_path, &s_continuation);
+ aws_byte_buf_append_encoding_uri_param(&request_path, continuation_token);
+ }
+
+ struct aws_http_message *list_objects_v2_request = aws_http_message_new_request(operation_data->allocator);
+ aws_http_message_set_request_path(list_objects_v2_request, aws_byte_cursor_from_buf(&request_path));
+
+ aws_byte_buf_clean_up(&request_path);
+
+ struct aws_http_header accept_header = {
+ .name = aws_byte_cursor_from_c_str("accept"),
+ .value = aws_byte_cursor_from_c_str("application/xml"),
+ };
+
+ aws_http_message_add_header(list_objects_v2_request, accept_header);
+
+ aws_http_message_set_request_method(list_objects_v2_request, aws_http_method_get);
+
+ *out_message = list_objects_v2_request;
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_s3_paginator *aws_s3_initiate_list_objects(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_objects_params *params) {
+ AWS_FATAL_PRECONDITION(params);
+ AWS_FATAL_PRECONDITION(params->client);
+ AWS_FATAL_PRECONDITION(params->bucket_name.len);
+ AWS_FATAL_PRECONDITION(params->endpoint.len);
+
+ struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data));
+ operation_data->allocator = allocator;
+ operation_data->delimiter =
+ params->delimiter.len > 0 ? aws_string_new_from_cursor(allocator, &params->delimiter) : NULL;
+ operation_data->prefix = params->prefix.len > 0 ? aws_string_new_from_cursor(allocator, &params->prefix) : NULL;
+ operation_data->on_object = params->on_object;
+ operation_data->user_data = params->user_data;
+
+ aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback);
+
+ struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListBucketResult");
+ struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextContinuationToken");
+ struct aws_s3_paginated_operation_params operation_params = {
+ .next_message = s_construct_next_request_http_message,
+ .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered,
+ .on_paginated_operation_cleanup = s_on_paginator_cleanup,
+ .result_xml_node_name = &xml_result_node_name,
+ .continuation_token_node_name = &continuation_node_name,
+ .user_data = operation_data,
+ };
+
+ struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params);
+
+ struct aws_s3_paginator_params paginator_params = {
+ .client = params->client,
+ .bucket_name = params->bucket_name,
+ .endpoint = params->endpoint,
+ .on_page_finished_fn = params->on_list_finished,
+ .operation = operation,
+ .user_data = params->user_data,
+ };
+
+ struct aws_s3_paginator *paginator = aws_s3_initiate_paginator(allocator, &paginator_params);
+
+ // transfer control to paginator
+ aws_s3_paginated_operation_release(operation);
+
+ return paginator;
+}
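+
+/* A minimal usage sketch of the API above, assuming the returned paginator is driven with
+ * aws_s3_paginator_continue() and released with aws_s3_paginator_release() from
+ * s3_paginator.h (verify the exact signatures there before relying on this):
+ *
+ * static bool s_on_object(const struct aws_s3_object_info *info, void *user_data) {
+ * (void)user_data;
+ * printf("key: " PRInSTR "\n", AWS_BYTE_CURSOR_PRI(info->key));
+ * return true; // keep paginating
+ * }
+ *
+ * struct aws_s3_list_objects_params params = {
+ * .client = client,
+ * .bucket_name = aws_byte_cursor_from_c_str("my-bucket"),
+ * .endpoint = aws_byte_cursor_from_c_str("s3.us-east-1.amazonaws.com"),
+ * .prefix = aws_byte_cursor_from_c_str("logs/"),
+ * .on_object = s_on_object,
+ * };
+ * struct aws_s3_paginator *paginator = aws_s3_initiate_list_objects(allocator, &params);
+ * aws_s3_paginator_continue(paginator, &signing_config); // issues the first ListObjectsV2 page
+ * ...
+ * aws_s3_paginator_release(paginator);
+ */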
+
+struct aws_s3_paginated_operation *aws_s3_list_objects_operation_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_objects_params *params) {
+ AWS_FATAL_PRECONDITION(params);
+ AWS_FATAL_PRECONDITION(params->client);
+ AWS_FATAL_PRECONDITION(params->bucket_name.len);
+ AWS_FATAL_PRECONDITION(params->endpoint.len);
+
+ struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data));
+ operation_data->allocator = allocator;
+ operation_data->delimiter =
+ params->delimiter.len > 0 ? aws_string_new_from_cursor(allocator, &params->delimiter) : NULL;
+ operation_data->prefix = params->prefix.len > 0 ? aws_string_new_from_cursor(allocator, &params->prefix) : NULL;
+ operation_data->on_object = params->on_object;
+ operation_data->user_data = params->user_data;
+
+ aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback);
+
+ struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListBucketResult");
+ struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextContinuationToken");
+ struct aws_s3_paginated_operation_params operation_params = {
+ .next_message = s_construct_next_request_http_message,
+ .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered,
+ .on_paginated_operation_cleanup = s_on_paginator_cleanup,
+ .result_xml_node_name = &xml_result_node_name,
+ .continuation_token_node_name = &continuation_node_name,
+ .user_data = operation_data,
+ };
+
+ struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params);
+
+ return operation;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c b/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
new file mode 100644
index 0000000000..8856e6aa18
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_list_parts.c
@@ -0,0 +1,294 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/private/s3_list_parts.h>
+#include <aws/s3/private/s3_paginator.h>
+#include <aws/s3/private/s3_util.h>
+
+#include <aws/common/ref_count.h>
+#include <aws/common/xml_parser.h>
+
+#include <aws/io/uri.h>
+
+#include <aws/http/http.h>
+#include <aws/http/request_response.h>
+
+struct aws_s3_operation_data {
+ struct aws_allocator *allocator;
+
+ struct aws_string *key;
+ struct aws_string *upload_id;
+
+ struct aws_ref_count ref_count;
+
+ aws_s3_on_part_fn *on_part;
+
+ void *user_data;
+};
+
+static void s_ref_count_zero_callback(void *arg) {
+ struct aws_s3_operation_data *operation_data = arg;
+
+ if (operation_data->key) {
+ aws_string_destroy(operation_data->key);
+ }
+
+ if (operation_data->upload_id) {
+ aws_string_destroy(operation_data->upload_id);
+ }
+
+ aws_mem_release(operation_data->allocator, operation_data);
+}
+
+static void s_on_paginator_cleanup(void *user_data) {
+ struct aws_s3_operation_data *operation_data = user_data;
+
+ aws_ref_count_release(&operation_data->ref_count);
+}
+
+struct result_wrapper {
+ struct aws_allocator *allocator;
+ struct aws_s3_part_info part_info;
+};
+
+/* invoked when the ListPartsResult/Part node is iterated. */
+static bool s_on_parts_node(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+ struct result_wrapper *result_wrapper = user_data;
+ struct aws_s3_part_info *part_info = &result_wrapper->part_info;
+
+ /* for each Part node, get the info from it and send it off as a part we've encountered */
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ETag")) {
+ return aws_xml_node_as_body(parser, node, &part_info->e_tag) == AWS_OP_SUCCESS;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "LastModified")) {
+ struct aws_byte_cursor date_cur;
+ if (aws_xml_node_as_body(parser, node, &date_cur) == AWS_OP_SUCCESS) {
+ aws_date_time_init_from_str_cursor(&part_info->last_modified, &date_cur, AWS_DATE_FORMAT_ISO_8601);
+ return true;
+ }
+
+ return false;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Size")) {
+ struct aws_byte_cursor size_cur;
+
+ if (aws_xml_node_as_body(parser, node, &size_cur) == AWS_OP_SUCCESS) {
+ if (aws_byte_cursor_utf8_parse_u64(size_cur, &part_info->size)) {
+ return false;
+ }
+ return true;
+ }
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "PartNumber")) {
+ struct aws_byte_cursor part_number_cur;
+
+ if (aws_xml_node_as_body(parser, node, &part_number_cur) == AWS_OP_SUCCESS) {
+ uint64_t part_number = 0;
+ if (aws_byte_cursor_utf8_parse_u64(part_number_cur, &part_number)) {
+ return false;
+ }
+ if (part_number > UINT32_MAX) {
+ aws_raise_error(AWS_ERROR_OVERFLOW_DETECTED);
+ return false;
+ }
+ part_info->part_number = (uint32_t)part_number;
+ return true;
+ }
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumCRC32")) {
+ return aws_xml_node_as_body(parser, node, &part_info->checksumCRC32) == AWS_OP_SUCCESS;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumCRC32C")) {
+ return aws_xml_node_as_body(parser, node, &part_info->checksumCRC32C) == AWS_OP_SUCCESS;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumSHA1")) {
+ return aws_xml_node_as_body(parser, node, &part_info->checksumSHA1) == AWS_OP_SUCCESS;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "ChecksumSHA256")) {
+ return aws_xml_node_as_body(parser, node, &part_info->checksumSHA256) == AWS_OP_SUCCESS;
+ }
+
+ return true;
+}
+
+static bool s_on_list_bucket_result_node_encountered(
+ struct aws_xml_parser *parser,
+ struct aws_xml_node *node,
+ void *user_data) {
+ struct aws_s3_operation_data *operation_data = user_data;
+
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+
+ struct result_wrapper result_wrapper;
+ AWS_ZERO_STRUCT(result_wrapper);
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "Part")) {
+ result_wrapper.allocator = operation_data->allocator;
+ /* this will traverse the current Part node, get the metadata necessary to construct
+ * an instance of part_info so we can invoke the callback on it. This happens once per part. */
+ bool ret_val = aws_xml_node_traverse(parser, node, s_on_parts_node, &result_wrapper) == AWS_OP_SUCCESS;
+
+ struct aws_byte_buf trimmed_etag;
+ AWS_ZERO_STRUCT(trimmed_etag);
+
+ if (result_wrapper.part_info.e_tag.len) {
+ struct aws_string *quoted_etag_str =
+ aws_string_new_from_cursor(result_wrapper.allocator, &result_wrapper.part_info.e_tag);
+ replace_quote_entities(result_wrapper.allocator, quoted_etag_str, &trimmed_etag);
+ result_wrapper.part_info.e_tag = aws_byte_cursor_from_buf(&trimmed_etag);
+ aws_string_destroy(quoted_etag_str);
+ }
+
+ if (ret_val && operation_data->on_part) {
+ ret_val |= operation_data->on_part(&result_wrapper.part_info, operation_data->user_data);
+ }
+
+ if (trimmed_etag.len) {
+ aws_byte_buf_clean_up(&trimmed_etag);
+ }
+
+ return ret_val;
+ }
+
+ return true;
+}
+
+static int s_construct_next_request_http_message(
+ struct aws_byte_cursor *continuation_token,
+ void *user_data,
+ struct aws_http_message **out_message) {
+
+ AWS_PRECONDITION(user_data);
+
+ struct aws_s3_operation_data *operation_data = user_data;
+
+ struct aws_byte_buf request_path;
+ struct aws_byte_cursor key_val = aws_byte_cursor_from_string(operation_data->key);
+ aws_byte_buf_init_copy_from_cursor(&request_path, operation_data->allocator, key_val);
+
+ if (operation_data->upload_id) {
+ struct aws_byte_cursor upload_id = aws_byte_cursor_from_c_str("?uploadId=");
+ aws_byte_buf_append_dynamic(&request_path, &upload_id);
+ struct aws_byte_cursor upload_id_val = aws_byte_cursor_from_string(operation_data->upload_id);
+ aws_byte_buf_append_dynamic(&request_path, &upload_id_val);
+ }
+
+ if (continuation_token) {
+ struct aws_byte_cursor continuation = aws_byte_cursor_from_c_str("&part-number-marker=");
+ aws_byte_buf_append_dynamic(&request_path, &continuation);
+ aws_byte_buf_append_encoding_uri_param(&request_path, continuation_token);
+ }
+
+ struct aws_http_message *list_parts_request = aws_http_message_new_request(operation_data->allocator);
+ aws_http_message_set_request_path(list_parts_request, aws_byte_cursor_from_buf(&request_path));
+
+ aws_byte_buf_clean_up(&request_path);
+
+ struct aws_http_header accept_header = {
+ .name = aws_byte_cursor_from_c_str("accept"),
+ .value = aws_byte_cursor_from_c_str("application/xml"),
+ };
+
+ aws_http_message_add_header(list_parts_request, accept_header);
+
+ aws_http_message_set_request_method(list_parts_request, aws_http_method_get);
+
+ *out_message = list_parts_request;
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_s3_paginator *aws_s3_initiate_list_parts(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_parts_params *params) {
+ AWS_FATAL_PRECONDITION(params);
+ AWS_FATAL_PRECONDITION(params->client);
+ AWS_FATAL_PRECONDITION(params->bucket_name.len);
+ AWS_FATAL_PRECONDITION(params->key.len);
+ AWS_FATAL_PRECONDITION(params->upload_id.len);
+ AWS_FATAL_PRECONDITION(params->endpoint.len);
+
+ struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data));
+ operation_data->allocator = allocator;
+ operation_data->key = aws_string_new_from_cursor(allocator, &params->key);
+ operation_data->upload_id = aws_string_new_from_cursor(allocator, &params->upload_id);
+ operation_data->on_part = params->on_part;
+ operation_data->user_data = params->user_data;
+
+ aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback);
+
+ struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListPartsResult");
+ const struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextPartNumberMarker");
+
+ struct aws_s3_paginated_operation_params operation_params = {
+ .next_message = s_construct_next_request_http_message,
+ .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered,
+ .on_paginated_operation_cleanup = s_on_paginator_cleanup,
+ .result_xml_node_name = &xml_result_node_name,
+ .continuation_token_node_name = &continuation_node_name,
+ .user_data = operation_data,
+ };
+
+ struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params);
+
+ struct aws_s3_paginator_params paginator_params = {
+ .client = params->client,
+ .bucket_name = params->bucket_name,
+ .endpoint = params->endpoint,
+ .operation = operation,
+ .on_page_finished_fn = params->on_list_finished,
+ .user_data = params->user_data,
+ };
+
+ struct aws_s3_paginator *paginator = aws_s3_initiate_paginator(allocator, &paginator_params);
+
+ // transfer control to paginator
+ aws_s3_paginated_operation_release(operation);
+
+ return paginator;
+}
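+
+/* A minimal usage sketch of the API above (same assumptions as for the list-objects paginator:
+ * aws_s3_paginator_continue()/aws_s3_paginator_release() live in s3_paginator.h and should be
+ * checked there):
+ *
+ * struct aws_s3_list_parts_params params = {
+ * .client = client,
+ * .bucket_name = aws_byte_cursor_from_c_str("my-bucket"),
+ * .key = aws_byte_cursor_from_c_str("big-object"),
+ * .upload_id = aws_byte_cursor_from_c_str("<id from CreateMultipartUpload>"),
+ * .endpoint = aws_byte_cursor_from_c_str("s3.us-east-1.amazonaws.com"),
+ * .on_part = s_on_part, // bool s_on_part(const struct aws_s3_part_info *, void *)
+ * };
+ * struct aws_s3_paginator *paginator = aws_s3_initiate_list_parts(allocator, &params);
+ * aws_s3_paginator_continue(paginator, &signing_config);
+ * ...
+ * aws_s3_paginator_release(paginator);
+ */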
+
+struct aws_s3_paginated_operation *aws_s3_list_parts_operation_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_list_parts_params *params) {
+ AWS_FATAL_PRECONDITION(params);
+ AWS_FATAL_PRECONDITION(params->key.len);
+ AWS_FATAL_PRECONDITION(params->upload_id.len);
+
+ struct aws_s3_operation_data *operation_data = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_operation_data));
+ operation_data->allocator = allocator;
+ operation_data->key = aws_string_new_from_cursor(allocator, &params->key);
+ operation_data->upload_id = aws_string_new_from_cursor(allocator, &params->upload_id);
+ operation_data->on_part = params->on_part;
+ operation_data->user_data = params->user_data;
+
+ aws_ref_count_init(&operation_data->ref_count, operation_data, s_ref_count_zero_callback);
+
+ struct aws_byte_cursor xml_result_node_name = aws_byte_cursor_from_c_str("ListPartsResult");
+ const struct aws_byte_cursor continuation_node_name = aws_byte_cursor_from_c_str("NextPartNumberMarker");
+
+ struct aws_s3_paginated_operation_params operation_params = {
+ .next_message = s_construct_next_request_http_message,
+ .on_result_node_encountered_fn = s_on_list_bucket_result_node_encountered,
+ .on_paginated_operation_cleanup = s_on_paginator_cleanup,
+ .result_xml_node_name = &xml_result_node_name,
+ .continuation_token_node_name = &continuation_node_name,
+ .user_data = operation_data,
+ };
+
+ struct aws_s3_paginated_operation *operation = aws_s3_paginated_operation_new(allocator, &operation_params);
+
+ return operation;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c b/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
new file mode 100644
index 0000000000..ff521dc9fb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_meta_request.c
@@ -0,0 +1,1607 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_auto_ranged_get.h"
+#include "aws/s3/private/s3_checksums.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/auth/signable.h>
+#include <aws/auth/signing.h>
+#include <aws/auth/signing_config.h>
+#include <aws/auth/signing_result.h>
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/common/system_info.h>
+#include <aws/io/event_loop.h>
+#include <aws/io/retry_strategy.h>
+#include <aws/io/stream.h>
+#include <inttypes.h>
+
+static const size_t s_dynamic_body_initial_buf_size = KB_TO_BYTES(1);
+static const size_t s_default_body_streaming_priority_queue_size = 16;
+
+static int s_s3_request_priority_queue_pred(const void *a, const void *b);
+static void s_s3_meta_request_destroy(void *user_data);
+
+static void s_s3_meta_request_init_signing_date_time(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_date_time *date_time);
+
+static void s_s3_meta_request_sign_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_signing_complete_fn *on_signing_complete,
+ void *user_data);
+
+static void s_s3_meta_request_request_on_signed(
+ struct aws_signing_result *signing_result,
+ int error_code,
+ void *user_data);
+
+static int s_s3_meta_request_incoming_body(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data);
+
+static int s_s3_meta_request_incoming_headers(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *headers,
+ size_t headers_count,
+ void *user_data);
+
+static void s_s3_meta_request_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data);
+
+static void s_s3_meta_request_send_request_finish(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code);
+
+void aws_s3_meta_request_lock_synced_data(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ aws_mutex_lock(&meta_request->synced_data.lock);
+}
+
+void aws_s3_meta_request_unlock_synced_data(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ aws_mutex_unlock(&meta_request->synced_data.lock);
+}
+
+static int s_meta_request_get_response_headers_checksum_callback(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_http_headers *headers,
+ int response_status,
+ void *user_data) {
+ for (int i = AWS_SCA_INIT; i <= AWS_SCA_END; i++) {
+ if (!aws_s3_meta_request_checksum_config_has_algorithm(meta_request, i)) {
+ /* If user doesn't select this algorithm, skip */
+ continue;
+ }
+ const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(i);
+ if (aws_http_headers_has(headers, *algorithm_header_name)) {
+ struct aws_byte_cursor header_sum;
+ aws_http_headers_get(headers, *algorithm_header_name, &header_sum);
+ size_t encoded_len = 0;
+ aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(i), &encoded_len);
+ if (header_sum.len == encoded_len - 1) {
+ /* encoded_len includes the null terminator, so encoded_len - 1 is the expected header length. */
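+ /* e.g. CRC32: a 4-byte digest encodes to 8 base64 characters, so encoded_len is 9 and a
+ * matching x-amz-checksum-crc32 header value is expected to be 8 bytes long. */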
+ aws_byte_buf_init_copy_from_cursor(
+ &meta_request->meta_request_level_response_header_checksum, aws_default_allocator(), header_sum);
+ meta_request->meta_request_level_running_response_sum = aws_checksum_new(aws_default_allocator(), i);
+ }
+ break;
+ }
+ }
+ if (meta_request->headers_user_callback_after_checksum) {
+ return meta_request->headers_user_callback_after_checksum(meta_request, headers, response_status, user_data);
+ } else {
+ return AWS_OP_SUCCESS;
+ }
+}
+
+/* Warning: this might get screwed up with retries/restarts */
+static int s_meta_request_get_response_body_checksum_callback(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_byte_cursor *body,
+ uint64_t range_start,
+ void *user_data) {
+ if (meta_request->meta_request_level_running_response_sum) {
+ aws_checksum_update(meta_request->meta_request_level_running_response_sum, body);
+ }
+
+ if (meta_request->body_user_callback_after_checksum) {
+ return meta_request->body_user_callback_after_checksum(meta_request, body, range_start, user_data);
+ } else {
+ return AWS_OP_SUCCESS;
+ }
+}
+
+static void s_meta_request_get_response_finish_checksum_callback(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_s3_meta_request_result *meta_request_result,
+ void *user_data) {
+ struct aws_byte_buf response_body_sum;
+ struct aws_byte_buf encoded_response_body_sum;
+ AWS_ZERO_STRUCT(response_body_sum);
+ AWS_ZERO_STRUCT(encoded_response_body_sum);
+
+ struct aws_s3_meta_request_result *mut_meta_request_result =
+ (struct aws_s3_meta_request_result *)meta_request_result;
+ if (meta_request_result->error_code == AWS_OP_SUCCESS && meta_request->meta_request_level_running_response_sum) {
+ mut_meta_request_result->did_validate = true;
+ mut_meta_request_result->validation_algorithm =
+ meta_request->meta_request_level_running_response_sum->algorithm;
+ size_t encoded_checksum_len = 0;
+ /* TODO: decide which error to raise if these operations fail. */
+ aws_base64_compute_encoded_len(
+ meta_request->meta_request_level_running_response_sum->digest_size, &encoded_checksum_len);
+ aws_byte_buf_init(&encoded_response_body_sum, aws_default_allocator(), encoded_checksum_len);
+ aws_byte_buf_init(
+ &response_body_sum,
+ aws_default_allocator(),
+ meta_request->meta_request_level_running_response_sum->digest_size);
+ aws_checksum_finalize(meta_request->meta_request_level_running_response_sum, &response_body_sum, 0);
+ struct aws_byte_cursor response_body_sum_cursor = aws_byte_cursor_from_buf(&response_body_sum);
+ aws_base64_encode(&response_body_sum_cursor, &encoded_response_body_sum);
+ if (!aws_byte_buf_eq(&encoded_response_body_sum, &meta_request->meta_request_level_response_header_checksum)) {
+ mut_meta_request_result->error_code = AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH;
+ }
+ }
+ if (meta_request->finish_user_callback_after_checksum) {
+ meta_request->finish_user_callback_after_checksum(meta_request, meta_request_result, user_data);
+ }
+ aws_byte_buf_clean_up(&response_body_sum);
+ aws_byte_buf_clean_up(&encoded_response_body_sum);
+ aws_checksum_destroy(meta_request->meta_request_level_running_response_sum);
+ aws_byte_buf_clean_up(&meta_request->meta_request_level_response_header_checksum);
+}
+
+int aws_s3_meta_request_init_base(
+ struct aws_allocator *allocator,
+ struct aws_s3_client *client,
+ size_t part_size,
+ bool should_compute_content_md5,
+ const struct aws_s3_meta_request_options *options,
+ void *impl,
+ struct aws_s3_meta_request_vtable *vtable,
+ struct aws_s3_meta_request *meta_request) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(options);
+ AWS_PRECONDITION(options->message);
+ AWS_PRECONDITION(impl);
+ AWS_PRECONDITION(meta_request);
+
+ AWS_ZERO_STRUCT(*meta_request);
+
+ AWS_ASSERT(vtable->update);
+ AWS_ASSERT(vtable->prepare_request);
+ AWS_ASSERT(vtable->destroy);
+ AWS_ASSERT(vtable->sign_request);
+ AWS_ASSERT(vtable->init_signing_date_time);
+ AWS_ASSERT(vtable->finished_request);
+ AWS_ASSERT(vtable->send_request_finish);
+
+ meta_request->allocator = allocator;
+ meta_request->type = options->type;
+ /* Set up reference count. */
+ aws_ref_count_init(&meta_request->ref_count, meta_request, s_s3_meta_request_destroy);
+
+ if (part_size == SIZE_MAX) {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ goto error;
+ }
+
+ if (aws_mutex_init(&meta_request->synced_data.lock)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "id=%p Could not initialize mutex for meta request", (void *)meta_request);
+ goto error;
+ }
+
+ if (aws_priority_queue_init_dynamic(
+ &meta_request->synced_data.pending_body_streaming_requests,
+ meta_request->allocator,
+ s_default_body_streaming_priority_queue_size,
+ sizeof(struct aws_s3_request *),
+ s_s3_request_priority_queue_pred)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "id=%p Could not initialize priority queue for meta request", (void *)meta_request);
+ /* Priority queue */
+ goto error;
+ }
+
+ *((size_t *)&meta_request->part_size) = part_size;
+ *((bool *)&meta_request->should_compute_content_md5) = should_compute_content_md5;
+ checksum_config_init(&meta_request->checksum_config, options->checksum_config);
+ if (options->signing_config) {
+ meta_request->cached_signing_config = aws_cached_signing_config_new(allocator, options->signing_config);
+ }
+
+ /* Set initial_request_message */
+ if (options->send_filepath.len > 0) {
+ /* Create copy of original message, but with body-stream that reads directly from file */
+ meta_request->initial_request_message = aws_s3_message_util_copy_http_message_filepath_body_all_headers(
+ allocator, options->message, options->send_filepath);
+ if (meta_request->initial_request_message == NULL) {
+ goto error;
+ }
+ } else {
+ /* Keep a reference to the original message structure passed in. */
+ meta_request->initial_request_message = aws_http_message_acquire(options->message);
+ }
+
+ /* Client is currently optional to allow spinning up a meta_request without a client in a test. */
+ if (client != NULL) {
+ aws_s3_client_acquire(client);
+ meta_request->client = client;
+ meta_request->io_event_loop = aws_event_loop_group_get_next_loop(client->body_streaming_elg);
+ meta_request->synced_data.read_window_running_total = client->initial_read_window;
+ }
+
+ meta_request->synced_data.next_streaming_part = 1;
+
+ meta_request->meta_request_level_running_response_sum = NULL;
+ meta_request->user_data = options->user_data;
+ meta_request->shutdown_callback = options->shutdown_callback;
+ meta_request->progress_callback = options->progress_callback;
+
+ if (meta_request->checksum_config.validate_response_checksum) {
+ /* TODO: the validation for auto-ranged get should happen for each response received. */
+ meta_request->headers_user_callback_after_checksum = options->headers_callback;
+ meta_request->body_user_callback_after_checksum = options->body_callback;
+ meta_request->finish_user_callback_after_checksum = options->finish_callback;
+
+ meta_request->headers_callback = s_meta_request_get_response_headers_checksum_callback;
+ meta_request->body_callback = s_meta_request_get_response_body_checksum_callback;
+ meta_request->finish_callback = s_meta_request_get_response_finish_checksum_callback;
+ } else {
+ meta_request->headers_callback = options->headers_callback;
+ meta_request->body_callback = options->body_callback;
+ meta_request->finish_callback = options->finish_callback;
+ }
+
+ /* Nothing can fail after this point. Leave the impl unaffected by failures while initializing the base. */
+ meta_request->impl = impl;
+ meta_request->vtable = vtable;
+
+ return AWS_OP_SUCCESS;
+error:
+ s_s3_meta_request_destroy((void *)meta_request);
+ return AWS_OP_ERR;
+}
+
+void aws_s3_meta_request_increment_read_window(struct aws_s3_meta_request *meta_request, uint64_t bytes) {
+ AWS_PRECONDITION(meta_request);
+
+ if (bytes == 0) {
+ return;
+ }
+
+ if (!meta_request->client->enable_read_backpressure) {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Ignoring call to increment read window. This client has not enabled read backpressure.",
+ (void *)meta_request);
+ return;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p: Incrementing read window by %" PRIu64, (void *)meta_request, bytes);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ /* Response will never approach UINT64_MAX, so do a saturating sum instead of worrying about overflow */
+ meta_request->synced_data.read_window_running_total =
+ aws_add_u64_saturating(bytes, meta_request->synced_data.read_window_running_total);
+
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ /* END CRITICAL SECTION */
+
+ /* Schedule the work task, to continue processing the meta-request */
+ aws_s3_client_schedule_process_work(meta_request->client);
+}
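+
+/* A minimal backpressure sketch, using only names that appear in this file: with
+ * enable_read_backpressure set on the client and a finite initial_read_window, response-body
+ * delivery stalls once the window is used up, and the application grows the window again,
+ * typically from its body callback after the bytes have been consumed:
+ *
+ * static int s_on_body(struct aws_s3_meta_request *mr, const struct aws_byte_cursor *body,
+ * uint64_t range_start, void *user_data) {
+ * (void)range_start;
+ * (void)user_data;
+ * // ... persist body->ptr / body->len somewhere ...
+ * aws_s3_meta_request_increment_read_window(mr, body->len);
+ * return AWS_OP_SUCCESS;
+ * }
+ */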
+
+void aws_s3_meta_request_cancel(struct aws_s3_meta_request *meta_request) {
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ aws_s3_meta_request_set_fail_synced(meta_request, NULL, AWS_ERROR_S3_CANCELED);
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ /* END CRITICAL SECTION */
+}
+
+int aws_s3_meta_request_pause(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_resume_token **out_resume_token) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+
+ *out_resume_token = NULL;
+
+ if (!meta_request->vtable->pause) {
+ return aws_raise_error(AWS_ERROR_UNSUPPORTED_OPERATION);
+ }
+
+ return meta_request->vtable->pause(meta_request, out_resume_token);
+}
+
+void aws_s3_meta_request_set_fail_synced(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *failed_request,
+ int error_code) {
+ AWS_PRECONDITION(meta_request);
+ ASSERT_SYNCED_DATA_LOCK_HELD(meta_request);
+
+ if (meta_request->synced_data.finish_result_set) {
+ return;
+ }
+
+ meta_request->synced_data.finish_result_set = true;
+
+ if ((error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS || error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR ||
+ error_code == AWS_ERROR_S3_OBJECT_MODIFIED) &&
+ failed_request != NULL) {
+ aws_s3_meta_request_result_setup(
+ meta_request,
+ &meta_request->synced_data.finish_result,
+ failed_request,
+ failed_request->send_data.response_status,
+ error_code);
+ } else {
+ AWS_ASSERT(error_code != AWS_ERROR_S3_INVALID_RESPONSE_STATUS);
+
+ aws_s3_meta_request_result_setup(meta_request, &meta_request->synced_data.finish_result, NULL, 0, error_code);
+ }
+}
+
+void aws_s3_meta_request_set_success_synced(struct aws_s3_meta_request *meta_request, int response_status) {
+ AWS_PRECONDITION(meta_request);
+ ASSERT_SYNCED_DATA_LOCK_HELD(meta_request);
+
+ if (meta_request->synced_data.finish_result_set) {
+ return;
+ }
+
+ meta_request->synced_data.finish_result_set = true;
+
+ aws_s3_meta_request_result_setup(
+ meta_request, &meta_request->synced_data.finish_result, NULL, response_status, AWS_ERROR_SUCCESS);
+}
+
+bool aws_s3_meta_request_has_finish_result(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ bool is_finishing = aws_s3_meta_request_has_finish_result_synced(meta_request);
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ /* END CRITICAL SECTION */
+
+ return is_finishing;
+}
+
+bool aws_s3_meta_request_has_finish_result_synced(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ ASSERT_SYNCED_DATA_LOCK_HELD(meta_request);
+
+ if (!meta_request->synced_data.finish_result_set) {
+ return false;
+ }
+
+ return true;
+}
+
+struct aws_s3_meta_request *aws_s3_meta_request_acquire(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ aws_ref_count_acquire(&meta_request->ref_count);
+ return meta_request;
+}
+
+struct aws_s3_meta_request *aws_s3_meta_request_release(struct aws_s3_meta_request *meta_request) {
+ if (meta_request != NULL) {
+ aws_ref_count_release(&meta_request->ref_count);
+ }
+
+ return NULL;
+}
+
+static void s_s3_meta_request_destroy(void *user_data) {
+ struct aws_s3_meta_request *meta_request = user_data;
+ AWS_PRECONDITION(meta_request);
+
+ AWS_LOGF_DEBUG(AWS_LS_S3_META_REQUEST, "id=%p Cleaning up meta request", (void *)meta_request);
+
+ /* Clean up our initial http message */
+ if (meta_request->initial_request_message != NULL) {
+ aws_http_message_release(meta_request->initial_request_message);
+ meta_request->initial_request_message = NULL;
+ }
+
+ void *meta_request_user_data = meta_request->user_data;
+ aws_s3_meta_request_shutdown_fn *shutdown_callback = meta_request->shutdown_callback;
+
+ aws_cached_signing_config_destroy(meta_request->cached_signing_config);
+ aws_mutex_clean_up(&meta_request->synced_data.lock);
+ /* The endpoint should already have been released and set to NULL by the meta request finish call,
+ * but call release() again just in case we're tearing down a half-initialized meta request. */
+ aws_s3_endpoint_release(meta_request->endpoint);
+ meta_request->client = aws_s3_client_release(meta_request->client);
+
+ aws_priority_queue_clean_up(&meta_request->synced_data.pending_body_streaming_requests);
+ aws_s3_meta_request_result_clean_up(meta_request, &meta_request->synced_data.finish_result);
+
+ if (meta_request->vtable != NULL) {
+ AWS_LOGF_TRACE(
+ AWS_LS_S3_META_REQUEST, "id=%p Calling virtual meta request destroy function.", (void *)meta_request);
+ meta_request->vtable->destroy(meta_request);
+ }
+ meta_request = NULL;
+
+ if (shutdown_callback != NULL) {
+ AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Calling meta request shutdown callback.", (void *)meta_request);
+ shutdown_callback(meta_request_user_data);
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p Meta request clean up finished.", (void *)meta_request);
+}
+
+static int s_s3_request_priority_queue_pred(const void *a, const void *b) {
+ const struct aws_s3_request **request_a = (const struct aws_s3_request **)a;
+ AWS_PRECONDITION(request_a);
+ AWS_PRECONDITION(*request_a);
+
+ const struct aws_s3_request **request_b = (const struct aws_s3_request **)b;
+ AWS_PRECONDITION(request_b);
+ AWS_PRECONDITION(*request_b);
+
+ return (*request_a)->part_number > (*request_b)->part_number;
+}
+
+bool aws_s3_meta_request_update(
+ struct aws_s3_meta_request *meta_request,
+ uint32_t flags,
+ struct aws_s3_request **out_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+ AWS_PRECONDITION(meta_request->vtable->update);
+
+ return meta_request->vtable->update(meta_request, flags, out_request);
+}
+
+bool aws_s3_meta_request_is_active(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ bool active = meta_request->synced_data.state == AWS_S3_META_REQUEST_STATE_ACTIVE;
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ /* END CRITICAL SECTION */
+
+ return active;
+}
+
+bool aws_s3_meta_request_is_finished(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ bool is_finished = meta_request->synced_data.state == AWS_S3_META_REQUEST_STATE_FINISHED;
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ /* END CRITICAL SECTION */
+
+ return is_finished;
+}
+
+static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void *arg, enum aws_task_status task_status);
+
+static void s_s3_prepare_request_payload_callback_and_destroy(
+ struct aws_s3_prepare_request_payload *payload,
+ int error_code) {
+ AWS_PRECONDITION(payload);
+ AWS_PRECONDITION(payload->request);
+
+ struct aws_s3_meta_request *meta_request = payload->request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ AWS_PRECONDITION(meta_request->client);
+ struct aws_s3_client *client = aws_s3_client_acquire(meta_request->client);
+
+ struct aws_allocator *allocator = client->allocator;
+ AWS_PRECONDITION(allocator);
+
+ if (payload->callback != NULL) {
+ payload->callback(meta_request, payload->request, error_code, payload->user_data);
+ }
+
+ aws_mem_release(allocator, payload);
+ aws_s3_client_release(client);
+}
+
+static void s_s3_meta_request_schedule_prepare_request_default(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_s3_meta_request_prepare_request_callback_fn *callback,
+ void *user_data);
+
+void aws_s3_meta_request_prepare_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_s3_meta_request_prepare_request_callback_fn *callback,
+ void *user_data) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+
+ if (meta_request->vtable->schedule_prepare_request) {
+ meta_request->vtable->schedule_prepare_request(meta_request, request, callback, user_data);
+ } else {
+ s_s3_meta_request_schedule_prepare_request_default(meta_request, request, callback, user_data);
+ }
+}
+
+static void s_s3_meta_request_schedule_prepare_request_default(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_s3_meta_request_prepare_request_callback_fn *callback,
+ void *user_data) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_client *client = meta_request->client;
+ AWS_PRECONDITION(client);
+
+ struct aws_allocator *allocator = client->allocator;
+ AWS_PRECONDITION(allocator);
+
+ struct aws_s3_prepare_request_payload *payload =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_prepare_request_payload));
+
+ payload->request = request;
+ payload->callback = callback;
+ payload->user_data = user_data;
+
+ aws_task_init(
+ &payload->task, s_s3_meta_request_prepare_request_task, payload, "s3_meta_request_prepare_request_task");
+ aws_event_loop_schedule_task_now(meta_request->io_event_loop, &payload->task);
+}
+
+static void s_s3_meta_request_prepare_request_task(struct aws_task *task, void *arg, enum aws_task_status task_status) {
+ (void)task;
+ (void)task_status;
+
+ struct aws_s3_prepare_request_payload *payload = arg;
+ AWS_PRECONDITION(payload);
+
+ struct aws_s3_request *request = payload->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_meta_request_vtable *vtable = meta_request->vtable;
+ AWS_PRECONDITION(vtable);
+
+ /* Client owns this event loop group. A cancel should not be possible. */
+ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY);
+
+ int error_code = AWS_ERROR_SUCCESS;
+
+ if (!request->always_send && aws_s3_meta_request_has_finish_result(meta_request)) {
+ aws_raise_error(AWS_ERROR_S3_CANCELED);
+ goto dont_send_clean_up;
+ }
+
+ if (vtable->prepare_request(meta_request, request)) {
+ ++request->num_times_prepared;
+ goto dont_send_clean_up;
+ }
+
+ ++request->num_times_prepared;
+
+ aws_s3_add_user_agent_header(meta_request->allocator, request->send_data.message);
+
+ /* Sign the newly created message. */
+ s_s3_meta_request_sign_request(meta_request, request, s_s3_meta_request_request_on_signed, payload);
+
+ return;
+
+dont_send_clean_up:
+
+ error_code = aws_last_error_or_unknown();
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not prepare request %p due to error %d (%s).",
+ (void *)meta_request,
+ (void *)request,
+ error_code,
+ aws_error_str(error_code));
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ s_s3_prepare_request_payload_callback_and_destroy(payload, error_code);
+}
+
+static void s_s3_meta_request_init_signing_date_time(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_date_time *date_time) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+ AWS_PRECONDITION(meta_request->vtable->init_signing_date_time);
+
+ meta_request->vtable->init_signing_date_time(meta_request, date_time);
+}
+
+void aws_s3_meta_request_init_signing_date_time_default(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_date_time *date_time) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(date_time);
+ (void)meta_request;
+
+ aws_date_time_init_now(date_time);
+}
+
+static void s_s3_meta_request_sign_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_signing_complete_fn *on_signing_complete,
+ void *user_data) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+ AWS_PRECONDITION(meta_request->vtable->sign_request);
+
+ meta_request->vtable->sign_request(meta_request, request, on_signing_complete, user_data);
+}
+
+/* Handles signing a message for the caller. */
+void aws_s3_meta_request_sign_request_default(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ aws_signing_complete_fn *on_signing_complete,
+ void *user_data) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(request);
+ AWS_PRECONDITION(on_signing_complete);
+
+ struct aws_s3_client *client = meta_request->client;
+ AWS_ASSERT(client);
+
+ struct aws_signing_config_aws signing_config;
+
+ if (meta_request->cached_signing_config != NULL) {
+ signing_config = meta_request->cached_signing_config->config;
+ } else if (client->cached_signing_config != NULL) {
+ signing_config = client->cached_signing_config->config;
+ } else {
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: No signing config present. Not signing request %p.",
+ (void *)meta_request,
+ (void *)request);
+
+ on_signing_complete(NULL, AWS_ERROR_SUCCESS, user_data);
+ return;
+ }
+
+ s_s3_meta_request_init_signing_date_time(meta_request, &signing_config.date);
+
+ request->send_data.signable = aws_signable_new_http_request(meta_request->allocator, request->send_data.message);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Created signable %p for request %p with message %p",
+ (void *)meta_request,
+ (void *)request->send_data.signable,
+ (void *)request,
+ (void *)request->send_data.message);
+
+ if (request->send_data.signable == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Could not allocate signable for request %p",
+ (void *)meta_request,
+ (void *)request);
+
+ on_signing_complete(NULL, aws_last_error_or_unknown(), user_data);
+ return;
+ }
+
+ /* If the checksum is configured to be added to the trailer, the payload will be aws-chunked encoded. The payload
+ * will need to be streaming signed/unsigned. */
+ if (meta_request->checksum_config.location == AWS_SCL_TRAILER &&
+ aws_byte_cursor_eq(&signing_config.signed_body_value, &g_aws_signed_body_value_unsigned_payload)) {
+ signing_config.signed_body_value = g_aws_signed_body_value_streaming_unsigned_payload_trailer;
+ }
+ /* However, the initial request of a multipart upload does not have a trailing checksum and is not chunked, so it
+ * must keep the unsigned_payload signed_body value. */
+ if (request->part_number == 0 &&
+ aws_byte_cursor_eq(
+ &signing_config.signed_body_value, &g_aws_signed_body_value_streaming_unsigned_payload_trailer)) {
+ signing_config.signed_body_value = g_aws_signed_body_value_unsigned_payload;
+ }
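+ /* Net effect (assuming the usual aws-c-auth body-value constants): a chunked part upload with a
+ * trailing checksum is signed with "STREAMING-UNSIGNED-PAYLOAD-TRAILER", while the initial
+ * CreateMultipartUpload request (part_number == 0) keeps plain "UNSIGNED-PAYLOAD". */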
+
+ if (aws_sign_request_aws(
+ meta_request->allocator,
+ request->send_data.signable,
+ (struct aws_signing_config_base *)&signing_config,
+ on_signing_complete,
+ user_data)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "id=%p: Could not sign request %p", (void *)meta_request, (void *)request);
+
+ on_signing_complete(NULL, aws_last_error_or_unknown(), user_data);
+ return;
+ }
+}
+
+/* Handle the signing result, getting an HTTP connection for the request if signing succeeded. */
+static void s_s3_meta_request_request_on_signed(
+ struct aws_signing_result *signing_result,
+ int error_code,
+ void *user_data) {
+
+ struct aws_s3_prepare_request_payload *payload = user_data;
+ AWS_PRECONDITION(payload);
+
+ struct aws_s3_request *request = payload->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ goto finish;
+ }
+
+ if (signing_result != NULL &&
+ aws_apply_signing_result_to_http_request(request->send_data.message, meta_request->allocator, signing_result)) {
+
+ error_code = aws_last_error_or_unknown();
+
+ goto finish;
+ }
+
+finish:
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Meta request could not sign HTTP request due to error code %d (%s)",
+ (void *)meta_request,
+ error_code,
+ aws_error_str(error_code));
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ aws_s3_meta_request_set_fail_synced(meta_request, request, error_code);
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+ }
+
+ s_s3_prepare_request_payload_callback_and_destroy(payload, error_code);
+}
+
+void aws_s3_meta_request_send_request(struct aws_s3_meta_request *meta_request, struct aws_s3_connection *connection) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(connection);
+ AWS_PRECONDITION(connection->http_connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ /* Now that we have a signed request and a connection, go ahead and issue the request. */
+ struct aws_http_make_request_options options;
+ AWS_ZERO_STRUCT(options);
+
+ options.self_size = sizeof(struct aws_http_make_request_options);
+ options.request = request->send_data.message;
+ options.user_data = connection;
+ options.on_response_headers = s_s3_meta_request_incoming_headers;
+ options.on_response_header_block_done = NULL;
+ options.on_response_body = s_s3_meta_request_incoming_body;
+ options.on_complete = s_s3_meta_request_stream_complete;
+
+ struct aws_http_stream *stream = aws_http_connection_make_request(connection->http_connection, &options);
+
+ if (stream == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "id=%p: Could not make HTTP request %p", (void *)meta_request, (void *)request);
+
+ goto error_finish;
+ }
+
+ AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "id=%p: Sending request %p", (void *)meta_request, (void *)request);
+
+ if (aws_http_stream_activate(stream) != AWS_OP_SUCCESS) {
+ aws_http_stream_release(stream);
+ stream = NULL;
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST, "id=%p: Could not activate HTTP stream %p", (void *)meta_request, (void *)request);
+
+ goto error_finish;
+ }
+
+ return;
+
+error_finish:
+
+ s_s3_meta_request_send_request_finish(connection, NULL, aws_last_error_or_unknown());
+}
+
+static int s_s3_meta_request_error_code_from_response_status(int response_status) {
+ int error_code = AWS_ERROR_UNKNOWN;
+
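+ /* These status constants are defined elsewhere in this library (s3_util.h); they correspond to
+ * HTTP 200/206/204 (success), 500 (InternalError) and 503 (SlowDown). */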
+ switch (response_status) {
+ case AWS_S3_RESPONSE_STATUS_SUCCESS:
+ case AWS_S3_RESPONSE_STATUS_RANGE_SUCCESS:
+ case AWS_S3_RESPONSE_STATUS_NO_CONTENT_SUCCESS:
+ error_code = AWS_ERROR_SUCCESS;
+ break;
+ case AWS_S3_RESPONSE_STATUS_INTERNAL_ERROR:
+ error_code = AWS_ERROR_S3_INTERNAL_ERROR;
+ break;
+ case AWS_S3_RESPONSE_STATUS_SLOW_DOWN:
+ error_code = AWS_ERROR_S3_SLOW_DOWN;
+ break;
+ default:
+ error_code = AWS_ERROR_S3_INVALID_RESPONSE_STATUS;
+ break;
+ }
+
+ return error_code;
+}
+
+static bool s_header_value_from_list(
+ const struct aws_http_header *headers,
+ size_t headers_count,
+ const struct aws_byte_cursor *name,
+ struct aws_byte_cursor *out_value) {
+ for (size_t i = 0; i < headers_count; ++i) {
+ if (aws_byte_cursor_eq(&headers[i].name, name)) {
+ *out_value = headers[i].value;
+ return true;
+ }
+ }
+ return false;
+}
+
+static void s_get_part_response_headers_checksum_helper(
+ struct aws_s3_connection *connection,
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_http_header *headers,
+ size_t headers_count) {
+ for (int i = AWS_SCA_INIT; i <= AWS_SCA_END; i++) {
+ if (!aws_s3_meta_request_checksum_config_has_algorithm(meta_request, i)) {
+            /* If the user didn't select this algorithm, skip it. */
+ continue;
+ }
+ const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(i);
+ struct aws_byte_cursor header_sum;
+ if (s_header_value_from_list(headers, headers_count, algorithm_header_name, &header_sum)) {
+ size_t encoded_len = 0;
+ aws_base64_compute_encoded_len(aws_get_digest_size_from_algorithm(i), &encoded_len);
+ if (header_sum.len == encoded_len - 1) {
+ aws_byte_buf_init_copy_from_cursor(
+ &connection->request->request_level_response_header_checksum, aws_default_allocator(), header_sum);
+ connection->request->request_level_running_response_sum = aws_checksum_new(aws_default_allocator(), i);
+ }
+ break;
+ }
+ }
+}
+
+/* Warning: this might get screwed up with retries/restarts. */
+static void s_get_part_response_body_checksum_helper(
+ struct aws_s3_checksum *running_response_sum,
+ const struct aws_byte_cursor *body) {
+ if (running_response_sum) {
+ aws_checksum_update(running_response_sum, body);
+ }
+}
+
+static void s_get_response_part_finish_checksum_helper(struct aws_s3_connection *connection, int error_code) {
+ struct aws_byte_buf response_body_sum;
+ struct aws_byte_buf encoded_response_body_sum;
+ AWS_ZERO_STRUCT(response_body_sum);
+ AWS_ZERO_STRUCT(encoded_response_body_sum);
+
+ struct aws_s3_request *request = connection->request;
+ if (error_code == AWS_OP_SUCCESS && request->request_level_running_response_sum) {
+ size_t encoded_checksum_len = 0;
+ request->did_validate = true;
+ aws_base64_compute_encoded_len(request->request_level_running_response_sum->digest_size, &encoded_checksum_len);
+ aws_byte_buf_init(&encoded_response_body_sum, aws_default_allocator(), encoded_checksum_len);
+ aws_byte_buf_init(
+ &response_body_sum, aws_default_allocator(), request->request_level_running_response_sum->digest_size);
+ aws_checksum_finalize(request->request_level_running_response_sum, &response_body_sum, 0);
+ struct aws_byte_cursor response_body_sum_cursor = aws_byte_cursor_from_buf(&response_body_sum);
+ aws_base64_encode(&response_body_sum_cursor, &encoded_response_body_sum);
+ request->checksum_match =
+ aws_byte_buf_eq(&encoded_response_body_sum, &request->request_level_response_header_checksum);
+
+ request->validation_algorithm = request->request_level_running_response_sum->algorithm;
+ aws_byte_buf_clean_up(&response_body_sum);
+ aws_byte_buf_clean_up(&encoded_response_body_sum);
+ aws_checksum_destroy(request->request_level_running_response_sum);
+ aws_byte_buf_clean_up(&request->request_level_response_header_checksum);
+ request->request_level_running_response_sum = NULL;
+ } else {
+ request->did_validate = false;
+ }
+}
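+
+/* For illustration only: a minimal, self-contained sketch of the validation flow that the two helpers above split
+ * across the header and body callbacks, assuming the whole body is available in memory and the base64 value from the
+ * x-amz-checksum-* header has already been copied into a buffer. The checksum/base64 calls mirror the ones used
+ * above; the function itself is hypothetical and not part of this file's API. */
+static bool s_example_validate_part_checksum(
+    struct aws_allocator *allocator,
+    enum aws_s3_checksum_algorithm algorithm,
+    const struct aws_byte_cursor *body,
+    const struct aws_byte_buf *base64_checksum_from_header) {
+
+    /* Run the body through the selected checksum algorithm. */
+    struct aws_s3_checksum *sum = aws_checksum_new(allocator, algorithm);
+    if (sum == NULL) {
+        return false;
+    }
+    aws_checksum_update(sum, body);
+
+    /* Finalize into a raw digest, then base64-encode it, since the header value is base64-encoded. */
+    struct aws_byte_buf digest;
+    struct aws_byte_buf encoded_digest;
+    size_t encoded_len = 0;
+    aws_byte_buf_init(&digest, allocator, sum->digest_size);
+    aws_base64_compute_encoded_len(sum->digest_size, &encoded_len);
+    aws_byte_buf_init(&encoded_digest, allocator, encoded_len);
+
+    aws_checksum_finalize(sum, &digest, 0);
+    struct aws_byte_cursor digest_cursor = aws_byte_cursor_from_buf(&digest);
+    aws_base64_encode(&digest_cursor, &encoded_digest);
+
+    bool matched = aws_byte_buf_eq(&encoded_digest, base64_checksum_from_header);
+
+    aws_byte_buf_clean_up(&digest);
+    aws_byte_buf_clean_up(&encoded_digest);
+    aws_checksum_destroy(sum);
+    return matched;
+}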
+
+static int s_s3_meta_request_incoming_headers(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block header_block,
+ const struct aws_http_header *headers,
+ size_t headers_count,
+ void *user_data) {
+
+ (void)header_block;
+
+ AWS_PRECONDITION(stream);
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ if (aws_http_stream_get_incoming_response_status(stream, &request->send_data.response_status)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Could not get incoming response status for request %p",
+ (void *)meta_request,
+ (void *)request);
+ }
+
+ bool successful_response =
+ s_s3_meta_request_error_code_from_response_status(request->send_data.response_status) == AWS_ERROR_SUCCESS;
+
+ if (successful_response && meta_request->checksum_config.validate_response_checksum &&
+ request->request_tag == AWS_S3_AUTO_RANGE_GET_REQUEST_TYPE_PART) {
+ s_get_part_response_headers_checksum_helper(connection, meta_request, headers, headers_count);
+ }
+
+ /* Only record headers if an error has taken place, or if the request_desc has asked for them. */
+ bool should_record_headers = !successful_response || request->record_response_headers;
+
+ if (should_record_headers) {
+ if (request->send_data.response_headers == NULL) {
+ request->send_data.response_headers = aws_http_headers_new(meta_request->allocator);
+ }
+
+ for (size_t i = 0; i < headers_count; ++i) {
+ const struct aws_byte_cursor *name = &headers[i].name;
+ const struct aws_byte_cursor *value = &headers[i].value;
+
+ aws_http_headers_add(request->send_data.response_headers, *name, *value);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_s3_meta_request_incoming_body(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *user_data) {
+ (void)stream;
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+
+ AWS_LOGF_TRACE(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Incoming body for request %p. Response status: %d. Data Size: %" PRIu64 ". connection: %p.",
+ (void *)meta_request,
+ (void *)request,
+ request->send_data.response_status,
+ (uint64_t)data->len,
+ (void *)connection);
+ if (request->send_data.response_status < 200 || request->send_data.response_status > 299) {
+ AWS_LOGF_TRACE(AWS_LS_S3_META_REQUEST, "response body: \n" PRInSTR "\n", AWS_BYTE_CURSOR_PRI(*data));
+ }
+
+ if (meta_request->checksum_config.validate_response_checksum) {
+ s_get_part_response_body_checksum_helper(request->request_level_running_response_sum, data);
+ }
+
+ if (request->send_data.response_body.capacity == 0) {
+ size_t buffer_size = s_dynamic_body_initial_buf_size;
+
+ if (request->part_size_response_body) {
+ buffer_size = meta_request->part_size;
+ }
+
+ aws_byte_buf_init(&request->send_data.response_body, meta_request->allocator, buffer_size);
+ }
+
+ if (aws_byte_buf_append_dynamic(&request->send_data.response_body, data)) {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Request %p could not append to response body due to error %d (%s)",
+ (void *)meta_request,
+ (void *)request,
+ aws_last_error_or_unknown(),
+ aws_error_str(aws_last_error_or_unknown()));
+
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/* Finish up the processing of the request work. */
+static void s_s3_meta_request_stream_complete(struct aws_http_stream *stream, int error_code, void *user_data) {
+
+ struct aws_s3_connection *connection = user_data;
+ AWS_PRECONDITION(connection);
+ if (connection->request->meta_request->checksum_config.validate_response_checksum) {
+ s_get_response_part_finish_checksum_helper(connection, error_code);
+ }
+ s_s3_meta_request_send_request_finish(connection, stream, error_code);
+}
+
+static void s_s3_meta_request_send_request_finish(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code) {
+ AWS_PRECONDITION(connection);
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_meta_request_vtable *vtable = meta_request->vtable;
+ AWS_PRECONDITION(vtable);
+
+ vtable->send_request_finish(connection, stream, error_code);
+}
+
+static int s_s3_meta_request_error_code_from_response_body(struct aws_s3_request *request) {
+ AWS_PRECONDITION(request);
+ if (request->send_data.response_body.len == 0) {
+ /* Empty body is success */
+ return AWS_ERROR_SUCCESS;
+ }
+ struct aws_byte_cursor response_body_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body);
+ bool root_name_mismatch = false;
+ struct aws_string *error_code_string = aws_xml_get_top_level_tag_with_root_name(
+ request->allocator, &g_code_body_xml_name, &g_error_body_xml_name, &root_name_mismatch, &response_body_cursor);
+ if (error_code_string == NULL) {
+ if (root_name_mismatch || aws_last_error() == AWS_ERROR_MALFORMED_INPUT_STRING) {
+            /* The XML body is not an Error document, so we can safely treat the request as successful. */
+ aws_reset_error();
+ return AWS_ERROR_SUCCESS;
+ } else {
+ return aws_last_error();
+ }
+ } else {
+ /* Check the error code. Map the S3 error code to CRT error code. */
+ int error_code = aws_s3_crt_error_code_from_server_error_code_string(error_code_string);
+ if (error_code == AWS_ERROR_UNKNOWN) {
+            /* For now, any async error other than an internal error is treated as non-recoverable (no retry). */
+ error_code = AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR;
+ }
+ aws_string_destroy(error_code_string);
+ return error_code;
+ }
+}
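+
+/* For illustration only: the shape of a "200 OK with an error body" payload that the helper above guards against
+ * (async errors from operations like CompleteMultipartUpload or CopyObject). The top-level <Error><Code> value is
+ * extracted from the body and mapped to a CRT error code; anything that cannot be mapped is treated as a
+ * non-recoverable async error. This string is a generic example of the S3 error document shape, not a captured
+ * response. */
+static const char *s_example_async_error_body =
+    "<?xml version=\"1.0\" encoding=\"UTF-8\"?>"
+    "<Error>"
+    "  <Code>InternalError</Code>"
+    "  <Message>We encountered an internal error. Please try again.</Message>"
+    "</Error>";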
+
+static void s_s3_meta_request_send_request_finish_helper(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code,
+ bool handle_async_error) {
+
+ struct aws_s3_request *request = connection->request;
+ AWS_PRECONDITION(request);
+
+ struct aws_s3_meta_request *meta_request = request->meta_request;
+ AWS_PRECONDITION(meta_request);
+
+ struct aws_s3_client *client = meta_request->client;
+ AWS_PRECONDITION(client);
+
+ int response_status = request->send_data.response_status;
+ /* If our error code is currently success, then we have some other calls to make that could still indicate a
+ * failure. */
+ if (error_code == AWS_ERROR_SUCCESS) {
+ if (handle_async_error && response_status == AWS_HTTP_STATUS_CODE_200_OK) {
+ error_code = s_s3_meta_request_error_code_from_response_body(request);
+ } else {
+ error_code = s_s3_meta_request_error_code_from_response_status(response_status);
+ }
+
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_raise_error(error_code);
+ }
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p: Request %p finished with error code %d (%s) and response status %d",
+ (void *)meta_request,
+ (void *)request,
+ error_code,
+ aws_error_debug_str(error_code),
+ response_status);
+
+ enum aws_s3_connection_finish_code finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED;
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ if (connection->request->meta_request->type == AWS_S3_META_REQUEST_TYPE_GET_OBJECT && request->did_validate &&
+ !request->checksum_match) {
+ finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED;
+ error_code = AWS_ERROR_S3_RESPONSE_CHECKSUM_MISMATCH;
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Meta request cannot recover from checksum mismatch. (request=%p, response status=%d)",
+ (void *)meta_request,
+ (void *)request,
+ response_status);
+ } else {
+ finish_code = AWS_S3_CONNECTION_FINISH_CODE_SUCCESS;
+ }
+
+ } else {
+ /* BEGIN CRITICAL SECTION */
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ bool meta_request_finishing = aws_s3_meta_request_has_finish_result_synced(meta_request);
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ /* END CRITICAL SECTION */
+
+        /* If the request failed due to an invalid (i.e. unrecoverable) response status, or the meta request already
+         * has a result, then make sure that this request isn't retried. */
+ if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS ||
+ error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR || meta_request_finishing) {
+ finish_code = AWS_S3_CONNECTION_FINISH_CODE_FAILED;
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Meta request cannot recover from error %d (%s). (request=%p, response status=%d)",
+ (void *)meta_request,
+ error_code,
+ aws_error_str(error_code),
+ (void *)request,
+ response_status);
+
+ } else {
+
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Meta request failed from error %d (%s). (request=%p, response status=%d). Try to setup a "
+ "retry.",
+ (void *)meta_request,
+ error_code,
+ aws_error_str(error_code),
+ (void *)request,
+ response_status);
+
+ /* Otherwise, set this up for a retry if the meta request is active. */
+ finish_code = AWS_S3_CONNECTION_FINISH_CODE_RETRY;
+ }
+ }
+
+ if (stream != NULL) {
+ aws_http_stream_release(stream);
+ stream = NULL;
+ }
+
+ aws_s3_client_notify_connection_finished(client, connection, error_code, finish_code);
+}
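+
+/* For illustration only: the retry decision made in the failure branch above, reduced to a hypothetical predicate.
+ * A failed request is set up for a retry unless its error is inherently unrecoverable or the meta request has
+ * already recorded a finish result. */
+static bool s_example_failed_request_is_retryable(int error_code, bool meta_request_finishing) {
+    if (error_code == AWS_ERROR_S3_INVALID_RESPONSE_STATUS || error_code == AWS_ERROR_S3_NON_RECOVERABLE_ASYNC_ERROR ||
+        meta_request_finishing) {
+        return false;
+    }
+    return true;
+}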
+
+void aws_s3_meta_request_send_request_finish_default(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code) {
+ s_s3_meta_request_send_request_finish_helper(connection, stream, error_code, false /*async error*/);
+}
+
+void aws_s3_meta_request_send_request_finish_handle_async_error(
+ struct aws_s3_connection *connection,
+ struct aws_http_stream *stream,
+ int error_code) {
+ s_s3_meta_request_send_request_finish_helper(connection, stream, error_code, true /*async error*/);
+}
+
+void aws_s3_meta_request_finished_request(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request,
+ int error_code) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+ AWS_PRECONDITION(meta_request->vtable->finished_request);
+
+ meta_request->vtable->finished_request(meta_request, request, error_code);
+}
+
+struct s3_stream_response_body_payload {
+ struct aws_s3_meta_request *meta_request;
+ struct aws_linked_list requests;
+ struct aws_task task;
+};
+
+/* Pushes a request into the body streaming priority queue. Derived meta request types should not call this--they
+ * should instead call aws_s3_meta_request_stream_response_body_synced.*/
+static void s_s3_meta_request_body_streaming_push_synced(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request);
+
+/* Pops the next available request from the body streaming priority queue. If the parts preceding the next request in
+ * the priority queue have not been placed in the priority queue yet, the priority queue will remain unchanged and
+ * NULL will be returned. (Derived types should not need to call this.) */
+static struct aws_s3_request *s_s3_meta_request_body_streaming_pop_next_synced(
+ struct aws_s3_meta_request *meta_request);
+
+static void s_s3_meta_request_body_streaming_task(struct aws_task *task, void *arg, enum aws_task_status task_status);
+
+void aws_s3_meta_request_stream_response_body_synced(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request) {
+ ASSERT_SYNCED_DATA_LOCK_HELD(meta_request);
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(request);
+ AWS_PRECONDITION(request->part_number > 0);
+
+ struct aws_linked_list streaming_requests;
+ aws_linked_list_init(&streaming_requests);
+
+ /* Push it into the priority queue. */
+ s_s3_meta_request_body_streaming_push_synced(meta_request, request);
+
+ struct aws_s3_client *client = meta_request->client;
+ AWS_PRECONDITION(client);
+ aws_atomic_fetch_add(&client->stats.num_requests_stream_queued_waiting, 1);
+
+ /* Grab the next request that can be streamed back to the caller. */
+ struct aws_s3_request *next_streaming_request = s_s3_meta_request_body_streaming_pop_next_synced(meta_request);
+ uint32_t num_streaming_requests = 0;
+
+ /* Grab any additional requests that could be streamed to the caller. */
+ while (next_streaming_request != NULL) {
+ aws_atomic_fetch_sub(&client->stats.num_requests_stream_queued_waiting, 1);
+
+ aws_linked_list_push_back(&streaming_requests, &next_streaming_request->node);
+ ++num_streaming_requests;
+ next_streaming_request = s_s3_meta_request_body_streaming_pop_next_synced(meta_request);
+ }
+
+ if (aws_linked_list_empty(&streaming_requests)) {
+ return;
+ }
+
+ aws_atomic_fetch_add(&client->stats.num_requests_streaming, num_streaming_requests);
+
+ meta_request->synced_data.num_parts_delivery_sent += num_streaming_requests;
+
+ struct s3_stream_response_body_payload *payload =
+ aws_mem_calloc(client->allocator, 1, sizeof(struct s3_stream_response_body_payload));
+
+ aws_s3_meta_request_acquire(meta_request);
+ payload->meta_request = meta_request;
+
+ aws_linked_list_init(&payload->requests);
+ aws_linked_list_swap_contents(&payload->requests, &streaming_requests);
+
+ aws_task_init(
+ &payload->task, s_s3_meta_request_body_streaming_task, payload, "s_s3_meta_request_body_streaming_task");
+ aws_event_loop_schedule_task_now(meta_request->io_event_loop, &payload->task);
+}
+
+static void s_s3_meta_request_body_streaming_task(struct aws_task *task, void *arg, enum aws_task_status task_status) {
+ (void)task;
+ (void)task_status;
+
+ struct s3_stream_response_body_payload *payload = arg;
+ AWS_PRECONDITION(payload);
+
+ struct aws_s3_meta_request *meta_request = payload->meta_request;
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+
+ struct aws_s3_client *client = meta_request->client;
+ AWS_PRECONDITION(client);
+
+ /* Client owns this event loop group. A cancel should not be possible. */
+ AWS_ASSERT(task_status == AWS_TASK_STATUS_RUN_READY);
+
+ struct aws_linked_list completed_requests;
+ aws_linked_list_init(&completed_requests);
+
+ int error_code = AWS_ERROR_SUCCESS;
+ uint32_t num_successful = 0;
+ uint32_t num_failed = 0;
+
+ while (!aws_linked_list_empty(&payload->requests)) {
+ struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&payload->requests);
+ struct aws_s3_request *request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node);
+ AWS_ASSERT(meta_request == request->meta_request);
+ struct aws_byte_cursor body_buffer_byte_cursor = aws_byte_cursor_from_buf(&request->send_data.response_body);
+
+ AWS_ASSERT(request->part_number >= 1);
+
+ if (aws_s3_meta_request_has_finish_result(meta_request)) {
+ ++num_failed;
+ } else {
+ if (body_buffer_byte_cursor.len > 0 && error_code == AWS_ERROR_SUCCESS && meta_request->body_callback &&
+ meta_request->body_callback(
+ meta_request, &body_buffer_byte_cursor, request->part_range_start, meta_request->user_data)) {
+ error_code = aws_last_error_or_unknown();
+ }
+
+ if (error_code == AWS_ERROR_SUCCESS) {
+ ++num_successful;
+ } else {
+ ++num_failed;
+ }
+ }
+
+ aws_atomic_fetch_sub(&client->stats.num_requests_streaming, 1);
+ aws_s3_request_release(request);
+ }
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+ if (error_code != AWS_ERROR_SUCCESS) {
+ aws_s3_meta_request_set_fail_synced(meta_request, NULL, error_code);
+ }
+
+ meta_request->synced_data.num_parts_delivery_completed += (num_failed + num_successful);
+ meta_request->synced_data.num_parts_delivery_failed += num_failed;
+ meta_request->synced_data.num_parts_delivery_succeeded += num_successful;
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+ aws_mem_release(client->allocator, payload);
+ payload = NULL;
+
+ aws_s3_client_schedule_process_work(client);
+ aws_s3_meta_request_release(meta_request);
+}
+
+static void s_s3_meta_request_body_streaming_push_synced(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_request *request) {
+ ASSERT_SYNCED_DATA_LOCK_HELD(meta_request);
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(request);
+
+ AWS_ASSERT(request->meta_request == meta_request);
+
+ aws_s3_request_acquire(request);
+
+ aws_priority_queue_push(&meta_request->synced_data.pending_body_streaming_requests, &request);
+}
+
+static struct aws_s3_request *s_s3_meta_request_body_streaming_pop_next_synced(
+ struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ ASSERT_SYNCED_DATA_LOCK_HELD(meta_request);
+
+ if (0 == aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests)) {
+ return NULL;
+ }
+
+ struct aws_s3_request **top_request = NULL;
+
+ aws_priority_queue_top(&meta_request->synced_data.pending_body_streaming_requests, (void **)&top_request);
+
+ AWS_ASSERT(top_request);
+
+ AWS_FATAL_ASSERT(*top_request);
+
+ if ((*top_request)->part_number != meta_request->synced_data.next_streaming_part) {
+ return NULL;
+ }
+
+ struct aws_s3_request *request = NULL;
+ aws_priority_queue_pop(&meta_request->synced_data.pending_body_streaming_requests, (void **)&request);
+
+ ++meta_request->synced_data.next_streaming_part;
+
+ return request;
+}
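+
+/* For illustration only: the ordering rule the pop helper above enforces, reduced to plain part numbers. Parts can
+ * finish downloading out of order, but their bodies must be streamed to the caller strictly in order, so a finished
+ * part is held back until every lower-numbered part has already been delivered. This hypothetical predicate mirrors
+ * the check against synced_data.next_streaming_part. */
+static bool s_example_part_can_stream_now(uint32_t finished_part_number, uint32_t next_streaming_part) {
+    /* Only the exact next expected part may be delivered; later parts stay in the priority queue. */
+    return finished_part_number == next_streaming_part;
+}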
+
+void aws_s3_meta_request_finish(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->vtable);
+ AWS_PRECONDITION(meta_request->vtable->finish);
+
+ meta_request->vtable->finish(meta_request);
+}
+
+void aws_s3_meta_request_finish_default(struct aws_s3_meta_request *meta_request) {
+ AWS_PRECONDITION(meta_request);
+
+ bool already_finished = false;
+ struct aws_linked_list release_request_list;
+ aws_linked_list_init(&release_request_list);
+
+ struct aws_s3_meta_request_result finish_result;
+ AWS_ZERO_STRUCT(finish_result);
+
+ /* BEGIN CRITICAL SECTION */
+ {
+ aws_s3_meta_request_lock_synced_data(meta_request);
+
+ if (meta_request->synced_data.state == AWS_S3_META_REQUEST_STATE_FINISHED) {
+ already_finished = true;
+ goto unlock;
+ }
+
+ meta_request->synced_data.state = AWS_S3_META_REQUEST_STATE_FINISHED;
+
+        /* Clean out the pending-stream-to-caller priority queue. */
+ while (aws_priority_queue_size(&meta_request->synced_data.pending_body_streaming_requests) > 0) {
+ struct aws_s3_request *request = NULL;
+ aws_priority_queue_pop(&meta_request->synced_data.pending_body_streaming_requests, (void **)&request);
+ AWS_FATAL_ASSERT(request != NULL);
+
+ aws_linked_list_push_back(&release_request_list, &request->node);
+ }
+
+ finish_result = meta_request->synced_data.finish_result;
+ AWS_ZERO_STRUCT(meta_request->synced_data.finish_result);
+
+ unlock:
+ aws_s3_meta_request_unlock_synced_data(meta_request);
+ }
+ /* END CRITICAL SECTION */
+
+ if (already_finished) {
+ return;
+ }
+
+ while (!aws_linked_list_empty(&release_request_list)) {
+ struct aws_linked_list_node *request_node = aws_linked_list_pop_front(&release_request_list);
+ struct aws_s3_request *release_request = AWS_CONTAINER_OF(request_node, struct aws_s3_request, node);
+ AWS_FATAL_ASSERT(release_request != NULL);
+ aws_s3_request_release(release_request);
+ }
+
+ if (meta_request->headers_callback && finish_result.error_response_headers) {
+ if (meta_request->headers_callback(
+ meta_request,
+ finish_result.error_response_headers,
+ finish_result.response_status,
+ meta_request->user_data)) {
+ finish_result.error_code = aws_last_error_or_unknown();
+ }
+ meta_request->headers_callback = NULL;
+ }
+
+ AWS_LOGF_DEBUG(
+ AWS_LS_S3_META_REQUEST,
+ "id=%p Meta request finished with error code %d (%s)",
+ (void *)meta_request,
+ finish_result.error_code,
+ aws_error_str(finish_result.error_code));
+
+    /* Now that the meta request is finished, it no longer needs the initial HTTP message, so we can safely release it.
+     * That way, downstream high-level language bindings don't need to wait for shutdown to clean up related resources
+     * (e.g. the input stream). */
+ if (meta_request->initial_request_message) {
+ aws_http_message_release(meta_request->initial_request_message);
+ meta_request->initial_request_message = NULL;
+ }
+
+ if (meta_request->finish_callback != NULL) {
+ meta_request->finish_callback(meta_request, &finish_result, meta_request->user_data);
+ }
+
+ aws_s3_meta_request_result_clean_up(meta_request, &finish_result);
+
+ aws_s3_endpoint_release(meta_request->endpoint);
+ meta_request->endpoint = NULL;
+
+ meta_request->io_event_loop = NULL;
+}
+
+int aws_s3_meta_request_read_body(struct aws_s3_meta_request *meta_request, struct aws_byte_buf *buffer) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(buffer);
+
+ struct aws_input_stream *initial_body_stream =
+ aws_http_message_get_body_stream(meta_request->initial_request_message);
+ AWS_FATAL_ASSERT(initial_body_stream);
+
+ /* Copy it into our buffer. */
+ if (aws_input_stream_read(initial_body_stream, buffer)) {
+ AWS_LOGF_ERROR(AWS_LS_S3_META_REQUEST, "id=%p Could not read from body stream.", (void *)meta_request);
+ return AWS_OP_ERR;
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+void aws_s3_meta_request_result_setup(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_result *result,
+ struct aws_s3_request *request,
+ int response_status,
+ int error_code) {
+
+ if (request != NULL) {
+ if (request->send_data.response_headers != NULL) {
+ result->error_response_headers = request->send_data.response_headers;
+ aws_http_headers_acquire(result->error_response_headers);
+ }
+
+ if (request->send_data.response_body.capacity > 0) {
+ result->error_response_body = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_byte_buf));
+
+ aws_byte_buf_init_copy(
+ result->error_response_body, meta_request->allocator, &request->send_data.response_body);
+ }
+ }
+
+ result->response_status = response_status;
+ result->error_code = error_code;
+}
+
+void aws_s3_meta_request_result_clean_up(
+ struct aws_s3_meta_request *meta_request,
+ struct aws_s3_meta_request_result *result) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(result);
+
+ aws_http_headers_release(result->error_response_headers);
+
+ if (result->error_response_body != NULL) {
+ aws_byte_buf_clean_up(result->error_response_body);
+ aws_mem_release(meta_request->allocator, result->error_response_body);
+ }
+
+ AWS_ZERO_STRUCT(*result);
+}
+
+bool aws_s3_meta_request_checksum_config_has_algorithm(
+ struct aws_s3_meta_request *meta_request,
+ enum aws_s3_checksum_algorithm algorithm) {
+ AWS_PRECONDITION(meta_request);
+
+ switch (algorithm) {
+ case AWS_SCA_CRC32C:
+ return meta_request->checksum_config.response_checksum_algorithms.crc32c;
+ case AWS_SCA_CRC32:
+ return meta_request->checksum_config.response_checksum_algorithms.crc32;
+ case AWS_SCA_SHA1:
+ return meta_request->checksum_config.response_checksum_algorithms.sha1;
+ case AWS_SCA_SHA256:
+ return meta_request->checksum_config.response_checksum_algorithms.sha256;
+ default:
+ return false;
+ }
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c b/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
new file mode 100644
index 0000000000..e42845d87f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_paginator.c
@@ -0,0 +1,466 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/s3/private/s3_paginator.h>
+#include <aws/s3/s3_client.h>
+
+#include <aws/common/assert.h>
+#include <aws/common/atomics.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/mutex.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/common/xml_parser.h>
+#include <aws/http/request_response.h>
+
+static const size_t s_dynamic_body_initial_buf_size = 1024;
+
+enum operation_state {
+ OS_NOT_STARTED,
+ OS_INITIATED,
+ OS_COMPLETED,
+ OS_ERROR,
+};
+
+struct aws_s3_paginated_operation {
+ struct aws_allocator *allocator;
+
+ struct aws_string *result_xml_node_name;
+ struct aws_string *continuation_xml_node_name;
+
+ aws_s3_next_http_message_fn *next_http_message;
+ aws_s3_on_result_node_encountered_fn *on_result_node_encountered;
+
+ aws_s3_on_paginated_operation_cleanup_fn *on_paginated_operation_cleanup;
+
+ void *user_data;
+
+ struct aws_ref_count ref_count;
+};
+
+struct aws_s3_paginator {
+ struct aws_allocator *allocator;
+ struct aws_s3_client *client;
+
+ /** The current, in-flight paginated request to s3. */
+ struct aws_atomic_var current_request;
+
+ struct aws_string *bucket_name;
+ struct aws_string *endpoint;
+
+ struct aws_s3_paginated_operation *operation;
+
+ struct aws_ref_count ref_count;
+ struct {
+ struct aws_string *continuation_token;
+ enum operation_state operation_state;
+ struct aws_mutex lock;
+ bool has_more_results;
+ } shared_mt_state;
+
+ struct aws_byte_buf result_body;
+
+ aws_s3_on_page_finished_fn *on_page_finished;
+ void *user_data;
+};
+
+static void s_operation_ref_count_zero_callback(void *arg) {
+ struct aws_s3_paginated_operation *operation = arg;
+
+ if (operation->on_paginated_operation_cleanup) {
+ operation->on_paginated_operation_cleanup(operation->user_data);
+ }
+
+ if (operation->result_xml_node_name) {
+ aws_string_destroy(operation->result_xml_node_name);
+ }
+
+ if (operation->continuation_xml_node_name) {
+ aws_string_destroy(operation->continuation_xml_node_name);
+ }
+
+ aws_mem_release(operation->allocator, operation);
+}
+
+static void s_paginator_ref_count_zero_callback(void *arg) {
+ struct aws_s3_paginator *paginator = arg;
+
+ aws_s3_client_release(paginator->client);
+ aws_s3_paginated_operation_release(paginator->operation);
+
+ aws_byte_buf_clean_up(&paginator->result_body);
+
+ struct aws_s3_meta_request *previous_request = aws_atomic_exchange_ptr(&paginator->current_request, NULL);
+ if (previous_request != NULL) {
+ aws_s3_meta_request_release(previous_request);
+ }
+
+ if (paginator->bucket_name) {
+ aws_string_destroy(paginator->bucket_name);
+ }
+
+ if (paginator->endpoint) {
+ aws_string_destroy(paginator->endpoint);
+ }
+
+ if (paginator->shared_mt_state.continuation_token) {
+ aws_string_destroy(paginator->shared_mt_state.continuation_token);
+ }
+
+ aws_mem_release(paginator->allocator, paginator);
+}
+
+struct aws_s3_paginator *aws_s3_initiate_paginator(
+ struct aws_allocator *allocator,
+ const struct aws_s3_paginator_params *params) {
+ AWS_FATAL_PRECONDITION(params);
+ AWS_FATAL_PRECONDITION(params->client);
+
+ struct aws_s3_paginator *paginator = aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_paginator));
+ paginator->allocator = allocator;
+ paginator->client = aws_s3_client_acquire(params->client);
+ paginator->operation = params->operation;
+ paginator->on_page_finished = params->on_page_finished_fn;
+ paginator->user_data = params->user_data;
+
+ paginator->bucket_name = aws_string_new_from_cursor(allocator, &params->bucket_name);
+ paginator->endpoint = aws_string_new_from_cursor(allocator, &params->endpoint);
+
+ aws_s3_paginated_operation_acquire(params->operation);
+
+ aws_byte_buf_init(&paginator->result_body, allocator, s_dynamic_body_initial_buf_size);
+ aws_ref_count_init(&paginator->ref_count, paginator, s_paginator_ref_count_zero_callback);
+ aws_mutex_init(&paginator->shared_mt_state.lock);
+ aws_atomic_init_ptr(&paginator->current_request, NULL);
+ paginator->shared_mt_state.operation_state = OS_NOT_STARTED;
+
+ return paginator;
+}
+
+void aws_s3_paginator_release(struct aws_s3_paginator *paginator) {
+ if (paginator) {
+ aws_ref_count_release(&paginator->ref_count);
+ }
+}
+
+void aws_s3_paginator_acquire(struct aws_s3_paginator *paginator) {
+ AWS_FATAL_PRECONDITION(paginator);
+ aws_ref_count_acquire(&paginator->ref_count);
+}
+
+struct aws_s3_paginated_operation *aws_s3_paginated_operation_new(
+ struct aws_allocator *allocator,
+ const struct aws_s3_paginated_operation_params *params) {
+
+ struct aws_s3_paginated_operation *operation =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_s3_paginated_operation));
+ operation->allocator = allocator;
+
+ operation->result_xml_node_name = aws_string_new_from_cursor(allocator, params->result_xml_node_name);
+ operation->continuation_xml_node_name = aws_string_new_from_cursor(allocator, params->continuation_token_node_name);
+
+ operation->next_http_message = params->next_message;
+ operation->on_result_node_encountered = params->on_result_node_encountered_fn;
+ operation->on_paginated_operation_cleanup = params->on_paginated_operation_cleanup;
+
+ operation->user_data = params->user_data;
+
+ aws_ref_count_init(&operation->ref_count, operation, s_operation_ref_count_zero_callback);
+
+ return operation;
+}
+
+void aws_s3_paginated_operation_acquire(struct aws_s3_paginated_operation *operation) {
+ AWS_FATAL_PRECONDITION(operation);
+ aws_ref_count_acquire(&operation->ref_count);
+}
+
+void aws_s3_paginated_operation_release(struct aws_s3_paginated_operation *operation) {
+ if (operation) {
+ aws_ref_count_release(&operation->ref_count);
+ }
+}
+
+bool aws_s3_paginator_has_more_results(const struct aws_s3_paginator *paginator) {
+ AWS_PRECONDITION(paginator);
+ bool has_more_results = false;
+ struct aws_s3_paginator *paginator_mut = (struct aws_s3_paginator *)paginator;
+ aws_mutex_lock(&paginator_mut->shared_mt_state.lock);
+ has_more_results = paginator->shared_mt_state.has_more_results;
+ aws_mutex_unlock(&paginator_mut->shared_mt_state.lock);
+ AWS_LOGF_INFO(AWS_LS_S3_GENERAL, "has more %d", has_more_results);
+ return has_more_results;
+}
+
+struct aws_string *s_paginator_get_continuation_token(const struct aws_s3_paginator *paginator) {
+ AWS_PRECONDITION(paginator);
+ struct aws_string *continuation_token = NULL;
+ struct aws_s3_paginator *paginator_mut = (struct aws_s3_paginator *)paginator;
+ aws_mutex_lock(&paginator_mut->shared_mt_state.lock);
+ if (paginator->shared_mt_state.continuation_token) {
+ continuation_token =
+ aws_string_clone_or_reuse(paginator->allocator, paginator->shared_mt_state.continuation_token);
+ }
+ aws_mutex_unlock(&paginator_mut->shared_mt_state.lock);
+ return continuation_token;
+}
+
+static inline int s_set_paginator_state_if_legal(
+ struct aws_s3_paginator *paginator,
+ enum operation_state expected,
+ enum operation_state state) {
+ aws_mutex_lock(&paginator->shared_mt_state.lock);
+ if (paginator->shared_mt_state.operation_state != expected) {
+ aws_mutex_unlock(&paginator->shared_mt_state.lock);
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ paginator->shared_mt_state.operation_state = state;
+ aws_mutex_unlock(&paginator->shared_mt_state.lock);
+ return AWS_OP_SUCCESS;
+}
+
+/**
+ * On a successful operation, the response body is an XML document. Just copy the buffers over until the request
+ * completes, then parse the full response body.
+ */
+static int s_receive_body_callback(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_byte_cursor *body,
+ uint64_t range_start,
+ void *user_data) {
+ (void)range_start;
+ (void)meta_request;
+
+ struct aws_s3_paginator *paginator = user_data;
+
+ if (body && body->len) {
+ aws_byte_buf_append_dynamic(&paginator->result_body, body);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+struct parser_wrapper {
+ struct aws_s3_paginated_operation *operation;
+ struct aws_string *next_continuation_token;
+ bool has_more_results;
+};
+
+static bool s_on_result_node_encountered(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+
+ struct parser_wrapper *wrapper = user_data;
+
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+
+ struct aws_byte_cursor continuation_name_val =
+ aws_byte_cursor_from_string(wrapper->operation->continuation_xml_node_name);
+ if (aws_byte_cursor_eq_ignore_case(&node_name, &continuation_name_val)) {
+ struct aws_byte_cursor continuation_token_cur;
+ bool ret_val = aws_xml_node_as_body(parser, node, &continuation_token_cur) == AWS_OP_SUCCESS;
+
+ if (ret_val) {
+ wrapper->next_continuation_token =
+ aws_string_new_from_cursor(wrapper->operation->allocator, &continuation_token_cur);
+ }
+
+ return ret_val;
+ }
+
+ if (aws_byte_cursor_eq_c_str_ignore_case(&node_name, "IsTruncated")) {
+ struct aws_byte_cursor truncated_cur;
+ bool ret_val = aws_xml_node_as_body(parser, node, &truncated_cur) == AWS_OP_SUCCESS;
+
+ if (ret_val) {
+ if (aws_byte_cursor_eq_c_str_ignore_case(&truncated_cur, "true")) {
+ wrapper->has_more_results = true;
+ }
+ }
+
+ return ret_val;
+ }
+
+ return wrapper->operation->on_result_node_encountered(parser, node, wrapper->operation->user_data);
+}
+
+static bool s_on_root_node_encountered(struct aws_xml_parser *parser, struct aws_xml_node *node, void *user_data) {
+ struct parser_wrapper *wrapper = user_data;
+
+ struct aws_byte_cursor node_name;
+ aws_xml_node_get_name(node, &node_name);
+ struct aws_byte_cursor result_name_val = aws_byte_cursor_from_string(wrapper->operation->result_xml_node_name);
+ if (aws_byte_cursor_eq_ignore_case(&node_name, &result_name_val)) {
+ return aws_xml_node_traverse(parser, node, s_on_result_node_encountered, wrapper);
+ }
+
+ return false;
+}
+
+static void s_on_request_finished(
+ struct aws_s3_meta_request *meta_request,
+ const struct aws_s3_meta_request_result *meta_request_result,
+ void *user_data) {
+ (void)meta_request;
+ struct aws_s3_paginator *paginator = user_data;
+
+ if (meta_request_result->response_status == 200) {
+ /* clears previous continuation token */
+ aws_mutex_lock(&paginator->shared_mt_state.lock);
+ if (paginator->shared_mt_state.continuation_token) {
+ aws_string_destroy(paginator->shared_mt_state.continuation_token);
+ paginator->shared_mt_state.continuation_token = NULL;
+ paginator->shared_mt_state.has_more_results = false;
+ }
+ aws_mutex_unlock(&paginator->shared_mt_state.lock);
+
+ struct aws_byte_cursor result_body_cursor = aws_byte_cursor_from_buf(&paginator->result_body);
+ struct aws_string *continuation_token = NULL;
+ bool has_more_results = false;
+ aws_s3_paginated_operation_on_response(
+ paginator->operation, &result_body_cursor, &continuation_token, &has_more_results);
+
+ aws_mutex_lock(&paginator->shared_mt_state.lock);
+
+ if (paginator->shared_mt_state.continuation_token) {
+ aws_string_destroy(paginator->shared_mt_state.continuation_token);
+ }
+
+ paginator->shared_mt_state.continuation_token = continuation_token;
+ paginator->shared_mt_state.has_more_results = has_more_results;
+ aws_mutex_unlock(&paginator->shared_mt_state.lock);
+
+ if (has_more_results) {
+ s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_NOT_STARTED);
+ } else {
+ s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_COMPLETED);
+ }
+
+ } else {
+ s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_ERROR);
+ }
+
+ if (paginator->on_page_finished) {
+ paginator->on_page_finished(paginator, meta_request_result->error_code, paginator->user_data);
+ }
+
+    /* This reference was acquired right before we kicked off the request to keep the paginator object alive. Release
+     * it now that the operation has completed. */
+ aws_s3_paginator_release(paginator);
+}
+
+int aws_s3_paginated_operation_on_response(
+ struct aws_s3_paginated_operation *operation,
+ struct aws_byte_cursor *response_body,
+ struct aws_string **continuation_token_out,
+ bool *has_more_results_out) {
+
+ struct aws_xml_parser_options parser_options = {
+ .doc = *response_body,
+ .max_depth = 16U,
+ };
+
+ struct parser_wrapper wrapper = {.operation = operation};
+
+    /* We've got a full XML document now and the request succeeded; parse the document and fire all the callbacks
+     * for each object and prefix. All of that happens in these three lines. */
+ struct aws_xml_parser *parser = aws_xml_parser_new(operation->allocator, &parser_options);
+ int error_code = aws_xml_parser_parse(parser, s_on_root_node_encountered, &wrapper);
+ aws_xml_parser_destroy(parser);
+
+ if (error_code == AWS_OP_SUCCESS) {
+ *continuation_token_out = wrapper.next_continuation_token;
+ *has_more_results_out = wrapper.has_more_results;
+ }
+
+ return error_code;
+}
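+
+/* For illustration only: feeding a truncated ListObjectsV2-style document through the parser above, assuming
+ * `operation` was created with result_xml_node_name "ListBucketResult" and continuation_token_node_name
+ * "NextContinuationToken". The <IsTruncated> and continuation-token nodes are consumed here; every other child node
+ * is forwarded to the operation's on_result_node_encountered callback. The function and XML below are hypothetical
+ * examples, not part of this file's API. */
+static void s_example_parse_list_page(struct aws_s3_paginated_operation *operation) {
+    struct aws_byte_cursor body = aws_byte_cursor_from_c_str(
+        "<ListBucketResult>"
+        "  <IsTruncated>true</IsTruncated>"
+        "  <NextContinuationToken>token-from-server</NextContinuationToken>"
+        "  <Contents><Key>example-object</Key></Contents>"
+        "</ListBucketResult>");
+
+    struct aws_string *continuation_token = NULL;
+    bool has_more_results = false;
+
+    if (aws_s3_paginated_operation_on_response(operation, &body, &continuation_token, &has_more_results) ==
+        AWS_OP_SUCCESS) {
+        /* has_more_results is now true and continuation_token holds "token-from-server". */
+        aws_string_destroy(continuation_token);
+    }
+}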
+
+int aws_s3_construct_next_paginated_request_http_message(
+ struct aws_s3_paginated_operation *operation,
+ struct aws_byte_cursor *continuation_token,
+ struct aws_http_message **out_message) {
+ return operation->next_http_message(continuation_token, operation->user_data, out_message);
+}
+
+int aws_s3_paginator_continue(struct aws_s3_paginator *paginator, const struct aws_signing_config_aws *signing_config) {
+ AWS_PRECONDITION(paginator);
+ AWS_PRECONDITION(signing_config);
+
+ int re_code = AWS_OP_ERR;
+
+ if (s_set_paginator_state_if_legal(paginator, OS_NOT_STARTED, OS_INITIATED)) {
+ return re_code;
+ }
+
+ struct aws_http_message *paginated_request_message = NULL;
+ struct aws_string *continuation_string = NULL;
+ struct aws_byte_buf host_buf;
+ AWS_ZERO_STRUCT(host_buf);
+
+ struct aws_byte_cursor host_cur = aws_byte_cursor_from_string(paginator->bucket_name);
+ struct aws_byte_cursor period_cur = aws_byte_cursor_from_c_str(".");
+ struct aws_byte_cursor endpoint_val = aws_byte_cursor_from_string(paginator->endpoint);
+ if (aws_byte_buf_init_copy_from_cursor(&host_buf, paginator->allocator, host_cur) ||
+ aws_byte_buf_append_dynamic(&host_buf, &period_cur) || aws_byte_buf_append_dynamic(&host_buf, &endpoint_val)) {
+ goto done;
+ }
+
+ struct aws_http_header host_header = {
+ .name = aws_byte_cursor_from_c_str("host"),
+ .value = aws_byte_cursor_from_buf(&host_buf),
+ };
+
+ continuation_string = s_paginator_get_continuation_token(paginator);
+ struct aws_byte_cursor continuation_cursor;
+ AWS_ZERO_STRUCT(continuation_cursor);
+ struct aws_byte_cursor *continuation = NULL;
+ if (continuation_string) {
+ continuation_cursor = aws_byte_cursor_from_string(continuation_string);
+ continuation = &continuation_cursor;
+ }
+ if (paginator->operation->next_http_message(
+ continuation, paginator->operation->user_data, &paginated_request_message)) {
+ goto done;
+ }
+
+ if (aws_http_message_add_header(paginated_request_message, host_header)) {
+ goto done;
+ }
+
+ struct aws_s3_meta_request_options request_options = {
+ .user_data = paginator,
+ .signing_config = (struct aws_signing_config_aws *)signing_config,
+ .type = AWS_S3_META_REQUEST_TYPE_DEFAULT,
+ .body_callback = s_receive_body_callback,
+ .finish_callback = s_on_request_finished,
+ .message = paginated_request_message,
+ };
+
+ /* re-use the current buffer. */
+ aws_byte_buf_reset(&paginator->result_body, false);
+
+ /* we're kicking off an asynchronous request. ref-count the paginator to keep it alive until we finish. */
+ aws_s3_paginator_acquire(paginator);
+
+ struct aws_s3_meta_request *previous_request = aws_atomic_exchange_ptr(&paginator->current_request, NULL);
+ if (previous_request != NULL) {
+ /* release request from previous page */
+ aws_s3_meta_request_release(previous_request);
+ }
+
+ struct aws_s3_meta_request *new_request = aws_s3_client_make_meta_request(paginator->client, &request_options);
+ aws_atomic_store_ptr(&paginator->current_request, new_request);
+
+ if (new_request == NULL) {
+ s_set_paginator_state_if_legal(paginator, OS_INITIATED, OS_ERROR);
+ goto done;
+ }
+
+ re_code = AWS_OP_SUCCESS;
+done:
+ aws_http_message_release(paginated_request_message);
+ aws_string_destroy(continuation_string);
+ aws_byte_buf_clean_up(&host_buf);
+ return re_code;
+}
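+
+/* For illustration only: one way a caller might drive the pagination API above. The param field names match the ones
+ * read by aws_s3_initiate_paginator(), and the page-finished callback shape matches how it is invoked in
+ * s_on_request_finished(); `operation` and `signing_config` are assumed to have been created elsewhere (for example,
+ * an operation configured for ListObjectsV2), and the bucket/endpoint values are placeholders. */
+struct s_example_pager_ctx {
+    const struct aws_signing_config_aws *signing_config;
+};
+
+static void s_example_on_page_finished(struct aws_s3_paginator *paginator, int error_code, void *user_data) {
+    struct s_example_pager_ctx *ctx = user_data;
+
+    /* Keep requesting pages as long as the previous page succeeded and the listing is truncated. */
+    if (error_code == AWS_ERROR_SUCCESS && aws_s3_paginator_has_more_results(paginator)) {
+        aws_s3_paginator_continue(paginator, ctx->signing_config);
+    }
+}
+
+static struct aws_s3_paginator *s_example_start_listing(
+    struct aws_allocator *allocator,
+    struct aws_s3_client *client,
+    struct aws_s3_paginated_operation *operation,
+    struct s_example_pager_ctx *ctx) {
+
+    struct aws_s3_paginator_params params = {
+        .client = client,
+        .operation = operation,
+        .bucket_name = aws_byte_cursor_from_c_str("my-example-bucket"),
+        .endpoint = aws_byte_cursor_from_c_str("s3.us-east-1.amazonaws.com"),
+        .on_page_finished_fn = s_example_on_page_finished,
+        .user_data = ctx,
+    };
+
+    struct aws_s3_paginator *paginator = aws_s3_initiate_paginator(allocator, &params);
+    if (paginator != NULL && aws_s3_paginator_continue(paginator, ctx->signing_config) != AWS_OP_SUCCESS) {
+        aws_s3_paginator_release(paginator);
+        return NULL;
+    }
+
+    return paginator;
+}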
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_request.c b/contrib/restricted/aws/aws-c-s3/source/s3_request.c
new file mode 100644
index 0000000000..d92dfa955b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_request.c
@@ -0,0 +1,97 @@
+#include "aws/s3/private/s3_request.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include <aws/auth/signable.h>
+#include <aws/io/stream.h>
+
+static void s_s3_request_destroy(void *user_data);
+
+struct aws_s3_request *aws_s3_request_new(
+ struct aws_s3_meta_request *meta_request,
+ int request_tag,
+ uint32_t part_number,
+ uint32_t flags) {
+ AWS_PRECONDITION(meta_request);
+ AWS_PRECONDITION(meta_request->allocator);
+
+ struct aws_s3_request *request = aws_mem_calloc(meta_request->allocator, 1, sizeof(struct aws_s3_request));
+
+ aws_ref_count_init(&request->ref_count, request, (aws_simple_completion_callback *)s_s3_request_destroy);
+
+ request->allocator = meta_request->allocator;
+ request->meta_request = aws_s3_meta_request_acquire(meta_request);
+
+ request->request_tag = request_tag;
+ request->part_number = part_number;
+ request->record_response_headers = (flags & AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS) != 0;
+ request->part_size_response_body = (flags & AWS_S3_REQUEST_FLAG_PART_SIZE_RESPONSE_BODY) != 0;
+ request->always_send = (flags & AWS_S3_REQUEST_FLAG_ALWAYS_SEND) != 0;
+
+ return request;
+}
+
+void aws_s3_request_setup_send_data(struct aws_s3_request *request, struct aws_http_message *message) {
+ AWS_PRECONDITION(request);
+ AWS_PRECONDITION(message);
+
+ aws_s3_request_clean_up_send_data(request);
+
+ request->send_data.message = message;
+ aws_http_message_acquire(message);
+}
+
+static void s_s3_request_clean_up_send_data_message(struct aws_s3_request *request) {
+ AWS_PRECONDITION(request);
+
+ struct aws_http_message *message = request->send_data.message;
+
+ if (message == NULL) {
+ return;
+ }
+
+ request->send_data.message = NULL;
+ aws_http_message_release(message);
+}
+
+void aws_s3_request_clean_up_send_data(struct aws_s3_request *request) {
+ AWS_PRECONDITION(request);
+
+ s_s3_request_clean_up_send_data_message(request);
+
+ aws_signable_destroy(request->send_data.signable);
+ request->send_data.signable = NULL;
+
+ aws_http_headers_release(request->send_data.response_headers);
+ request->send_data.response_headers = NULL;
+
+ aws_byte_buf_clean_up(&request->send_data.response_body);
+
+ AWS_ZERO_STRUCT(request->send_data);
+}
+
+void aws_s3_request_acquire(struct aws_s3_request *request) {
+ AWS_PRECONDITION(request);
+
+ aws_ref_count_acquire(&request->ref_count);
+}
+
+void aws_s3_request_release(struct aws_s3_request *request) {
+ if (request == NULL) {
+ return;
+ }
+
+ aws_ref_count_release(&request->ref_count);
+}
+
+static void s_s3_request_destroy(void *user_data) {
+ struct aws_s3_request *request = user_data;
+
+ if (request == NULL) {
+ return;
+ }
+
+ aws_s3_request_clean_up_send_data(request);
+ aws_byte_buf_clean_up(&request->request_body);
+ aws_s3_meta_request_release(request->meta_request);
+
+ aws_mem_release(request->allocator, request);
+}
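+
+/* For illustration only: the ref-counted lifecycle the functions above implement. A request pins its meta request
+ * while alive, send_data is (re)initialized per attempt via aws_s3_request_setup_send_data(), and the final release
+ * tears everything down through s_s3_request_destroy(). The request_tag and part number below are arbitrary example
+ * values, and the function is a hypothetical caller rather than part of this file's API. */
+static void s_example_request_lifecycle(struct aws_s3_meta_request *meta_request, struct aws_http_message *message) {
+    /* Part 1 of a hypothetical upload; record response headers so failures can be reported in detail. */
+    struct aws_s3_request *request = aws_s3_request_new(
+        meta_request, 0 /*request_tag*/, 1 /*part_number*/, AWS_S3_REQUEST_FLAG_RECORD_RESPONSE_HEADERS);
+
+    /* Attach the outgoing HTTP message for this attempt; a retry would call this again with a fresh message. */
+    aws_s3_request_setup_send_data(request, message);
+
+    /* ... send the request and consume request->send_data.response_headers / response_body ... */
+
+    aws_s3_request_clean_up_send_data(request);
+    aws_s3_request_release(request); /* Drops the last reference, which also releases the meta request. */
+}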
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c b/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
new file mode 100644
index 0000000000..93aa00a08d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_request_messages.c
@@ -0,0 +1,1142 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_request_messages.h"
+#include "aws/s3/private/s3_checksums.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include "aws/s3/private/s3_meta_request_impl.h"
+#include "aws/s3/private/s3_util.h"
+#include <aws/cal/hash.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/encoding.h>
+#include <aws/common/string.h>
+#include <aws/http/request_response.h>
+#include <aws/io/stream.h>
+#include <aws/io/uri.h>
+#include <aws/s3/s3.h>
+#include <inttypes.h>
+
+const struct aws_byte_cursor g_s3_create_multipart_upload_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+};
+
+const size_t g_s3_create_multipart_upload_excluded_headers_count =
+ AWS_ARRAY_SIZE(g_s3_create_multipart_upload_excluded_headers);
+
+const struct aws_byte_cursor g_s3_upload_part_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"),
+};
+
+const size_t g_s3_upload_part_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_upload_part_excluded_headers);
+
+const struct aws_byte_cursor g_s3_complete_multipart_upload_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+};
+
+const size_t g_s3_complete_multipart_upload_excluded_headers_count =
+ AWS_ARRAY_SIZE(g_s3_complete_multipart_upload_excluded_headers);
+
+/* The customer-provided server-side encryption (SSE-C) headers are needed in the CompleteMultipartUpload request only
+ * when the object was created using a checksum algorithm. */
+const struct aws_byte_cursor g_s3_complete_multipart_upload_with_checksum_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+};
+
+const struct aws_byte_cursor g_s3_list_parts_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+};
+
+const size_t g_s3_list_parts_excluded_headers_count = AWS_ARRAY_SIZE(g_s3_list_parts_excluded_headers);
+
+const struct aws_byte_cursor g_s3_list_parts_with_checksum_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+};
+
+const size_t g_s3_list_parts_with_checksum_excluded_headers_count =
+ AWS_ARRAY_SIZE(g_s3_list_parts_with_checksum_excluded_headers);
+
+const struct aws_byte_cursor g_s3_abort_multipart_upload_excluded_headers[] = {
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Cache-Control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Disposition"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Language"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Expires"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-full-control"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-read-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-grant-write-acp"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-storage-class"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-website-redirect-location"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-algorithm"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-customer-key-MD5"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-aws-kms-key-id"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-context"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-server-side-encryption-bucket-key-enabled"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-tagging"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-mode"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-retain-until-date"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-object-lock-legal-hold"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source"),
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+};
+
+static const struct aws_byte_cursor s_x_amz_meta_prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-meta-");
+
+const size_t g_s3_abort_multipart_upload_excluded_headers_count =
+ AWS_ARRAY_SIZE(g_s3_abort_multipart_upload_excluded_headers);
+
+static void s_s3_message_util_add_range_header(
+ uint64_t part_range_start,
+ uint64_t part_range_end,
+ struct aws_http_message *out_message);
+
+/* Create a new get object request from an existing get object request. Currently just adds a Range header. */
+struct aws_http_message *aws_s3_ranged_get_object_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ uint64_t range_start,
+ uint64_t range_end) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(base_message);
+
+ struct aws_http_message *message =
+ aws_s3_message_util_copy_http_message_no_body_all_headers(allocator, base_message);
+
+ if (message == NULL) {
+ return NULL;
+ }
+
+ s_s3_message_util_add_range_header(range_start, range_end, message);
+
+ return message;
+}
+
+/* Creates a create-multipart-upload request from a given put object request. */
+struct aws_http_message *aws_s3_create_multipart_upload_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ enum aws_s3_checksum_algorithm algorithm) {
+ AWS_PRECONDITION(allocator);
+
+ /* For multipart upload, some headers should ONLY be in the initial create-multipart request.
+ * Headers such as:
+ * - SSE related headers
+ * - user metadata (prefixed "x-amz-meta-") headers */
+ struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ g_s3_create_multipart_upload_excluded_headers,
+ AWS_ARRAY_SIZE(g_s3_create_multipart_upload_excluded_headers),
+ false /*exclude_x_amz_meta*/);
+
+ if (message == NULL) {
+ return NULL;
+ }
+
+ if (aws_s3_message_util_set_multipart_request_path(allocator, NULL, 0, true, message)) {
+ goto error_clean_up;
+ }
+
+ struct aws_http_headers *headers = aws_http_message_get_headers(message);
+
+ if (headers == NULL) {
+ goto error_clean_up;
+ }
+
+ if (aws_http_headers_erase(headers, g_content_md5_header_name)) {
+ if (aws_last_error_or_unknown() != AWS_ERROR_HTTP_HEADER_NOT_FOUND) {
+ goto error_clean_up;
+ }
+ }
+ if (algorithm) {
+ if (aws_http_headers_set(
+ headers,
+ g_create_mpu_checksum_header_name,
+ *aws_get_create_mpu_header_name_from_algorithm(algorithm))) {
+ goto error_clean_up;
+ }
+ }
+
+ aws_http_message_set_request_method(message, g_post_method);
+ aws_http_message_set_body_stream(message, NULL);
+
+ return message;
+
+error_clean_up:
+ aws_http_message_release(message);
+ return NULL;
+}
+
+/* Create a new put object request from an existing put object request. Currently just optionally adds part information
+ * for a multipart upload. */
+struct aws_http_message *aws_s3_upload_part_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_buf *buffer,
+ uint32_t part_number,
+ const struct aws_string *upload_id,
+ bool should_compute_content_md5,
+ const struct checksum_config *checksum_config,
+ struct aws_byte_buf *encoded_checksum_output) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(base_message);
+ AWS_PRECONDITION(part_number > 0);
+ AWS_PRECONDITION(buffer);
+
+ struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ g_s3_upload_part_excluded_headers,
+ AWS_ARRAY_SIZE(g_s3_upload_part_excluded_headers),
+ true /*exclude_x_amz_meta*/);
+
+ if (message == NULL) {
+ return NULL;
+ }
+
+ if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, part_number, false, message)) {
+ goto error_clean_up;
+ }
+
+ if (aws_s3_message_util_assign_body(allocator, buffer, message, checksum_config, encoded_checksum_output) == NULL) {
+ goto error_clean_up;
+ }
+
+ if (should_compute_content_md5) {
+ if (!checksum_config || checksum_config->location == AWS_SCL_NONE) {
+ /* MD5 will be skipped if a flexible checksum is used */
+ if (aws_s3_message_util_add_content_md5_header(allocator, buffer, message)) {
+ goto error_clean_up;
+ }
+ }
+ }
+
+ return message;
+
+error_clean_up:
+ aws_http_message_release(message);
+ return NULL;
+}
+
+struct aws_http_message *aws_s3_upload_part_copy_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_buf *buffer,
+ uint32_t part_number,
+ uint64_t range_start,
+ uint64_t range_end,
+ const struct aws_string *upload_id,
+ bool should_compute_content_md5) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(base_message);
+ AWS_PRECONDITION(part_number > 0);
+
+ struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ g_s3_upload_part_excluded_headers,
+ AWS_ARRAY_SIZE(g_s3_upload_part_excluded_headers),
+ true /*exclude_x_amz_meta*/);
+
+ if (message == NULL) {
+ goto error_clean_up;
+ }
+
+ if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, part_number, false, message)) {
+ goto error_clean_up;
+ }
+
+ if (buffer != NULL) {
+ /* part copy does not have a ChecksumAlgorithm member; it uses the same algorithm that the
+ * create-multipart-upload request specifies */
+ if (aws_s3_message_util_assign_body(
+ allocator, buffer, message, NULL /* checksum_config */, NULL /* out_checksum */) == NULL) {
+ goto error_clean_up;
+ }
+
+ if (should_compute_content_md5) {
+ if (aws_s3_message_util_add_content_md5_header(allocator, buffer, message)) {
+ goto error_clean_up;
+ }
+ }
+ }
+
+ char source_range[1024];
+ snprintf(source_range, sizeof(source_range), "bytes=%" PRIu64 "-%" PRIu64, range_start, range_end);
+
+ struct aws_http_header source_range_header = {
+ .name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source-range"),
+ .value = aws_byte_cursor_from_c_str(source_range),
+ };
+
+ struct aws_http_headers *headers = aws_http_message_get_headers(message);
+ aws_http_headers_add_header(headers, &source_range_header);
+
+ return message;
+
+error_clean_up:
+
+ if (message != NULL) {
+ aws_http_message_release(message);
+ message = NULL;
+ }
+
+ return NULL;
+}
+
+/* Creates a HEAD GetObject request to get the size of the specified object. */
+struct aws_http_message *aws_s3_get_object_size_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_cursor source_bucket,
+ struct aws_byte_cursor source_key) {
+
+ (void)base_message;
+
+ AWS_PRECONDITION(allocator);
+
+ struct aws_http_message *message = aws_http_message_new_request(allocator);
+
+ if (message == NULL) {
+ return NULL;
+ }
+
+ const struct aws_byte_cursor head_operation = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD");
+ if (aws_http_message_set_request_method(message, head_operation)) {
+ goto error_clean_up;
+ }
+
+ char destination_path[1024];
+ snprintf(destination_path, sizeof(destination_path), "/%.*s", (int)source_key.len, source_key.ptr);
+ /* TODO: url encode */
+
+ if (aws_http_message_set_request_path(message, aws_byte_cursor_from_c_str(destination_path))) {
+ goto error_clean_up;
+ }
+
+ char host_header_value[1024];
+ /* TODO: Fix the hard-coded host name. */
+ snprintf(
+ host_header_value,
+ sizeof(host_header_value),
+ "%.*s.s3.us-west-2.amazonaws.com",
+ (int)source_bucket.len,
+ source_bucket.ptr);
+ struct aws_http_header host_header = {
+ .name = g_host_header_name,
+ .value = aws_byte_cursor_from_c_str(host_header_value),
+ };
+ aws_http_message_add_header(message, host_header);
+
+ aws_http_message_set_body_stream(message, NULL);
+
+ return message;
+
+error_clean_up:
+
+ if (message != NULL) {
+ aws_http_message_release(message);
+ message = NULL;
+ }
+
+ return NULL;
+}
+
+/* Creates a HEAD GetObject sub-request to get the size of the source object of a Copy meta request. */
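+/* For illustration (bucket and key names below are placeholders): an x-amz-copy-source value of
+ * "/source-bucket/path/to/key" (the leading slash is optional) is split into the bucket
+ * "source-bucket" and the key "path/to/key". */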
+struct aws_http_message *aws_s3_get_source_object_size_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_http_message *message = NULL;
+
+ /* find the x-amz-copy-source header */
+ struct aws_http_headers *headers = aws_http_message_get_headers(base_message);
+
+ struct aws_byte_cursor source_bucket;
+ AWS_ZERO_STRUCT(source_bucket);
+
+ const struct aws_byte_cursor copy_source_header = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-copy-source");
+ if (aws_http_headers_get(headers, copy_source_header, &source_bucket) != AWS_OP_SUCCESS) {
+ AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "CopyRequest is missing the x-amz-copy-source header");
+ return NULL;
+ }
+
+ if (source_bucket.len > 1 && source_bucket.ptr[0] == '/') {
+ /* skip the leading slash */
+ aws_byte_cursor_advance(&source_bucket, 1);
+ }
+ /* Since we skipped the optional leading slash, from this point on the source format is always
+ * {bucket}/{key}; split them. */
+ struct aws_byte_cursor source_key = source_bucket;
+
+ while (source_key.len > 0) {
+ if (*source_key.ptr == '/') {
+ source_bucket.len = source_key.ptr - source_bucket.ptr;
+ aws_byte_cursor_advance(&source_key, 1); /* skip the / between bucket and key */
+ break;
+ }
+ aws_byte_cursor_advance(&source_key, 1);
+ }
+
+ if (source_bucket.len == 0 || source_key.len == 0) {
+ AWS_LOGF_ERROR(
+ AWS_LS_S3_GENERAL,
+ "The CopyRequest x-amz-copy-source header must contain the bucket and object key separated by a slash");
+ goto error_cleanup;
+ }
+ message = aws_s3_get_object_size_message_new(allocator, base_message, source_bucket, source_key);
+
+error_cleanup:
+ return message;
+}
+
+static const struct aws_byte_cursor s_complete_payload_begin = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(
+ "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"
+ "<CompleteMultipartUpload xmlns=\"http://s3.amazonaws.com/doc/2006-03-01/\">\n");
+
+static const struct aws_byte_cursor s_complete_payload_end =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</CompleteMultipartUpload>");
+
+static const struct aws_byte_cursor s_part_section_string_0 = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" <Part>\n"
+ " <ETag>");
+
+static const struct aws_byte_cursor s_part_section_string_1 =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</ETag>\n"
+ " <PartNumber>");
+
+static const struct aws_byte_cursor s_close_part_number_tag = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</PartNumber>\n");
+static const struct aws_byte_cursor s_close_part_tag = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" </Part>\n");
+static const struct aws_byte_cursor s_open_start_bracket = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" <");
+static const struct aws_byte_cursor s_open_end_bracket = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("</");
+static const struct aws_byte_cursor s_close_bracket = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(">");
+static const struct aws_byte_cursor s_close_bracket_new_line = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(">\n");
+/* Create a complete-multipart message, which includes an XML payload of all completed parts. */
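+/* Schematically, the generated body for a two-part upload with CRC32C checksums looks like
+ * (ETag and checksum values here are placeholders, and whitespace is approximate):
+ *   <?xml version="1.0" encoding="UTF-8"?>
+ *   <CompleteMultipartUpload xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
+ *     <Part>
+ *       <ETag>"etag-1"</ETag>
+ *       <PartNumber>1</PartNumber>
+ *       <ChecksumCRC32C>base64-checksum-1</ChecksumCRC32C>
+ *     </Part>
+ *     <Part>...</Part>
+ *   </CompleteMultipartUpload> */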
+struct aws_http_message *aws_s3_complete_multipart_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_buf *body_buffer,
+ const struct aws_string *upload_id,
+ const struct aws_array_list *etags,
+ struct aws_byte_buf *checksums,
+ enum aws_s3_checksum_algorithm algorithm) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(base_message);
+ AWS_PRECONDITION(body_buffer);
+ AWS_PRECONDITION(upload_id);
+ AWS_PRECONDITION(etags);
+
+ const struct aws_byte_cursor *mpu_algorithm_checksum_name = aws_get_complete_mpu_name_from_algorithm(algorithm);
+
+ struct aws_http_message *message = NULL;
+ if (algorithm == AWS_SCA_NONE) {
+ /* We don't need to worry about a pre-calculated checksum from the user: for a multipart upload,
+ * the only way to calculate the checksum is on the client. */
+ message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ g_s3_complete_multipart_upload_excluded_headers,
+ AWS_ARRAY_SIZE(g_s3_complete_multipart_upload_excluded_headers),
+ true /*exclude_x_amz_meta*/);
+ } else {
+ message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ g_s3_complete_multipart_upload_with_checksum_excluded_headers,
+ AWS_ARRAY_SIZE(g_s3_complete_multipart_upload_with_checksum_excluded_headers),
+ true /*exclude_x_amz_meta*/);
+ }
+
+ struct aws_http_headers *headers = NULL;
+
+ if (message == NULL) {
+ goto error_clean_up;
+ }
+
+ if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, 0, false, message)) {
+ goto error_clean_up;
+ }
+
+ aws_http_message_set_request_method(message, g_post_method);
+
+ headers = aws_http_message_get_headers(message);
+
+ if (headers == NULL) {
+ goto error_clean_up;
+ }
+
+ /* Create XML payload with all of the etags of finished parts */
+ {
+ aws_byte_buf_reset(body_buffer, false);
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_complete_payload_begin)) {
+ goto error_clean_up;
+ }
+
+ for (size_t etag_index = 0; etag_index < aws_array_list_length(etags); ++etag_index) {
+ struct aws_string *etag = NULL;
+
+ aws_array_list_get_at(etags, &etag, etag_index);
+
+ AWS_FATAL_ASSERT(etag != NULL);
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_part_section_string_0)) {
+ goto error_clean_up;
+ }
+
+ struct aws_byte_cursor etag_byte_cursor = aws_byte_cursor_from_string(etag);
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &etag_byte_cursor)) {
+ goto error_clean_up;
+ }
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_part_section_string_1)) {
+ goto error_clean_up;
+ }
+
+ char part_number_buffer[32] = "";
+ int part_number = (int)(etag_index + 1);
+ int part_number_num_char = snprintf(part_number_buffer, sizeof(part_number_buffer), "%d", part_number);
+ struct aws_byte_cursor part_number_byte_cursor =
+ aws_byte_cursor_from_array(part_number_buffer, part_number_num_char);
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &part_number_byte_cursor)) {
+ goto error_clean_up;
+ }
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_close_part_number_tag)) {
+ goto error_clean_up;
+ }
+ if (mpu_algorithm_checksum_name) {
+ struct aws_byte_cursor checksum = aws_byte_cursor_from_buf(&checksums[etag_index]);
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_open_start_bracket)) {
+ goto error_clean_up;
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, mpu_algorithm_checksum_name)) {
+ goto error_clean_up;
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_close_bracket)) {
+ goto error_clean_up;
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, &checksum)) {
+ goto error_clean_up;
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_open_end_bracket)) {
+ goto error_clean_up;
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, mpu_algorithm_checksum_name)) {
+ goto error_clean_up;
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_close_bracket_new_line)) {
+ goto error_clean_up;
+ }
+ }
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_close_part_tag)) {
+ goto error_clean_up;
+ }
+ }
+
+ if (aws_byte_buf_append_dynamic(body_buffer, &s_complete_payload_end)) {
+ goto error_clean_up;
+ }
+
+ aws_s3_message_util_assign_body(
+ allocator, body_buffer, message, NULL /* checksum_config */, NULL /* out_checksum */);
+ }
+
+ return message;
+
+error_clean_up:
+
+ AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "Could not create complete multipart message");
+
+ if (message != NULL) {
+ aws_http_message_release(message);
+ message = NULL;
+ }
+
+ return NULL;
+}
+
+struct aws_http_message *aws_s3_abort_multipart_upload_message_new(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ const struct aws_string *upload_id) {
+
+ struct aws_http_message *message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ g_s3_abort_multipart_upload_excluded_headers,
+ AWS_ARRAY_SIZE(g_s3_abort_multipart_upload_excluded_headers),
+ true /*exclude_x_amz_meta*/);
+
+ if (aws_s3_message_util_set_multipart_request_path(allocator, upload_id, 0, false, message)) {
+ goto error_clean_up;
+ }
+ aws_http_message_set_request_method(message, g_delete_method);
+
+ return message;
+
+error_clean_up:
+
+ AWS_LOGF_ERROR(AWS_LS_S3_GENERAL, "Could not create abort multipart upload message");
+
+ if (message != NULL) {
+ aws_http_message_release(message);
+ message = NULL;
+ }
+
+ return NULL;
+}
+
+/* Assign a buffer to an HTTP message, creating a stream and setting the content-length header */
+struct aws_input_stream *aws_s3_message_util_assign_body(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *byte_buf,
+ struct aws_http_message *out_message,
+ const struct checksum_config *checksum_config,
+ struct aws_byte_buf *out_checksum) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(out_message);
+ AWS_PRECONDITION(byte_buf);
+
+ struct aws_byte_cursor buffer_byte_cursor = aws_byte_cursor_from_buf(byte_buf);
+ struct aws_http_headers *headers = aws_http_message_get_headers(out_message);
+
+ if (headers == NULL) {
+ return NULL;
+ }
+
+ struct aws_input_stream *input_stream = aws_input_stream_new_from_cursor(allocator, &buffer_byte_cursor);
+
+ if (input_stream == NULL) {
+ goto error_clean_up;
+ }
+
+ if (checksum_config) {
+ if (checksum_config->location == AWS_SCL_TRAILER) {
+ /* aws-chunked encode the payload and add related headers */
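+ /* Illustrative example (CRC32C algorithm, 1 MiB part; values are placeholders): the request ends up with
+ * Content-Encoding: aws-chunked
+ * x-amz-trailer: x-amz-checksum-crc32c
+ * x-amz-decoded-content-length: 1048576
+ * and Content-Length set below to the length of the aws-chunked-encoded stream. */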
+
+ /* Set the Content-Encoding header. TODO: aws-chunked should be appended to any existing
+ * Content-Encoding value. */
+ if (aws_http_headers_set(headers, g_content_encoding_header_name, g_content_encoding_header_aws_chunked)) {
+ goto error_clean_up;
+ }
+ /* set x-amz-trailer header */
+ if (aws_http_headers_set(
+ headers,
+ g_trailer_header_name,
+ *aws_get_http_header_name_from_algorithm(checksum_config->checksum_algorithm))) {
+ goto error_clean_up;
+ }
+ /* set x-amz-decoded-content-length header */
+ char decoded_content_length_buffer[64] = "";
+ snprintf(
+ decoded_content_length_buffer,
+ sizeof(decoded_content_length_buffer),
+ "%" PRIu64,
+ (uint64_t)buffer_byte_cursor.len);
+ struct aws_byte_cursor decode_content_length_cursor =
+ aws_byte_cursor_from_array(decoded_content_length_buffer, strlen(decoded_content_length_buffer));
+ if (aws_http_headers_set(headers, g_decoded_content_length_header_name, decode_content_length_cursor)) {
+ goto error_clean_up;
+ }
+ /* set input stream to chunk stream */
+ struct aws_input_stream *chunk_stream =
+ aws_chunk_stream_new(allocator, input_stream, checksum_config->checksum_algorithm, out_checksum);
+ if (!chunk_stream) {
+ goto error_clean_up;
+ }
+ aws_input_stream_release(input_stream);
+ input_stream = chunk_stream;
+ }
+ }
+ int64_t stream_length = 0;
+ if (aws_input_stream_get_length(input_stream, &stream_length)) {
+ goto error_clean_up;
+ }
+ char content_length_buffer[64] = "";
+ snprintf(content_length_buffer, sizeof(content_length_buffer), "%" PRIu64, (uint64_t)stream_length);
+ struct aws_byte_cursor content_length_cursor =
+ aws_byte_cursor_from_array(content_length_buffer, strlen(content_length_buffer));
+ if (aws_http_headers_set(headers, g_content_length_header_name, content_length_cursor)) {
+ goto error_clean_up;
+ }
+
+ aws_http_message_set_body_stream(out_message, input_stream);
+ /* Let the message take full ownership of the stream */
+ aws_input_stream_release(input_stream);
+
+ return input_stream;
+
+error_clean_up:
+ AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to assign body for S3 request HTTP message from body buffer.");
+ aws_input_stream_release(input_stream);
+ return NULL;
+}
+
+bool aws_s3_message_util_check_checksum_header(struct aws_http_message *message) {
+ struct aws_http_headers *headers = aws_http_message_get_headers(message);
+ for (int algorithm = AWS_SCA_INIT; algorithm <= AWS_SCA_END; algorithm++) {
+ const struct aws_byte_cursor *algorithm_header_name = aws_get_http_header_name_from_algorithm(algorithm);
+ if (aws_http_headers_has(headers, *algorithm_header_name)) {
+ return true;
+ }
+ }
+ return false;
+}
+
+/* Add a content-md5 header. */
+int aws_s3_message_util_add_content_md5_header(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *input_buf,
+ struct aws_http_message *out_message) {
+
+ AWS_PRECONDITION(out_message);
+
+ /* Compute MD5 */
+ struct aws_byte_cursor md5_input = aws_byte_cursor_from_buf(input_buf);
+ uint8_t md5_output[AWS_MD5_LEN];
+ struct aws_byte_buf md5_output_buf = aws_byte_buf_from_empty_array(md5_output, sizeof(md5_output));
+ if (aws_md5_compute(allocator, &md5_input, &md5_output_buf, 0)) {
+ return AWS_OP_ERR;
+ }
+
+ /* Compute Base64 encoding of MD5 */
+ struct aws_byte_cursor base64_input = aws_byte_cursor_from_buf(&md5_output_buf);
+ size_t base64_output_size = 0;
+ if (aws_base64_compute_encoded_len(md5_output_buf.len, &base64_output_size)) {
+ return AWS_OP_ERR;
+ }
+ struct aws_byte_buf base64_output_buf;
+ if (aws_byte_buf_init(&base64_output_buf, allocator, base64_output_size)) {
+ return AWS_OP_ERR;
+ }
+ if (aws_base64_encode(&base64_input, &base64_output_buf)) {
+ goto error_clean_up;
+ }
+
+ struct aws_http_headers *headers = aws_http_message_get_headers(out_message);
+ if (aws_http_headers_set(headers, g_content_md5_header_name, aws_byte_cursor_from_buf(&base64_output_buf))) {
+ goto error_clean_up;
+ }
+
+ aws_byte_buf_clean_up(&base64_output_buf);
+ return AWS_OP_SUCCESS;
+
+error_clean_up:
+
+ aws_byte_buf_clean_up(&base64_output_buf);
+ return AWS_OP_ERR;
+}
+
+/* Copy an existing HTTP message's headers, method, and path. */
+struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_all_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message) {
+
+ return aws_s3_message_util_copy_http_message_no_body_filter_headers(allocator, base_message, NULL, 0, false);
+}
+
+struct aws_http_message *aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ const struct aws_byte_cursor *excluded_header_array,
+ size_t excluded_header_array_size,
+ bool exclude_x_amz_meta) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(base_message);
+
+ struct aws_http_message *message = aws_http_message_new_request(allocator);
+ AWS_ASSERT(message);
+
+ struct aws_byte_cursor request_method;
+ if (aws_http_message_get_request_method(base_message, &request_method)) {
+ AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to get request method.");
+ goto error_clean_up;
+ }
+
+ if (aws_http_message_set_request_method(message, request_method)) {
+ goto error_clean_up;
+ }
+
+ struct aws_byte_cursor request_path;
+ if (aws_http_message_get_request_path(base_message, &request_path)) {
+ AWS_LOGF_ERROR(AWS_LS_S3_CLIENT, "Failed to get request path.");
+ goto error_clean_up;
+ }
+
+ if (aws_http_message_set_request_path(message, request_path)) {
+ goto error_clean_up;
+ }
+
+ aws_s3_message_util_copy_headers(
+ base_message, message, excluded_header_array, excluded_header_array_size, exclude_x_amz_meta);
+
+ return message;
+
+error_clean_up:
+ aws_http_message_release(message);
+ return NULL;
+}
+
+/* Copy message and retain all headers, but replace body with one that reads directly from a filepath. */
+struct aws_http_message *aws_s3_message_util_copy_http_message_filepath_body_all_headers(
+ struct aws_allocator *allocator,
+ struct aws_http_message *base_message,
+ struct aws_byte_cursor filepath) {
+
+ bool success = false;
+ struct aws_string *filepath_str = NULL;
+ struct aws_input_stream *body_stream = NULL;
+ struct aws_http_message *message = NULL;
+
+ /* Copy message and retain all headers */
+ message = aws_s3_message_util_copy_http_message_no_body_filter_headers(
+ allocator,
+ base_message,
+ NULL /*excluded_header_array*/,
+ 0 /*excluded_header_array_size*/,
+ false /*exclude_x_amz_meta*/);
+ if (!message) {
+ goto clean_up;
+ }
+
+ /* Create body-stream that reads from file */
+ filepath_str = aws_string_new_from_cursor(allocator, &filepath);
+ body_stream = aws_input_stream_new_from_file(allocator, aws_string_c_str(filepath_str));
+ if (!body_stream) {
+ goto clean_up;
+ }
+ aws_http_message_set_body_stream(message, body_stream);
+
+ success = true;
+
+clean_up:
+ aws_string_destroy(filepath_str);
+ aws_input_stream_release(body_stream);
+ if (success) {
+ return message;
+ } else {
+ aws_http_message_release(message);
+ return NULL;
+ }
+}
+
+void aws_s3_message_util_copy_headers(
+ struct aws_http_message *source_message,
+ struct aws_http_message *dest_message,
+ const struct aws_byte_cursor *excluded_header_array,
+ size_t excluded_header_array_size,
+ bool exclude_x_amz_meta) {
+
+ size_t num_headers = aws_http_message_get_header_count(source_message);
+
+ for (size_t header_index = 0; header_index < num_headers; ++header_index) {
+ struct aws_http_header header;
+
+ int error = aws_http_message_get_header(source_message, &header, header_index);
+
+ if (excluded_header_array && excluded_header_array_size > 0) {
+ bool exclude_header = false;
+
+ for (size_t exclude_index = 0; exclude_index < excluded_header_array_size; ++exclude_index) {
+ if (aws_byte_cursor_eq_ignore_case(&header.name, &excluded_header_array[exclude_index])) {
+ exclude_header = true;
+ break;
+ }
+ }
+
+ if (exclude_header) {
+ continue;
+ }
+ }
+
+ if (exclude_x_amz_meta) {
+ if (aws_byte_cursor_starts_with_ignore_case(&header.name, &s_x_amz_meta_prefix)) {
+ continue;
+ }
+ }
+
+ error |= aws_http_message_add_header(dest_message, header);
+ (void)error;
+ AWS_ASSERT(!error);
+ }
+}
+
+/* Add a range header.*/
+static void s_s3_message_util_add_range_header(
+ uint64_t part_range_start,
+ uint64_t part_range_end,
+ struct aws_http_message *out_message) {
+ AWS_PRECONDITION(out_message);
+
+ /* (2^64)-1 has 20 decimal digits; 2*20 + length-of("bytes=-") < 128 */
+ char range_value_buffer[128] = "";
+ snprintf(
+ range_value_buffer, sizeof(range_value_buffer), "bytes=%" PRIu64 "-%" PRIu64, part_range_start, part_range_end);
+
+ struct aws_http_header range_header;
+ AWS_ZERO_STRUCT(range_header);
+ range_header.name = g_range_header_name;
+ range_header.value = aws_byte_cursor_from_c_str(range_value_buffer);
+
+ struct aws_http_headers *headers = aws_http_message_get_headers(out_message);
+ AWS_ASSERT(headers != NULL);
+
+ int erase_result = aws_http_headers_erase(headers, range_header.name);
+ AWS_ASSERT(erase_result == AWS_OP_SUCCESS || aws_last_error() == AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+
+ /* This only fails when the header has an invalid name, which is impossible here. */
+ erase_result = aws_http_message_add_header(out_message, range_header);
+ AWS_ASSERT(erase_result == AWS_OP_SUCCESS);
+ (void)erase_result;
+}
+
+/* Handle setting up the multipart request path for a message. */
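+/* For illustration (the upload id is a placeholder): a base path of "/my-key" becomes
+ *   "/my-key?uploads"                    for a create-multipart-upload request,
+ *   "/my-key?partNumber=2&uploadId=<id>" for an upload-part request,
+ *   "/my-key?uploadId=<id>"              for complete/abort requests.
+ * Existing query parameters on the base path are preserved and extended with '&'. */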
+int aws_s3_message_util_set_multipart_request_path(
+ struct aws_allocator *allocator,
+ const struct aws_string *upload_id,
+ uint32_t part_number,
+ bool append_uploads_suffix,
+ struct aws_http_message *message) {
+
+ const struct aws_byte_cursor question_mark = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("?");
+ const struct aws_byte_cursor ampersand = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("&");
+
+ const struct aws_byte_cursor uploads_suffix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("uploads");
+ const struct aws_byte_cursor part_number_arg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("partNumber=");
+ const struct aws_byte_cursor upload_id_arg = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("uploadId=");
+
+ struct aws_byte_buf request_path_buf;
+ struct aws_byte_cursor request_path;
+
+ if (aws_http_message_get_request_path(message, &request_path)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_init(&request_path_buf, allocator, request_path.len)) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_byte_buf_append_dynamic(&request_path_buf, &request_path)) {
+ goto error_clean_up;
+ }
+
+ bool has_existing_query_parameters = false;
+
+ for (size_t i = 0; i < request_path.len; ++i) {
+ if (request_path.ptr[i] == '?') {
+ has_existing_query_parameters = true;
+ break;
+ }
+ }
+
+ if (part_number > 0) {
+ if (aws_byte_buf_append_dynamic(
+ &request_path_buf, has_existing_query_parameters ? &ampersand : &question_mark)) {
+ goto error_clean_up;
+ }
+
+ if (aws_byte_buf_append_dynamic(&request_path_buf, &part_number_arg)) {
+ goto error_clean_up;
+ }
+
+ char part_number_buffer[32] = "";
+ snprintf(part_number_buffer, sizeof(part_number_buffer), "%d", part_number);
+ struct aws_byte_cursor part_number_cursor =
+ aws_byte_cursor_from_array(part_number_buffer, strlen(part_number_buffer));
+
+ if (aws_byte_buf_append_dynamic(&request_path_buf, &part_number_cursor)) {
+ goto error_clean_up;
+ }
+
+ has_existing_query_parameters = true;
+ }
+
+ if (upload_id != NULL) {
+
+ struct aws_byte_cursor upload_id_cursor = aws_byte_cursor_from_string(upload_id);
+
+ if (aws_byte_buf_append_dynamic(
+ &request_path_buf, has_existing_query_parameters ? &ampersand : &question_mark)) {
+ goto error_clean_up;
+ }
+
+ if (aws_byte_buf_append_dynamic(&request_path_buf, &upload_id_arg)) {
+ goto error_clean_up;
+ }
+
+ if (aws_byte_buf_append_dynamic(&request_path_buf, &upload_id_cursor)) {
+ goto error_clean_up;
+ }
+
+ has_existing_query_parameters = true;
+ }
+
+ if (append_uploads_suffix) {
+ if (aws_byte_buf_append_dynamic(
+ &request_path_buf, has_existing_query_parameters ? &ampersand : &question_mark)) {
+ goto error_clean_up;
+ }
+
+ if (aws_byte_buf_append_dynamic(&request_path_buf, &uploads_suffix)) {
+ goto error_clean_up;
+ }
+
+ has_existing_query_parameters = true;
+ }
+
+ struct aws_byte_cursor new_request_path = aws_byte_cursor_from_buf(&request_path_buf);
+
+ if (aws_http_message_set_request_path(message, new_request_path)) {
+ goto error_clean_up;
+ }
+
+ aws_byte_buf_clean_up(&request_path_buf);
+ return AWS_OP_SUCCESS;
+
+error_clean_up:
+
+ aws_byte_buf_clean_up(&request_path_buf);
+
+ return AWS_OP_ERR;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/source/s3_util.c b/contrib/restricted/aws/aws-c-s3/source/s3_util.c
new file mode 100644
index 0000000000..87b840fc37
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/source/s3_util.c
@@ -0,0 +1,558 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include "aws/s3/private/s3_util.h"
+#include "aws/s3/private/s3_client_impl.h"
+#include <aws/auth/credentials.h>
+#include <aws/common/string.h>
+#include <aws/common/xml_parser.h>
+#include <aws/http/request_response.h>
+#include <aws/s3/s3.h>
+#include <aws/s3/s3_client.h>
+#include <inttypes.h>
+
+#ifdef _MSC_VER
+/* sscanf warning (not currently scanning for strings) */
+# pragma warning(disable : 4996)
+#endif
+
+const struct aws_byte_cursor g_s3_client_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(AWS_S3_CLIENT_VERSION);
+const struct aws_byte_cursor g_s3_service_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("s3");
+const struct aws_byte_cursor g_host_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Host");
+const struct aws_byte_cursor g_range_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Range");
+const struct aws_byte_cursor g_if_match_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("If-Match");
+const struct aws_byte_cursor g_etag_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ETag");
+const struct aws_byte_cursor g_content_range_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Range");
+const struct aws_byte_cursor g_content_type_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Type");
+const struct aws_byte_cursor g_content_encoding_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Encoding");
+const struct aws_byte_cursor g_content_encoding_header_aws_chunked =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("aws-chunked");
+const struct aws_byte_cursor g_content_length_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-Length");
+const struct aws_byte_cursor g_decoded_content_length_header_name =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-decoded-content-length");
+const struct aws_byte_cursor g_content_md5_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Content-MD5");
+const struct aws_byte_cursor g_trailer_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-trailer");
+const struct aws_byte_cursor g_request_validation_mode = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-mode");
+const struct aws_byte_cursor g_enabled = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("enabled");
+
+const struct aws_byte_cursor g_create_mpu_checksum_header_name =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-algorithm");
+const struct aws_byte_cursor g_crc32c_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-crc32c");
+const struct aws_byte_cursor g_crc32_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-crc32");
+const struct aws_byte_cursor g_sha1_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-sha1");
+const struct aws_byte_cursor g_sha256_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-checksum-sha256");
+const struct aws_byte_cursor g_crc32c_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRC32C");
+const struct aws_byte_cursor g_crc32_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRC32");
+const struct aws_byte_cursor g_sha1_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SHA1");
+const struct aws_byte_cursor g_sha256_create_mpu_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SHA256");
+const struct aws_byte_cursor g_crc32c_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumCRC32C");
+const struct aws_byte_cursor g_crc32_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumCRC32");
+const struct aws_byte_cursor g_sha1_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumSHA1");
+const struct aws_byte_cursor g_sha256_complete_mpu_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("ChecksumSHA256");
+const struct aws_byte_cursor g_accept_ranges_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("accept-ranges");
+const struct aws_byte_cursor g_acl_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("x-amz-acl");
+const struct aws_byte_cursor g_post_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("POST");
+const struct aws_byte_cursor g_head_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("HEAD");
+const struct aws_byte_cursor g_delete_method = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("DELETE");
+
+const struct aws_byte_cursor g_user_agent_header_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("User-Agent");
+const struct aws_byte_cursor g_user_agent_header_product_name =
+ AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("CRTS3NativeClient");
+
+const struct aws_byte_cursor g_error_body_xml_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Error");
+const struct aws_byte_cursor g_code_body_xml_name = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("Code");
+
+const struct aws_byte_cursor g_s3_internal_error_code = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("InternalError");
+const struct aws_byte_cursor g_s3_slow_down_error_code = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("SlowDown");
+/* A special error code that S3 can return as an asynchronous error code */
+const struct aws_byte_cursor g_s3_internal_errors_code = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("InternalErrors");
+
+const uint32_t g_s3_max_num_upload_parts = 10000;
+const size_t g_s3_min_upload_part_size = MB_TO_BYTES(5);
+
+void copy_http_headers(const struct aws_http_headers *src, struct aws_http_headers *dest) {
+ AWS_PRECONDITION(src);
+ AWS_PRECONDITION(dest);
+
+ size_t headers_count = aws_http_headers_count(src);
+
+ for (size_t header_index = 0; header_index < headers_count; ++header_index) {
+ struct aws_http_header header;
+
+ aws_http_headers_get_index(src, header_index, &header);
+ aws_http_headers_set(dest, header.name, header.value);
+ }
+}
+
+struct top_level_xml_tag_value_with_root_value_user_data {
+ struct aws_allocator *allocator;
+ const struct aws_byte_cursor *tag_name;
+ const struct aws_byte_cursor *expected_root_name;
+ bool *root_name_mismatch;
+ struct aws_string *result;
+};
+
+static bool s_top_level_xml_tag_value_child_xml_node(
+ struct aws_xml_parser *parser,
+ struct aws_xml_node *node,
+ void *user_data) {
+
+ struct aws_byte_cursor node_name;
+
+ /* If we can't get the name of the node, stop traversing. */
+ if (aws_xml_node_get_name(node, &node_name)) {
+ return false;
+ }
+
+ struct top_level_xml_tag_value_with_root_value_user_data *xml_user_data = user_data;
+
+ /* If the name of the node is what we are looking for, store the body of the node in our result, and stop
+ * traversing. */
+ if (aws_byte_cursor_eq(&node_name, xml_user_data->tag_name)) {
+
+ struct aws_byte_cursor node_body;
+ aws_xml_node_as_body(parser, node, &node_body);
+
+ xml_user_data->result = aws_string_new_from_cursor(xml_user_data->allocator, &node_body);
+
+ return false;
+ }
+
+ /* If we made it here, the tag hasn't been found yet, so return true to keep looking. */
+ return true;
+}
+
+static bool s_top_level_xml_tag_value_root_xml_node(
+ struct aws_xml_parser *parser,
+ struct aws_xml_node *node,
+ void *user_data) {
+ struct top_level_xml_tag_value_with_root_value_user_data *xml_user_data = user_data;
+ if (xml_user_data->expected_root_name) {
+ /* If we can't get the name of the node, stop traversing. */
+ struct aws_byte_cursor node_name;
+ if (aws_xml_node_get_name(node, &node_name)) {
+ return false;
+ }
+ if (!aws_byte_cursor_eq(&node_name, xml_user_data->expected_root_name)) {
+ /* The root name does not match the expected name; stop parsing. */
+ *xml_user_data->root_name_mismatch = true;
+ return false;
+ }
+ }
+
+ /* Traverse the root node, and then return false to stop. */
+ aws_xml_node_traverse(parser, node, s_top_level_xml_tag_value_child_xml_node, user_data);
+ return false;
+}
+
+struct aws_string *aws_xml_get_top_level_tag_with_root_name(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *tag_name,
+ const struct aws_byte_cursor *expected_root_name,
+ bool *out_root_name_mismatch,
+ struct aws_byte_cursor *xml_body) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(tag_name);
+ AWS_PRECONDITION(xml_body);
+
+ struct aws_xml_parser_options parser_options = {.doc = *xml_body};
+ struct aws_xml_parser *parser = aws_xml_parser_new(allocator, &parser_options);
+ bool root_name_mismatch = false;
+
+ struct top_level_xml_tag_value_with_root_value_user_data xml_user_data = {
+ allocator,
+ tag_name,
+ expected_root_name,
+ &root_name_mismatch,
+ NULL,
+ };
+
+ if (aws_xml_parser_parse(parser, s_top_level_xml_tag_value_root_xml_node, (void *)&xml_user_data)) {
+ aws_string_destroy(xml_user_data.result);
+ xml_user_data.result = NULL;
+ goto clean_up;
+ }
+ if (out_root_name_mismatch) {
+ *out_root_name_mismatch = root_name_mismatch;
+ }
+
+clean_up:
+
+ aws_xml_parser_destroy(parser);
+
+ return xml_user_data.result;
+}
+
+struct aws_string *aws_xml_get_top_level_tag(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *tag_name,
+ struct aws_byte_cursor *xml_body) {
+ return aws_xml_get_top_level_tag_with_root_name(allocator, tag_name, NULL, NULL, xml_body);
+}
+
+struct aws_cached_signing_config_aws *aws_cached_signing_config_new(
+ struct aws_allocator *allocator,
+ const struct aws_signing_config_aws *signing_config) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(signing_config);
+
+ struct aws_cached_signing_config_aws *cached_signing_config =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_cached_signing_config_aws));
+
+ cached_signing_config->allocator = allocator;
+
+ cached_signing_config->config.config_type = signing_config->config_type;
+ cached_signing_config->config.algorithm = signing_config->algorithm;
+ cached_signing_config->config.signature_type = signing_config->signature_type;
+
+ AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->region));
+
+ if (signing_config->region.len > 0) {
+ cached_signing_config->region = aws_string_new_from_cursor(allocator, &signing_config->region);
+
+ cached_signing_config->config.region = aws_byte_cursor_from_string(cached_signing_config->region);
+ }
+
+ AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->service));
+
+ if (signing_config->service.len > 0) {
+ cached_signing_config->service = aws_string_new_from_cursor(allocator, &signing_config->service);
+
+ cached_signing_config->config.service = aws_byte_cursor_from_string(cached_signing_config->service);
+ }
+
+ cached_signing_config->config.date = signing_config->date;
+
+ cached_signing_config->config.should_sign_header = signing_config->should_sign_header;
+ cached_signing_config->config.flags = signing_config->flags;
+
+ AWS_ASSERT(aws_byte_cursor_is_valid(&signing_config->signed_body_value));
+
+ if (signing_config->signed_body_value.len > 0) {
+ cached_signing_config->signed_body_value =
+ aws_string_new_from_cursor(allocator, &signing_config->signed_body_value);
+
+ cached_signing_config->config.signed_body_value =
+ aws_byte_cursor_from_string(cached_signing_config->signed_body_value);
+ }
+
+ cached_signing_config->config.signed_body_header = signing_config->signed_body_header;
+
+ if (signing_config->credentials != NULL) {
+ aws_credentials_acquire(signing_config->credentials);
+ cached_signing_config->config.credentials = signing_config->credentials;
+ }
+
+ if (signing_config->credentials_provider != NULL) {
+ aws_credentials_provider_acquire(signing_config->credentials_provider);
+ cached_signing_config->config.credentials_provider = signing_config->credentials_provider;
+ }
+
+ cached_signing_config->config.expiration_in_seconds = signing_config->expiration_in_seconds;
+
+ return cached_signing_config;
+}
+
+void aws_cached_signing_config_destroy(struct aws_cached_signing_config_aws *cached_signing_config) {
+ if (cached_signing_config == NULL) {
+ return;
+ }
+
+ aws_credentials_release(cached_signing_config->config.credentials);
+ aws_credentials_provider_release(cached_signing_config->config.credentials_provider);
+
+ aws_string_destroy(cached_signing_config->service);
+ aws_string_destroy(cached_signing_config->region);
+ aws_string_destroy(cached_signing_config->signed_body_value);
+
+ aws_mem_release(cached_signing_config->allocator, cached_signing_config);
+}
+
+void aws_s3_init_default_signing_config(
+ struct aws_signing_config_aws *signing_config,
+ const struct aws_byte_cursor region,
+ struct aws_credentials_provider *credentials_provider) {
+ AWS_PRECONDITION(signing_config);
+ AWS_PRECONDITION(credentials_provider);
+
+ AWS_ZERO_STRUCT(*signing_config);
+
+ signing_config->config_type = AWS_SIGNING_CONFIG_AWS;
+ signing_config->algorithm = AWS_SIGNING_ALGORITHM_V4;
+ signing_config->credentials_provider = credentials_provider;
+ signing_config->region = region;
+ signing_config->service = g_s3_service_name;
+ signing_config->signed_body_header = AWS_SBHT_X_AMZ_CONTENT_SHA256;
+ signing_config->signed_body_value = g_aws_signed_body_value_unsigned_payload;
+}
+
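+/* Replaces each "&quot;" entity in the input string with a literal '"' character; e.g. an ETag
+ * body of &quot;abc123&quot; becomes "abc123" with real quote characters (abc123 is a placeholder). */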
+void replace_quote_entities(struct aws_allocator *allocator, struct aws_string *str, struct aws_byte_buf *out_buf) {
+ AWS_PRECONDITION(str);
+
+ aws_byte_buf_init(out_buf, allocator, str->len);
+
+ struct aws_byte_cursor quote_entity = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("&quot;");
+ struct aws_byte_cursor quote = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("\"");
+
+ size_t i = 0;
+
+ while (i < str->len) {
+ size_t chars_remaining = str->len - i;
+
+ if (chars_remaining >= quote_entity.len &&
+ !strncmp((const char *)&str->bytes[i], (const char *)quote_entity.ptr, quote_entity.len)) {
+ /* Append quote */
+ aws_byte_buf_append(out_buf, &quote);
+ i += quote_entity.len;
+ } else {
+ /* Append character */
+ struct aws_byte_cursor character_cursor = aws_byte_cursor_from_array(&str->bytes[i], 1);
+ aws_byte_buf_append(out_buf, &character_cursor);
+ ++i;
+ }
+ }
+}
+
+struct aws_string *aws_strip_quotes(struct aws_allocator *allocator, struct aws_byte_cursor in_cur) {
+
+ if (in_cur.len >= 2 && in_cur.ptr[0] == '"' && in_cur.ptr[in_cur.len - 1] == '"') {
+ aws_byte_cursor_advance(&in_cur, 1);
+ --in_cur.len;
+ }
+
+ return aws_string_new_from_cursor(allocator, &in_cur);
+}
+
+int aws_last_error_or_unknown() {
+ int error = aws_last_error();
+
+ if (error == AWS_ERROR_SUCCESS) {
+ return AWS_ERROR_UNKNOWN;
+ }
+
+ return error;
+}
+
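+/* Appends this client's product token to any existing User-Agent header. For illustration, an existing
+ * value of "MyApp/1.0" (placeholder) becomes "MyApp/1.0 CRTS3NativeClient/<client version>"; when no
+ * User-Agent header is present, the value is just "CRTS3NativeClient/<client version>". */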
+void aws_s3_add_user_agent_header(struct aws_allocator *allocator, struct aws_http_message *message) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(message);
+
+ const struct aws_byte_cursor space_delimeter = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(" ");
+ const struct aws_byte_cursor forward_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/");
+
+ const size_t user_agent_product_version_length =
+ g_user_agent_header_product_name.len + forward_slash.len + g_s3_client_version.len;
+
+ struct aws_http_headers *headers = aws_http_message_get_headers(message);
+ AWS_ASSERT(headers != NULL);
+
+ struct aws_byte_cursor current_user_agent_header;
+ AWS_ZERO_STRUCT(current_user_agent_header);
+
+ struct aws_byte_buf user_agent_buffer;
+ AWS_ZERO_STRUCT(user_agent_buffer);
+
+ if (aws_http_headers_get(headers, g_user_agent_header_name, &current_user_agent_header) == AWS_OP_SUCCESS) {
+ /* If the header was found, then create a buffer with the total size we'll need, and append the current
+ * user agent header with a trailing space. */
+ aws_byte_buf_init(
+ &user_agent_buffer,
+ allocator,
+ current_user_agent_header.len + space_delimeter.len + user_agent_product_version_length);
+
+ aws_byte_buf_append_dynamic(&user_agent_buffer, &current_user_agent_header);
+
+ aws_byte_buf_append_dynamic(&user_agent_buffer, &space_delimeter);
+
+ } else {
+ AWS_ASSERT(aws_last_error() == AWS_ERROR_HTTP_HEADER_NOT_FOUND);
+
+ /* If the header was not found, then create a buffer with just the size of the user agent string that is about
+ * to be appended to the buffer. */
+ aws_byte_buf_init(&user_agent_buffer, allocator, user_agent_product_version_length);
+ }
+
+ /* Append the client's user-agent string. */
+ {
+ aws_byte_buf_append_dynamic(&user_agent_buffer, &g_user_agent_header_product_name);
+ aws_byte_buf_append_dynamic(&user_agent_buffer, &forward_slash);
+ aws_byte_buf_append_dynamic(&user_agent_buffer, &g_s3_client_version);
+ }
+
+ /* Apply the updated header. */
+ aws_http_headers_set(headers, g_user_agent_header_name, aws_byte_cursor_from_buf(&user_agent_buffer));
+
+ /* Clean up the scratch buffer. */
+ aws_byte_buf_clean_up(&user_agent_buffer);
+}
+
+int aws_s3_parse_content_range_response_header(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *response_headers,
+ uint64_t *out_range_start,
+ uint64_t *out_range_end,
+ uint64_t *out_object_size) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(response_headers);
+
+ struct aws_byte_cursor content_range_header_value;
+
+ if (aws_http_headers_get(response_headers, g_content_range_header_name, &content_range_header_value)) {
+ aws_raise_error(AWS_ERROR_S3_MISSING_CONTENT_RANGE_HEADER);
+ return AWS_OP_ERR;
+ }
+
+ int result = AWS_OP_ERR;
+
+ uint64_t range_start = 0;
+ uint64_t range_end = 0;
+ uint64_t object_size = 0;
+
+ struct aws_string *content_range_header_value_str =
+ aws_string_new_from_cursor(allocator, &content_range_header_value);
+
+ /* Expected format of the header is: "bytes StartByte-EndByte/TotalObjectSize" */
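+ /* e.g. "bytes 0-8388607/16777216" (illustrative values only) */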
+ int num_fields_found = sscanf(
+ (const char *)content_range_header_value_str->bytes,
+ "bytes %" PRIu64 "-%" PRIu64 "/%" PRIu64,
+ &range_start,
+ &range_end,
+ &object_size);
+
+ if (num_fields_found < 3) {
+ aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_RANGE_HEADER);
+ goto clean_up;
+ }
+
+ if (out_range_start != NULL) {
+ *out_range_start = range_start;
+ }
+
+ if (out_range_end != NULL) {
+ *out_range_end = range_end;
+ }
+
+ if (out_object_size != NULL) {
+ *out_object_size = object_size;
+ }
+
+ result = AWS_OP_SUCCESS;
+
+clean_up:
+ aws_string_destroy(content_range_header_value_str);
+ content_range_header_value_str = NULL;
+
+ return result;
+}
+
+int aws_s3_parse_content_length_response_header(
+ struct aws_allocator *allocator,
+ struct aws_http_headers *response_headers,
+ uint64_t *out_content_length) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(response_headers);
+ AWS_PRECONDITION(out_content_length);
+
+ struct aws_byte_cursor content_length_header_value;
+
+ if (aws_http_headers_get(response_headers, g_content_length_header_name, &content_length_header_value)) {
+ aws_raise_error(AWS_ERROR_S3_MISSING_CONTENT_LENGTH_HEADER);
+ return AWS_OP_ERR;
+ }
+
+ struct aws_string *content_length_header_value_str =
+ aws_string_new_from_cursor(allocator, &content_length_header_value);
+
+ int result = AWS_OP_ERR;
+
+ if (sscanf((const char *)content_length_header_value_str->bytes, "%" PRIu64, out_content_length) == 1) {
+ result = AWS_OP_SUCCESS;
+ } else {
+ aws_raise_error(AWS_ERROR_S3_INVALID_CONTENT_LENGTH_HEADER);
+ }
+
+ aws_string_destroy(content_length_header_value_str);
+ return result;
+}
+
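+/* Worked example (illustrative numbers): for part_size = 8 MiB and an object range of
+ * [5 MiB, 40 MiB - 1], the first part is shrunk to 3 MiB so that later parts start on 8 MiB
+ * boundaries, giving part sizes of 3 + 8 + 8 + 8 + 8 MiB, i.e. 5 parts. */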
+uint32_t aws_s3_get_num_parts(size_t part_size, uint64_t object_range_start, uint64_t object_range_end) {
+ uint32_t num_parts = 1;
+
+ uint64_t first_part_size = part_size;
+ uint64_t first_part_alignment_offset = object_range_start % part_size;
+
+ /* If the first part size isn't aligned on the assumed part boundary, make it smaller so that it is. */
+ if (first_part_alignment_offset > 0) {
+ first_part_size = part_size - first_part_alignment_offset;
+ }
+
+ uint64_t second_part_start = object_range_start + first_part_size;
+
+ /* If the range has room for a second part, calculate the number of additional parts. */
+ if (second_part_start <= object_range_end) {
+ uint64_t aligned_range_remainder = object_range_end + 1 - second_part_start;
+ num_parts += (uint32_t)(aligned_range_remainder / (uint64_t)part_size);
+
+ if ((aligned_range_remainder % part_size) > 0) {
+ ++num_parts;
+ }
+ }
+
+ return num_parts;
+}
+
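+/* Continuing the worked example above (illustrative numbers): with part_size = 8 MiB and an object
+ * range of [5 MiB, 40 MiB - 1], part 1 covers [5 MiB, 8 MiB - 1] and part 2 covers [8 MiB, 16 MiB - 1]. */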
+void aws_s3_get_part_range(
+ uint64_t object_range_start,
+ uint64_t object_range_end,
+ size_t part_size,
+ uint32_t part_number,
+ uint64_t *out_part_range_start,
+ uint64_t *out_part_range_end) {
+ AWS_PRECONDITION(out_part_range_start);
+ AWS_PRECONDITION(out_part_range_end);
+
+ AWS_ASSERT(part_number > 0);
+
+ const uint32_t part_index = part_number - 1;
+
+ /* Part index is assumed to be in a valid range. */
+ AWS_ASSERT(part_index < aws_s3_get_num_parts(part_size, object_range_start, object_range_end));
+
+ uint64_t part_size_uint64 = (uint64_t)part_size;
+ uint64_t first_part_size = part_size_uint64;
+ uint64_t first_part_alignment_offset = object_range_start % part_size_uint64;
+
+ /* Shrink the part to a smaller size if need be to align to the assumed part boundary. */
+ if (first_part_alignment_offset > 0) {
+ first_part_size = part_size_uint64 - first_part_alignment_offset;
+ }
+
+ if (part_index == 0) {
+ /* If this is the first part, then use the first part size. */
+ *out_part_range_start = object_range_start;
+ *out_part_range_end = *out_part_range_start + first_part_size - 1;
+ } else {
+ /* Otherwise, compute the part start as the object range start + the sizes of all whole parts
+ * before this one + the first (possibly shortened) part size. */
+ *out_part_range_start = object_range_start + ((uint64_t)(part_index - 1)) * part_size_uint64 + first_part_size;
+ *out_part_range_end = *out_part_range_start + part_size_uint64 - 1;
+ }
+
+ /* Cap the part's range end using the object's range end. */
+ if (*out_part_range_end > object_range_end) {
+ *out_part_range_end = object_range_end;
+ }
+}
+
+int aws_s3_crt_error_code_from_server_error_code_string(const struct aws_string *error_code_string) {
+ if (aws_string_eq_byte_cursor(error_code_string, &g_s3_slow_down_error_code)) {
+ return AWS_ERROR_S3_SLOW_DOWN;
+ }
+ if (aws_string_eq_byte_cursor(error_code_string, &g_s3_internal_error_code) ||
+ aws_string_eq_byte_cursor(error_code_string, &g_s3_internal_errors_code)) {
+ return AWS_ERROR_S3_INTERNAL_ERROR;
+ }
+ return AWS_ERROR_UNKNOWN;
+}
diff --git a/contrib/restricted/aws/aws-c-s3/ya.make b/contrib/restricted/aws/aws-c-s3/ya.make
new file mode 100644
index 0000000000..f67086f61c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-s3/ya.make
@@ -0,0 +1,78 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.2.8)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-s3/archive/v0.2.8.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-auth
+ contrib/restricted/aws/aws-c-cal
+ contrib/restricted/aws/aws-c-common
+ contrib/restricted/aws/aws-c-http
+ contrib/restricted/aws/aws-c-io
+ contrib/restricted/aws/aws-c-sdkutils
+ contrib/restricted/aws/aws-checksums
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-s3/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DHAVE_SYSCONF
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+)
+
+SRCS(
+ source/s3.c
+ source/s3_auto_ranged_get.c
+ source/s3_auto_ranged_put.c
+ source/s3_checksum_stream.c
+ source/s3_checksums.c
+ source/s3_chunk_stream.c
+ source/s3_client.c
+ source/s3_copy_object.c
+ source/s3_default_meta_request.c
+ source/s3_endpoint.c
+ source/s3_list_objects.c
+ source/s3_list_parts.c
+ source/s3_meta_request.c
+ source/s3_paginator.c
+ source/s3_request.c
+ source/s3_request_messages.c
+ source/s3_util.c
+)
+
+END()
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..dae0956520
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..dae0956520
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..7aa506361e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,34 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..7aa506361e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,34 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ contrib-libs-linux-headers
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..dae0956520
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-c-sdkutils)
+target_compile_options(restricted-aws-aws-c-sdkutils PRIVATE
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-c-sdkutils PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/include
+)
+target_link_libraries(restricted-aws-aws-c-sdkutils PUBLIC
+ restricted-aws-aws-c-common
+)
+target_sources(restricted-aws-aws-c-sdkutils PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
+)
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..5b627cfa60
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md b/contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md
new file mode 100644
index 0000000000..c4b6a1c508
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/CONTRIBUTING.md
@@ -0,0 +1,59 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check existing open, or recently closed, issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Pay attention to any automated CI failures reported in the pull request, and stay involved in the conversation.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute on. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any 'help wanted' issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public github issue.
+
+
+## Licensing
+
+See the [LICENSE](LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/LICENSE b/contrib/restricted/aws/aws-c-sdkutils/LICENSE
new file mode 100644
index 0000000000..67db858821
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/LICENSE
@@ -0,0 +1,175 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/NOTICE b/contrib/restricted/aws/aws-c-sdkutils/NOTICE
new file mode 100644
index 0000000000..616fc58894
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/NOTICE
@@ -0,0 +1 @@
+Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
diff --git a/contrib/restricted/aws/aws-c-sdkutils/README.md b/contrib/restricted/aws/aws-c-sdkutils/README.md
new file mode 100644
index 0000000000..12d391f5be
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/README.md
@@ -0,0 +1,29 @@
+## AWS C SDKUTILS
+
+C99 library implementing AWS SDK specific utilities. Includes utilities for ARN
+parsing, reading AWS profiles, etc...
+
+## License
+
+This library is licensed under the Apache 2.0 License.
+
+## Usage
+
+### Building
+
+CMake 3.0+ is required to build.
+
+`<install-path>` must be an absolute path in the following instructions.
+
+
+#### Building aws-c-sdkutils
+
+```
+git clone git@github.com:awslabs/aws-c-common.git
+cmake -S aws-c-common -B aws-c-common/build -DCMAKE_INSTALL_PREFIX=<install-path>
+cmake --build aws-c-common/build --target install
+
+git clone git@github.com:awslabs/aws-c-sdkutils.git
+cmake -S aws-c-sdkutils -B aws-c-sdkutils/build -DCMAKE_INSTALL_PREFIX=<install-path> -DCMAKE_PREFIX_PATH=<install-path>
+cmake --build aws-c-sdkutils/build --target install
+``` \ No newline at end of file
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h
new file mode 100644
index 0000000000..5a200654cc
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/aws_profile.h
@@ -0,0 +1,218 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_AWS_PROFILE_H
+#define AWS_SDKUTILS_AWS_PROFILE_H
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_allocator;
+struct aws_string;
+struct aws_byte_buf;
+struct aws_byte_cursor;
+
+/*
+ * A set of data types that model the aws profile specification
+ *
+ * A profile collection is a collection of zero or more named profiles
+ * Each profile is a set of properties (named key-value pairs)
+ * Empty-valued properties may have sub properties (named key-value pairs)
+ *
+ * Resolution rules exist to determine what profile to use, what files to
+ * read profile collections from, and what types of credentials have priority.
+ *
+ * The profile specification is informally defined as "what the aws cli does" and
+ * formally defined in internal aws documents.
+ */
+struct aws_profile_property;
+struct aws_profile;
+struct aws_profile_collection;
+
+/**
+ * The profile specification has rule exceptions based on what file
+ * the profile collection comes from.
+ */
+enum aws_profile_source_type { AWS_PST_NONE, AWS_PST_CONFIG, AWS_PST_CREDENTIALS };
+
+/*
+ * The collection can hold different types of sections.
+ */
+enum aws_profile_section_type {
+ AWS_PROFILE_SECTION_TYPE_PROFILE,
+ AWS_PROFILE_SECTION_TYPE_SSO_SESSION,
+
+ AWS_PROFILE_SECTION_TYPE_COUNT,
+};
+
+AWS_EXTERN_C_BEGIN
+
+/*************************
+ * Profile collection APIs
+ *************************/
+
+/**
+ * Increments the reference count on the profile collection, allowing the caller to take a reference to it.
+ *
+ * Returns the same profile collection passed in.
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_acquire(struct aws_profile_collection *collection);
+
+/**
+ * Decrements a profile collection's ref count. When the ref count drops to zero, the collection will be destroyed.
+ * Returns NULL.
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_release(struct aws_profile_collection *collection);
+
+/**
+ * @Deprecated This is equivalent to aws_profile_collection_release.
+ */
+AWS_SDKUTILS_API
+void aws_profile_collection_destroy(struct aws_profile_collection *profile_collection);
+
+/**
+ * Create a new profile collection by parsing a file with the specified path
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_new_from_file(
+ struct aws_allocator *allocator,
+ const struct aws_string *file_path,
+ enum aws_profile_source_type source);
+
+/**
+ * Create a new profile collection by merging a config-file-based profile
+ * collection and a credentials-file-based profile collection
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_new_from_merge(
+ struct aws_allocator *allocator,
+ const struct aws_profile_collection *config_profiles,
+ const struct aws_profile_collection *credentials_profiles);
+
+/**
+ * Create a new profile collection by parsing text in a buffer. Primarily
+ * for testing.
+ */
+AWS_SDKUTILS_API
+struct aws_profile_collection *aws_profile_collection_new_from_buffer(
+ struct aws_allocator *allocator,
+ const struct aws_byte_buf *buffer,
+ enum aws_profile_source_type source);
+
+/**
+ * Retrieves a reference to a profile with the specified name, if it exists, from the profile collection
+ */
+AWS_SDKUTILS_API
+const struct aws_profile *aws_profile_collection_get_profile(
+ const struct aws_profile_collection *profile_collection,
+ const struct aws_string *profile_name);
+
+/*
+ * Retrieves a reference to a section with the specified name and type, if it exists, from the profile collection.
+ * You can get the "default" profile or credentials file sections by passing `AWS_PROFILE_SECTION_TYPE_PROFILE`
+ */
+AWS_SDKUTILS_API
+const struct aws_profile *aws_profile_collection_get_section(
+ const struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type,
+ const struct aws_string *section_name);
+
+/**
+ * Returns the number of profiles in a collection
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_collection_get_profile_count(const struct aws_profile_collection *profile_collection);
+
+/**
+ * Returns the number of elements of the specified section in a collection.
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_collection_get_section_count(
+ const struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type);
+
+/**
+ * Returns a reference to the name of the provided profile
+ */
+AWS_SDKUTILS_API
+const struct aws_string *aws_profile_get_name(const struct aws_profile *profile);
+
+/**************
+ * profile APIs
+ **************/
+
+/**
+ * Retrieves a reference to a property with the specified name, if it exists, from a profile
+ */
+AWS_SDKUTILS_API
+const struct aws_profile_property *aws_profile_get_property(
+ const struct aws_profile *profile,
+ const struct aws_string *property_name);
+
+/**
+ * Returns how many properties a profile holds
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_get_property_count(const struct aws_profile *profile);
+
+/**
+ * Returns a reference to the property's string value
+ */
+AWS_SDKUTILS_API
+const struct aws_string *aws_profile_property_get_value(const struct aws_profile_property *property);
+
+/***********************
+ * profile property APIs
+ ***********************/
+
+/**
+ * Returns a reference to the value of a sub property with the given name, if it exists, in the property
+ */
+AWS_SDKUTILS_API
+const struct aws_string *aws_profile_property_get_sub_property(
+ const struct aws_profile_property *property,
+ const struct aws_string *sub_property_name);
+
+/**
+ * Returns how many sub properties the property holds
+ */
+AWS_SDKUTILS_API
+size_t aws_profile_property_get_sub_property_count(const struct aws_profile_property *property);
+
+/***********
+ * Misc APIs
+ ***********/
+
+/**
+ * Computes the final platform-specific path for the profile credentials file. Does limited home directory
+ * expansion/resolution.
+ *
+ * override_path, if not null, will be searched first instead of using the standard home directory config path
+ */
+AWS_SDKUTILS_API
+struct aws_string *aws_get_credentials_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path);
+
+/**
+ * Computes the final platform-specific path for the profile config file. Does limited home directory
+ * expansion/resolution.
+ *
+ * override_path, if not null, will be searched first instead of using the standard home directory config path
+ */
+AWS_SDKUTILS_API
+struct aws_string *aws_get_config_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path);
+
+/**
+ * Computes the profile to use for credentials lookups based on profile resolution rules
+ */
+AWS_SDKUTILS_API
+struct aws_string *aws_get_profile_name(struct aws_allocator *allocator, const struct aws_byte_cursor *override_name);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_AWS_PROFILE_H */
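For orientation, the profile APIs declared above compose into a simple read path: resolve the config file location, parse it into a collection, then look up profiles and properties by name. Below is a minimal sketch, assuming the aws-c-common helpers aws_default_allocator, aws_string_new_from_c_str, aws_string_c_str and the usual aws_sdkutils_library_init/clean_up pattern, none of which appear in this hunk; error handling is trimmed to NULL checks.

```c
#include <aws/common/string.h>
#include <aws/sdkutils/aws_profile.h>

#include <stdio.h>

int main(void) {
    struct aws_allocator *alloc = aws_default_allocator();
    aws_sdkutils_library_init(alloc); /* assumed standard aws-c-* init call from sdkutils.h */

    /* Resolve the platform-specific config path (no override) and parse it as a config-type collection. */
    struct aws_string *config_path = aws_get_config_file_path(alloc, NULL);
    if (config_path != NULL) {
        struct aws_profile_collection *profiles =
            aws_profile_collection_new_from_file(alloc, config_path, AWS_PST_CONFIG);
        if (profiles != NULL) {
            /* Profile name follows the usual resolution rules (override cursor is NULL here). */
            struct aws_string *profile_name = aws_get_profile_name(alloc, NULL);
            const struct aws_profile *profile =
                aws_profile_collection_get_profile(profiles, profile_name);
            if (profile != NULL) {
                struct aws_string *key = aws_string_new_from_c_str(alloc, "region");
                const struct aws_profile_property *prop = aws_profile_get_property(profile, key);
                if (prop != NULL) {
                    printf("region = %s\n", aws_string_c_str(aws_profile_property_get_value(prop)));
                }
                aws_string_destroy(key);
            }
            aws_string_destroy(profile_name);
            aws_profile_collection_release(profiles);
        }
        aws_string_destroy(config_path);
    }

    aws_sdkutils_library_clean_up();
    return 0;
}
```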
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h
new file mode 100644
index 0000000000..701ba1bd93
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/endpoints_rule_engine.h
@@ -0,0 +1,303 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_ENDPOINTS_RULESET_H
+#define AWS_SDKUTILS_ENDPOINTS_RULESET_H
+
+#include <aws/common/byte_buf.h>
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_endpoints_ruleset;
+struct aws_partitions_config;
+struct aws_endpoints_parameter;
+struct aws_endpoints_rule_engine;
+struct aws_endpoints_resolved_endpoint;
+struct aws_endpoints_request_context;
+struct aws_hash_table;
+
+enum aws_endpoints_parameter_type { AWS_ENDPOINTS_PARAMETER_STRING, AWS_ENDPOINTS_PARAMETER_BOOLEAN };
+enum aws_endpoints_resolved_endpoint_type { AWS_ENDPOINTS_RESOLVED_ENDPOINT, AWS_ENDPOINTS_RESOLVED_ERROR };
+
+AWS_EXTERN_C_BEGIN
+
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_get_supported_ruleset_version(void);
+
+/*
+******************************
+* Parameter
+******************************
+*/
+
+/*
+ * Value type of parameter.
+ */
+AWS_SDKUTILS_API enum aws_endpoints_parameter_type aws_endpoints_parameter_get_type(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Specifies whether parameter maps to one of SDK built ins (ex. "AWS::Region").
+ * Return is a cursor specifying the name of associated built in.
+ * If there is no mapping, cursor will be empty.
+ * Cursor is guaranteed to be valid for the lifetime of the parameter.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_built_in(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Default string value.
+ * out_cursor will point to the default string value if one exists and will be empty
+ * otherwise.
+ * Cursor is guaranteed to be valid for the lifetime of the parameter.
+ * Returns AWS_OP_ERR if parameter is not a string.
+ */
+AWS_SDKUTILS_API int aws_endpoints_parameter_get_default_string(
+ const struct aws_endpoints_parameter *parameter,
+ struct aws_byte_cursor *out_cursor);
+
+/*
+ * Default boolean value.
+ * out_bool will have pointer to value if default is specified, NULL otherwise.
+ * Owned by parameter.
+ * Returns AWS_OP_ERR if parameter is not a boolean.
+ */
+AWS_SDKUTILS_API int aws_endpoints_parameter_get_default_boolean(
+ const struct aws_endpoints_parameter *parameter,
+ const bool **out_bool);
+
+/*
+ * Whether parameter is required.
+ */
+AWS_SDKUTILS_API bool aws_endpoints_parameter_get_is_required(const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Returns cursor to parameter documentation.
+ * Cursor is guaranteed to be valid for the lifetime of the parameter.
+ * Will not be empty as doc is required.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_documentation(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Whether parameter is deprecated.
+ */
+AWS_SDKUTILS_API bool aws_endpoints_parameters_get_is_deprecated(const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Deprecation message. Cursor is empty if parameter is not deprecated.
+ * Cursor is guaranteed to be valid for the lifetime of the parameter.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_message(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+ * Deprecated since. Cursor is empty if parameter is not deprecated.
+ * Cursor is guaranteed to be valid for the lifetime of the parameter.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_since(
+ const struct aws_endpoints_parameter *parameter);
+
+/*
+******************************
+* Ruleset
+******************************
+*/
+
+/*
+ * Create new ruleset from a json string.
+ * In cases of failure NULL is returned and last error is set.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor ruleset_json);
+
+/*
+ * Increment ref count
+ */
+AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_acquire(struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Decrement ref count
+ */
+AWS_SDKUTILS_API struct aws_endpoints_ruleset *aws_endpoints_ruleset_release(struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Get ruleset parameters.
+ * Return is a hashtable with parameter name as a key (aws_byte_cursor *) and parameter
+ * (aws_endpoints_parameter *) as a value. Ruleset owns the hashtable and
+ * pointer is valid during ruleset lifetime. Will never return a NULL. In case
+ * there are no parameters in the ruleset, hash table will contain 0 elements.
+ *
+ * Note on usage in bindings:
+ * - this is basically a map from a parameter name to a structure describing parameter
+ * - deep copy all the fields and let language take ownership of data
+ * Consider transforming this into language specific map (dict for python, Map
+ * in Java, std::map in C++, etc...) instead of wrapping it into a custom class.
+ */
+AWS_SDKUTILS_API const struct aws_hash_table *aws_endpoints_ruleset_get_parameters(
+ struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Ruleset version.
+ * Returned pointer is owned by ruleset.
+ * Will not return NULL as version is a required field for ruleset.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_ruleset_get_version(const struct aws_endpoints_ruleset *ruleset);
+
+/*
+ * Ruleset service id.
+ * Returned pointer is owned by ruleset.
+ * Can be NULL if not specified in ruleset.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_endpoints_ruleset_get_service_id(
+ const struct aws_endpoints_ruleset *ruleset);
+
+/*
+******************************
+* Rule engine
+******************************
+*/
+
+/**
+ * Create new rule engine for a given ruleset.
+ * In cases of failure NULL is returned and last error is set.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_new(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_ruleset *ruleset,
+ struct aws_partitions_config *partitions_config);
+
+/*
+ * Increment rule engine ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_acquire(
+ struct aws_endpoints_rule_engine *rule_engine);
+
+/*
+ * Decrement rule engine ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_release(
+ struct aws_endpoints_rule_engine *rule_engine);
+
+/*
+ * Creates new request context.
+ * This is basically a property bag containing all request parameter values needed to
+ * resolve endpoint. Parameter value names must match parameter names specified
+ * in ruleset.
+ * Caller is responsible for releasing request context.
+ * Note on usage in bindings:
+ * - Consider exposing it as a custom property bag or a standard map and then
+ * transform it into request context.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_new(
+ struct aws_allocator *allocator);
+
+/*
+ * Increment request context ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_acquire(
+ struct aws_endpoints_request_context *request_context);
+
+/*
+ * Decrement request context ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_request_context *aws_endpoints_request_context_release(
+ struct aws_endpoints_request_context *request_context);
+
+/*
+ * Add string value to request context.
+ * Note: this function will make a copy of the memory backing the cursors.
+ * The function will override any previous value stored in the context with the
+ * same name.
+ */
+AWS_SDKUTILS_API int aws_endpoints_request_context_add_string(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value);
+
+/*
+ * Add boolean value to request context.
+ * Note: this function will make a copy of the memory backing the cursors.
+ * The function will override any previous value stored in the context with the
+ * same name.
+ */
+AWS_SDKUTILS_API int aws_endpoints_request_context_add_boolean(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ bool value);
+
+/*
+ * Resolve an endpoint given request context.
+ * Resolved endpoint is returned through out_resolved_endpoint.
+ * In cases of error out_resolved_endpoint is set to NULL and error is returned.
+ * Resolved endpoint is ref counted and the caller is responsible for releasing it.
+ */
+AWS_SDKUTILS_API int aws_endpoints_rule_engine_resolve(
+ struct aws_endpoints_rule_engine *engine,
+ const struct aws_endpoints_request_context *context,
+ struct aws_endpoints_resolved_endpoint **out_resolved_endpoint);
+
+/*
+ * Increment resolved endpoint ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_acquire(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint);
+
+/*
+ * Decrement resolved endpoint ref count.
+ */
+AWS_SDKUTILS_API struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_release(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint);
+
+/*
+ * Get type of resolved endpoint.
+ */
+AWS_SDKUTILS_API enum aws_endpoints_resolved_endpoint_type aws_endpoints_resolved_endpoint_get_type(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint);
+
+/*
+ * Get url for the resolved endpoint.
+ * Valid only if resolved endpoint has endpoint type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_url(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_url);
+
+/*
+ * Get properties for the resolved endpoint.
+ * Note: properties is a json string containing additional data for a given
+ * endpoint. Data is not typed and its format is not guaranteed to stay the same in the future.
+ * For use at the caller's discretion.
+ * Valid only if resolved endpoint has endpoint type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_properties(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_properties);
+
+/*
+ * Get headers for the resolved endpoint.
+ * out_headers type is aws_hash_table with (aws_string *) as key
+ * and (aws_array_list * of aws_string *) as value.
+ * Note on usage in bindings:
+ * - this is a map to a list of strings and can be implemented as such in the
+ * target language with deep copy of all underlying strings.
+ * Valid only if resolved endpoint has endpoint type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_headers(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ const struct aws_hash_table **out_headers);
+
+/*
+ * Get error for the resolved endpoint.
+ * Valid only if resolved endpoint has error type and will error otherwise.
+ */
+AWS_SDKUTILS_API int aws_endpoints_resolved_endpoint_get_error(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_error);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_ENDPOINTS_RULESET_H */
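The rule engine API above follows a create/resolve/release flow: parse the ruleset JSON, build an engine, fill a request context with parameter values, resolve, then inspect the result. A minimal sketch under a few assumptions: ruleset_json is a hypothetical cursor over a service's endpoint ruleset (shipped with each SDK, not shown in this diff), the parameter names "Region" and "UseFIPS" are placeholders that must match that ruleset, and the partitions config argument is assumed to accept NULL here; aws_byte_cursor_from_c_str and the AWS_OP_* codes come from aws-c-common.

```c
#include <aws/common/byte_buf.h>
#include <aws/sdkutils/endpoints_rule_engine.h>

#include <stdio.h>

/* Happy-path walk through the resolver; error handling is trimmed for brevity. */
static int resolve_endpoint_sketch(struct aws_allocator *alloc, struct aws_byte_cursor ruleset_json) {
    struct aws_endpoints_ruleset *ruleset = aws_endpoints_ruleset_new_from_string(alloc, ruleset_json);
    if (ruleset == NULL) {
        return AWS_OP_ERR; /* last error was set by the parser */
    }

    /* NULL partitions config is assumed acceptable for this sketch; partition-aware rules need a real one. */
    struct aws_endpoints_rule_engine *engine = aws_endpoints_rule_engine_new(alloc, ruleset, NULL);
    struct aws_endpoints_request_context *ctx = aws_endpoints_request_context_new(alloc);

    /* Parameter names must match the parameter definitions of the ruleset in use. */
    aws_endpoints_request_context_add_string(
        alloc, ctx, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-west-2"));
    aws_endpoints_request_context_add_boolean(alloc, ctx, aws_byte_cursor_from_c_str("UseFIPS"), false);

    struct aws_endpoints_resolved_endpoint *resolved = NULL;
    int result = aws_endpoints_rule_engine_resolve(engine, ctx, &resolved);
    if (result == AWS_OP_SUCCESS && resolved != NULL) {
        if (aws_endpoints_resolved_endpoint_get_type(resolved) == AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
            struct aws_byte_cursor url;
            aws_endpoints_resolved_endpoint_get_url(resolved, &url);
            printf("endpoint: %.*s\n", (int)url.len, url.ptr);
        } else {
            struct aws_byte_cursor error;
            aws_endpoints_resolved_endpoint_get_error(resolved, &error);
            printf("ruleset error: %.*s\n", (int)error.len, error.ptr);
        }
        aws_endpoints_resolved_endpoint_release(resolved);
    }

    aws_endpoints_request_context_release(ctx);
    aws_endpoints_rule_engine_release(engine);
    aws_endpoints_ruleset_release(ruleset);
    return result;
}
```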
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h
new file mode 100644
index 0000000000..6571706e6f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/exports.h
@@ -0,0 +1,30 @@
+#ifndef AWS_SDKUTILS_EXPORTS_H
+#define AWS_SDKUTILS_EXPORTS_H
+
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef AWS_SDKUTILS_USE_IMPORT_EXPORT
+# ifdef AWS_SDKUTILS_EXPORTS
+# define AWS_SDKUTILS_API __declspec(dllexport)
+# else
+# define AWS_SDKUTILS_API __declspec(dllimport)
+# endif /* AWS_SDKUTILS_EXPORTS */
+# else
+# define AWS_SDKUTILS_API
+# endif /*USE_IMPORT_EXPORT */
+
+#else
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_SDKUTILS_USE_IMPORT_EXPORT) && \
+ defined(AWS_SDKUTILS_EXPORTS)
+# define AWS_SDKUTILS_API __attribute__((visibility("default")))
+# else
+# define AWS_SDKUTILS_API
+# endif /* __GNUC__ >= 4 || defined(__clang__) */
+
+#endif /* defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32) */
+
+#endif /* AWS_SDKUTILS_EXPORTS_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h
new file mode 100644
index 0000000000..bcbd96589c
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/partitions.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_PARTITIONS_H
+#define AWS_SDKUTILS_PARTITIONS_H
+
+#include <aws/common/byte_buf.h>
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_partitions_config;
+
+AWS_EXTERN_C_BEGIN
+
+AWS_SDKUTILS_API struct aws_byte_cursor aws_partitions_get_supported_version(void);
+
+/*
+ * Create new partitions config from a json string.
+ * In cases of failure NULL is returned and last error is set.
+ */
+AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor json);
+
+/*
+ * Increment ref count
+ */
+AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_acquire(struct aws_partitions_config *partitions);
+
+/*
+ * Decrement ref count
+ */
+AWS_SDKUTILS_API struct aws_partitions_config *aws_partitions_config_release(struct aws_partitions_config *partitions);
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_PARTITIONS_H */
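The partitions config is the optional second input to aws_endpoints_rule_engine_new() above. A minimal sketch of its lifecycle, assuming partitions_json is a caller-provided cursor over the SDK's partitions.json document (its contents are not reproduced here):

```c
#include <aws/common/byte_buf.h>
#include <aws/sdkutils/partitions.h>

/* Parse the partitions document once and share it between components via ref counting. */
static struct aws_partitions_config *load_partitions_sketch(
    struct aws_allocator *alloc, struct aws_byte_cursor partitions_json) {

    struct aws_partitions_config *partitions =
        aws_partitions_config_new_from_string(alloc, partitions_json);
    if (partitions == NULL) {
        return NULL; /* last error was set by the parser */
    }

    /* Another holder takes its own reference and drops it independently. */
    struct aws_partitions_config *shared = aws_partitions_config_acquire(partitions);
    aws_partitions_config_release(shared);

    return partitions; /* caller owns the original reference and must release it */
}
```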
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h
new file mode 100644
index 0000000000..d4d0823c96
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_types_impl.h
@@ -0,0 +1,314 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H
+#define AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H
+
+#include <aws/common/hash_table.h>
+#include <aws/common/ref_count.h>
+#include <aws/sdkutils/endpoints_rule_engine.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+struct aws_json_value;
+
+/*
+ * The rule engine is built around 2 major types:
+ * - expr - can be a literal, like a bool or a number, or an expression, like a function or a ref
+ * - value - literal types only; the result of resolving an expr. Can take a special None
+ * value depending on how the expr is resolved. E.g. accessing an array past its bounds or
+ * taking a substring with an invalid start/end combination will both result in None.
+ *
+ * There is a lot of overlap between expr and value, so why do we need both?
+ * The primary reason is to create a clean boundary between the ruleset and resolved
+ * values, as it makes it easy to distinguish between things that still need to be
+ * resolved and things that have already been lowered. Given this type system, the rule
+ * engine essentially performs the task of transforming exprs into values to get the
+ * final result.
+ *
+ * Other important types:
+ * Parameter - definition of the values that can be provided to the rule engine during
+ * resolution. Can define default values if the caller did not provide a value for the
+ * parameter.
+ * Request Context - set of parameter values defined for a particular request that
+ * are used during resolution.
+ * Scope - set of values defined during resolution of a rule. Can grow/shrink as
+ * rules are evaluated. E.g. a scope can have a value with name "Region" and value "us-west-2".
+ */
+
+/*
+******************************
+* Parse types.
+******************************
+*/
+
+enum aws_endpoints_rule_type { AWS_ENDPOINTS_RULE_ENDPOINT, AWS_ENDPOINTS_RULE_ERROR, AWS_ENDPOINTS_RULE_TREE };
+
+enum aws_endpoints_expr_type {
+ AWS_ENDPOINTS_EXPR_STRING,
+ AWS_ENDPOINTS_EXPR_NUMBER,
+ AWS_ENDPOINTS_EXPR_BOOLEAN,
+ AWS_ENDPOINTS_EXPR_ARRAY,
+ AWS_ENDPOINTS_EXPR_REFERENCE,
+ AWS_ENDPOINTS_EXPR_FUNCTION
+};
+
+enum aws_endpoints_fn_type {
+ AWS_ENDPOINTS_FN_FIRST = 0,
+ AWS_ENDPOINTS_FN_IS_SET = 0,
+ AWS_ENDPOINTS_FN_NOT,
+ AWS_ENDPOINTS_FN_GET_ATTR,
+ AWS_ENDPOINTS_FN_SUBSTRING,
+ AWS_ENDPOINTS_FN_STRING_EQUALS,
+ AWS_ENDPOINTS_FN_BOOLEAN_EQUALS,
+ AWS_ENDPOINTS_FN_URI_ENCODE,
+ AWS_ENDPOINTS_FN_PARSE_URL,
+ AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL,
+ AWS_ENDPOINTS_FN_AWS_PARTITION,
+ AWS_ENDPOINTS_FN_AWS_PARSE_ARN,
+ AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET,
+ AWS_ENDPOINTS_FN_LAST,
+};
+
+struct aws_endpoints_parameter {
+ struct aws_allocator *allocator;
+
+ struct aws_byte_cursor name;
+
+ enum aws_endpoints_parameter_type type;
+ struct aws_byte_cursor built_in;
+
+ bool has_default_value;
+ union {
+ struct aws_byte_cursor string;
+ bool boolean;
+ } default_value;
+
+ bool is_required;
+ struct aws_byte_cursor documentation;
+ bool is_deprecated;
+ struct aws_byte_cursor deprecated_message;
+ struct aws_byte_cursor deprecated_since;
+};
+
+struct aws_endpoints_ruleset {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_json_value *json_root;
+
+ /* list of (aws_endpoints_rule) */
+ struct aws_array_list rules;
+
+ struct aws_byte_cursor version;
+ struct aws_byte_cursor service_id;
+ /* map of (aws_byte_cursor *) -> (aws_endpoints_parameter *) */
+ struct aws_hash_table parameters;
+};
+
+struct aws_endpoints_function {
+ enum aws_endpoints_fn_type fn;
+ /* List of (aws_endpoints_expr) */
+ struct aws_array_list argv;
+};
+
+struct aws_endpoints_expr {
+ enum aws_endpoints_expr_type type;
+ union {
+ struct aws_byte_cursor string;
+ double number;
+ bool boolean;
+ struct aws_array_list array; /* List of (aws_endpoints_expr) */
+ struct aws_byte_cursor reference;
+ struct aws_endpoints_function function;
+ } e;
+};
+
+struct aws_endpoints_rule_data_endpoint {
+ struct aws_allocator *allocator;
+ struct aws_endpoints_expr url;
+
+ /*
+ * Note: this is a custom properties json associated with the result.
+ * Properties are unstable and format can change frequently.
+ * It's up to the caller to parse the json to retrieve properties.
+ */
+ struct aws_byte_buf properties;
+ /* Map of (aws_string *) -> (aws_array_list * of aws_endpoints_expr) */
+ struct aws_hash_table headers;
+};
+
+struct aws_endpoints_rule_data_error {
+ struct aws_endpoints_expr error;
+};
+
+struct aws_endpoints_rule_data_tree {
+ /* List of (aws_endpoints_rule) */
+ struct aws_array_list rules;
+};
+
+struct aws_endpoints_condition {
+ struct aws_endpoints_expr expr;
+ struct aws_byte_cursor assign;
+};
+
+struct aws_endpoints_rule {
+ /* List of (aws_endpoints_condition) */
+ struct aws_array_list conditions;
+ struct aws_byte_cursor documentation;
+
+ enum aws_endpoints_rule_type type;
+ union {
+ struct aws_endpoints_rule_data_endpoint endpoint;
+ struct aws_endpoints_rule_data_error error;
+ struct aws_endpoints_rule_data_tree tree;
+ } rule_data;
+};
+
+struct aws_partition_info {
+ struct aws_allocator *allocator;
+ struct aws_byte_cursor name;
+
+ bool is_copy;
+ struct aws_string *info;
+};
+
+struct aws_partitions_config {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_json_value *json_root;
+
+ /* map of (byte_cur -> aws_partition_info) */
+ struct aws_hash_table region_to_partition_info;
+
+ struct aws_string *version;
+};
+
+/*
+******************************
+* Eval types.
+******************************
+*/
+
+enum aws_endpoints_value_type {
+ /* Special value to represent that any value type is expected from resolving an expression.
+ Not a valid value for a value type. */
+ AWS_ENDPOINTS_VALUE_ANY,
+
+ AWS_ENDPOINTS_VALUE_NONE,
+ AWS_ENDPOINTS_VALUE_STRING,
+ AWS_ENDPOINTS_VALUE_BOOLEAN,
+ AWS_ENDPOINTS_VALUE_OBJECT, /* Generic type returned by some functions. json string under the covers. */
+ AWS_ENDPOINTS_VALUE_NUMBER,
+ AWS_ENDPOINTS_VALUE_ARRAY,
+
+ AWS_ENDPOINTS_VALUE_SIZE
+};
+
+struct aws_endpoints_request_context {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_hash_table values;
+};
+
+/* concrete type value */
+struct aws_endpoints_value {
+ enum aws_endpoints_value_type type;
+ union {
+ struct aws_owning_cursor owning_cursor_string;
+ bool boolean;
+ struct aws_owning_cursor owning_cursor_object;
+ double number;
+ struct aws_array_list array;
+ } v;
+};
+
+/* Wrapper around aws_endpoints_value to store it more easily in a hash table. */
+struct aws_endpoints_scope_value {
+ struct aws_allocator *allocator;
+
+ struct aws_owning_cursor name;
+
+ struct aws_endpoints_value value;
+};
+
+struct aws_endpoints_resolution_scope {
+ /* current values in scope. byte_cur -> aws_endpoints_scope_value */
+ struct aws_hash_table values;
+ /* list of value keys added since last cleanup */
+ struct aws_array_list added_keys;
+
+ /* index of the rule currently being evaluated */
+ size_t rule_idx;
+ /* pointer to rules array */
+ const struct aws_array_list *rules;
+
+ const struct aws_partitions_config *partitions;
+};
+
+struct aws_partition_info *aws_partition_info_new(struct aws_allocator *allocator, struct aws_byte_cursor name);
+void aws_partition_info_destroy(struct aws_partition_info *partition_info);
+
+struct aws_endpoints_parameter *aws_endpoints_parameter_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name);
+void aws_endpoints_parameter_destroy(struct aws_endpoints_parameter *parameter);
+
+void aws_endpoints_rule_clean_up(struct aws_endpoints_rule *rule);
+
+void aws_endpoints_rule_data_endpoint_clean_up(struct aws_endpoints_rule_data_endpoint *rule_data);
+void aws_endpoints_rule_data_error_clean_up(struct aws_endpoints_rule_data_error *rule_data);
+void aws_endpoints_rule_data_tree_clean_up(struct aws_endpoints_rule_data_tree *rule_data);
+
+void aws_endpoints_condition_clean_up(struct aws_endpoints_condition *condition);
+void aws_endpoints_function_clean_up(struct aws_endpoints_function *function);
+void aws_endpoints_expr_clean_up(struct aws_endpoints_expr *expr);
+
+struct aws_endpoints_scope_value *aws_endpoints_scope_value_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name_cur);
+void aws_endpoints_scope_value_destroy(struct aws_endpoints_scope_value *scope_value);
+
+int aws_endpoints_deep_copy_parameter_value(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_value *from,
+ struct aws_endpoints_value *to);
+
+void aws_endpoints_value_clean_up(struct aws_endpoints_value *aws_endpoints_value);
+
+/* Helper to resolve argv. Implemented in rule engine. */
+int aws_endpoints_argv_expect(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_array_list *argv,
+ size_t idx,
+ enum aws_endpoints_value_type expected_type,
+ struct aws_endpoints_value *out_value);
+
+extern uint64_t aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_LAST];
+void aws_endpoints_rule_engine_init(void);
+
+int aws_endpoints_dispatch_standard_lib_fn_resolve(
+ enum aws_endpoints_fn_type type,
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value);
+
+int aws_endpoints_path_through_array(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *eval_val,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value);
+
+int aws_endpoints_path_through_object(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_value *eval_val,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value);
+
+#endif /* AWS_SDKUTILS_ENDPOINTS_RULESET_TYPES_IMPL_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h
new file mode 100644
index 0000000000..29a4f48976
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/private/endpoints_util.h
@@ -0,0 +1,136 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#ifndef AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H
+#define AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H
+
+#include <aws/sdkutils/sdkutils.h>
+
+struct aws_string;
+struct aws_byte_buf;
+struct aws_json_value;
+
+/* Cursor that optionally owns underlying memory. */
+struct aws_owning_cursor {
+ struct aws_byte_cursor cur;
+ struct aws_string *string;
+};
+
+/* Clones string and wraps it in owning cursor. */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_create(
+ struct aws_allocator *allocator,
+ const struct aws_string *str);
+/* Creates new cursor that takes ownership of the passed-in string. */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_from_string(struct aws_string *str);
+/* Clones the memory pointed to by the cursor and wraps it in an owning cursor */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_owning_cursor_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor cur);
+/* Creates a non-owning cursor: the underlying string pointer is NULL, so no memory is owned */
+AWS_SDKUTILS_API struct aws_owning_cursor aws_endpoints_non_owning_cursor_create(struct aws_byte_cursor cur);
+
+/* Cleans up memory associated with the cursor */
+AWS_SDKUTILS_API void aws_owning_cursor_clean_up(struct aws_owning_cursor *cursor);
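+
+/*
+ * Illustrative usage sketch added by the editor (not part of the upstream
+ * header): shows the ownership semantics of aws_owning_cursor. Assumes
+ * <aws/common/string.h> and <aws/common/byte_buf.h> are available; the helper
+ * name is hypothetical.
+ */
+static inline void s_example_owning_cursor_usage(struct aws_allocator *allocator) {
+    /* Clone an existing string; the returned cursor owns the clone. */
+    struct aws_string *original = aws_string_new_from_c_str(allocator, "us-east-1");
+    struct aws_owning_cursor owned = aws_endpoints_owning_cursor_create(allocator, original);
+    aws_string_destroy(original);       /* safe: the cursor holds its own copy */
+    aws_owning_cursor_clean_up(&owned); /* releases the cloned string */
+
+    /* Wrap a literal without taking ownership of any memory. */
+    struct aws_owning_cursor borrowed =
+        aws_endpoints_non_owning_cursor_create(aws_byte_cursor_from_c_str("eu-west-1"));
+    aws_owning_cursor_clean_up(&borrowed); /* expected to be a no-op: the string pointer is NULL */
+}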
+
+/*
+ * Determine whether host cursor is IPv4 string.
+ */
+AWS_SDKUTILS_API bool aws_is_ipv4(struct aws_byte_cursor host);
+
+/*
+ * Determine whether host cursor is IPv6 string.
+ * Supports checking for uri encoded strings and scoped literals.
+ */
+AWS_SDKUTILS_API bool aws_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded);
+
+/*
+ * Determine whether label is a valid host label.
+ */
+AWS_SDKUTILS_API bool aws_is_valid_host_label(struct aws_byte_cursor label, bool allow_subdomains);
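+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream header):
+ * expected results of the host validation helpers above. The helper name and
+ * sample inputs are made up.
+ */
+static inline void s_example_host_checks(void) {
+    bool is_v4 = aws_is_ipv4(aws_byte_cursor_from_c_str("127.0.0.1"));                  /* expected: true */
+    bool is_v6 = aws_is_ipv6(aws_byte_cursor_from_c_str("::1"), false /*uri encoded*/); /* expected: true */
+    bool is_label = aws_is_valid_host_label(
+        aws_byte_cursor_from_c_str("my-bucket.example"), true /*allow subdomains*/);    /* expected: true */
+    (void)is_v4;
+    (void)is_v6;
+    (void)is_label;
+}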
+
+/*
+ * Determines the partition from a region name.
+ * Note: this implements a regex-less alternative to the regexes specified in the
+ * partitions file.
+ * Returns a cursor indicating which partition the region maps to, or an empty
+ * cursor if the region cannot be mapped.
+ */
+AWS_SDKUTILS_API struct aws_byte_cursor aws_map_region_to_partition(struct aws_byte_cursor region);
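+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream header):
+ * the exact partition ids come from the partitions file, so the values in the
+ * comments are assumptions based on the conventional AWS partition names.
+ */
+static inline void s_example_region_to_partition(void) {
+    struct aws_byte_cursor cn = aws_map_region_to_partition(aws_byte_cursor_from_c_str("cn-north-1"));
+    /* expected to map to the China partition (conventionally "aws-cn") */
+    struct aws_byte_cursor unknown = aws_map_region_to_partition(aws_byte_cursor_from_c_str("not-a-region"));
+    /* unknown.len == 0 indicates the region could not be mapped */
+    (void)cn;
+    (void)unknown;
+}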
+
+/*
+ * Normalizes a uri path: ensures that it starts and ends with '/'.
+ * Initializes out_normalized_path on success.
+ * On error, out_normalized_path is left uninitialized.
+ */
+AWS_SDKUTILS_API int aws_byte_buf_init_from_normalized_uri_path(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_buf *out_normalized_path);
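+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream header):
+ * per the comment above, normalizing "foo/bar" should yield "/foo/bar/". The
+ * helper name is hypothetical.
+ */
+static inline int s_example_normalize_path(struct aws_allocator *allocator) {
+    struct aws_byte_buf normalized;
+    if (aws_byte_buf_init_from_normalized_uri_path(
+            allocator, aws_byte_cursor_from_c_str("foo/bar"), &normalized)) {
+        return AWS_OP_ERR; /* normalized is left uninitialized on error */
+    }
+    /* normalized now holds the slash-delimited form of the path */
+    aws_byte_buf_clean_up(&normalized);
+    return AWS_OP_SUCCESS;
+}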
+
+/*
+ * Creates a new string from a json value.
+ * Returns NULL on error.
+ */
+AWS_SDKUTILS_API struct aws_string *aws_string_new_from_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *value);
+
+/*
+ * Convenience helper for comparing byte cursors.
+ * Typeless for use with hash tables.
+ */
+AWS_SDKUTILS_API bool aws_endpoints_byte_cursor_eq(const void *a, const void *b);
+
+/*
+ * Helpers to do deep clean up of array list.
+ * TODO: move to aws-c-common?
+ */
+typedef void(aws_array_callback_clean_up_fn)(void *value);
+AWS_SDKUTILS_API void aws_array_list_deep_clean_up(
+ struct aws_array_list *array,
+ aws_array_callback_clean_up_fn on_clean_up_element);
+
+/* Function that resolves template. */
+typedef int(aws_endpoints_template_resolve_fn)(
+ struct aws_byte_cursor template,
+ void *user_data,
+ struct aws_owning_cursor *out_resolved);
+/*
+ * Resolves a templated string and writes it out to out_buf.
+ * Parses templated values (i.e. values enclosed in {}) and replaces them with
+ * the value returned from resolve_callback.
+ * Note: the callback must support the syntax for pathing through a value (path
+ * provided after #).
+ * Escaped template delimiters ({{ and }}) are replaced with single chars.
+ * Supports replacing templated values inside json strings (controlled by
+ * is_json) by ignoring json { and } chars.
+ */
+AWS_SDKUTILS_API int aws_byte_buf_init_from_resolved_templated_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_buf,
+ struct aws_byte_cursor string,
+ aws_endpoints_template_resolve_fn resolve_callback,
+ void *user_data,
+ bool is_json);
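+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream header):
+ * a trivial resolve callback that replaces every {template} reference with a
+ * fixed value, plus a driver that expands "https://{Region}.example.com".
+ * Function names and the endpoint pattern are made up.
+ */
+static inline int s_example_resolve_to_fixed_region(
+    struct aws_byte_cursor template_name,
+    void *user_data,
+    struct aws_owning_cursor *out_resolved) {
+    (void)template_name;
+    (void)user_data;
+    /* A real callback would look the template name up in the resolution scope. */
+    *out_resolved = aws_endpoints_non_owning_cursor_create(aws_byte_cursor_from_c_str("us-east-1"));
+    return AWS_OP_SUCCESS;
+}
+
+static inline int s_example_resolve_template(struct aws_allocator *allocator, struct aws_byte_buf *out_buf) {
+    return aws_byte_buf_init_from_resolved_templated_string(
+        allocator,
+        out_buf,
+        aws_byte_cursor_from_c_str("https://{Region}.example.com"),
+        s_example_resolve_to_fixed_region,
+        NULL /*user_data*/,
+        false /*is_json*/);
+}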
+
+/*
+ * Paths through a json structure and returns the final json node in out_value.
+ * In case of error, an error is returned and out_value is set to NULL.
+ * Out-of-bounds array access returns success, but sets out_value to NULL (to be
+ * consistent with the spec).
+ *
+ * A path is defined as a string of '.'-delimited field names that can optionally
+ * end with [] to indicate indexing.
+ * Note: only the last element can be indexed.
+ * ex. path "a.b.c[5]" goes through a, then b, then c, and finally takes index 5
+ * of c.
+ */
+AWS_SDKUTILS_API int aws_path_through_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *root,
+ struct aws_byte_cursor path,
+ const struct aws_json_value **out_value);
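+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream header):
+ * resolves the path "a.b[1]" against an already-parsed json document, per the
+ * path syntax described above. The function name is hypothetical.
+ */
+static inline int s_example_path_through_json(
+    struct aws_allocator *allocator,
+    const struct aws_json_value *root) {
+    const struct aws_json_value *node = NULL;
+    if (aws_path_through_json(allocator, root, aws_byte_cursor_from_c_str("a.b[1]"), &node)) {
+        return AWS_OP_ERR;
+    }
+    /* node is NULL here if index 1 was out of bounds, mirroring the contract above */
+    (void)node;
+    return AWS_OP_SUCCESS;
+}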
+
+#endif /* AWS_SDKUTILS_ENDPOINTS_EVAL_UTIL_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h
new file mode 100644
index 0000000000..076a433d5b
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/resource_name.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#ifndef AWS_SDKUTILS_RESOURCE_NAME_H
+#define AWS_SDKUTILS_RESOURCE_NAME_H
+#pragma once
+
+#include <aws/sdkutils/sdkutils.h>
+
+#include <aws/common/byte_buf.h>
+
+struct aws_resource_name {
+ struct aws_byte_cursor partition;
+ struct aws_byte_cursor service;
+ struct aws_byte_cursor region;
+ struct aws_byte_cursor account_id;
+ struct aws_byte_cursor resource_id;
+};
+
+AWS_EXTERN_C_BEGIN
+
+/**
+ Given an ARN "Amazon Resource Name" represented as an in memory a
+ structure representing the parts
+*/
+AWS_SDKUTILS_API
+int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input);
+
+/**
+ Calculates the space needed to write an ARN to a byte buf
+*/
+AWS_SDKUTILS_API
+int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size);
+
+/**
+ Serializes an ARN structure into the lexical string format
+*/
+AWS_SDKUTILS_API
+int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn);
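+
+/*
+ * Illustrative round-trip sketch added by the editor (not part of the upstream
+ * header): parses an ARN string into its parts and serializes it back. The
+ * sample ARN and the function name are made up.
+ */
+static inline int s_example_resource_name_round_trip(struct aws_allocator *allocator) {
+    struct aws_byte_cursor input = aws_byte_cursor_from_c_str("arn:aws:s3:us-east-1:123456789012:my-bucket");
+    struct aws_resource_name arn;
+    if (aws_resource_name_init_from_cur(&arn, &input)) {
+        return AWS_OP_ERR;
+    }
+
+    size_t required_len = 0;
+    if (aws_resource_name_length(&arn, &required_len)) {
+        return AWS_OP_ERR;
+    }
+
+    struct aws_byte_buf serialized;
+    if (aws_byte_buf_init(&serialized, allocator, required_len)) {
+        return AWS_OP_ERR;
+    }
+    int result = aws_byte_buf_append_resource_name(&serialized, &arn);
+    aws_byte_buf_clean_up(&serialized);
+    return result;
+}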
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_RESOURCE_NAME_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h
new file mode 100644
index 0000000000..51d5da528d
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/include/aws/sdkutils/sdkutils.h
@@ -0,0 +1,51 @@
+#ifndef AWS_SDKUTILS_SDKUTILS_H
+#define AWS_SDKUTILS_SDKUTILS_H
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+#include <aws/common/logging.h>
+
+#include <aws/sdkutils/exports.h>
+
+struct aws_allocator;
+
+#define AWS_C_SDKUTILS_PACKAGE_ID 15
+
+enum aws_sdkutils_errors {
+ AWS_ERROR_SDKUTILS_GENERAL = AWS_ERROR_ENUM_BEGIN_RANGE(AWS_C_SDKUTILS_PACKAGE_ID),
+ AWS_ERROR_SDKUTILS_PARSE_FATAL,
+ AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET,
+ AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED,
+ AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED,
+ AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED,
+
+ AWS_ERROR_SDKUTILS_END_RANGE = AWS_ERROR_ENUM_END_RANGE(AWS_C_SDKUTILS_PACKAGE_ID)
+};
+
+enum aws_sdkutils_log_subject {
+ AWS_LS_SDKUTILS_GENERAL = AWS_LOG_SUBJECT_BEGIN_RANGE(AWS_C_SDKUTILS_PACKAGE_ID),
+ AWS_LS_SDKUTILS_PROFILE,
+ AWS_LS_SDKUTILS_ENDPOINTS_PARSING,
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE,
+ AWS_LS_SDKUTILS_ENDPOINTS_GENERAL,
+ AWS_LS_SDKUTILS_PARTITIONS_PARSING,
+
+ AWS_LS_SDKUTILS_LAST = AWS_LOG_SUBJECT_END_RANGE(AWS_C_SDKUTILS_PACKAGE_ID)
+};
+
+AWS_EXTERN_C_BEGIN
+
+AWS_SDKUTILS_API void aws_sdkutils_library_init(struct aws_allocator *allocator);
+AWS_SDKUTILS_API void aws_sdkutils_library_clean_up(void);
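+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream header):
+ * the usual init/clean-up pairing for the library entry points declared above.
+ */
+static inline void s_example_sdkutils_lifecycle(struct aws_allocator *allocator) {
+    aws_sdkutils_library_init(allocator); /* registers error strings and log subjects */
+    /* ... use the profile / endpoints / partitions APIs ... */
+    aws_sdkutils_library_clean_up();
+}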
+
+AWS_EXTERN_C_END
+
+#endif /* AWS_SDKUTILS_SDKUTILS_H */
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c b/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
new file mode 100644
index 0000000000..3e25536cf7
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/aws_profile.c
@@ -0,0 +1,1592 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/environment.h>
+#include <aws/common/file.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/logging.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/aws_profile.h>
+
+#define PROPERTIES_TABLE_DEFAULT_SIZE 4
+#define PROFILE_TABLE_DEFAULT_SIZE 5
+
+struct aws_profile_property {
+ struct aws_allocator *allocator;
+ struct aws_string *name;
+ struct aws_string *value;
+ struct aws_hash_table sub_properties;
+ bool is_empty_valued;
+};
+
+struct aws_profile {
+ struct aws_allocator *allocator;
+ struct aws_string *name;
+ struct aws_hash_table properties;
+ bool has_profile_prefix;
+};
+
+struct aws_profile_collection {
+ struct aws_allocator *allocator;
+ enum aws_profile_source_type profile_source;
+ /*
+ * Array of aws_hash_table for each section type.
+ * Each table is a map from section identifier to aws_profile.
+ * key: struct aws_string*
+ * value: struct aws_profile*
+ */
+ struct aws_hash_table sections[AWS_PROFILE_SECTION_TYPE_COUNT];
+ struct aws_ref_count ref_count;
+};
+
+/*
+ * Character-based profile parse helper functions
+ */
+static bool s_is_assignment_operator(uint8_t value) {
+ return (char)value == '=';
+}
+
+static bool s_is_not_assignment_operator(uint8_t value) {
+ return !s_is_assignment_operator(value);
+}
+
+static bool s_is_identifier(uint8_t value) {
+ char value_as_char = (char)value;
+
+ if ((value_as_char >= 'A' && value_as_char <= 'Z') || (value_as_char >= 'a' && value_as_char <= 'z') ||
+ (value_as_char >= '0' && value_as_char <= '9') || value_as_char == '\\' || value_as_char == '_' ||
+ value_as_char == '-') {
+ return true;
+ }
+
+ return false;
+}
+
+static bool s_is_whitespace(uint8_t value) {
+ char value_as_char = (char)value;
+
+ switch (value_as_char) {
+ case '\t':
+ case '\n':
+ case '\r':
+ case ' ':
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+static bool s_is_comment_token(uint8_t value) {
+ char char_value = (char)value;
+
+ return char_value == '#' || char_value == ';';
+}
+
+static bool s_is_not_comment_token(uint8_t value) {
+ return !s_is_comment_token(value);
+}
+
+static bool s_is_profile_start(uint8_t value) {
+ return (char)value == '[';
+}
+
+static bool s_is_not_profile_end(uint8_t value) {
+ return (char)value != ']';
+}
+
+static bool s_is_carriage_return(uint8_t value) {
+ return (char)value == '\r';
+}
+
+/*
+ * Line and string based parse helper functions
+ */
+static bool s_is_comment_line(const struct aws_byte_cursor *line_cursor) {
+ char first_char = *line_cursor->ptr;
+ return first_char == '#' || first_char == ';';
+}
+
+static bool s_is_whitespace_line(const struct aws_byte_cursor *line_cursor) {
+ return aws_byte_cursor_left_trim_pred(line_cursor, s_is_whitespace).len == 0;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_name, "default");
+
+static bool s_is_default_profile_name(const struct aws_byte_cursor *profile_name) {
+ return aws_string_eq_byte_cursor(s_default_profile_name, profile_name);
+}
+
+/*
+ * Consume helpers
+ */
+
+/*
+ * Consumes characters as long as a predicate is satisfied. "parsed" is optional and contains the consumed range as
+ * output. Returns true if anything was consumed.
+ *
+ * On success, start is updated to the new position.
+ */
+static bool s_parse_by_character_predicate(
+ struct aws_byte_cursor *start,
+ aws_byte_predicate_fn *predicate,
+ struct aws_byte_cursor *parsed,
+ size_t maximum_allowed) {
+
+ uint8_t *current_ptr = start->ptr;
+ uint8_t *end_ptr = start->ptr + start->len;
+ if (maximum_allowed > 0 && maximum_allowed < start->len) {
+ end_ptr = start->ptr + maximum_allowed;
+ }
+
+ while (current_ptr < end_ptr) {
+ if (!predicate(*current_ptr)) {
+ break;
+ }
+
+ ++current_ptr;
+ }
+
+ size_t consumed = current_ptr - start->ptr;
+ if (parsed != NULL) {
+ parsed->ptr = start->ptr;
+ parsed->len = consumed;
+ }
+
+ aws_byte_cursor_advance(start, consumed);
+
+ return consumed > 0;
+}
+
+/*
+ * Consumes characters if they match a token string. "parsed" is optional and contains the consumed range as output.
+ * Returns true if anything was consumed.
+ *
+ * On success, start is updated to the new position.
+ */
+static bool s_parse_by_token(
+ struct aws_byte_cursor *start,
+ const struct aws_string *token,
+ struct aws_byte_cursor *parsed) {
+
+ bool matched = false;
+
+ if (token->len <= start->len) {
+ matched = strncmp((const char *)start->ptr, aws_string_c_str(token), token->len) == 0;
+ }
+
+ if (parsed != NULL) {
+ parsed->ptr = start->ptr;
+ parsed->len = matched ? token->len : 0;
+ }
+
+ if (matched) {
+ aws_byte_cursor_advance(start, token->len);
+ }
+
+ return matched;
+}
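+
+/*
+ * Illustrative sketch added by the editor (not part of the upstream sources and
+ * not called anywhere): parsing "name = value" with the two consume helpers
+ * above. The real property parser additionally right-trims the key.
+ */
+static inline bool s_example_parse_key_value(
+    struct aws_byte_cursor *line,
+    struct aws_byte_cursor *out_key,
+    struct aws_byte_cursor *out_value) {
+
+    /* the key is everything before the '=' */
+    if (!s_parse_by_character_predicate(line, s_is_not_assignment_operator, out_key, 0)) {
+        return false;
+    }
+    /* consume the single '=' */
+    if (!s_parse_by_character_predicate(line, s_is_assignment_operator, NULL, 1)) {
+        return false;
+    }
+    /* skip whitespace; whatever remains on the line is the value */
+    s_parse_by_character_predicate(line, s_is_whitespace, NULL, 0);
+    *out_value = *line;
+    return true;
+}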
+
+/*
+ * Parse context and logging
+ */
+
+struct profile_file_parse_context {
+ const struct aws_string *source_file_path;
+ struct aws_profile_collection *profile_collection;
+ struct aws_profile *current_profile;
+ struct aws_profile_property *current_property;
+ struct aws_byte_cursor current_line;
+ int parse_error;
+ int current_line_number;
+ bool has_seen_profile;
+};
+
+AWS_STATIC_STRING_FROM_LITERAL(s_none_string, "<None>");
+
+static void s_log_parse_context(enum aws_log_level log_level, const struct profile_file_parse_context *context) {
+ AWS_LOGF(
+ log_level,
+ AWS_LS_SDKUTILS_PROFILE,
+ "Profile Parse context:\n Source File:%s\n Line: %d\n Current Profile: %s\n Current Property: %s",
+ context->source_file_path ? context->source_file_path->bytes : s_none_string->bytes,
+ context->current_line_number,
+ context->current_profile ? context->current_profile->name->bytes : s_none_string->bytes,
+ context->current_property ? context->current_property->name->bytes : s_none_string->bytes);
+}
+
+/*
+ * aws_profile_property APIs
+ */
+
+static void s_profile_property_destroy(struct aws_profile_property *property) {
+ if (property == NULL) {
+ return;
+ }
+
+ aws_string_destroy(property->name);
+ aws_string_destroy(property->value);
+
+ aws_hash_table_clean_up(&property->sub_properties);
+
+ aws_mem_release(property->allocator, property);
+}
+
+struct aws_profile_property *aws_profile_property_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *name,
+ const struct aws_byte_cursor *value) {
+
+ struct aws_profile_property *property =
+ (struct aws_profile_property *)aws_mem_acquire(allocator, sizeof(struct aws_profile_property));
+ if (property == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*property);
+ property->allocator = allocator;
+
+ if (aws_hash_table_init(
+ &property->sub_properties,
+ allocator,
+ 0,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ aws_hash_callback_string_destroy)) {
+ goto on_error;
+ }
+
+ property->value = aws_string_new_from_array(allocator, value->ptr, value->len);
+ if (property->value == NULL) {
+ goto on_error;
+ }
+
+ property->name = aws_string_new_from_array(allocator, name->ptr, name->len);
+ if (property->name == NULL) {
+ goto on_error;
+ }
+
+ property->is_empty_valued = value->len == 0;
+
+ return property;
+
+on_error:
+ s_profile_property_destroy(property);
+
+ return NULL;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_newline, "\n");
+
+/*
+ * Continuations are applied to the property value by concatenating the old value and the new value, with a '\n'
+ * in between.
+ */
+static int s_profile_property_add_continuation(
+ struct aws_profile_property *property,
+ const struct aws_byte_cursor *continuation_value) {
+
+ int result = AWS_OP_ERR;
+ struct aws_byte_buf concatenation;
+ if (aws_byte_buf_init(&concatenation, property->allocator, property->value->len + continuation_value->len + 1)) {
+ return result;
+ }
+
+ struct aws_byte_cursor old_value = aws_byte_cursor_from_string(property->value);
+ if (aws_byte_buf_append(&concatenation, &old_value)) {
+ goto on_generic_failure;
+ }
+
+ struct aws_byte_cursor newline = aws_byte_cursor_from_string(s_newline);
+ if (aws_byte_buf_append(&concatenation, &newline)) {
+ goto on_generic_failure;
+ }
+
+ if (aws_byte_buf_append(&concatenation, continuation_value)) {
+ goto on_generic_failure;
+ }
+
+ struct aws_string *new_value =
+ aws_string_new_from_array(property->allocator, concatenation.buffer, concatenation.len);
+ if (new_value == NULL) {
+ goto on_generic_failure;
+ }
+
+ result = AWS_OP_SUCCESS;
+ aws_string_destroy(property->value);
+ property->value = new_value;
+
+on_generic_failure:
+ aws_byte_buf_clean_up(&concatenation);
+
+ return result;
+}
+
+static int s_profile_property_add_sub_property(
+ struct aws_profile_property *property,
+ const struct aws_byte_cursor *key,
+ const struct aws_byte_cursor *value,
+ const struct profile_file_parse_context *context) {
+
+ struct aws_string *key_string = aws_string_new_from_array(property->allocator, key->ptr, key->len);
+ if (key_string == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_string *value_string = aws_string_new_from_array(property->allocator, value->ptr, value->len);
+ if (value_string == NULL) {
+ goto on_failure;
+ }
+
+ int was_present = 0;
+ aws_hash_table_remove(&property->sub_properties, key_string, NULL, &was_present);
+ if (was_present) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "subproperty \"%s\" of property \"%s\" had value overridden with new value",
+ key_string->bytes,
+ property->name->bytes);
+ s_log_parse_context(AWS_LL_WARN, context);
+ }
+
+ if (aws_hash_table_put(&property->sub_properties, key_string, value_string, NULL)) {
+ goto on_failure;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_failure:
+
+ if (value_string) {
+ aws_string_destroy(value_string);
+ }
+
+ aws_string_destroy(key_string);
+
+ return AWS_OP_ERR;
+}
+
+static int s_profile_property_merge(struct aws_profile_property *dest, const struct aws_profile_property *source) {
+
+ AWS_ASSERT(dest != NULL && source != NULL);
+
+ /*
+ * Source value overwrites any existing dest value
+ */
+ if (source->value) {
+ struct aws_string *new_value = aws_string_new_from_string(dest->allocator, source->value);
+ if (new_value == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (dest->value) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "property \"%s\" has value \"%s\" replaced during merge",
+ dest->name->bytes,
+ dest->value->bytes);
+ aws_string_destroy(dest->value);
+ }
+
+ dest->value = new_value;
+ }
+
+ dest->is_empty_valued = source->is_empty_valued;
+
+ /*
+ * Iterate sub properties, stomping on conflicts
+ */
+ struct aws_hash_iter source_iter = aws_hash_iter_begin(&source->sub_properties);
+ while (!aws_hash_iter_done(&source_iter)) {
+ struct aws_string *source_sub_property = (struct aws_string *)source_iter.element.value;
+
+ struct aws_string *dest_key =
+ aws_string_new_from_string(dest->allocator, (struct aws_string *)source_iter.element.key);
+ if (dest_key == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_string *dest_sub_property = aws_string_new_from_string(dest->allocator, source_sub_property);
+ if (dest_sub_property == NULL) {
+ aws_string_destroy(dest_key);
+ return AWS_OP_ERR;
+ }
+
+ int was_present = 0;
+ aws_hash_table_remove(&dest->sub_properties, dest_key, NULL, &was_present);
+ if (was_present) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "subproperty \"%s\" of property \"%s\" had value overridden during property merge",
+ dest_key->bytes,
+ dest->name->bytes);
+ }
+
+ if (aws_hash_table_put(&dest->sub_properties, dest_key, dest_sub_property, NULL)) {
+ aws_string_destroy(dest_sub_property);
+ aws_string_destroy(dest_key);
+ return AWS_OP_ERR;
+ }
+
+ aws_hash_iter_next(&source_iter);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Helper destroy function for aws_profile's hash table of properties
+ */
+static void s_property_hash_table_value_destroy(void *value) {
+ s_profile_property_destroy((struct aws_profile_property *)value);
+}
+
+/*
+ * aws_profile APIs
+ */
+
+void aws_profile_destroy(struct aws_profile *profile) {
+ if (profile == NULL) {
+ return;
+ }
+
+ aws_string_destroy(profile->name);
+
+ aws_hash_table_clean_up(&profile->properties);
+
+ aws_mem_release(profile->allocator, profile);
+}
+
+struct aws_profile *aws_profile_new(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *name,
+ bool has_profile_prefix) {
+
+ struct aws_profile *profile = (struct aws_profile *)aws_mem_acquire(allocator, sizeof(struct aws_profile));
+ if (profile == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*profile);
+
+ profile->name = aws_string_new_from_array(allocator, name->ptr, name->len);
+ if (profile->name == NULL) {
+ goto cleanup;
+ }
+
+ if (aws_hash_table_init(
+ &profile->properties,
+ allocator,
+ PROPERTIES_TABLE_DEFAULT_SIZE,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ NULL, /* The key is owned by the value (and destroy cleans it up), so we don't have to */
+ s_property_hash_table_value_destroy)) {
+
+ goto cleanup;
+ }
+
+ profile->allocator = allocator;
+ profile->has_profile_prefix = has_profile_prefix;
+
+ return profile;
+
+cleanup:
+ aws_profile_destroy(profile);
+
+ return NULL;
+}
+
+/*
+ * Adds a property to a profile.
+ *
+ * If a property already exists then the old one is removed and replaced by the
+ * new one.
+ */
+static struct aws_profile_property *s_profile_add_property(
+ struct aws_profile *profile,
+ const struct aws_byte_cursor *key_cursor,
+ const struct aws_byte_cursor *value_cursor) {
+
+ struct aws_profile_property *property = aws_profile_property_new(profile->allocator, key_cursor, value_cursor);
+ if (property == NULL) {
+ goto on_property_new_failure;
+ }
+
+ if (aws_hash_table_put(&profile->properties, property->name, property, NULL)) {
+ goto on_hash_table_put_failure;
+ }
+
+ return property;
+
+on_hash_table_put_failure:
+ s_profile_property_destroy(property);
+
+on_property_new_failure:
+ return NULL;
+}
+
+const struct aws_profile_property *aws_profile_get_property(
+ const struct aws_profile *profile,
+ const struct aws_string *property_name) {
+
+ struct aws_hash_element *element = NULL;
+ aws_hash_table_find(&profile->properties, property_name, &element);
+
+ if (element == NULL) {
+ return NULL;
+ }
+
+ return element->value;
+}
+
+const struct aws_string *aws_profile_property_get_value(const struct aws_profile_property *property) {
+ AWS_PRECONDITION(property);
+ return property->value;
+}
+
+static int s_profile_merge(struct aws_profile *dest_profile, const struct aws_profile *source_profile) {
+
+ AWS_ASSERT(dest_profile != NULL && source_profile != NULL);
+
+ dest_profile->has_profile_prefix = source_profile->has_profile_prefix;
+
+ struct aws_hash_iter source_iter = aws_hash_iter_begin(&source_profile->properties);
+ while (!aws_hash_iter_done(&source_iter)) {
+ struct aws_profile_property *source_property = (struct aws_profile_property *)source_iter.element.value;
+ struct aws_profile_property *dest_property = (struct aws_profile_property *)aws_profile_get_property(
+ dest_profile, (struct aws_string *)source_iter.element.key);
+ if (dest_property == NULL) {
+
+ struct aws_byte_cursor empty_value;
+ AWS_ZERO_STRUCT(empty_value);
+
+ struct aws_byte_cursor property_name = aws_byte_cursor_from_string(source_iter.element.key);
+ dest_property = aws_profile_property_new(dest_profile->allocator, &property_name, &empty_value);
+ if (dest_property == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_hash_table_put(&dest_profile->properties, dest_property->name, dest_property, NULL)) {
+ s_profile_property_destroy(dest_property);
+ return AWS_OP_ERR;
+ }
+ }
+
+ if (s_profile_property_merge(dest_property, source_property)) {
+ return AWS_OP_ERR;
+ }
+
+ aws_hash_iter_next(&source_iter);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+ * Hash table destroy helper for profile collection's profiles member
+ */
+static void s_profile_hash_table_value_destroy(void *value) {
+ aws_profile_destroy((struct aws_profile *)value);
+}
+
+/*
+ * aws_profile_collection APIs
+ */
+
+void aws_profile_collection_destroy(struct aws_profile_collection *profile_collection) {
+ aws_profile_collection_release(profile_collection);
+}
+
+static void s_aws_profile_collection_destroy_internal(struct aws_profile_collection *profile_collection) {
+ for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+ aws_hash_table_clean_up(&profile_collection->sections[i]);
+ }
+ aws_mem_release(profile_collection->allocator, profile_collection);
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_profile_token, "profile");
+AWS_STATIC_STRING_FROM_LITERAL(s_sso_session_token, "sso-session");
+
+const struct aws_profile *aws_profile_collection_get_profile(
+ const struct aws_profile_collection *profile_collection,
+ const struct aws_string *profile_name) {
+ return aws_profile_collection_get_section(profile_collection, AWS_PROFILE_SECTION_TYPE_PROFILE, profile_name);
+}
+
+const struct aws_profile *aws_profile_collection_get_section(
+ const struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type,
+ const struct aws_string *section_name) {
+ struct aws_hash_element *element = NULL;
+ aws_hash_table_find(&profile_collection->sections[section_type], section_name, &element);
+ if (element == NULL) {
+ return NULL;
+ }
+ return element->value;
+}
+
+static int s_profile_collection_add_profile(
+ struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type,
+ const struct aws_byte_cursor *profile_name,
+ bool has_prefix,
+ const struct profile_file_parse_context *context,
+ struct aws_profile **current_profile_out) {
+
+ *current_profile_out = NULL;
+ struct aws_string *key =
+ aws_string_new_from_array(profile_collection->allocator, profile_name->ptr, profile_name->len);
+ if (key == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ struct aws_profile *existing_profile = NULL;
+ struct aws_hash_element *element = NULL;
+ aws_hash_table_find(&profile_collection->sections[section_type], key, &element);
+ if (element != NULL) {
+ existing_profile = element->value;
+ }
+
+ aws_string_destroy(key);
+
+ if (section_type == AWS_PROFILE_SECTION_TYPE_PROFILE && profile_collection->profile_source == AWS_PST_CONFIG &&
+ s_is_default_profile_name(profile_name)) {
+ /*
+ * In a config file, "profile default" always supercedes "default"
+ */
+ if (!has_prefix && existing_profile && existing_profile->has_profile_prefix) {
+ /*
+ * the existing one supersedes: ignore this profile (and its properties) completely by failing the add,
+ * which sets the current profile to NULL
+ */
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Existing prefixed default config profile supercedes unprefixed default profile");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ if (has_prefix && existing_profile && !existing_profile->has_profile_prefix) {
+ /*
+ * stomp over existing: remove it, then proceed with add
+ * element destroy function will clean up the profile and key
+ */
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE, "Prefixed default config profile replacing unprefixed default profile");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ aws_hash_table_remove(&profile_collection->sections[section_type], element->key, NULL, NULL);
+ existing_profile = NULL;
+ }
+ }
+
+ if (existing_profile) {
+ *current_profile_out = existing_profile;
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_profile *new_profile = aws_profile_new(profile_collection->allocator, profile_name, has_prefix);
+ if (new_profile == NULL) {
+ goto on_aws_profile_new_failure;
+ }
+
+ if (aws_hash_table_put(&profile_collection->sections[section_type], new_profile->name, new_profile, NULL)) {
+ goto on_hash_table_put_failure;
+ }
+
+ *current_profile_out = new_profile;
+ return AWS_OP_SUCCESS;
+
+on_hash_table_put_failure:
+ aws_profile_destroy(new_profile);
+
+on_aws_profile_new_failure:
+ return AWS_OP_ERR;
+}
+
+static int s_profile_collection_merge(
+ struct aws_profile_collection *dest_collection,
+ const struct aws_profile_collection *source_collection) {
+
+ AWS_ASSERT(dest_collection != NULL && source_collection);
+ for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+ struct aws_hash_iter source_iter = aws_hash_iter_begin(&source_collection->sections[i]);
+ while (!aws_hash_iter_done(&source_iter)) {
+ struct aws_profile *source_profile = (struct aws_profile *)source_iter.element.value;
+ struct aws_profile *dest_profile = (struct aws_profile *)aws_profile_collection_get_profile(
+ dest_collection, (struct aws_string *)source_iter.element.key);
+
+ if (dest_profile == NULL) {
+
+ struct aws_byte_cursor name_cursor = aws_byte_cursor_from_string(source_iter.element.key);
+ dest_profile =
+ aws_profile_new(dest_collection->allocator, &name_cursor, source_profile->has_profile_prefix);
+ if (dest_profile == NULL) {
+ return AWS_OP_ERR;
+ }
+
+ if (aws_hash_table_put(&dest_collection->sections[i], dest_profile->name, dest_profile, NULL)) {
+ aws_profile_destroy(dest_profile);
+ return AWS_OP_ERR;
+ }
+ }
+
+ if (s_profile_merge(dest_profile, source_profile)) {
+ return AWS_OP_ERR;
+ }
+
+ aws_hash_iter_next(&source_iter);
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_profile_collection *aws_profile_collection_new_from_merge(
+ struct aws_allocator *allocator,
+ const struct aws_profile_collection *config_profiles,
+ const struct aws_profile_collection *credentials_profiles) {
+
+ struct aws_profile_collection *merged =
+ (struct aws_profile_collection *)(aws_mem_acquire(allocator, sizeof(struct aws_profile_collection)));
+ if (merged == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*merged);
+ aws_ref_count_init(
+ &merged->ref_count, merged, (aws_simple_completion_callback *)s_aws_profile_collection_destroy_internal);
+ for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+ size_t max_profiles = 0;
+ if (config_profiles != NULL) {
+ max_profiles += aws_hash_table_get_entry_count(&config_profiles->sections[i]);
+ }
+ if (credentials_profiles != NULL) {
+ max_profiles += aws_hash_table_get_entry_count(&credentials_profiles->sections[i]);
+ }
+
+ merged->allocator = allocator;
+ merged->profile_source = AWS_PST_NONE;
+
+ if (aws_hash_table_init(
+ &merged->sections[i],
+ allocator,
+ max_profiles,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ NULL,
+ s_profile_hash_table_value_destroy)) {
+ goto cleanup;
+ }
+ }
+
+ if (config_profiles != NULL) {
+ if (s_profile_collection_merge(merged, config_profiles)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to merge config profile set");
+ goto cleanup;
+ }
+ }
+
+ if (credentials_profiles != NULL) {
+ if (s_profile_collection_merge(merged, credentials_profiles)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to merge credentials profile set");
+ goto cleanup;
+ }
+ }
+
+ return merged;
+
+cleanup:
+ s_aws_profile_collection_destroy_internal(merged);
+
+ return NULL;
+}
+
+/*
+ * Profile parsing
+ */
+
+/*
+ * The comment situation in config files is messy. Some line types require a comment to be preceded by at
+ * least one whitespace character, while other line types only require a comment token (';' or '#'). On top of
+ * that, some line types do not allow comments at all (the would-be comment gets folded into the value).
+ *
+ */
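+
+/*
+ * Example added by the editor of how those rules play out on a few lines,
+ * under the behavior implemented below:
+ *
+ *   [default]            ; trailing comment, allowed on a profile declaration
+ *   region = us-west-2   # whitespace-prefixed comment, trimmed from the value
+ *   s3 =
+ *     addressing_style = path;no comment here - it is folded into the sub-property value
+ */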
+
+/*
+ * A trailing comment is started by ';' or '#'.
+ * Only certain types of lines allow comments without prefixing whitespace.
+ */
+static struct aws_byte_cursor s_trim_trailing_comment(const struct aws_byte_cursor *line) {
+
+ struct aws_byte_cursor line_copy = *line;
+ struct aws_byte_cursor trimmed;
+ s_parse_by_character_predicate(&line_copy, s_is_not_comment_token, &trimmed, 0);
+
+ return trimmed;
+}
+
+/*
+ * A trailing whitespace comment is started by " ;", " #", "\t;", or "\t#"
+ * Certain types of lines require comments be whitespace-prefixed
+ */
+static struct aws_byte_cursor s_trim_trailing_whitespace_comment(const struct aws_byte_cursor *line) {
+ struct aws_byte_cursor trimmed;
+ trimmed.ptr = line->ptr;
+
+ uint8_t *current_ptr = line->ptr;
+ uint8_t *end_ptr = line->ptr + line->len;
+
+ while (current_ptr < end_ptr) {
+ if (s_is_whitespace(*current_ptr)) {
+ /*
+ * Look ahead 1
+ */
+ if (current_ptr + 1 < end_ptr && s_is_comment_token(*(current_ptr + 1))) {
+ break;
+ }
+ }
+
+ current_ptr++;
+ }
+
+ trimmed.len = current_ptr - line->ptr;
+
+ return trimmed;
+}
+
+/**
+ * Attempts to parse profile declaration lines
+ *
+ * Return false if this is not a profile declaration, true otherwise (stop parsing the line)
+ */
+static bool s_parse_profile_declaration(
+ const struct aws_byte_cursor *line_cursor,
+ struct profile_file_parse_context *context) {
+
+ /*
+ * Strip comment and right-side whitespace
+ */
+ struct aws_byte_cursor profile_line_cursor = s_trim_trailing_comment(line_cursor);
+ struct aws_byte_cursor profile_cursor = aws_byte_cursor_right_trim_pred(&profile_line_cursor, s_is_whitespace);
+
+ /*
+ * "[" + <whitespace>? + <"profile ">? + <profile name = identifier> + <whitespace>? + "]"
+ */
+ if (!s_parse_by_character_predicate(&profile_cursor, s_is_profile_start, NULL, 1)) {
+ /*
+ * This isn't a profile declaration, try something else
+ */
+ return false;
+ }
+
+ context->has_seen_profile = true;
+ context->current_profile = NULL;
+ context->current_property = NULL;
+
+ s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+ enum aws_profile_section_type section_type = AWS_PROFILE_SECTION_TYPE_PROFILE;
+
+ /*
+ * Check if the profile name starts with the 'profile' keyword. We need to check for
+ * "profile" and at least one whitespace character. A partial match
+ * ("[profilefoo]" for example) should rewind and use the whole name properly.
+ */
+ struct aws_byte_cursor backtrack_cursor = profile_cursor;
+ bool has_profile_prefix = s_parse_by_token(&profile_cursor, s_profile_token, NULL) &&
+ s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 1);
+ bool has_sso_session_prefix = !has_profile_prefix && s_parse_by_token(&profile_cursor, s_sso_session_token, NULL) &&
+ s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 1);
+
+ if (has_profile_prefix) {
+ if (context->profile_collection->profile_source == AWS_PST_CREDENTIALS) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Profile declarations in credentials files are not allowed to begin with the \"profile\" keyword");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+ } else if (has_sso_session_prefix) {
+ if (context->profile_collection->profile_source == AWS_PST_CREDENTIALS) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "sso-session declarations in credentials files are not allowed");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+ section_type = AWS_PROFILE_SECTION_TYPE_SSO_SESSION;
+ s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+ } else {
+ profile_cursor = backtrack_cursor;
+ }
+
+ struct aws_byte_cursor profile_name;
+ if (!s_parse_by_character_predicate(&profile_cursor, s_is_identifier, &profile_name, 0)) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Profile declarations must contain a valid identifier for a name");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ if (context->profile_collection->profile_source == AWS_PST_CONFIG && !has_profile_prefix &&
+ !s_is_default_profile_name(&profile_name) && !has_sso_session_prefix) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Non-default profile declarations in config files must use the \"profile\" keyword");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ s_parse_by_character_predicate(&profile_cursor, s_is_whitespace, NULL, 0);
+
+ /*
+ * Special case the right side bracket check. We need to distinguish between a missing right bracket
+ * (fatal error) and invalid profile name (spaces, non-identifier characters).
+ *
+ * Do so by consuming all non right-bracket characters. If the remainder is empty it is missing,
+ * otherwise it is an invalid profile name (non-empty invalid_chars) or a good definition
+ * (empty invalid_chars cursor).
+ */
+ struct aws_byte_cursor invalid_chars;
+ s_parse_by_character_predicate(&profile_cursor, s_is_not_profile_end, &invalid_chars, 0);
+ if (profile_cursor.len == 0) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Profile declaration missing required ending bracket");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ if (invalid_chars.len > 0) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Profile declaration contains invalid characters: \"" PRInSTR "\"",
+ AWS_BYTE_CURSOR_PRI(invalid_chars));
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ /*
+ * Apply to the profile collection
+ */
+ if (s_profile_collection_add_profile(
+ context->profile_collection,
+ section_type,
+ &profile_name,
+ has_profile_prefix,
+ context,
+ &context->current_profile)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Failed to add profile to profile collection");
+ s_log_parse_context(AWS_LL_ERROR, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ return true;
+}
+
+/**
+ * Attempts to parse property continuation lines
+ *
+ * Return false if this is not a property continuation line, true otherwise (stop parsing the line)
+ */
+static bool s_parse_property_continuation(
+ const struct aws_byte_cursor *line_cursor,
+ struct profile_file_parse_context *context) {
+
+ /*
+ * Strip right-side whitespace only. Comments cannot be made on continuation lines. They
+ * get folded into the value.
+ */
+ struct aws_byte_cursor continuation_cursor = aws_byte_cursor_right_trim_pred(line_cursor, s_is_whitespace);
+
+ /*
+ * Can't be a continuation without at least one whitespace on the left
+ */
+ if (!s_parse_by_character_predicate(&continuation_cursor, s_is_whitespace, NULL, 0)) {
+ return false;
+ }
+
+ /*
+ * This should never happen since it should have been caught as a whitespace line
+ */
+ if (continuation_cursor.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Property continuation internal parsing error");
+ s_log_parse_context(AWS_LL_ERROR, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ /*
+ * A continuation without a current property is bad
+ */
+ if (context->current_profile == NULL || context->current_property == NULL) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property continuation seen outside of a current property");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ if (s_profile_property_add_continuation(context->current_property, &continuation_cursor)) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property continuation could not be applied to the current property");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ if (context->current_property->is_empty_valued) {
+
+ struct aws_byte_cursor key_cursor;
+ if (!s_parse_by_character_predicate(&continuation_cursor, s_is_not_assignment_operator, &key_cursor, 0)) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must contain the assignment operator");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ if (!s_parse_by_character_predicate(&continuation_cursor, s_is_assignment_operator, NULL, 1)) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE, "Empty-valued property continuation must contain the assignment operator");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ struct aws_byte_cursor trimmed_key_cursor = aws_byte_cursor_right_trim_pred(&key_cursor, s_is_whitespace);
+ struct aws_byte_cursor id_check_cursor = aws_byte_cursor_trim_pred(&trimmed_key_cursor, s_is_identifier);
+ if (id_check_cursor.len > 0) {
+ AWS_LOGF_WARN(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Empty-valued property continuation must have a valid identifier to the left of the assignment");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ s_parse_by_character_predicate(&continuation_cursor, s_is_whitespace, NULL, 0);
+
+ /*
+ * everything left in the continuation_cursor is the sub property value
+ */
+ if (s_profile_property_add_sub_property(
+ context->current_property, &trimmed_key_cursor, &continuation_cursor, context)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PROFILE, "Internal error adding sub property to current property");
+ s_log_parse_context(AWS_LL_ERROR, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ }
+ }
+
+ return true;
+}
+
+/**
+ * Attempts to parse property lines
+ *
+ * Return false if this is not a property line, true otherwise (stop parsing the line)
+ */
+static bool s_parse_property(const struct aws_byte_cursor *line_cursor, struct profile_file_parse_context *context) {
+
+ /*
+ * Strip whitespace-prefixed comment and right-side whitespace
+ */
+ struct aws_byte_cursor property_line_cursor = s_trim_trailing_whitespace_comment(line_cursor);
+ struct aws_byte_cursor property_cursor = aws_byte_cursor_right_trim_pred(&property_line_cursor, s_is_whitespace);
+
+ context->current_property = NULL;
+
+ struct aws_byte_cursor key_cursor;
+ if (!s_parse_by_character_predicate(&property_cursor, s_is_not_assignment_operator, &key_cursor, 0)) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not contain the assignment operator");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ struct aws_byte_cursor trimmed_key_cursor = aws_byte_cursor_right_trim_pred(&key_cursor, s_is_whitespace);
+ struct aws_byte_cursor id_check_cursor = aws_byte_cursor_trim_pred(&trimmed_key_cursor, s_is_identifier);
+ if (id_check_cursor.len > 0) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not begin with a valid identifier");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ return true;
+ }
+
+ if (!s_parse_by_character_predicate(&property_cursor, s_is_assignment_operator, NULL, 1)) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition does not contain the assignment operator");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ return true;
+ }
+
+ s_parse_by_character_predicate(&property_cursor, s_is_whitespace, NULL, 0);
+
+ /*
+ * If appropriate, apply to the profile collection; property_cursor contains the trimmed value, if one exists
+ */
+ if (context->current_profile != NULL) {
+ context->current_property =
+ s_profile_add_property(context->current_profile, &trimmed_key_cursor, &property_cursor);
+ if (context->current_property == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Failed to add property \"" PRInSTR "\" to current profile \"%s\"",
+ AWS_BYTE_CURSOR_PRI(trimmed_key_cursor),
+ context->current_profile->name->bytes);
+ s_log_parse_context(AWS_LL_ERROR, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ }
+ } else {
+ /*
+ * By definition, if we haven't seen any profiles yet, this is a fatal error
+ */
+ if (context->has_seen_profile) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition seen outside a profile");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE;
+ } else {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Property definition seen before any profiles");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+ }
+ }
+
+ return true;
+}
+
+static void s_parse_and_apply_line_to_profile_collection(
+ struct profile_file_parse_context *context,
+ const struct aws_byte_cursor *line_cursor) {
+
+ /*
+ * Trim the trailing carriage return left by Windows-style (\r\n) line endings
+ */
+ struct aws_byte_cursor line = aws_byte_cursor_right_trim_pred(line_cursor, s_is_carriage_return);
+ if (line.len == 0 || s_is_comment_line(&line) || s_is_whitespace_line(&line)) {
+ return;
+ }
+
+ AWS_LOGF_TRACE(
+ AWS_LS_SDKUTILS_PROFILE,
+ "Parsing aws profile line in profile \"%s\", current property: \"%s\"",
+ context->current_profile ? context->current_profile->name->bytes : s_none_string->bytes,
+ context->current_property ? context->current_property->name->bytes : s_none_string->bytes);
+
+ if (s_parse_profile_declaration(&line, context)) {
+ return;
+ }
+
+ if (s_parse_property_continuation(&line, context)) {
+ return;
+ }
+
+ if (s_parse_property(&line, context)) {
+ return;
+ }
+
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Unidentifiable line type encountered while parsing profile file");
+ s_log_parse_context(AWS_LL_WARN, context);
+
+ context->parse_error = AWS_ERROR_SDKUTILS_PARSE_FATAL;
+}
+
+static struct aws_profile_collection *s_aws_profile_collection_new_internal(
+ struct aws_allocator *allocator,
+ const struct aws_byte_buf *buffer,
+ enum aws_profile_source_type source,
+ const struct aws_string *path) {
+
+ struct aws_profile_collection *profile_collection =
+ (struct aws_profile_collection *)aws_mem_acquire(allocator, sizeof(struct aws_profile_collection));
+ if (profile_collection == NULL) {
+ return NULL;
+ }
+
+ AWS_ZERO_STRUCT(*profile_collection);
+
+ profile_collection->profile_source = source;
+ profile_collection->allocator = allocator;
+
+ aws_ref_count_init(
+ &profile_collection->ref_count,
+ profile_collection,
+ (aws_simple_completion_callback *)s_aws_profile_collection_destroy_internal);
+
+ for (int i = 0; i < AWS_PROFILE_SECTION_TYPE_COUNT; i++) {
+ if (aws_hash_table_init(
+ &profile_collection->sections[i],
+ allocator,
+ PROFILE_TABLE_DEFAULT_SIZE,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ NULL, /* The key is owned by the value (and destroy cleans it up), so we don't have to */
+ s_profile_hash_table_value_destroy)) {
+ goto cleanup;
+ }
+ }
+
+ struct aws_byte_cursor current_position = aws_byte_cursor_from_buf(buffer);
+
+ if (current_position.len > 0) {
+ struct aws_byte_cursor line_cursor;
+ AWS_ZERO_STRUCT(line_cursor);
+
+ struct profile_file_parse_context context;
+ AWS_ZERO_STRUCT(context);
+ context.current_line_number = 1;
+ context.profile_collection = profile_collection;
+ context.source_file_path = path;
+
+ while (aws_byte_cursor_next_split(&current_position, '\n', &line_cursor)) {
+ context.current_line = line_cursor;
+
+ s_parse_and_apply_line_to_profile_collection(&context, &line_cursor);
+ if (context.parse_error == AWS_ERROR_SDKUTILS_PARSE_FATAL) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Fatal error while parsing aws profile collection");
+ goto cleanup;
+ }
+
+ aws_byte_cursor_advance(&current_position, line_cursor.len + 1);
+ ++context.current_line_number;
+ }
+ }
+
+ return profile_collection;
+
+cleanup:
+ s_aws_profile_collection_destroy_internal(profile_collection);
+
+ return NULL;
+}
+
+struct aws_profile_collection *aws_profile_collection_acquire(struct aws_profile_collection *collection) {
+ if (collection != NULL) {
+ aws_ref_count_acquire(&collection->ref_count);
+ }
+
+ return collection;
+}
+
+struct aws_profile_collection *aws_profile_collection_release(struct aws_profile_collection *collection) {
+ if (collection != NULL) {
+ aws_ref_count_release(&collection->ref_count);
+ }
+
+ return NULL;
+}
+
+struct aws_profile_collection *aws_profile_collection_new_from_file(
+ struct aws_allocator *allocator,
+ const struct aws_string *file_path,
+ enum aws_profile_source_type source) {
+
+ struct aws_byte_buf file_contents;
+ AWS_ZERO_STRUCT(file_contents);
+
+ AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_PROFILE, "Creating profile collection from file at \"%s\"", file_path->bytes);
+
+ if (aws_byte_buf_init_from_file(&file_contents, allocator, aws_string_c_str(file_path)) != 0) {
+ AWS_LOGF_WARN(AWS_LS_SDKUTILS_PROFILE, "Failed to read file at \"%s\"", file_path->bytes);
+ return NULL;
+ }
+
+ struct aws_profile_collection *profile_collection =
+ s_aws_profile_collection_new_internal(allocator, &file_contents, source, file_path);
+
+ aws_byte_buf_clean_up(&file_contents);
+
+ return profile_collection;
+}
+
+struct aws_profile_collection *aws_profile_collection_new_from_buffer(
+ struct aws_allocator *allocator,
+ const struct aws_byte_buf *buffer,
+ enum aws_profile_source_type source) {
+
+ return s_aws_profile_collection_new_internal(allocator, buffer, source, NULL);
+}
+
+static struct aws_string *s_process_profile_file_path(struct aws_allocator *allocator, const struct aws_string *path) {
+ struct aws_string *final_path = NULL;
+
+ /*
+ * Make a copy to mess with
+ */
+ struct aws_string *path_copy = aws_string_new_from_string(allocator, path);
+ if (path_copy == NULL) {
+ return NULL;
+ }
+
+ struct aws_string *home_directory = NULL;
+
+ /*
+ * Fake directory cursor for final directory construction
+ */
+ char local_platform_separator = aws_get_platform_directory_separator();
+ struct aws_byte_cursor separator_cursor;
+ AWS_ZERO_STRUCT(separator_cursor);
+ separator_cursor.ptr = (uint8_t *)&local_platform_separator;
+ separator_cursor.len = 1;
+
+ for (size_t i = 0; i < path_copy->len; ++i) {
+ char value = path_copy->bytes[i];
+ if (aws_is_any_directory_separator(value)) {
+ ((char *)(path_copy->bytes))[i] = local_platform_separator;
+ }
+ }
+
+ /*
+ * Process a split on the local separator, which we now know is the only one present in the string.
+ *
+ * While this does not conform fully to the SEP governing profile file path resolution, it covers
+ * a useful, cross-platform subset of functionality that the full implementation will be backwards compatible with.
+ */
+ struct aws_array_list path_segments;
+ if (aws_array_list_init_dynamic(&path_segments, allocator, 10, sizeof(struct aws_byte_cursor))) {
+ goto on_array_list_init_failure;
+ }
+
+ struct aws_byte_cursor path_cursor = aws_byte_cursor_from_string(path_copy);
+ if (aws_byte_cursor_split_on_char(&path_cursor, local_platform_separator, &path_segments)) {
+ goto on_split_failure;
+ }
+
+ size_t final_string_length = 0;
+ size_t path_segment_count = aws_array_list_length(&path_segments);
+ for (size_t i = 0; i < path_segment_count; ++i) {
+ struct aws_byte_cursor segment_cursor;
+ AWS_ZERO_STRUCT(segment_cursor);
+
+ if (aws_array_list_get_at(&path_segments, &segment_cursor, i)) {
+ continue;
+ }
+
+ /*
+ * Current support: if and only if the first segment is just '~' then replace it
+ * with the current home directory based on SEP home directory resolution rules.
+ *
+ * Support for (pathological but proper) paths with embedded ~ ("../../~/etc...") and
+ * cross-user ~ ("~someone/.aws/credentials") can come later. As it stands, they will
+ * potentially succeed on unix platforms but not Windows.
+ */
+ if (i == 0 && segment_cursor.len == 1 && *segment_cursor.ptr == '~') {
+ if (home_directory == NULL) {
+ home_directory = aws_get_home_directory(allocator);
+
+ if (AWS_UNLIKELY(!home_directory)) {
+ goto on_empty_path;
+ }
+ }
+
+ final_string_length += home_directory->len;
+ } else {
+ final_string_length += segment_cursor.len;
+ }
+ }
+
+ if (path_segment_count > 1) {
+ final_string_length += path_segment_count - 1;
+ }
+
+ if (final_string_length == 0) {
+ goto on_empty_path;
+ }
+
+ /*
+ * Build the final path from the split + a possible home directory resolution
+ */
+ struct aws_byte_buf result;
+ aws_byte_buf_init(&result, allocator, final_string_length);
+ for (size_t i = 0; i < path_segment_count; ++i) {
+ struct aws_byte_cursor segment_cursor;
+ AWS_ZERO_STRUCT(segment_cursor);
+
+ if (aws_array_list_get_at(&path_segments, &segment_cursor, i)) {
+ continue;
+ }
+
+ /*
+ * See above for explanation
+ */
+ if (i == 0 && segment_cursor.len == 1 && *segment_cursor.ptr == '~') {
+ if (home_directory == NULL) {
+ goto on_home_directory_failure;
+ }
+ struct aws_byte_cursor home_cursor = aws_byte_cursor_from_string(home_directory);
+ if (aws_byte_buf_append(&result, &home_cursor)) {
+ goto on_byte_buf_write_failure;
+ }
+ } else {
+ if (aws_byte_buf_append(&result, &segment_cursor)) {
+ goto on_byte_buf_write_failure;
+ }
+ }
+
+ /*
+ * Add the separator after all but the last segment
+ */
+ if (i + 1 < path_segment_count) {
+ if (aws_byte_buf_append(&result, &separator_cursor)) {
+ goto on_byte_buf_write_failure;
+ }
+ }
+ }
+
+ final_path = aws_string_new_from_array(allocator, result.buffer, result.len);
+
+/*
+ * clean up
+ */
+on_byte_buf_write_failure:
+ aws_byte_buf_clean_up(&result);
+
+on_empty_path:
+on_home_directory_failure:
+on_split_failure:
+ aws_array_list_clean_up(&path_segments);
+
+on_array_list_init_failure:
+ aws_string_destroy(path_copy);
+
+ if (home_directory != NULL) {
+ aws_string_destroy(home_directory);
+ }
+
+ return final_path;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_credentials_path, "~/.aws/credentials");
+AWS_STATIC_STRING_FROM_LITERAL(s_credentials_file_path_env_variable_name, "AWS_SHARED_CREDENTIALS_FILE");
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_config_path, "~/.aws/config");
+AWS_STATIC_STRING_FROM_LITERAL(s_config_file_path_env_variable_name, "AWS_CONFIG_FILE");
+
+static struct aws_string *s_get_raw_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path,
+ const struct aws_string *override_env_var_name,
+ const struct aws_string *default_path) {
+
+ if (override_path != NULL && override_path->ptr != NULL) {
+ return aws_string_new_from_array(allocator, override_path->ptr, override_path->len);
+ }
+
+ struct aws_string *env_override_path = NULL;
+ if (aws_get_environment_value(allocator, override_env_var_name, &env_override_path) == 0 &&
+ env_override_path != NULL) {
+ return env_override_path;
+ }
+
+ return aws_string_new_from_string(allocator, default_path);
+}
+
+struct aws_string *aws_get_credentials_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path) {
+
+ struct aws_string *raw_path = s_get_raw_file_path(
+ allocator, override_path, s_credentials_file_path_env_variable_name, s_default_credentials_path);
+
+ struct aws_string *final_path = s_process_profile_file_path(allocator, raw_path);
+
+ aws_string_destroy(raw_path);
+
+ return final_path;
+}
+
+struct aws_string *aws_get_config_file_path(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor *override_path) {
+
+ struct aws_string *raw_path =
+ s_get_raw_file_path(allocator, override_path, s_config_file_path_env_variable_name, s_default_config_path);
+
+ struct aws_string *final_path = s_process_profile_file_path(allocator, raw_path);
+
+ aws_string_destroy(raw_path);
+
+ return final_path;
+}
+
+AWS_STATIC_STRING_FROM_LITERAL(s_default_profile_env_variable_name, "AWS_PROFILE");
+
+struct aws_string *aws_get_profile_name(struct aws_allocator *allocator, const struct aws_byte_cursor *override_name) {
+
+ struct aws_string *profile_name = NULL;
+
+ if (aws_get_environment_value(allocator, s_default_profile_env_variable_name, &profile_name) ||
+ profile_name == NULL) {
+ if (override_name != NULL && override_name->ptr != NULL) {
+ profile_name = aws_string_new_from_array(allocator, override_name->ptr, override_name->len);
+ } else {
+ profile_name = aws_string_new_from_string(allocator, s_default_profile_name);
+ }
+ }
+
+ return profile_name;
+}
+
+size_t aws_profile_get_property_count(const struct aws_profile *profile) {
+ return aws_hash_table_get_entry_count(&profile->properties);
+}
+
+size_t aws_profile_collection_get_profile_count(const struct aws_profile_collection *profile_collection) {
+ return aws_hash_table_get_entry_count(&profile_collection->sections[AWS_PROFILE_SECTION_TYPE_PROFILE]);
+}
+
+size_t aws_profile_collection_get_section_count(
+ const struct aws_profile_collection *profile_collection,
+ const enum aws_profile_section_type section_type) {
+ return aws_hash_table_get_entry_count(&profile_collection->sections[section_type]);
+}
+
+size_t aws_profile_property_get_sub_property_count(const struct aws_profile_property *property) {
+ return aws_hash_table_get_entry_count(&property->sub_properties);
+}
+
+const struct aws_string *aws_profile_property_get_sub_property(
+ const struct aws_profile_property *property,
+ const struct aws_string *sub_property_name) {
+ struct aws_hash_element *element = NULL;
+
+ if (aws_hash_table_find(&property->sub_properties, sub_property_name, &element) || element == NULL) {
+ return NULL;
+ }
+
+ return (const struct aws_string *)element->value;
+}
+
+const struct aws_string *aws_profile_get_name(const struct aws_profile *profile) {
+ AWS_PRECONDITION(profile);
+ return profile->name;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
new file mode 100644
index 0000000000..556450b697
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_rule_engine.c
@@ -0,0 +1,1132 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/byte_buf.h>
+#include <aws/common/json.h>
+#include <aws/common/macros.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/partitions.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+#include <inttypes.h>
+#include <stdio.h>
+
+/* TODO: checking for unknown enum values is annoying and brittle. A compile-time
+assert on enum size or members would make it a lot simpler. */
+
+/*
+ * How rule resolution works.
+ * Note: read the comments in endpoints_types_impl.h first to understand the type system.
+ *
+ * Initial scope is created from parameters defined in request context and
+ * default values defined in ruleset (s_init_top_level_scope). Validation that
+ * all required parameters have values is done at this point as well.
+ *
+ * Rules are then resolved sequentially against scope.
+ * First list of conditions associated with the rule is resolved
+ * (s_resolve_conditions). Final result of conditions resolution is an AND of
+ * truthiness of resolved values (as defined in is_value_truthy) for each
+ * condition. If resolution is true then rule is selected.
+ * - For endpoint and error rules that means terminal state is reached and rule
+ * data is returned
+ * - For tree rule, the engine starts resolving rules associated with tree rule.
+ * Note: tree rules are terminal and once engine jumps into tree rule
+ * resolution there is no way to jump back out.
+ *
+ * Conditions can add values to scope. Those values are valid for the duration of
+ * rule resolution. Note: for tree rules, any values added in tree conditions are
+ * valid for all rules within the tree.
+ * Scope can be thought of as a 'leveled' structure. The top level, or level 0,
+ * represents all values from context and defaults. Levels 1 and up represent
+ * values added by rules, e.g. starting at level 0, all values added by a rule
+ * can be thought of as level 1.
+ * Since a tree rule cannot be exited, the engine is simplified by making all
+ * values in scope top level whenever a tree is entered. So in practice the engine
+ * alternates between the top level and the first level while resolving rules. If that
+ * changes in the future, scope can track an explicit level number and clean up only
+ * the values at that level when moving to the next rule.
+ *
+ * Overall flow is as follows:
+ * - Start with any values provided in context as scope
+ * - Add any default values provided in ruleset and validate all required
+ * params are specified.
+ * - Iterate through rules and resolve each rule:
+ * -- resolve conditions with side effects
+ * -- if conditions are truthy return rule result
+ * -- if conditions are truthy and rule is tree, jump down a level and
+ * restart resolution with tree rules
+ * -- if conditions are falsy, rollback level and go to next rule
+ * - if no rules match, resolution fails with exhausted error.
+ */
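+
+/*
+ * For orientation, a minimal usage sketch of the resolution API defined in this
+ * file (not part of the library itself). It assumes a parsed ruleset and
+ * partitions_config are already available; the parameter names ("Region",
+ * "UseFIPS") and their values are illustrative assumptions, and error handling
+ * is omitted:
+ *
+ *   struct aws_endpoints_rule_engine *engine =
+ *       aws_endpoints_rule_engine_new(allocator, ruleset, partitions_config);
+ *
+ *   struct aws_endpoints_request_context *ctx = aws_endpoints_request_context_new(allocator);
+ *   aws_endpoints_request_context_add_string(
+ *       allocator, ctx, aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"));
+ *   aws_endpoints_request_context_add_boolean(allocator, ctx, aws_byte_cursor_from_c_str("UseFIPS"), false);
+ *
+ *   struct aws_endpoints_resolved_endpoint *resolved = NULL;
+ *   if (aws_endpoints_rule_engine_resolve(engine, ctx, &resolved) == AWS_OP_SUCCESS &&
+ *       aws_endpoints_resolved_endpoint_get_type(resolved) == AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ *       struct aws_byte_cursor url;
+ *       aws_endpoints_resolved_endpoint_get_url(resolved, &url);
+ *   }
+ *
+ *   aws_endpoints_resolved_endpoint_release(resolved);
+ *   aws_endpoints_request_context_release(ctx);
+ *   aws_endpoints_rule_engine_release(engine);
+ */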
+
+struct resolve_template_callback_data {
+ struct aws_allocator *allocator;
+ struct aws_endpoints_resolution_scope *scope;
+};
+
+AWS_STATIC_ASSERT(AWS_ENDPOINTS_VALUE_SIZE == 7);
+static bool is_value_truthy(const struct aws_endpoints_value *value) {
+ switch (value->type) {
+ case AWS_ENDPOINTS_VALUE_NONE:
+ return false;
+ case AWS_ENDPOINTS_VALUE_BOOLEAN:
+ return value->v.boolean;
+ case AWS_ENDPOINTS_VALUE_ARRAY:
+ case AWS_ENDPOINTS_VALUE_STRING:
+ case AWS_ENDPOINTS_VALUE_OBJECT:
+ return true;
+ case AWS_ENDPOINTS_VALUE_NUMBER:
+ return value->v.number != 0;
+ default:
+ AWS_ASSERT(false);
+ return false;
+ }
+}
+
+void s_scope_value_destroy_cb(void *data) {
+ struct aws_endpoints_scope_value *value = data;
+ aws_endpoints_scope_value_destroy(value);
+}
+
+static int s_deep_copy_context_to_scope(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_request_context *context,
+ struct aws_endpoints_resolution_scope *scope) {
+
+ struct aws_endpoints_scope_value *new_value = NULL;
+
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(&context->values); !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter)) {
+
+ struct aws_endpoints_scope_value *context_value = (struct aws_endpoints_scope_value *)iter.element.value;
+
+ new_value = aws_endpoints_scope_value_new(allocator, context_value->name.cur);
+ if (aws_endpoints_deep_copy_parameter_value(allocator, &context_value->value, &new_value->value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to deep copy value.");
+ goto on_error;
+ }
+
+ if (aws_hash_table_put(&scope->values, &new_value->name.cur, new_value, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add deep copy to scope.");
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_scope_value_destroy(new_value);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+}
+
+static int s_init_top_level_scope(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_request_context *context,
+ const struct aws_endpoints_ruleset *ruleset,
+ const struct aws_partitions_config *partitions,
+ struct aws_endpoints_resolution_scope *scope) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(context);
+ AWS_PRECONDITION(ruleset);
+ AWS_PRECONDITION(scope);
+
+ struct aws_endpoints_scope_value *val = NULL;
+ scope->rule_idx = 0;
+ scope->rules = &ruleset->rules;
+ scope->partitions = partitions;
+
+ if (aws_hash_table_init(
+ &scope->values,
+ allocator,
+ 0,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_scope_value_destroy_cb)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values.");
+ goto on_error;
+ }
+
+ if (s_deep_copy_context_to_scope(allocator, context, scope)) {
+ goto on_error;
+ }
+
+ if (aws_array_list_init_dynamic(&scope->added_keys, allocator, 10, sizeof(struct aws_byte_cursor))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init added keys.");
+ goto on_error;
+ }
+
+ /* Add defaults to the top level scope. */
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(&ruleset->parameters); !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter)) {
+ const struct aws_byte_cursor key = *(const struct aws_byte_cursor *)iter.element.key;
+ struct aws_endpoints_parameter *value = (struct aws_endpoints_parameter *)iter.element.value;
+
+ /* Skip non-required values, since they cannot have default values. */
+ if (!value->is_required) {
+ continue;
+ }
+
+ struct aws_hash_element *existing = NULL;
+ if (aws_hash_table_find(&scope->values, &key, &existing)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+ }
+
+ if (existing == NULL) {
+ if (!value->has_default_value) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "No value or default for required parameter.");
+ goto on_error;
+ }
+
+ val = aws_endpoints_scope_value_new(allocator, key);
+ AWS_ASSERT(val);
+
+ switch (value->type) {
+ case AWS_ENDPOINTS_PARAMETER_STRING:
+ val->value.type = AWS_ENDPOINTS_VALUE_STRING;
+ val->value.v.owning_cursor_string =
+ aws_endpoints_non_owning_cursor_create(value->default_value.string);
+ break;
+ case AWS_ENDPOINTS_PARAMETER_BOOLEAN:
+ val->value.type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ val->value.v.boolean = value->default_value.boolean;
+ break;
+ default:
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected parameter type.");
+ goto on_error;
+ }
+
+ if (aws_hash_table_put(&scope->values, &val->name.cur, val, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add value to top level scope.");
+ goto on_error;
+ }
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_scope_value_destroy(val);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+}
+
+static void s_scope_clean_up(struct aws_endpoints_resolution_scope *scope) {
+ AWS_PRECONDITION(scope);
+
+ aws_hash_table_clean_up(&scope->values);
+ aws_array_list_clean_up(&scope->added_keys);
+}
+
+static int s_resolve_expr(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_expr *expr,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value);
+
+static int s_resolve_template(
+ struct aws_byte_cursor template,
+ void *user_data,
+ struct aws_owning_cursor *out_owning_cursor);
+
+int aws_endpoints_argv_expect(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_array_list *argv,
+ size_t idx,
+ enum aws_endpoints_value_type expected_type,
+ struct aws_endpoints_value *out_value) {
+
+ AWS_ZERO_STRUCT(*out_value);
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_expr argv_expr;
+ if (aws_array_list_get_at(argv, &argv_expr, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to parse argv");
+ goto on_error;
+ }
+
+ if (s_resolve_expr(allocator, &argv_expr, scope, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve argv.");
+ goto on_error;
+ }
+
+ if (expected_type != AWS_ENDPOINTS_VALUE_ANY && argv_value.type != expected_type) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE,
+ "Unexpected arg type actual: %u expected %u.",
+ argv_value.type,
+ expected_type);
+ goto on_error;
+ }
+
+ *out_value = argv_value;
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_value_clean_up(&argv_value);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+/*
+******************************
+* Expr/String resolve
+******************************
+*/
+
+static int s_resolve_expr(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_expr *expr,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ AWS_ZERO_STRUCT(*out_value);
+ switch (expr->type) {
+ case AWS_ENDPOINTS_EXPR_STRING: {
+ struct aws_byte_buf buf;
+ struct resolve_template_callback_data data = {.allocator = allocator, .scope = scope};
+ if (aws_byte_buf_init_from_resolved_templated_string(
+ allocator, &buf, expr->e.string, s_resolve_template, &data, false)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated string.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf));
+ aws_byte_buf_clean_up(&buf);
+ break;
+ }
+ case AWS_ENDPOINTS_EXPR_BOOLEAN: {
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = expr->e.boolean;
+ break;
+ }
+ case AWS_ENDPOINTS_EXPR_NUMBER: {
+ out_value->type = AWS_ENDPOINTS_VALUE_NUMBER;
+ out_value->v.number = expr->e.number;
+ break;
+ }
+ case AWS_ENDPOINTS_EXPR_ARRAY: {
+ out_value->type = AWS_ENDPOINTS_VALUE_ARRAY;
+ /* TODO: deep copy */
+ out_value->v.array = expr->e.array;
+ break;
+ }
+ case AWS_ENDPOINTS_EXPR_REFERENCE: {
+ struct aws_hash_element *element;
+ if (aws_hash_table_find(&scope->values, &expr->e.reference, &element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to deref.");
+ goto on_error;
+ }
+
+ if (element == NULL) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ } else {
+ struct aws_endpoints_scope_value *aws_endpoints_scope_value = element->value;
+ *out_value = aws_endpoints_scope_value->value;
+ if (aws_endpoints_scope_value->value.type == AWS_ENDPOINTS_VALUE_STRING) {
+ /* Value will not own the underlying memory; instead it is owned
+ by the scope, so set it to NULL. */
+ out_value->v.owning_cursor_string.string = NULL;
+ } else if (aws_endpoints_scope_value->value.type == AWS_ENDPOINTS_VALUE_OBJECT) {
+ out_value->v.owning_cursor_object.string = NULL;
+ }
+ }
+ break;
+ }
+ case AWS_ENDPOINTS_EXPR_FUNCTION: {
+ if (aws_endpoints_dispatch_standard_lib_fn_resolve(
+ expr->e.function.fn, allocator, &expr->e.function.argv, scope, out_value)) {
+ goto on_error;
+ }
+ break;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+static int s_resolve_one_condition(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_condition *condition,
+ struct aws_endpoints_resolution_scope *scope,
+ bool *out_is_truthy) {
+
+ struct aws_endpoints_scope_value *scope_value = NULL;
+
+ struct aws_endpoints_value val;
+ if (s_resolve_expr(allocator, &condition->expr, scope, &val)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve expr.");
+ goto on_error;
+ }
+
+ *out_is_truthy = is_value_truthy(&val);
+
+ /* Note: assigning the value is skipped if the condition is falsy, since nothing can
+ use it; this avoids adding the value to the scope only to remove it right away. */
+ if (*out_is_truthy && condition->assign.len > 0) {
+ /* If condition assigns a value, push it to scope and let scope
+ handle value memory. */
+ scope_value = aws_endpoints_scope_value_new(allocator, condition->assign);
+ scope_value->value = val;
+
+ if (aws_array_list_push_back(&scope->added_keys, &scope_value->name.cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to update key at given scope.");
+ goto on_error;
+ }
+
+ int was_created = 1;
+ if (aws_hash_table_put(&scope->values, &scope_value->name.cur, scope_value, &was_created)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to set assigned variable.");
+ goto on_error;
+ }
+
+ /* Shadowing existing values is prohibited. */
+ if (!was_created) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Assigned variable shadows existing one.");
+ goto on_error;
+ }
+ } else {
+ /* Otherwise clean up temp value */
+ aws_endpoints_value_clean_up(&val);
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_scope_value_destroy(scope_value);
+ /* Only cleanup value if mem ownership was not transferred to scope value. */
+ if (scope_value == NULL) {
+ aws_endpoints_value_clean_up(&val);
+ }
+
+ *out_is_truthy = false;
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
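+
+/*
+ * For orientation: an illustrative condition, as it could appear in a ruleset
+ * document, that binds its result through "assign" (the function and parameter
+ * names are assumptions, not taken from this file):
+ *
+ *   { "fn": "aws.partition", "argv": [ { "ref": "Region" } ], "assign": "PartitionResult" }
+ *
+ * When such a condition resolves truthy, s_resolve_one_condition above puts the
+ * value into the scope under the assigned name and records the key in added_keys,
+ * so s_revert_scope can roll it back when the enclosing rule is abandoned.
+ */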
+
+static int s_resolve_conditions(
+ struct aws_allocator *allocator,
+ const struct aws_array_list *conditions,
+ struct aws_endpoints_resolution_scope *scope,
+ bool *out_is_truthy) {
+
+ /* Note: spec defines empty conditions list as truthy. */
+ *out_is_truthy = true;
+
+ for (size_t idx = 0; idx < aws_array_list_length(conditions); ++idx) {
+ struct aws_endpoints_condition *condition = NULL;
+ if (aws_array_list_get_at_ptr(conditions, (void **)&condition, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to retrieve condition.");
+ goto on_error;
+ }
+
+ if (s_resolve_one_condition(allocator, condition, scope, out_is_truthy)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve condition.");
+ goto on_error;
+ }
+
+ /* truthiness of all conditions is an AND of truthiness for each condition,
+ hence first false one short circuits resolution */
+ if (!*out_is_truthy) {
+ break;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ *out_is_truthy = false;
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_endpoints_path_through_array(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *value,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value) {
+
+ AWS_PRECONDITION(value->type == AWS_ENDPOINTS_VALUE_ARRAY);
+
+ uint64_t index;
+ struct aws_byte_cursor split = {0};
+ if ((!aws_byte_cursor_next_split(&path_cur, '[', &split) || split.len > 0) ||
+ !aws_byte_cursor_next_split(&path_cur, ']', &split) || aws_byte_cursor_utf8_parse_u64(split, &index)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse index from template string.");
+ goto on_error;
+ }
+
+ if (index >= aws_array_list_length(&value->v.array)) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_endpoints_expr *expr = NULL;
+ if (aws_array_list_get_at_ptr(&value->v.array, (void **)&expr, (size_t)index)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to index into resolved value");
+ goto on_error;
+ }
+
+ struct aws_endpoints_value val;
+ if (s_resolve_expr(allocator, expr, scope, &val)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve val.");
+ aws_endpoints_value_clean_up(&val);
+ goto on_error;
+ }
+
+ *out_value = val;
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_endpoints_path_through_object(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_value *value,
+ struct aws_byte_cursor path_cur,
+ struct aws_endpoints_value *out_value) {
+
+ AWS_ZERO_STRUCT(*out_value);
+ struct aws_json_value *root_node = NULL;
+
+ struct aws_byte_cursor value_cur = value->type != AWS_ENDPOINTS_VALUE_STRING ? value->v.owning_cursor_string.cur
+ : value->v.owning_cursor_object.cur;
+
+ root_node = aws_json_value_new_from_string(allocator, value_cur);
+ const struct aws_json_value *result;
+ if (aws_path_through_json(allocator, root_node, path_cur, &result)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through json.");
+ goto on_error;
+ }
+
+ if (result == NULL) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ } else if (aws_json_value_is_string(result)) {
+ struct aws_byte_cursor final;
+ if (aws_json_value_get_string(result, &final)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse string from node.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, final);
+ } else if (aws_json_value_is_array(result) || aws_json_value_is_object(result)) {
+ struct aws_byte_buf json_blob;
+ aws_byte_buf_init(&json_blob, allocator, 0);
+
+ if (aws_byte_buf_append_json_string(result, &json_blob)) {
+ aws_byte_buf_clean_up(&json_blob);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to extract properties.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &json_blob));
+ aws_byte_buf_clean_up(&json_blob);
+ } else if (aws_json_value_is_boolean(result)) {
+ if (aws_json_value_get_boolean(result, &out_value->v.boolean)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse boolean from node.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ } else if (aws_json_value_is_number(result)) {
+ if (aws_json_value_get_number(result, &out_value->v.number)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Could not parse number from node.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_NUMBER;
+ }
+
+ aws_json_value_destroy(root_node);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_json_value_destroy(root_node);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+static int s_resolve_templated_value_with_pathing(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_byte_cursor template_cur,
+ struct aws_owning_cursor *out_owning_cursor) {
+
+ struct aws_endpoints_value resolved_value = {0};
+ struct aws_byte_cursor split = {0};
+ if (!aws_byte_cursor_next_split(&template_cur, '#', &split) || split.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value in template string.");
+ goto on_error;
+ }
+
+ struct aws_hash_element *elem = NULL;
+ if (aws_hash_table_find(&scope->values, &split, &elem) || elem == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Templated value does not exist: " PRInSTR, AWS_BYTE_CURSOR_PRI(split));
+ goto on_error;
+ }
+
+ struct aws_endpoints_scope_value *scope_value = elem->value;
+ if (!aws_byte_cursor_next_split(&template_cur, '#', &split)) {
+ if (scope_value->value.type != AWS_ENDPOINTS_VALUE_STRING) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected type: must be string if pathing is not provided");
+ goto on_error;
+ }
+
+ *out_owning_cursor = aws_endpoints_non_owning_cursor_create(scope_value->value.v.owning_cursor_string.cur);
+ return AWS_OP_SUCCESS;
+ }
+
+ if (scope_value->value.type == AWS_ENDPOINTS_VALUE_OBJECT) {
+ if (aws_endpoints_path_through_object(allocator, &scope_value->value, split, &resolved_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through object.");
+ goto on_error;
+ }
+ } else if (scope_value->value.type == AWS_ENDPOINTS_VALUE_ARRAY) {
+ if (aws_endpoints_path_through_array(allocator, scope, &scope_value->value, split, &resolved_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through array.");
+ goto on_error;
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value type for pathing through.");
+ goto on_error;
+ }
+
+ if (resolved_value.type != AWS_ENDPOINTS_VALUE_STRING) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Templated string didn't resolve to string");
+ goto on_error;
+ }
+
+ if (resolved_value.v.owning_cursor_string.string != NULL) {
+ /* Transfer ownership of the underlying string. */
+ *out_owning_cursor = aws_endpoints_owning_cursor_from_string(resolved_value.v.owning_cursor_string.string);
+ resolved_value.v.owning_cursor_string.string = NULL;
+ } else {
+ /* Unlikely to get here since current pathing always returns a new string. */
+ *out_owning_cursor = aws_endpoints_non_owning_cursor_create(resolved_value.v.owning_cursor_string.cur);
+ }
+
+ aws_endpoints_value_clean_up(&resolved_value);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_value_clean_up(&resolved_value);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+static int s_resolve_template(struct aws_byte_cursor template, void *user_data, struct aws_owning_cursor *out_cursor) {
+
+ struct resolve_template_callback_data *data = user_data;
+
+ if (s_resolve_templated_value_with_pathing(data->allocator, data->scope, template, out_cursor)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve template value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+******************************
+* Request Context
+******************************
+*/
+
+static void s_endpoints_request_context_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_request_context *context = data;
+ aws_hash_table_clean_up(&context->values);
+
+ aws_mem_release(context->allocator, context);
+}
+
+struct aws_endpoints_request_context *aws_endpoints_request_context_new(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_request_context *context =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_request_context));
+
+ context->allocator = allocator;
+ aws_ref_count_init(&context->ref_count, context, s_endpoints_request_context_destroy);
+
+ if (aws_hash_table_init(
+ &context->values,
+ allocator,
+ 0,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_scope_value_destroy_cb)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init request context values.");
+ goto on_error;
+ }
+
+ return context;
+
+on_error:
+ s_endpoints_request_context_destroy(context);
+ return NULL;
+}
+
+struct aws_endpoints_request_context *aws_endpoints_request_context_acquire(
+ struct aws_endpoints_request_context *request_context) {
+ AWS_PRECONDITION(request_context);
+ if (request_context) {
+ aws_ref_count_acquire(&request_context->ref_count);
+ }
+ return request_context;
+}
+
+struct aws_endpoints_request_context *aws_endpoints_request_context_release(
+ struct aws_endpoints_request_context *request_context) {
+ if (request_context) {
+ aws_ref_count_release(&request_context->ref_count);
+ }
+ return NULL;
+}
+
+int aws_endpoints_request_context_add_string(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ struct aws_byte_cursor value) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_scope_value *val = aws_endpoints_scope_value_new(allocator, name);
+ val->value.type = AWS_ENDPOINTS_VALUE_STRING;
+ val->value.v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, value);
+
+ if (aws_hash_table_put(&context->values, &val->name.cur, val, NULL)) {
+ aws_endpoints_scope_value_destroy(val);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_request_context_add_boolean(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_request_context *context,
+ struct aws_byte_cursor name,
+ bool value) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_scope_value *val = aws_endpoints_scope_value_new(allocator, name);
+ val->value.type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ val->value.v.boolean = value;
+
+ if (aws_hash_table_put(&context->values, &val->name.cur, val, NULL)) {
+ aws_endpoints_scope_value_destroy(val);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+******************************
+* Rule engine.
+******************************
+*/
+
+struct aws_endpoints_resolved_endpoint {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+ enum aws_endpoints_resolved_endpoint_type type;
+ union {
+ struct resolved_endpoint {
+ struct aws_byte_buf url;
+ struct aws_byte_buf properties;
+ struct aws_hash_table headers;
+ } endpoint;
+ struct aws_byte_buf error;
+ } r;
+};
+
+static void s_endpoints_resolved_endpoint_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_resolved_endpoint *resolved = data;
+ if (resolved->type == AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ aws_byte_buf_clean_up(&resolved->r.endpoint.url);
+ aws_byte_buf_clean_up(&resolved->r.endpoint.properties);
+ aws_hash_table_clean_up(&resolved->r.endpoint.headers);
+ } else if (resolved->type == AWS_ENDPOINTS_RESOLVED_ERROR) {
+ aws_byte_buf_clean_up(&resolved->r.error);
+ }
+ aws_mem_release(resolved->allocator, resolved);
+}
+
+struct aws_endpoints_resolved_endpoint *s_endpoints_resolved_endpoint_new(struct aws_allocator *allocator) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_endpoints_resolved_endpoint *resolved =
+ aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_resolved_endpoint));
+ resolved->allocator = allocator;
+
+ aws_ref_count_init(&resolved->ref_count, resolved, s_endpoints_resolved_endpoint_destroy);
+
+ return resolved;
+}
+
+struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_acquire(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint) {
+ AWS_PRECONDITION(resolved_endpoint);
+ if (resolved_endpoint) {
+ aws_ref_count_acquire(&resolved_endpoint->ref_count);
+ }
+ return resolved_endpoint;
+}
+
+struct aws_endpoints_resolved_endpoint *aws_endpoints_resolved_endpoint_release(
+ struct aws_endpoints_resolved_endpoint *resolved_endpoint) {
+ if (resolved_endpoint) {
+ aws_ref_count_release(&resolved_endpoint->ref_count);
+ }
+ return NULL;
+}
+
+enum aws_endpoints_resolved_endpoint_type aws_endpoints_resolved_endpoint_get_type(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint) {
+ AWS_PRECONDITION(resolved_endpoint);
+ return resolved_endpoint->type;
+}
+
+int aws_endpoints_resolved_endpoint_get_url(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_url) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_url);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_url = aws_byte_cursor_from_buf(&resolved_endpoint->r.endpoint.url);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_resolved_endpoint_get_properties(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_properties) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_properties);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_properties = aws_byte_cursor_from_buf(&resolved_endpoint->r.endpoint.properties);
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_resolved_endpoint_get_headers(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ const struct aws_hash_table **out_headers) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_headers);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ENDPOINT) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_headers = &resolved_endpoint->r.endpoint.headers;
+ return AWS_OP_SUCCESS;
+}
+
+int aws_endpoints_resolved_endpoint_get_error(
+ const struct aws_endpoints_resolved_endpoint *resolved_endpoint,
+ struct aws_byte_cursor *out_error) {
+ AWS_PRECONDITION(resolved_endpoint);
+ AWS_PRECONDITION(out_error);
+ if (resolved_endpoint->type != AWS_ENDPOINTS_RESOLVED_ERROR) {
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ *out_error = aws_byte_cursor_from_buf(&resolved_endpoint->r.error);
+ return AWS_OP_SUCCESS;
+}
+
+struct aws_endpoints_rule_engine {
+ struct aws_allocator *allocator;
+ struct aws_ref_count ref_count;
+
+ struct aws_endpoints_ruleset *ruleset;
+ struct aws_partitions_config *partitions_config;
+};
+
+static void s_endpoints_rule_engine_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_rule_engine *engine = data;
+ aws_endpoints_ruleset_release(engine->ruleset);
+ aws_partitions_config_release(engine->partitions_config);
+
+ aws_mem_release(engine->allocator, engine);
+}
+
+struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_new(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_ruleset *ruleset,
+ struct aws_partitions_config *partitions_config) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(ruleset);
+
+ struct aws_endpoints_rule_engine *engine = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_rule_engine));
+ engine->allocator = allocator;
+ engine->ruleset = ruleset;
+ engine->partitions_config = partitions_config;
+
+ aws_endpoints_ruleset_acquire(ruleset);
+ aws_partitions_config_acquire(partitions_config);
+ aws_ref_count_init(&engine->ref_count, engine, s_endpoints_rule_engine_destroy);
+
+ return engine;
+}
+
+struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_acquire(struct aws_endpoints_rule_engine *rule_engine) {
+ AWS_PRECONDITION(rule_engine);
+ if (rule_engine) {
+ aws_ref_count_acquire(&rule_engine->ref_count);
+ }
+ return rule_engine;
+}
+
+struct aws_endpoints_rule_engine *aws_endpoints_rule_engine_release(struct aws_endpoints_rule_engine *rule_engine) {
+ if (rule_engine) {
+ aws_ref_count_release(&rule_engine->ref_count);
+ }
+ return NULL;
+}
+
+int s_revert_scope(struct aws_endpoints_resolution_scope *scope) {
+
+ for (size_t idx = 0; idx < aws_array_list_length(&scope->added_keys); ++idx) {
+ struct aws_byte_cursor *cur = NULL;
+ if (aws_array_list_get_at_ptr(&scope->added_keys, (void **)&cur, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to retrieve value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ }
+
+ aws_hash_table_remove(&scope->values, cur, NULL, NULL);
+ }
+
+ aws_array_list_clear(&scope->added_keys);
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_on_string_array_element_destroy(void *element) {
+ struct aws_string *str = *(struct aws_string **)element;
+ aws_string_destroy(str);
+}
+
+static void s_callback_headers_destroy(void *data) {
+ struct aws_array_list *array = data;
+ struct aws_allocator *alloc = array->alloc;
+ aws_array_list_deep_clean_up(array, s_on_string_array_element_destroy);
+ aws_mem_release(alloc, array);
+}
+
+static int s_resolve_headers(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_hash_table *headers,
+ struct aws_hash_table *out_headers) {
+
+ struct aws_endpoints_value value = {0};
+ struct aws_array_list *resolved_headers = NULL;
+
+ if (aws_hash_table_init(
+ out_headers,
+ allocator,
+ aws_hash_table_get_entry_count(headers),
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ s_callback_headers_destroy)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init table for resolved headers");
+ goto on_error;
+ }
+
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(headers); !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter)) {
+
+ struct aws_string *key = (struct aws_string *)iter.element.key;
+ struct aws_array_list *header_list = (struct aws_array_list *)iter.element.value;
+
+ resolved_headers = aws_mem_calloc(allocator, 1, sizeof(struct aws_array_list));
+ aws_array_list_init_dynamic(
+ resolved_headers, allocator, aws_array_list_length(header_list), sizeof(struct aws_string *));
+
+ for (size_t i = 0; i < aws_array_list_length(header_list); ++i) {
+ struct aws_endpoints_expr *expr = NULL;
+ if (aws_array_list_get_at_ptr(header_list, (void **)&expr, i)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get header.");
+ goto on_error;
+ }
+
+ if (s_resolve_expr(allocator, expr, scope, &value) || value.type != AWS_ENDPOINTS_VALUE_STRING) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve header expr.");
+ goto on_error;
+ }
+
+ struct aws_string *str = aws_string_new_from_cursor(allocator, &value.v.owning_cursor_string.cur);
+ if (aws_array_list_push_back(resolved_headers, &str)) {
+ aws_string_destroy(str);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resolved header to result.");
+ goto on_error;
+ }
+
+ aws_endpoints_value_clean_up(&value);
+ }
+
+ if (aws_hash_table_put(out_headers, aws_string_clone_or_reuse(allocator, key), resolved_headers, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resolved header to result.");
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_value_clean_up(&value);
+ if (resolved_headers != NULL) {
+ s_callback_headers_destroy(resolved_headers);
+ }
+ aws_hash_table_clean_up(out_headers);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_endpoints_rule_engine_resolve(
+ struct aws_endpoints_rule_engine *engine,
+ const struct aws_endpoints_request_context *context,
+ struct aws_endpoints_resolved_endpoint **out_resolved_endpoint) {
+
+ if (aws_array_list_length(&engine->ruleset->rules) == 0) {
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET);
+ }
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_resolution_scope scope;
+ if (s_init_top_level_scope(engine->allocator, context, engine->ruleset, engine->partitions_config, &scope)) {
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ while (scope.rule_idx < aws_array_list_length(scope.rules)) {
+ struct aws_endpoints_rule *rule = NULL;
+ if (aws_array_list_get_at_ptr(scope.rules, (void **)&rule, scope.rule_idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get rule.");
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ bool is_truthy = false;
+ if (s_resolve_conditions(engine->allocator, &rule->conditions, &scope, &is_truthy)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve conditions.");
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ if (!is_truthy) {
+ s_revert_scope(&scope);
+ ++scope.rule_idx;
+ continue;
+ }
+
+ switch (rule->type) {
+ case AWS_ENDPOINTS_RULE_ENDPOINT: {
+ struct aws_endpoints_resolved_endpoint *endpoint = s_endpoints_resolved_endpoint_new(engine->allocator);
+ endpoint->type = AWS_ENDPOINTS_RESOLVED_ENDPOINT;
+
+ struct aws_endpoints_value val;
+ if (s_resolve_expr(engine->allocator, &rule->rule_data.endpoint.url, &scope, &val) ||
+ val.type != AWS_ENDPOINTS_VALUE_STRING ||
+ aws_byte_buf_init_copy_from_cursor(
+ &endpoint->r.endpoint.url, engine->allocator, val.v.owning_cursor_string.cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated url.");
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ aws_endpoints_value_clean_up(&val);
+
+ struct resolve_template_callback_data data = {.allocator = engine->allocator, .scope = &scope};
+
+ if (rule->rule_data.endpoint.properties.len > 0 &&
+ aws_byte_buf_init_from_resolved_templated_string(
+ engine->allocator,
+ &endpoint->r.endpoint.properties,
+ aws_byte_cursor_from_buf(&rule->rule_data.endpoint.properties),
+ s_resolve_template,
+ &data,
+ true)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated properties.");
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ if (s_resolve_headers(
+ engine->allocator, &scope, &rule->rule_data.endpoint.headers, &endpoint->r.endpoint.headers)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated headers.");
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ *out_resolved_endpoint = endpoint;
+ goto on_done;
+ }
+ case AWS_ENDPOINTS_RULE_ERROR: {
+ struct aws_endpoints_resolved_endpoint *error = s_endpoints_resolved_endpoint_new(engine->allocator);
+ error->type = AWS_ENDPOINTS_RESOLVED_ERROR;
+
+ struct aws_endpoints_value val;
+ if (s_resolve_expr(engine->allocator, &rule->rule_data.error.error, &scope, &val) ||
+ val.type != AWS_ENDPOINTS_VALUE_STRING ||
+ aws_byte_buf_init_copy_from_cursor(
+ &error->r.error, engine->allocator, val.v.owning_cursor_string.cur)) {
+ aws_endpoints_value_clean_up(&val);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve templated url.");
+ result = AWS_OP_ERR;
+ goto on_done;
+ }
+
+ aws_endpoints_value_clean_up(&val);
+ *out_resolved_endpoint = error;
+ goto on_done;
+ }
+ case AWS_ENDPOINTS_RULE_TREE: {
+ /* jumping down a level */
+ aws_array_list_clear(&scope.added_keys);
+ scope.rule_idx = 0;
+ scope.rules = &rule->rule_data.tree.rules;
+ continue;
+ }
+ default: {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected rule type.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ }
+ }
+
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "All rules have been exhausted.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED);
+
+on_done:
+ AWS_LOGF_DEBUG(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Resolved endpoint with status %d", result);
+ s_scope_clean_up(&scope);
+ return result;
+}
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
new file mode 100644
index 0000000000..99f31a5062
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_ruleset.c
@@ -0,0 +1,958 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/json.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+/* parameter types */
+static struct aws_byte_cursor s_string_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("string");
+static struct aws_byte_cursor s_boolean_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("boolean");
+
+/* rule types */
+static struct aws_byte_cursor s_endpoint_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("endpoint");
+static struct aws_byte_cursor s_error_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("error");
+static struct aws_byte_cursor s_tree_type_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("tree");
+
+static struct aws_byte_cursor s_supported_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1.0");
+
+static struct aws_byte_cursor s_empty_cursor = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("");
+
+/* TODO: improve error messages. Include json line num? or dump json node? */
+
+struct aws_byte_cursor aws_endpoints_get_supported_ruleset_version(void) {
+ return s_supported_version;
+}
+
+/*
+******************************
+* Parameter Getters.
+******************************
+*/
+enum aws_endpoints_parameter_type aws_endpoints_parameter_get_type(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->type;
+}
+
+struct aws_byte_cursor aws_endpoints_parameter_get_built_in(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->built_in;
+}
+
+int aws_endpoints_parameter_get_default_string(
+ const struct aws_endpoints_parameter *parameter,
+ struct aws_byte_cursor *out_cursor) {
+ AWS_PRECONDITION(parameter);
+ AWS_PRECONDITION(out_cursor);
+
+ if (parameter->type == AWS_ENDPOINTS_PARAMETER_STRING) {
+ *out_cursor = parameter->default_value.string;
+ return AWS_OP_SUCCESS;
+ }
+
+ *out_cursor = s_empty_cursor;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+}
+
+int aws_endpoints_parameter_get_default_boolean(
+ const struct aws_endpoints_parameter *parameter,
+ const bool **out_bool) {
+ AWS_PRECONDITION(parameter);
+ AWS_PRECONDITION(out_bool);
+
+ if (parameter->type == AWS_ENDPOINTS_PARAMETER_BOOLEAN) {
+ *out_bool = &parameter->default_value.boolean;
+ return AWS_OP_SUCCESS;
+ }
+
+ *out_bool = NULL;
+ return aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+}
+
+bool aws_endpoints_parameters_get_is_required(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->is_required;
+}
+
+struct aws_byte_cursor aws_endpoints_parameter_get_documentation(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->documentation;
+}
+
+bool aws_endpoints_parameters_get_is_deprecated(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->is_deprecated;
+}
+
+struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_message(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->deprecated_message;
+}
+
+struct aws_byte_cursor aws_endpoints_parameter_get_deprecated_since(const struct aws_endpoints_parameter *parameter) {
+ AWS_PRECONDITION(parameter);
+ return parameter->deprecated_since;
+}
+
+/*
+******************************
+* Parser getters.
+******************************
+*/
+
+const struct aws_hash_table *aws_endpoints_ruleset_get_parameters(struct aws_endpoints_ruleset *ruleset) {
+ AWS_PRECONDITION(ruleset);
+ return &ruleset->parameters;
+}
+
+struct aws_byte_cursor aws_endpoints_ruleset_get_version(const struct aws_endpoints_ruleset *ruleset) {
+ AWS_PRECONDITION(ruleset);
+ return ruleset->version;
+}
+
+struct aws_byte_cursor aws_endpoints_ruleset_get_service_id(const struct aws_endpoints_ruleset *ruleset) {
+ AWS_PRECONDITION(ruleset);
+ return ruleset->service_id;
+}
+
+/*
+******************************
+* Parser helpers.
+******************************
+*/
+
+static void s_on_rule_array_element_clean_up(void *element) {
+ struct aws_endpoints_rule *rule = element;
+ aws_endpoints_rule_clean_up(rule);
+}
+
+static void s_on_expr_element_clean_up(void *data) {
+ struct aws_endpoints_expr *expr = data;
+ aws_endpoints_expr_clean_up(expr);
+}
+
+static void s_callback_endpoints_parameter_destroy(void *data) {
+ struct aws_endpoints_parameter *parameter = data;
+ aws_endpoints_parameter_destroy(parameter);
+}
+
+static void s_callback_headers_destroy(void *data) {
+ struct aws_array_list *array = data;
+ struct aws_allocator *alloc = array->alloc;
+ aws_array_list_deep_clean_up(array, s_on_expr_element_clean_up);
+ aws_array_list_clean_up(array);
+ aws_mem_release(alloc, array);
+}
+
+struct array_parser_wrapper {
+ struct aws_allocator *allocator;
+ struct aws_array_list *array;
+};
+
+static int s_init_array_from_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *value_node,
+ struct aws_array_list *values,
+ aws_json_on_value_encountered_const_fn *value_fn) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(values);
+ AWS_PRECONDITION(value_node);
+ AWS_PRECONDITION(value_fn);
+
+ struct array_parser_wrapper wrapper = {
+ .allocator = allocator,
+ .array = values,
+ };
+
+ if (aws_json_const_iterate_array(value_node, value_fn, &wrapper)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to iterate through array.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct member_parser_wrapper {
+ struct aws_allocator *allocator;
+ struct aws_hash_table *table;
+};
+
+static int s_init_members_from_json(
+ struct aws_allocator *allocator,
+ struct aws_json_value *node,
+ struct aws_hash_table *table,
+ aws_json_on_member_encountered_const_fn *member_fn) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(node);
+ AWS_PRECONDITION(table);
+
+ struct member_parser_wrapper wrapper = {
+ .allocator = allocator,
+ .table = table,
+ };
+
+ if (aws_json_const_iterate_object(node, member_fn, &wrapper)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to iterate through member fields.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+/*
+******************************
+* Parser functions.
+******************************
+*/
+
+static int s_parse_function(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *node,
+ struct aws_endpoints_function *function);
+
+/*
+ * Note: this function only fails when the node is a ref (i.e. an object with a
+ * "ref" field) that cannot be parsed completely.
+ */
+static int s_try_parse_reference(const struct aws_json_value *node, struct aws_byte_cursor *out_reference) {
+ AWS_PRECONDITION(node);
+
+ AWS_ZERO_STRUCT(*out_reference);
+
+ struct aws_json_value *ref_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("ref"));
+ if (ref_node != NULL && aws_json_value_get_string(ref_node, out_reference)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse ref.");
+ AWS_ZERO_STRUCT(*out_reference);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_parse_expr(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *node,
+ struct aws_endpoints_expr *expr);
+
+static int s_on_expr_element(
+ size_t idx,
+ const struct aws_json_value *value_node,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(value_node);
+ AWS_PRECONDITION(user_data);
+
+ struct array_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_expr expr;
+ if (s_parse_expr(wrapper->allocator, value_node, &expr)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse expr.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ aws_array_list_push_back(wrapper->array, &expr);
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_parse_expr(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *node,
+ struct aws_endpoints_expr *expr) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(node);
+ AWS_PRECONDITION(expr);
+
+ AWS_ZERO_STRUCT(*expr);
+
+ /* TODO: this recurses. In practical circumstances the depth will never be high,
+ but we should still consider an iterative approach. */
+ if (aws_json_value_is_string(node) && !aws_json_value_get_string(node, &expr->e.string)) {
+ expr->type = AWS_ENDPOINTS_EXPR_STRING;
+ return AWS_OP_SUCCESS;
+ } else if (aws_json_value_is_number(node) && !aws_json_value_get_number(node, &expr->e.number)) {
+ expr->type = AWS_ENDPOINTS_EXPR_NUMBER;
+ return AWS_OP_SUCCESS;
+ } else if (aws_json_value_is_boolean(node) && !aws_json_value_get_boolean(node, &expr->e.boolean)) {
+ expr->type = AWS_ENDPOINTS_EXPR_BOOLEAN;
+ return AWS_OP_SUCCESS;
+ } else if (aws_json_value_is_array(node)) {
+ expr->type = AWS_ENDPOINTS_EXPR_ARRAY;
+ size_t num_elements = aws_json_get_array_size(node);
+ aws_array_list_init_dynamic(&expr->e.array, allocator, num_elements, sizeof(struct aws_endpoints_expr));
+ if (s_init_array_from_json(allocator, node, &expr->e.array, s_on_expr_element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse array value type.");
+ goto on_error;
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_cursor reference;
+ if (s_try_parse_reference(node, &reference)) {
+ goto on_error;
+ }
+
+ if (reference.len > 0) {
+ expr->type = AWS_ENDPOINTS_EXPR_REFERENCE;
+ expr->e.reference = reference;
+ return AWS_OP_SUCCESS;
+ }
+
+ expr->type = AWS_ENDPOINTS_EXPR_FUNCTION;
+ if (s_parse_function(allocator, node, &expr->e.function)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_expr_clean_up(expr);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse expr type");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_parse_function(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *node,
+ struct aws_endpoints_function *function) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(node);
+
+ AWS_ZERO_STRUCT(*function);
+
+ struct aws_json_value *fn_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("fn"));
+ if (fn_node == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Node is not a function.");
+ goto on_error;
+ }
+
+ struct aws_byte_cursor fn_cur;
+ if (aws_json_value_get_string(fn_node, &fn_cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract fn name.");
+ goto on_error;
+ }
+
+ function->fn = AWS_ENDPOINTS_FN_LAST;
+ uint64_t hash = aws_hash_byte_cursor_ptr(&fn_cur);
+ for (int idx = AWS_ENDPOINTS_FN_FIRST; idx < AWS_ENDPOINTS_FN_LAST; ++idx) {
+ if (aws_endpoints_fn_name_hash[idx] == hash) {
+ function->fn = idx;
+ break;
+ }
+ }
+
+ if (function->fn == AWS_ENDPOINTS_FN_LAST) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_PARSING,
+ "Could not map function name to function type: " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(fn_cur));
+ goto on_error;
+ }
+
+ struct aws_json_value *argv_node = aws_json_value_get_from_object(node, aws_byte_cursor_from_c_str("argv"));
+ if (argv_node == NULL || !aws_json_value_is_array(argv_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "No argv or unexpected type.");
+ goto on_error;
+ }
+
+ size_t num_args = aws_json_get_array_size(argv_node);
+ aws_array_list_init_dynamic(&function->argv, allocator, num_args, sizeof(struct aws_endpoints_expr));
+
+ if (s_init_array_from_json(allocator, argv_node, &function->argv, s_on_expr_element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse argv.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_function_clean_up(function);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_on_parameter_key(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+ AWS_PRECONDITION(key);
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+
+ struct member_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_parameter *parameter = aws_endpoints_parameter_new(wrapper->allocator, *key);
+
+ /* required fields */
+ struct aws_byte_cursor type_cur;
+ struct aws_json_value *type_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("type"));
+ if (type_node == NULL || aws_json_value_get_string(type_node, &type_cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter type.");
+ goto on_error;
+ }
+
+ enum aws_endpoints_parameter_type type;
+ if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_string_type_cur)) {
+ type = AWS_ENDPOINTS_PARAMETER_STRING;
+ } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_boolean_type_cur)) {
+ type = AWS_ENDPOINTS_PARAMETER_BOOLEAN;
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for parameter.");
+ goto on_error;
+ }
+
+ parameter->type = type;
+
+ struct aws_json_value *documentation_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("documentation"));
+
+ /* TODO: spec calls for documentation to be required, but several test-cases
+ are missing docs on parameters */
+ if (documentation_node != NULL) {
+ if (aws_json_value_get_string(documentation_node, &parameter->documentation)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameter documentation.");
+ goto on_error;
+ }
+ }
+
+ /* optional fields */
+ struct aws_json_value *built_in_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("builtIn"));
+ if (built_in_node != NULL) {
+ if (aws_json_value_get_string(built_in_node, &parameter->built_in)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for built-in parameter field.");
+ goto on_error;
+ }
+ }
+
+ struct aws_json_value *required_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("required"));
+ if (required_node != NULL) {
+ if (!aws_json_value_is_boolean(required_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for required parameter field.");
+ goto on_error;
+ }
+ aws_json_value_get_boolean(required_node, &parameter->is_required);
+ }
+
+ struct aws_json_value *default_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("default"));
+ parameter->has_default_value = default_node != NULL;
+ if (default_node != NULL) {
+ if (type == AWS_ENDPOINTS_PARAMETER_STRING &&
+ aws_json_value_get_string(default_node, &parameter->default_value.string)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for default parameter value.");
+ goto on_error;
+ } else if (
+ type == AWS_ENDPOINTS_PARAMETER_BOOLEAN &&
+ aws_json_value_get_boolean(default_node, &parameter->default_value.boolean)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for default parameter value.");
+ goto on_error;
+ }
+ }
+
+ struct aws_json_value *deprecated_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("deprecated"));
+ if (deprecated_node != NULL) {
+ struct aws_json_value *deprecated_message_node =
+ aws_json_value_get_from_object(deprecated_node, aws_byte_cursor_from_c_str("message"));
+ if (deprecated_message_node != NULL &&
+ aws_json_value_get_string(deprecated_message_node, &parameter->deprecated_message)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for deprecated message.");
+ goto on_error;
+ }
+
+ struct aws_json_value *deprecated_since_node =
+ aws_json_value_get_from_object(deprecated_node, aws_byte_cursor_from_c_str("since"));
+ if (deprecated_since_node != NULL &&
+ aws_json_value_get_string(deprecated_since_node, &parameter->deprecated_since)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for deprecated since.");
+ goto on_error;
+ }
+ }
+
+ if (aws_hash_table_put(wrapper->table, &parameter->name, parameter, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to add parameter.");
+ goto on_error;
+ }
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_parameter_destroy(parameter);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_on_condition_element(
+ size_t idx,
+ const struct aws_json_value *condition_node,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(condition_node);
+ AWS_PRECONDITION(user_data);
+
+ struct array_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_condition condition;
+ AWS_ZERO_STRUCT(condition);
+
+ condition.expr.type = AWS_ENDPOINTS_EXPR_FUNCTION;
+ if (s_parse_function(wrapper->allocator, condition_node, &condition.expr.e.function)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse function.");
+ goto on_error;
+ }
+
+ struct aws_json_value *assign_node =
+ aws_json_value_get_from_object(condition_node, aws_byte_cursor_from_c_str("assign"));
+ if (assign_node != NULL && aws_json_value_get_string(assign_node, &condition.assign)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected value for assign.");
+ goto on_error;
+ }
+
+ aws_array_list_push_back(wrapper->array, &condition);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_condition_clean_up(&condition);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_on_header_element(
+ size_t idx,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+ struct array_parser_wrapper *wrapper = user_data;
+
+ struct aws_endpoints_expr expr;
+ if (s_parse_expr(wrapper->allocator, value, &expr)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected format for header element.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ aws_array_list_push_back(wrapper->array, &expr);
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_headers_key(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+ AWS_PRECONDITION(key);
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+ struct member_parser_wrapper *wrapper = user_data;
+
+ if (!aws_json_value_is_array(value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected format for header value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ size_t num_elements = aws_json_get_array_size(value);
+ struct aws_array_list *headers = aws_mem_calloc(wrapper->allocator, 1, sizeof(struct aws_array_list));
+ aws_array_list_init_dynamic(headers, wrapper->allocator, num_elements, sizeof(struct aws_endpoints_expr));
+ if (s_init_array_from_json(wrapper->allocator, value, headers, s_on_header_element)) {
+ goto on_error;
+ }
+
+ aws_hash_table_put(wrapper->table, aws_string_new_from_cursor(wrapper->allocator, key), headers, NULL);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ if (headers) {
+ s_callback_headers_destroy(headers);
+ }
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_parse_endpoints_rule_data_endpoint(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *rule_node,
+ struct aws_endpoints_rule_data_endpoint *data_rule) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(rule_node);
+ AWS_PRECONDITION(data_rule);
+
+ data_rule->allocator = allocator;
+ struct aws_json_value *url_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("url"));
+ if (url_node == NULL || aws_json_value_is_string(url_node)) {
+ data_rule->url.type = AWS_ENDPOINTS_EXPR_STRING;
+ aws_json_value_get_string(url_node, &data_rule->url.e.string);
+ } else {
+ struct aws_byte_cursor reference;
+ if (s_try_parse_reference(url_node, &reference)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse reference.");
+ goto on_error;
+ }
+
+ if (reference.len > 0) {
+ data_rule->url.type = AWS_ENDPOINTS_EXPR_REFERENCE;
+ data_rule->url.e.reference = reference;
+ } else {
+ data_rule->url.type = AWS_ENDPOINTS_EXPR_FUNCTION;
+ if (s_parse_function(allocator, url_node, &data_rule->url.e.function)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse function.");
+ goto on_error;
+ }
+ }
+ }
+
+ struct aws_json_value *properties_node =
+ aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("properties"));
+ if (properties_node != NULL) {
+ aws_byte_buf_init(&data_rule->properties, allocator, 0);
+
+ if (aws_byte_buf_append_json_string(properties_node, &data_rule->properties)) {
+ aws_byte_buf_clean_up(&data_rule->properties);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract properties.");
+ goto on_error;
+ }
+ }
+
+ /* TODO: this currently maps aws_string* to aws_array_list*.
+ * We cannot use the same trick as for params (aws_byte_cursor as key),
+ * since the value is a generic type. We could wrap the list in a struct,
+ * but that seems ugly. Anything cleaner?
+ */
+ aws_hash_table_init(
+ &data_rule->headers,
+ allocator,
+ 20,
+ aws_hash_string,
+ aws_hash_callback_string_eq,
+ aws_hash_callback_string_destroy,
+ s_callback_headers_destroy);
+
+ struct aws_json_value *headers_node =
+ aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("headers"));
+ if (headers_node != NULL) {
+
+ if (s_init_members_from_json(allocator, headers_node, &data_rule->headers, s_on_headers_key)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract headers.");
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_rule_data_endpoint_clean_up(data_rule);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_parse_endpoints_rule_data_error(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *error_node,
+ struct aws_endpoints_rule_data_error *data_rule) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(error_node);
+ AWS_PRECONDITION(data_rule);
+
+ if (aws_json_value_is_string(error_node)) {
+ data_rule->error.type = AWS_ENDPOINTS_EXPR_STRING;
+ aws_json_value_get_string(error_node, &data_rule->error.e.string);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ struct aws_byte_cursor reference;
+ if (s_try_parse_reference(error_node, &reference)) {
+ goto on_error;
+ }
+
+ if (reference.len > 0) {
+ data_rule->error.type = AWS_ENDPOINTS_EXPR_REFERENCE;
+ data_rule->error.e.reference = reference;
+ return AWS_OP_SUCCESS;
+ }
+
+ data_rule->error.type = AWS_ENDPOINTS_EXPR_FUNCTION;
+ if (s_parse_function(allocator, error_node, &data_rule->error.e.function)) {
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_rule_data_error_clean_up(data_rule);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse error rule.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_on_rule_element(
+ size_t idx,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data);
+
+static int s_parse_endpoints_rule_data_tree(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *rule_node,
+ struct aws_endpoints_rule_data_tree *rule_data) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(rule_node);
+ AWS_PRECONDITION(rule_data);
+
+ struct aws_json_value *rules_node = aws_json_value_get_from_object(rule_node, aws_byte_cursor_from_c_str("rules"));
+ if (rules_node == NULL || !aws_json_value_is_array(rules_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Rules node is missing or of unexpected type.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ size_t num_rules = aws_json_get_array_size(rules_node);
+ aws_array_list_init_dynamic(&rule_data->rules, allocator, num_rules, sizeof(struct aws_endpoints_rule));
+ if (s_init_array_from_json(allocator, rules_node, &rule_data->rules, s_on_rule_element)) {
+ aws_endpoints_rule_data_tree_clean_up(rule_data);
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse rules.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static int s_on_rule_element(
+ size_t idx,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)idx;
+ (void)out_should_continue;
+ AWS_PRECONDITION(value);
+ AWS_PRECONDITION(user_data);
+
+ struct array_parser_wrapper *wrapper = user_data;
+
+ /* Required fields */
+ struct aws_byte_cursor type_cur;
+ struct aws_json_value *type_node = aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("type"));
+ if (type_node == NULL || aws_json_value_get_string(type_node, &type_cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rule type.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ enum aws_endpoints_rule_type type;
+ if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_endpoint_type_cur)) {
+ type = AWS_ENDPOINTS_RULE_ENDPOINT;
+ } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_error_type_cur)) {
+ type = AWS_ENDPOINTS_RULE_ERROR;
+ } else if (aws_byte_cursor_eq_ignore_case(&type_cur, &s_tree_type_cur)) {
+ type = AWS_ENDPOINTS_RULE_TREE;
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected rule type.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ struct aws_endpoints_rule rule;
+ AWS_ZERO_STRUCT(rule);
+ rule.type = type;
+
+ struct aws_json_value *conditions_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("conditions"));
+ if (conditions_node == NULL || !aws_json_value_is_array(conditions_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Conditions node is missing or not an array.");
+ goto on_error;
+ }
+
+ size_t num_conditions = aws_json_get_array_size(conditions_node);
+ aws_array_list_init_dynamic(
+ &rule.conditions, wrapper->allocator, num_conditions, sizeof(struct aws_endpoints_condition));
+
+ if (s_init_array_from_json(wrapper->allocator, conditions_node, &rule.conditions, s_on_condition_element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract conditions.");
+ goto on_error;
+ }
+
+ switch (type) {
+ case AWS_ENDPOINTS_RULE_ENDPOINT: {
+ struct aws_json_value *endpoint_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("endpoint"));
+ if (endpoint_node == NULL ||
+ s_parse_endpoints_rule_data_endpoint(wrapper->allocator, endpoint_node, &rule.rule_data.endpoint)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract endpoint rule data.");
+ goto on_error;
+ }
+ break;
+ }
+ case AWS_ENDPOINTS_RULE_ERROR: {
+ struct aws_json_value *error_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("error"));
+ if (error_node == NULL ||
+ s_parse_endpoints_rule_data_error(wrapper->allocator, error_node, &rule.rule_data.error)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract error rule data.");
+ goto on_error;
+ }
+ break;
+ }
+ case AWS_ENDPOINTS_RULE_TREE: {
+ if (s_parse_endpoints_rule_data_tree(wrapper->allocator, value, &rule.rule_data.tree)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract tree rule data.");
+ goto on_error;
+ }
+ break;
+ }
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ /* Optional fields */
+ struct aws_json_value *documentation_node =
+ aws_json_value_get_from_object(value, aws_byte_cursor_from_c_str("documentation"));
+ if (documentation_node != NULL) {
+ if (aws_json_value_get_string(documentation_node, &rule.documentation)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rule documentation.");
+ goto on_error;
+ }
+ }
+
+ aws_array_list_push_back(wrapper->array, &rule);
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_rule_clean_up(&rule);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+}
+
+static int s_init_ruleset_from_json(
+ struct aws_allocator *allocator,
+ struct aws_endpoints_ruleset *ruleset,
+ struct aws_byte_cursor json) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(ruleset);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&json));
+
+ struct aws_json_value *root = aws_json_value_new_from_string(allocator, json);
+
+ if (root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse provided string as json.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ }
+
+ ruleset->json_root = root;
+
+ struct aws_json_value *version_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("version"));
+ if (version_node == NULL || aws_json_value_get_string(version_node, &ruleset->version)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract version.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET);
+ goto on_error;
+ }
+
+#ifdef ENDPOINTS_VERSION_CHECK /* TODO: samples are currently inconsistent with versions. skip check for now */
+ if (!aws_byte_cursor_eq_c_str(&ruleset->version, &s_supported_version)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unsupported ruleset version.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET);
+ goto on_error;
+ }
+#endif
+
+ struct aws_json_value *service_id_node =
+ aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("serviceId"));
+
+ if (service_id_node != NULL && aws_json_value_get_string(service_id_node, &ruleset->service_id)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract serviceId.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET);
+ goto on_error;
+ }
+
+ aws_hash_table_init(
+ &ruleset->parameters,
+ allocator,
+ 20,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_callback_endpoints_parameter_destroy);
+
+ struct aws_json_value *parameters_node =
+ aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("parameters"));
+ if (parameters_node == NULL ||
+ s_init_members_from_json(allocator, parameters_node, &ruleset->parameters, s_on_parameter_key)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract parameters.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ goto on_error;
+ }
+
+ struct aws_json_value *rules_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("rules"));
+ if (rules_node == NULL || !aws_json_value_is_array(rules_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Unexpected type for rules node.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ goto on_error;
+ }
+ size_t num_rules = aws_json_get_array_size(rules_node);
+ aws_array_list_init_dynamic(&ruleset->rules, allocator, num_rules, sizeof(struct aws_endpoints_rule));
+ if (s_init_array_from_json(allocator, rules_node, &ruleset->rules, s_on_rule_element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to extract rules.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED);
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return AWS_OP_ERR;
+}
+
+static void s_endpoints_ruleset_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_endpoints_ruleset *ruleset = data;
+
+ aws_json_value_destroy(ruleset->json_root);
+
+ aws_hash_table_clean_up(&ruleset->parameters);
+
+ aws_array_list_deep_clean_up(&ruleset->rules, s_on_rule_array_element_clean_up);
+
+ aws_mem_release(ruleset->allocator, ruleset);
+}
+
+struct aws_endpoints_ruleset *aws_endpoints_ruleset_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor ruleset_json) {
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&ruleset_json));
+
+ struct aws_endpoints_ruleset *ruleset = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_ruleset));
+ ruleset->allocator = allocator;
+
+ if (s_init_ruleset_from_json(allocator, ruleset, ruleset_json)) {
+ s_endpoints_ruleset_destroy(ruleset);
+ return NULL;
+ }
+
+ aws_ref_count_init(&ruleset->ref_count, ruleset, s_endpoints_ruleset_destroy);
+
+ return ruleset;
+}
+
+struct aws_endpoints_ruleset *aws_endpoints_ruleset_acquire(struct aws_endpoints_ruleset *ruleset) {
+ AWS_PRECONDITION(ruleset);
+ if (ruleset) {
+ aws_ref_count_acquire(&ruleset->ref_count);
+ }
+ return ruleset;
+}
+
+struct aws_endpoints_ruleset *aws_endpoints_ruleset_release(struct aws_endpoints_ruleset *ruleset) {
+ if (ruleset) {
+ aws_ref_count_release(&ruleset->ref_count);
+ }
+ return NULL;
+}
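
For reference, a minimal usage sketch of the parser added above. It assumes the public declarations for aws_endpoints_ruleset_new_from_string and aws_endpoints_ruleset_release are picked up from aws/sdkutils/endpoints_rule_engine.h and that aws_sdkutils_library_init/aws_sdkutils_library_clean_up perform the one-time setup; the inline ruleset JSON is only a placeholder:

#include <aws/common/byte_buf.h>
#include <aws/sdkutils/endpoints_rule_engine.h>
#include <aws/sdkutils/sdkutils.h>

#include <stdio.h>

int main(void) {
    struct aws_allocator *allocator = aws_default_allocator();
    aws_sdkutils_library_init(allocator); /* one-time library setup (error strings, log subjects) */

    /* Placeholder ruleset: a version, an empty parameters object, and a single error rule. */
    const char *ruleset_json =
        "{\"version\":\"1.0\",\"parameters\":{},"
        "\"rules\":[{\"type\":\"error\",\"conditions\":[],\"error\":\"always fails\"}]}";

    struct aws_endpoints_ruleset *ruleset =
        aws_endpoints_ruleset_new_from_string(allocator, aws_byte_cursor_from_c_str(ruleset_json));
    if (ruleset == NULL) {
        fprintf(stderr, "parse failed: %s\n", aws_error_debug_str(aws_last_error()));
    } else {
        aws_endpoints_ruleset_release(ruleset); /* drops the only reference and destroys the ruleset */
    }

    aws_sdkutils_library_clean_up();
    return 0;
}
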
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
new file mode 100644
index 0000000000..b559579c38
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_standard_lib.c
@@ -0,0 +1,639 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/common/uri.h>
+
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+#include <aws/sdkutils/resource_name.h>
+
+static struct aws_byte_cursor s_scheme_http = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("http");
+static struct aws_byte_cursor s_scheme_https = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("https");
+
+static int s_resolve_fn_is_set(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_ANY, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isSet.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = argv_value.type != AWS_ENDPOINTS_VALUE_NONE;
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ return result;
+}
+
+static int s_resolve_fn_not(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for not.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = !argv_value.v.boolean;
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ return result;
+}
+
+static int s_resolve_fn_get_attr(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_value argv_path = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_ANY, &argv_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_STRING, &argv_path)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for get attr.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor path_cur = argv_path.v.owning_cursor_string.cur;
+
+ if (argv_value.type == AWS_ENDPOINTS_VALUE_OBJECT) {
+ if (aws_endpoints_path_through_object(allocator, &argv_value, path_cur, out_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ } else if (argv_value.type == AWS_ENDPOINTS_VALUE_ARRAY) {
+ if (aws_endpoints_path_through_array(allocator, scope, &argv_value, path_cur, out_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to path through array.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid value type for pathing through.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_path);
+ return result;
+}
+
+static int s_resolve_fn_substring(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value input_value = {0};
+ struct aws_endpoints_value start_value = {0};
+ struct aws_endpoints_value stop_value = {0};
+ struct aws_endpoints_value reverse_value = {0};
+ if (aws_array_list_length(argv) != 4 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &input_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_NUMBER, &start_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 2, AWS_ENDPOINTS_VALUE_NUMBER, &stop_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 3, AWS_ENDPOINTS_VALUE_BOOLEAN, &reverse_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for substring.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (start_value.v.number >= stop_value.v.number ||
+ input_value.v.owning_cursor_string.cur.len < stop_value.v.number) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ for (size_t idx = 0; idx < input_value.v.owning_cursor_string.cur.len; ++idx) {
+ if (input_value.v.owning_cursor_string.cur.ptr[idx] > 127) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+ }
+
+ if (!reverse_value.v.boolean) {
+ size_t start = (size_t)start_value.v.number;
+ size_t end = (size_t)stop_value.v.number;
+ struct aws_byte_cursor substring = {
+ .ptr = input_value.v.owning_cursor_string.cur.ptr + start,
+ .len = end - start,
+ };
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, substring);
+ } else {
+ size_t r_start = input_value.v.owning_cursor_string.cur.len - (size_t)stop_value.v.number;
+ size_t r_stop = input_value.v.owning_cursor_string.cur.len - (size_t)start_value.v.number;
+
+ struct aws_byte_cursor substring = {
+ .ptr = input_value.v.owning_cursor_string.cur.ptr + r_start,
+ .len = r_stop - r_start,
+ };
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string = aws_endpoints_owning_cursor_from_cursor(allocator, substring);
+ }
+
+on_done:
+ aws_endpoints_value_clean_up(&input_value);
+ aws_endpoints_value_clean_up(&start_value);
+ aws_endpoints_value_clean_up(&stop_value);
+ aws_endpoints_value_clean_up(&reverse_value);
+ return result;
+}
+
+static int s_resolve_fn_string_equals(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value_1 = {0};
+ struct aws_endpoints_value argv_value_2 = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value_1) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_STRING, &argv_value_2)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve stringEquals.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean =
+ aws_byte_cursor_eq(&argv_value_1.v.owning_cursor_string.cur, &argv_value_2.v.owning_cursor_string.cur);
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value_1);
+ aws_endpoints_value_clean_up(&argv_value_2);
+ return result;
+}
+
+static int s_resolve_fn_boolean_equals(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value_1 = {0};
+ struct aws_endpoints_value argv_value_2 = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value_1) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_value_2)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve booleanEquals.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = argv_value_1.v.boolean == argv_value_2.v.boolean;
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value_1);
+ aws_endpoints_value_clean_up(&argv_value_2);
+ return result;
+}
+
+static int s_resolve_fn_uri_encode(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_byte_buf buf = {0};
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parameter to uri encode.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_init(&buf, allocator, 10)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parameter to uri encode.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_append_encoding_uri_param(&buf, &argv_value.v.owning_cursor_string.cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to uri encode value.");
+ aws_byte_buf_clean_up(&buf);
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_STRING;
+ out_value->v.owning_cursor_string =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf));
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_byte_buf_clean_up(&buf);
+ return result;
+}
+
+static bool s_is_uri_ip(struct aws_byte_cursor host, bool is_uri_encoded) {
+ return aws_is_ipv4(host) || aws_is_ipv6(host, is_uri_encoded);
+}
+
+static int s_resolve_fn_parse_url(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_uri uri;
+ struct aws_json_value *root = NULL;
+ struct aws_endpoints_value argv_url = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_url)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for parse url.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_uri_init_parse(&uri, allocator, &argv_url.v.owning_cursor_string.cur)) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ /* reset error from parser, since non-uri strings should successfully resolve to none. */
+ aws_reset_error();
+ goto on_done;
+ }
+
+ if (aws_uri_query_string(&uri)->len > 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ const struct aws_byte_cursor *scheme = aws_uri_scheme(&uri);
+ AWS_ASSERT(scheme != NULL);
+
+ root = aws_json_value_new_object(allocator);
+
+ if (scheme->len == 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ if (!(aws_byte_cursor_eq(scheme, &s_scheme_http) || aws_byte_cursor_eq(scheme, &s_scheme_https))) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("scheme"), aws_json_value_new_string(allocator, *scheme))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add scheme to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ const struct aws_byte_cursor *authority = aws_uri_authority(&uri);
+ AWS_ASSERT(authority != NULL);
+
+ if (authority->len == 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("authority"), aws_json_value_new_string(allocator, *authority))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add authority to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ const struct aws_byte_cursor *path = aws_uri_path(&uri);
+
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("path"), aws_json_value_new_string(allocator, *path))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add path to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor normalized_path_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("normalizedPath");
+ struct aws_byte_buf normalized_path_buf;
+ if (aws_byte_buf_init_from_normalized_uri_path(allocator, *path, &normalized_path_buf) ||
+ aws_json_value_add_to_object(
+ root,
+ normalized_path_cur,
+ aws_json_value_new_string(allocator, aws_byte_cursor_from_buf(&normalized_path_buf)))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to normalize path.");
+ aws_byte_buf_clean_up(&normalized_path_buf);
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ aws_byte_buf_clean_up(&normalized_path_buf);
+
+ const struct aws_byte_cursor *host_name = aws_uri_host_name(&uri);
+ bool is_ip = s_is_uri_ip(*host_name, true);
+ if (aws_json_value_add_to_object(
+ root, aws_byte_cursor_from_c_str("isIp"), aws_json_value_new_boolean(allocator, is_ip))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add isIp to object.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_buf buf;
+ if (aws_byte_buf_init(&buf, allocator, 0)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init buffer for parseUrl return.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (aws_byte_buf_append_json_string(root, &buf)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to create JSON object.");
+ aws_byte_buf_clean_up(&buf);
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_buf(allocator, &buf));
+
+ aws_byte_buf_clean_up(&buf);
+
+on_done:
+ aws_uri_clean_up(&uri);
+ aws_endpoints_value_clean_up(&argv_url);
+ aws_json_value_destroy(root);
+ return result;
+}
+
+static int s_resolve_is_valid_host_label(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_value argv_allow_subdomains = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_allow_subdomains)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isValidHostLabel.");
+ goto on_error;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean =
+ aws_is_valid_host_label(argv_value.v.owning_cursor_string.cur, argv_allow_subdomains.v.boolean);
+
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_allow_subdomains);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_allow_subdomains);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+static int s_resolve_fn_aws_partition(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_region = {0};
+
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_region)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve arguments for partitions.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_hash_element *element = NULL;
+ struct aws_byte_cursor key = argv_region.v.owning_cursor_string.cur;
+ if (aws_hash_table_find(&scope->partitions->region_to_partition_info, &key, &element)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to find partition info. " PRInSTR, AWS_BYTE_CURSOR_PRI(key));
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (element != NULL) {
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_create(allocator, ((struct aws_partition_info *)element->value)->info);
+ goto on_done;
+ }
+
+ key = aws_map_region_to_partition(key);
+
+ if (key.len == 0) {
+ key = aws_byte_cursor_from_c_str("aws");
+ }
+
+ if (aws_hash_table_find(&scope->partitions->region_to_partition_info, &key, &element) || element == NULL) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to find partition info. " PRInSTR, AWS_BYTE_CURSOR_PRI(key));
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_create(allocator, ((struct aws_partition_info *)element->value)->info);
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_region);
+ return result;
+}
+
+static int s_resolve_fn_aws_parse_arn(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_json_value *object = NULL;
+ struct aws_endpoints_value argv_value = {0};
+ if (aws_array_list_length(argv) != 1 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve parseArn.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_resource_name arn;
+ if (aws_resource_name_init_from_cur(&arn, &argv_value.v.owning_cursor_string.cur)) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ object = aws_json_value_new_object(allocator);
+ if (object == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init object for parseArn.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ if (arn.partition.len == 0 || arn.resource_id.len == 0 || arn.service.len == 0) {
+ out_value->type = AWS_ENDPOINTS_VALUE_NONE;
+ goto on_done;
+ }
+
+ /* Split resource id into components, either on : or / */
+ /* TODO: support multiple delims in existing split helper? */
+ struct aws_json_value *resource_id_node = aws_json_value_new_array(allocator);
+ size_t start = 0;
+ for (size_t i = 0; i < arn.resource_id.len; ++i) {
+ if (arn.resource_id.ptr[i] == '/' || arn.resource_id.ptr[i] == ':') {
+ struct aws_byte_cursor cur = {
+ .ptr = arn.resource_id.ptr + start,
+ .len = i - start,
+ };
+
+ struct aws_json_value *element = aws_json_value_new_string(allocator, cur);
+ if (element == NULL || aws_json_value_add_array_element(resource_id_node, element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resource id element");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ start = i + 1;
+ }
+ }
+
+ if (start <= arn.resource_id.len) {
+ struct aws_byte_cursor cur = {
+ .ptr = arn.resource_id.ptr + start,
+ .len = arn.resource_id.len - start,
+ };
+ struct aws_json_value *element = aws_json_value_new_string(allocator, cur);
+ if (element == NULL || aws_json_value_add_array_element(resource_id_node, element)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add resource id element");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+ }
+
+ if (aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("partition"), aws_json_value_new_string(allocator, arn.partition)) ||
+ aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("service"), aws_json_value_new_string(allocator, arn.service)) ||
+ aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("region"), aws_json_value_new_string(allocator, arn.region)) ||
+ aws_json_value_add_to_object(
+ object, aws_byte_cursor_from_c_str("accountId"), aws_json_value_new_string(allocator, arn.account_id)) ||
+ aws_json_value_add_to_object(object, aws_byte_cursor_from_c_str("resourceId"), resource_id_node)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to add elements to object for parseArn.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_OBJECT;
+ out_value->v.owning_cursor_object =
+ aws_endpoints_owning_cursor_from_string(aws_string_new_from_json(allocator, object));
+
+ if (out_value->v.owning_cursor_object.cur.len == 0) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to create string from json.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+on_done:
+ aws_json_value_destroy(object);
+ aws_endpoints_value_clean_up(&argv_value);
+ return result;
+}
+
+static int s_resolve_is_virtual_hostable_s3_bucket(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+
+ int result = AWS_OP_SUCCESS;
+ struct aws_endpoints_value argv_value = {0};
+ struct aws_endpoints_value argv_allow_subdomains = {0};
+ if (aws_array_list_length(argv) != 2 ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 0, AWS_ENDPOINTS_VALUE_STRING, &argv_value) ||
+ aws_endpoints_argv_expect(allocator, scope, argv, 1, AWS_ENDPOINTS_VALUE_BOOLEAN, &argv_allow_subdomains)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to resolve args for isVirtualHostableS3Bucket.");
+ result = aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ goto on_done;
+ }
+
+ struct aws_byte_cursor label_cur = argv_value.v.owning_cursor_string.cur;
+
+ bool has_uppercase_chars = false;
+ for (size_t i = 0; i < label_cur.len; ++i) {
+ if (label_cur.ptr[i] >= 'A' && label_cur.ptr[i] <= 'Z') {
+ has_uppercase_chars = true;
+ break;
+ }
+ }
+
+ out_value->type = AWS_ENDPOINTS_VALUE_BOOLEAN;
+ out_value->v.boolean = (label_cur.len >= 3 && label_cur.len <= 63) && !has_uppercase_chars &&
+ aws_is_valid_host_label(label_cur, argv_allow_subdomains.v.boolean) &&
+ !aws_is_ipv4(label_cur);
+
+on_done:
+ aws_endpoints_value_clean_up(&argv_value);
+ aws_endpoints_value_clean_up(&argv_allow_subdomains);
+ return result;
+}
+
+typedef int(standard_lib_function_fn)(
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value);
+
+static standard_lib_function_fn *s_resolve_fn_vt[AWS_ENDPOINTS_FN_LAST] = {
+ [AWS_ENDPOINTS_FN_IS_SET] = s_resolve_fn_is_set,
+ [AWS_ENDPOINTS_FN_NOT] = s_resolve_fn_not,
+ [AWS_ENDPOINTS_FN_GET_ATTR] = s_resolve_fn_get_attr,
+ [AWS_ENDPOINTS_FN_SUBSTRING] = s_resolve_fn_substring,
+ [AWS_ENDPOINTS_FN_STRING_EQUALS] = s_resolve_fn_string_equals,
+ [AWS_ENDPOINTS_FN_BOOLEAN_EQUALS] = s_resolve_fn_boolean_equals,
+ [AWS_ENDPOINTS_FN_URI_ENCODE] = s_resolve_fn_uri_encode,
+ [AWS_ENDPOINTS_FN_PARSE_URL] = s_resolve_fn_parse_url,
+ [AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL] = s_resolve_is_valid_host_label,
+ [AWS_ENDPOINTS_FN_AWS_PARTITION] = s_resolve_fn_aws_partition,
+ [AWS_ENDPOINTS_FN_AWS_PARSE_ARN] = s_resolve_fn_aws_parse_arn,
+ [AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET] = s_resolve_is_virtual_hostable_s3_bucket,
+};
+
+int aws_endpoints_dispatch_standard_lib_fn_resolve(
+ enum aws_endpoints_fn_type type,
+ struct aws_allocator *allocator,
+ struct aws_array_list *argv,
+ struct aws_endpoints_resolution_scope *scope,
+ struct aws_endpoints_value *out_value) {
+ return s_resolve_fn_vt[type](allocator, argv, scope, out_value);
+}
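
The dispatcher above is a plain function-pointer table indexed by the fn enum and populated with designated initializers. A self-contained sketch of the same pattern, using hypothetical names that are not part of the SDK:

#include <stdio.h>

enum op_type { OP_ADD, OP_NEGATE, OP_LAST };

typedef int(op_fn)(int a, int b);

static int s_op_add(int a, int b) { return a + b; }
static int s_op_negate(int a, int b) { (void)b; return -a; }

/* Designated initializers keep each slot aligned with its enum value,
 * mirroring how s_resolve_fn_vt is laid out above. */
static op_fn *s_op_vt[OP_LAST] = {
    [OP_ADD] = s_op_add,
    [OP_NEGATE] = s_op_negate,
};

static int s_dispatch(enum op_type type, int a, int b) {
    return s_op_vt[type](a, b);
}

int main(void) {
    printf("%d %d\n", s_dispatch(OP_ADD, 2, 3), s_dispatch(OP_NEGATE, 7, 0)); /* prints "5 -7" */
    return 0;
}
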
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
new file mode 100644
index 0000000000..36e0c60bec
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_types_impl.c
@@ -0,0 +1,235 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/json.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+uint64_t aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_LAST];
+
+void aws_endpoints_rule_engine_init(void) {
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_IS_SET] = aws_hash_c_string("isSet");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_NOT] = aws_hash_c_string("not");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_GET_ATTR] = aws_hash_c_string("getAttr");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_SUBSTRING] = aws_hash_c_string("substring");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_STRING_EQUALS] = aws_hash_c_string("stringEquals");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_BOOLEAN_EQUALS] = aws_hash_c_string("booleanEquals");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_URI_ENCODE] = aws_hash_c_string("uriEncode");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_PARSE_URL] = aws_hash_c_string("parseURL");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_IS_VALID_HOST_LABEL] = aws_hash_c_string("isValidHostLabel");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_PARTITION] = aws_hash_c_string("aws.partition");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_PARSE_ARN] = aws_hash_c_string("aws.parseArn");
+ aws_endpoints_fn_name_hash[AWS_ENDPOINTS_FN_AWS_IS_VIRTUAL_HOSTABLE_S3_BUCKET] =
+ aws_hash_c_string("aws.isVirtualHostableS3Bucket");
+}
+
+static void s_on_condition_array_element_clean_up(void *element) {
+ struct aws_endpoints_condition *condition = element;
+ aws_endpoints_condition_clean_up(condition);
+}
+
+static void s_on_rule_array_element_clean_up(void *element) {
+ struct aws_endpoints_rule *rule = element;
+ aws_endpoints_rule_clean_up(rule);
+}
+
+static void s_on_expr_array_element_clean_up(void *element) {
+ struct aws_endpoints_expr *expr = element;
+ aws_endpoints_expr_clean_up(expr);
+}
+
+struct aws_partition_info *aws_partition_info_new(struct aws_allocator *allocator, struct aws_byte_cursor name) {
+ AWS_PRECONDITION(allocator);
+ struct aws_partition_info *partition_info = aws_mem_calloc(allocator, 1, sizeof(struct aws_partition_info));
+
+ partition_info->allocator = allocator;
+ partition_info->name = name;
+
+ return partition_info;
+}
+
+void aws_partition_info_destroy(struct aws_partition_info *partition_info) {
+ if (partition_info == NULL) {
+ return;
+ }
+
+ if (!partition_info->is_copy) {
+ aws_string_destroy(partition_info->info);
+ }
+
+ aws_mem_release(partition_info->allocator, partition_info);
+}
+
+struct aws_endpoints_parameter *aws_endpoints_parameter_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name) {
+ AWS_PRECONDITION(allocator);
+ struct aws_endpoints_parameter *parameter = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_parameter));
+
+ parameter->allocator = allocator;
+ parameter->name = name;
+
+ return parameter;
+}
+
+void aws_endpoints_parameter_destroy(struct aws_endpoints_parameter *parameter) {
+ if (parameter == NULL) {
+ return;
+ }
+
+ aws_mem_release(parameter->allocator, parameter);
+}
+
+void aws_endpoints_rule_clean_up(struct aws_endpoints_rule *rule) {
+ AWS_PRECONDITION(rule);
+
+ aws_array_list_deep_clean_up(&rule->conditions, s_on_condition_array_element_clean_up);
+
+ switch (rule->type) {
+ case AWS_ENDPOINTS_RULE_ENDPOINT:
+ aws_endpoints_rule_data_endpoint_clean_up(&rule->rule_data.endpoint);
+ break;
+ case AWS_ENDPOINTS_RULE_ERROR:
+ aws_endpoints_rule_data_error_clean_up(&rule->rule_data.error);
+ break;
+ case AWS_ENDPOINTS_RULE_TREE:
+ aws_endpoints_rule_data_tree_clean_up(&rule->rule_data.tree);
+ break;
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ AWS_ZERO_STRUCT(*rule);
+}
+
+void aws_endpoints_rule_data_endpoint_clean_up(struct aws_endpoints_rule_data_endpoint *rule_data) {
+ AWS_PRECONDITION(rule_data);
+
+ aws_endpoints_expr_clean_up(&rule_data->url);
+
+ aws_byte_buf_clean_up(&rule_data->properties);
+ aws_hash_table_clean_up(&rule_data->headers);
+
+ AWS_ZERO_STRUCT(*rule_data);
+}
+
+void aws_endpoints_rule_data_error_clean_up(struct aws_endpoints_rule_data_error *rule_data) {
+ AWS_PRECONDITION(rule_data);
+
+ aws_endpoints_expr_clean_up(&rule_data->error);
+
+ AWS_ZERO_STRUCT(*rule_data);
+}
+
+void aws_endpoints_rule_data_tree_clean_up(struct aws_endpoints_rule_data_tree *rule_data) {
+ AWS_PRECONDITION(rule_data);
+
+ aws_array_list_deep_clean_up(&rule_data->rules, s_on_rule_array_element_clean_up);
+ AWS_ZERO_STRUCT(*rule_data);
+}
+
+void aws_endpoints_condition_clean_up(struct aws_endpoints_condition *condition) {
+ AWS_PRECONDITION(condition);
+
+ aws_endpoints_expr_clean_up(&condition->expr);
+ AWS_ZERO_STRUCT(*condition);
+}
+
+void aws_endpoints_function_clean_up(struct aws_endpoints_function *function) {
+ AWS_PRECONDITION(function);
+
+ aws_array_list_deep_clean_up(&function->argv, s_on_expr_array_element_clean_up);
+ AWS_ZERO_STRUCT(*function);
+}
+
+void aws_endpoints_expr_clean_up(struct aws_endpoints_expr *expr) {
+ AWS_PRECONDITION(expr);
+
+ switch (expr->type) {
+ case AWS_ENDPOINTS_EXPR_STRING:
+ case AWS_ENDPOINTS_EXPR_BOOLEAN:
+ case AWS_ENDPOINTS_EXPR_NUMBER:
+ case AWS_ENDPOINTS_EXPR_REFERENCE:
+ break;
+ case AWS_ENDPOINTS_EXPR_FUNCTION:
+ aws_endpoints_function_clean_up(&expr->e.function);
+ break;
+ case AWS_ENDPOINTS_EXPR_ARRAY:
+ aws_array_list_deep_clean_up(&expr->e.array, s_on_expr_array_element_clean_up);
+ break;
+ default:
+ AWS_FATAL_ASSERT(false);
+ }
+
+ AWS_ZERO_STRUCT(*expr);
+}
+
+struct aws_endpoints_scope_value *aws_endpoints_scope_value_new(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor name_cur) {
+ AWS_PRECONDITION(allocator);
+ struct aws_endpoints_scope_value *value = aws_mem_calloc(allocator, 1, sizeof(struct aws_endpoints_scope_value));
+
+ value->allocator = allocator;
+ value->name = aws_endpoints_owning_cursor_from_cursor(allocator, name_cur);
+
+ return value;
+}
+
+void aws_endpoints_scope_value_destroy(struct aws_endpoints_scope_value *scope_value) {
+ if (scope_value == NULL) {
+ return;
+ }
+ aws_string_destroy(scope_value->name.string);
+ aws_endpoints_value_clean_up(&scope_value->value);
+ aws_mem_release(scope_value->allocator, scope_value);
+}
+
+void aws_endpoints_value_clean_up_cb(void *value);
+
+void aws_endpoints_value_clean_up(struct aws_endpoints_value *aws_endpoints_value) {
+ AWS_PRECONDITION(aws_endpoints_value);
+
+ if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_STRING) {
+ aws_string_destroy(aws_endpoints_value->v.owning_cursor_string.string);
+ }
+
+ if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_OBJECT) {
+ aws_string_destroy(aws_endpoints_value->v.owning_cursor_object.string);
+ }
+
+ if (aws_endpoints_value->type == AWS_ENDPOINTS_VALUE_ARRAY) {
+ aws_array_list_deep_clean_up(&aws_endpoints_value->v.array, aws_endpoints_value_clean_up_cb);
+ }
+
+ AWS_ZERO_STRUCT(*aws_endpoints_value);
+}
+
+void aws_endpoints_value_clean_up_cb(void *value) {
+ struct aws_endpoints_value *aws_endpoints_value = value;
+ aws_endpoints_value_clean_up(aws_endpoints_value);
+}
+
+int aws_endpoints_deep_copy_parameter_value(
+ struct aws_allocator *allocator,
+ const struct aws_endpoints_value *from,
+ struct aws_endpoints_value *to) {
+
+ to->type = from->type;
+
+ if (to->type == AWS_ENDPOINTS_VALUE_STRING) {
+ to->v.owning_cursor_string = aws_endpoints_owning_cursor_create(allocator, from->v.owning_cursor_string.string);
+ } else if (to->type == AWS_ENDPOINTS_VALUE_BOOLEAN) {
+ to->v.boolean = from->v.boolean;
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Unexpected value type.");
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ return AWS_OP_SUCCESS;
+}
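
The clean-up helpers in this file all follow the same tagged-union discipline: switch on the type tag, release only the member that is live, then zero the struct so a repeated clean-up is harmless. A standalone sketch of that pattern with hypothetical names and plain libc:

#include <stdlib.h>
#include <string.h>

enum node_type { NODE_NUMBER, NODE_STRING };

struct node {
    enum node_type type;
    union {
        double number;
        char *string; /* heap-owned only when type == NODE_STRING */
    } v;
};

static void s_node_clean_up(struct node *n) {
    switch (n->type) {
        case NODE_NUMBER:
            break; /* nothing owned */
        case NODE_STRING:
            free(n->v.string);
            break;
    }
    /* Zeroing mirrors AWS_ZERO_STRUCT: the node can be cleaned up or reused safely. */
    memset(n, 0, sizeof(*n));
}

int main(void) {
    char *copy = malloc(6);
    if (copy == NULL) {
        return 1;
    }
    memcpy(copy, "hello", 6);

    struct node n = {.type = NODE_STRING, .v.string = copy};
    s_node_clean_up(&n);
    s_node_clean_up(&n); /* now a no-op: the zeroed type reads as NODE_NUMBER */
    return 0;
}
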
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
new file mode 100644
index 0000000000..1fdf246adb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/endpoints_util.c
@@ -0,0 +1,588 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/json.h>
+#include <aws/common/logging.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+#include <aws/sdkutils/sdkutils.h>
+
+#include <inttypes.h>
+
+#ifdef _MSC_VER /* Disable sscanf warnings on windows. */
+# pragma warning(disable : 4204)
+# pragma warning(disable : 4706)
+# pragma warning(disable : 4996)
+#endif
+
+/* 4 octets of 3 chars max + 3 separators + null terminator */
+#define AWS_IPV4_STR_LEN 16
+#define IP_CHAR_FMT "%03" SCNu16
+
+/* arbitrary max length of a region. current longest region name is 16 chars */
+#define AWS_REGION_LEN 50
+
+bool aws_is_ipv4(struct aws_byte_cursor host) {
+ if (host.len > AWS_IPV4_STR_LEN - 1) {
+ return false;
+ }
+
+ char copy[AWS_IPV4_STR_LEN] = {0};
+ memcpy(copy, host.ptr, host.len);
+
+ uint16_t octet[4] = {0};
+ char remainder[2] = {0};
+ if (4 != sscanf(
+ copy,
+ IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "." IP_CHAR_FMT "%1s",
+ &octet[0],
+ &octet[1],
+ &octet[2],
+ &octet[3],
+ remainder)) {
+ return false;
+ }
+
+ for (size_t i = 0; i < 4; ++i) {
+ if (octet[i] > 255) {
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool s_starts_with(struct aws_byte_cursor cur, uint8_t ch) {
+ return cur.len > 0 && cur.ptr[0] == ch;
+}
+
+static bool s_ends_with(struct aws_byte_cursor cur, uint8_t ch) {
+ return cur.len > 0 && cur.ptr[cur.len - 1] == ch;
+}
+
+static bool s_is_ipv6_char(uint8_t value) {
+ return aws_isxdigit(value) || value == ':';
+}
+
+/* actual encoding is %25, but % is omitted for simplicity, since split removes it */
+static struct aws_byte_cursor s_percent_uri_enc = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("25");
+/*
+ * IPv6 format:
+ * 8 groups of 4 hex chars separated by colons (:)
+ * leading 0s in each group can be skipped
+ * 2 or more consecutive zero groups can be replaced by a double colon (::),
+ * but only once.
+ * an ipv6 literal can be scoped to a zone by appending % followed by the zone
+ * name (there does not seem to be a length requirement on zone names; this
+ * implementation only enforces that the name is at least 1 char)
+ * ipv6 can be embedded in a url, in which case it must be wrapped inside []
+ * and the % must be uri encoded as %25.
+ * The implementation is fairly trivial: it just iterates through the string,
+ * keeping track of the rules above.
+ */
+bool aws_is_ipv6(struct aws_byte_cursor host, bool is_uri_encoded) {
+ if (host.len == 0) {
+ return false;
+ }
+
+ if (is_uri_encoded) {
+ if (!s_starts_with(host, '[') || !s_ends_with(host, ']')) {
+ return false;
+ }
+ aws_byte_cursor_advance(&host, 1);
+ --host.len;
+ }
+
+ struct aws_byte_cursor substr = {0};
+ /* first split is required ipv6 part */
+ bool is_split = aws_byte_cursor_next_split(&host, '%', &substr);
+ AWS_ASSERT(is_split); /* function is guaranteed to return at least one split */
+
+ if (!is_split || substr.len == 0 || (s_starts_with(substr, ':') || s_ends_with(substr, ':')) ||
+ !aws_byte_cursor_satisfies_pred(&substr, s_is_ipv6_char)) {
+ return false;
+ }
+
+ uint8_t group_count = 0;
+ bool has_double_colon = false;
+ struct aws_byte_cursor group = {0};
+ while (aws_byte_cursor_next_split(&substr, ':', &group)) {
+ ++group_count;
+
+ if (group_count > 8 || /* too many groups */
+ group.len > 4 || /* too many chars in group */
+ (has_double_colon && group.len == 0)) { /* only one double colon allowed */
+ return false;
+ }
+
+ has_double_colon = has_double_colon || group.len == 0;
+ }
+
+ /* second split is optional zone part */
+ if (aws_byte_cursor_next_split(&host, '%', &substr)) {
+ if ((is_uri_encoded &&
+ (substr.len < 3 ||
+ !aws_byte_cursor_starts_with(&substr, &s_percent_uri_enc))) || /* encoding for % + 1 extra char */
+ (!is_uri_encoded && substr.len == 0) || /* at least 1 char */
+ !aws_byte_cursor_satisfies_pred(&substr, aws_isalnum)) {
+ return false;
+ }
+ }
+
+ return has_double_colon ? group_count < 7 : group_count == 8;
+}
+
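
A quick sanity sketch for the two address checks above, assuming their declarations come from the private aws/sdkutils/private/endpoints_util.h header; the literals are arbitrary:

#include <aws/common/byte_buf.h>
#include <aws/sdkutils/private/endpoints_util.h>

#include <assert.h>
#include <stdbool.h>

int main(void) {
    assert(aws_is_ipv4(aws_byte_cursor_from_c_str("192.168.0.1")));
    assert(!aws_is_ipv4(aws_byte_cursor_from_c_str("256.0.0.1"))); /* octet out of range */

    assert(aws_is_ipv6(aws_byte_cursor_from_c_str("2001:db8::1"), false /*is_uri_encoded*/));
    assert(aws_is_ipv6(aws_byte_cursor_from_c_str("[2001:db8::1]"), true)); /* bracketed form inside a url */
    assert(!aws_is_ipv6(aws_byte_cursor_from_c_str("2001:db8::1::2"), false)); /* second "::" is rejected */
    return 0;
}
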
+static char s_known_countries[][3] = {{"us"}, {"eu"}, {"ap"}, {"sa"}, {"ca"}, {"me"}, {"af"}};
+
+struct aws_byte_cursor aws_map_region_to_partition(struct aws_byte_cursor region) {
+ if (region.len > AWS_REGION_LEN - 1) {
+ return aws_byte_cursor_from_c_str("");
+ }
+
+ char copy[AWS_REGION_LEN] = {0};
+ memcpy(copy, region.ptr, region.len);
+
+ char country[3] = {0};
+ char location[31] = {0};
+ uint8_t num = 0;
+
+ if (3 == sscanf(copy, "%2[^-]-%30[^-]-%03" SCNu8, country, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ for (size_t i = 0; i < sizeof(s_known_countries) / sizeof(s_known_countries[0]); ++i) {
+ if (0 == strncmp(s_known_countries[i], country, 3)) {
+ return aws_byte_cursor_from_c_str("aws");
+ }
+ }
+
+ if (0 == strncmp("cn", country, 3)) {
+ return aws_byte_cursor_from_c_str("aws-cn");
+ }
+ }
+ }
+
+ if (2 == sscanf(copy, "us-gov-%30[^-]-%03" SCNu8, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ return aws_byte_cursor_from_c_str("aws-us-gov");
+ }
+ }
+
+ if (2 == sscanf(copy, "us-iso-%30[^-]-%03" SCNu8, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ return aws_byte_cursor_from_c_str("aws-iso");
+ }
+ }
+
+ if (2 == sscanf(copy, "us-isob-%30[^-]-%03" SCNu8, location, &num)) {
+ if (location[0] != 0 && num > 0) {
+ return aws_byte_cursor_from_c_str("aws-iso-b");
+ }
+ }
+
+ return aws_byte_cursor_from_c_str("");
+}
+
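
Likewise for the region-to-partition mapping above (again leaning on the private endpoints_util.h header; the expected partitions follow the sscanf patterns in the function):

#include <aws/common/byte_buf.h>
#include <aws/sdkutils/private/endpoints_util.h>

#include <assert.h>

int main(void) {
    struct aws_byte_cursor p;

    p = aws_map_region_to_partition(aws_byte_cursor_from_c_str("us-east-1"));
    assert(aws_byte_cursor_eq_c_str(&p, "aws"));

    p = aws_map_region_to_partition(aws_byte_cursor_from_c_str("cn-north-1"));
    assert(aws_byte_cursor_eq_c_str(&p, "aws-cn"));

    p = aws_map_region_to_partition(aws_byte_cursor_from_c_str("us-gov-west-1"));
    assert(aws_byte_cursor_eq_c_str(&p, "aws-us-gov"));

    /* Unknown prefixes fall through to an empty cursor. */
    p = aws_map_region_to_partition(aws_byte_cursor_from_c_str("zz-nowhere-1"));
    assert(p.len == 0);
    return 0;
}
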
+bool aws_is_valid_host_label(struct aws_byte_cursor label, bool allow_subdomains) {
+ bool next_must_be_alnum = true;
+ size_t subdomain_count = 0;
+
+ for (size_t i = 0; i < label.len; ++i) {
+ if (label.ptr[i] == '.') {
+ if (!allow_subdomains || subdomain_count == 0) {
+ return false;
+ }
+
+ if (!aws_isalnum(label.ptr[i - 1])) {
+ return false;
+ }
+
+ next_must_be_alnum = true;
+ subdomain_count = 0;
+ continue;
+ }
+
+ if (next_must_be_alnum && !aws_isalnum(label.ptr[i])) {
+ return false;
+ } else if (label.ptr[i] != '-' && !aws_isalnum(label.ptr[i])) {
+ return false;
+ }
+
+ next_must_be_alnum = false;
+ ++subdomain_count;
+
+ if (subdomain_count > 63) {
+ return false;
+ }
+ }
+
+ return aws_isalnum(label.ptr[label.len - 1]);
+}
+
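
And a similar sketch for the host-label check (same private-header assumption; labels are chosen to hit the interesting branches):

#include <aws/common/byte_buf.h>
#include <aws/sdkutils/private/endpoints_util.h>

#include <assert.h>
#include <stdbool.h>

int main(void) {
    assert(aws_is_valid_host_label(aws_byte_cursor_from_c_str("my-bucket"), false /*allow_subdomains*/));
    assert(!aws_is_valid_host_label(aws_byte_cursor_from_c_str("-bad"), false)); /* must start with alnum */
    assert(!aws_is_valid_host_label(aws_byte_cursor_from_c_str("a.b"), false)); /* dots need allow_subdomains */
    assert(aws_is_valid_host_label(aws_byte_cursor_from_c_str("a.b"), true));
    return 0;
}
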
+struct aws_byte_cursor s_path_slash = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("/");
+
+int aws_byte_buf_init_from_normalized_uri_path(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor path,
+ struct aws_byte_buf *out_normalized_path) {
+ /* A normalized path is just the regular path, adjusted so that it starts and ends with a slash. */
+
+ if (aws_byte_buf_init(out_normalized_path, allocator, path.len + 2)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init buffer for normalized path.");
+ goto on_error;
+ }
+
+ if (path.len == 0) {
+ if (aws_byte_buf_append(out_normalized_path, &s_path_slash)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path.");
+ goto on_error;
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ if (path.ptr[0] != '/') {
+ if (aws_byte_buf_append_dynamic(out_normalized_path, &s_path_slash)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path.");
+ goto on_error;
+ }
+ }
+
+ if (aws_byte_buf_append_dynamic(out_normalized_path, &path)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append path to normalized path.");
+ goto on_error;
+ }
+
+ if (out_normalized_path->buffer[out_normalized_path->len - 1] != '/') {
+ if (aws_byte_buf_append_dynamic(out_normalized_path, &s_path_slash)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to append slash to normalized path.");
+ goto on_error;
+ }
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_byte_buf_clean_up(out_normalized_path);
+ return AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED;
+}
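+
+/*
+ * Illustrative usage sketch (not part of the upstream sources, guarded by a
+ * hypothetical AWS_SDKUTILS_USAGE_SKETCH macro): normalizing "foo/bar"
+ * produces "/foo/bar/".
+ */
+#ifdef AWS_SDKUTILS_USAGE_SKETCH
+static void s_sketch_normalize_path(struct aws_allocator *allocator) {
+    struct aws_byte_buf normalized;
+    if (aws_byte_buf_init_from_normalized_uri_path(
+            allocator, aws_byte_cursor_from_c_str("foo/bar"), &normalized) == AWS_OP_SUCCESS) {
+        /* normalized now holds "/foo/bar/" */
+        aws_byte_buf_clean_up(&normalized);
+    }
+}
+#endif /* AWS_SDKUTILS_USAGE_SKETCH */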
+
+struct aws_string *aws_string_new_from_json(struct aws_allocator *allocator, const struct aws_json_value *value) {
+ struct aws_byte_buf json_blob;
+ if (aws_byte_buf_init(&json_blob, allocator, 0)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to init buffer for json conversion.");
+ goto on_error;
+ }
+
+ if (aws_byte_buf_append_json_string(value, &json_blob)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to convert json to string.");
+ goto on_error;
+ }
+
+ struct aws_string *ret = aws_string_new_from_buf(allocator, &json_blob);
+ aws_byte_buf_clean_up(&json_blob);
+ return ret;
+
+on_error:
+ aws_byte_buf_clean_up(&json_blob);
+ aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ return NULL;
+}
+
+bool aws_endpoints_byte_cursor_eq(const void *a, const void *b) {
+ const struct aws_byte_cursor *a_cur = a;
+ const struct aws_byte_cursor *b_cur = b;
+ return aws_byte_cursor_eq(a_cur, b_cur);
+}
+
+void aws_array_list_deep_clean_up(struct aws_array_list *array, aws_array_callback_clean_up_fn on_clean_up_element) {
+ for (size_t idx = 0; idx < aws_array_list_length(array); ++idx) {
+ void *element = NULL;
+
+ aws_array_list_get_at_ptr(array, &element, idx);
+ AWS_ASSERT(element);
+ on_clean_up_element(element);
+ }
+
+ aws_array_list_clean_up(array);
+}
+
+/* TODO: this can be moved into common */
+static bool s_split_on_first_delim(
+ struct aws_byte_cursor input,
+ char split_on,
+ struct aws_byte_cursor *out_split,
+ struct aws_byte_cursor *out_rest) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&input));
+
+ uint8_t *delim = memchr(input.ptr, split_on, input.len);
+ if (delim != NULL) {
+ out_split->ptr = input.ptr;
+ out_split->len = delim - input.ptr;
+
+ out_rest->ptr = delim;
+ out_rest->len = input.len - (delim - input.ptr);
+ return true;
+ }
+
+ *out_split = input;
+ out_rest->ptr = NULL;
+ out_rest->len = 0;
+ return false;
+}
+
+static int s_buf_append_and_update_quote_count(
+ struct aws_byte_buf *buf,
+ struct aws_byte_cursor to_append,
+ size_t *quote_count,
+ bool is_json) {
+
+    /* Don't count quotes if it's not JSON. Escaped quotes will be replaced with
+       regular quotes when the ruleset JSON is parsed, which would lead to incorrect
+       results for when templates should be resolved in regular strings.
+       Note: in JSON blobs escaped quotes are preserved, so the approach below works. */
+ if (is_json) {
+ for (size_t idx = 0; idx < to_append.len; ++idx) {
+ if (to_append.ptr[idx] == '"' && !(idx > 0 && to_append.ptr[idx - 1] == '\\')) {
+ ++*quote_count;
+ }
+ }
+ }
+ return aws_byte_buf_append_dynamic(buf, &to_append);
+}
+
+static struct aws_byte_cursor escaped_closing_curly = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("}}");
+static struct aws_byte_cursor escaped_opening_curly = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("{{");
+
+/*
+ * Small helper to deal with escapes correctly in the portion of a string that
+ * occurs before a template's opening curly. The general flow for resolving is to
+ * look for an opening and then a closing curly. This function correctly appends any
+ * escaped closing curlies and errors out if a closing curly is not escaped (i.e. it is unmatched).
+ */
+int s_append_template_prefix_to_buffer(
+ struct aws_byte_buf *out_buf,
+ struct aws_byte_cursor prefix,
+ size_t *quote_count,
+ bool is_json) {
+
+ struct aws_byte_cursor split = {0};
+ struct aws_byte_cursor rest = {0};
+
+ while (s_split_on_first_delim(prefix, '}', &split, &rest)) {
+ if (s_buf_append_and_update_quote_count(out_buf, split, quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+
+ if (*quote_count % 2 == 0) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '}')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 1);
+ prefix = rest;
+ continue;
+ }
+
+ if (aws_byte_cursor_starts_with(&rest, &escaped_closing_curly)) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '}')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 2);
+ } else {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Unmatched or unescaped closing curly.");
+ goto on_error;
+ }
+
+ prefix = rest;
+ }
+
+ if (s_buf_append_and_update_quote_count(out_buf, split, quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_byte_buf_init_from_resolved_templated_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_buf *out_buf,
+ struct aws_byte_cursor string,
+ aws_endpoints_template_resolve_fn resolve_callback,
+ void *user_data,
+ bool is_json) {
+ AWS_PRECONDITION(allocator);
+
+ struct aws_owning_cursor resolved_template;
+ AWS_ZERO_STRUCT(resolved_template);
+
+ if (aws_byte_buf_init(out_buf, allocator, string.len)) {
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+ }
+
+ size_t quote_count = is_json ? 0 : 1;
+ struct aws_byte_cursor split = {0};
+ struct aws_byte_cursor rest = {0};
+ while (s_split_on_first_delim(string, '{', &split, &rest)) {
+ if (s_append_template_prefix_to_buffer(out_buf, split, &quote_count, is_json)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to buffer while evaluating templated sting.");
+ goto on_error;
+ }
+
+ if (quote_count % 2 == 0) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '{')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 1);
+ string = rest;
+ continue;
+ }
+
+ if (aws_byte_cursor_starts_with(&rest, &escaped_opening_curly)) {
+ if (aws_byte_buf_append_byte_dynamic(out_buf, '{')) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&rest, 2);
+ string = rest;
+ continue;
+ }
+
+ aws_byte_cursor_advance(&rest, 1);
+
+ struct aws_byte_cursor after_closing = {0};
+ if (!s_split_on_first_delim(rest, '}', &split, &after_closing)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Unmatched closing curly.");
+ goto on_error;
+ }
+ aws_byte_cursor_advance(&after_closing, 1);
+ string = after_closing;
+
+ if (resolve_callback(split, user_data, &resolved_template)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to resolve template.");
+ goto on_error;
+ }
+
+ if (s_buf_append_and_update_quote_count(out_buf, resolved_template.cur, &quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append resolved value.");
+ goto on_error;
+ }
+
+ aws_owning_cursor_clean_up(&resolved_template);
+ }
+
+ if (s_buf_append_and_update_quote_count(out_buf, split, &quote_count, is_json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_GENERAL, "Failed to append to resolved template buffer.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_byte_buf_clean_up(out_buf);
+ aws_owning_cursor_clean_up(&resolved_template);
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+int aws_path_through_json(
+ struct aws_allocator *allocator,
+ const struct aws_json_value *root,
+ struct aws_byte_cursor path,
+ const struct aws_json_value **out_value) {
+
+ struct aws_array_list path_segments;
+ if (aws_array_list_init_dynamic(&path_segments, allocator, 10, sizeof(struct aws_byte_cursor)) ||
+ aws_byte_cursor_split_on_char(&path, '.', &path_segments)) {
+ goto on_error;
+ }
+
+ *out_value = root;
+ for (size_t idx = 0; idx < aws_array_list_length(&path_segments); ++idx) {
+ struct aws_byte_cursor path_el_cur;
+ if (aws_array_list_get_at(&path_segments, &path_el_cur, idx)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Failed to get path element");
+ goto on_error;
+ }
+
+ struct aws_byte_cursor element_cur = {0};
+ aws_byte_cursor_next_split(&path_el_cur, '[', &element_cur);
+
+ struct aws_byte_cursor index_cur = {0};
+ bool has_index = aws_byte_cursor_next_split(&path_el_cur, '[', &index_cur) &&
+ aws_byte_cursor_next_split(&path_el_cur, ']', &index_cur);
+
+ if (element_cur.len > 0) {
+ *out_value = aws_json_value_get_from_object(*out_value, element_cur);
+ if (NULL == *out_value) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE, "Invalid path. " PRInSTR ".", AWS_BYTE_CURSOR_PRI(element_cur));
+ goto on_error;
+ }
+ }
+
+ if (has_index) {
+ uint64_t index;
+ if (aws_byte_cursor_utf8_parse_u64(index_cur, &index)) {
+ AWS_LOGF_ERROR(
+ AWS_LS_SDKUTILS_ENDPOINTS_RESOLVE,
+ "Failed to parse index: " PRInSTR,
+ AWS_BYTE_CURSOR_PRI(index_cur));
+ goto on_error;
+ }
+ *out_value = aws_json_get_array_element(*out_value, (size_t)index);
+ if (NULL == *out_value) {
+ aws_reset_error();
+ goto on_success;
+ }
+ }
+ }
+
+on_success:
+ aws_array_list_clean_up(&path_segments);
+ return AWS_OP_SUCCESS;
+
+on_error:
+ aws_array_list_clean_up(&path_segments);
+ *out_value = NULL;
+ return aws_raise_error(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED);
+}
+
+struct aws_owning_cursor aws_endpoints_owning_cursor_create(
+ struct aws_allocator *allocator,
+ const struct aws_string *str) {
+ struct aws_string *clone = aws_string_clone_or_reuse(allocator, str);
+ struct aws_owning_cursor ret = {.string = clone, .cur = aws_byte_cursor_from_string(clone)};
+ return ret;
+}
+
+struct aws_owning_cursor aws_endpoints_owning_cursor_from_string(struct aws_string *str) {
+ struct aws_owning_cursor ret = {.string = str, .cur = aws_byte_cursor_from_string(str)};
+ return ret;
+}
+
+struct aws_owning_cursor aws_endpoints_owning_cursor_from_cursor(
+ struct aws_allocator *allocator,
+ const struct aws_byte_cursor cur) {
+ struct aws_string *clone = aws_string_new_from_cursor(allocator, &cur);
+ struct aws_owning_cursor ret = {.string = clone, .cur = aws_byte_cursor_from_string(clone)};
+ return ret;
+}
+
+struct aws_owning_cursor aws_endpoints_non_owning_cursor_create(struct aws_byte_cursor cur) {
+ struct aws_owning_cursor ret = {.string = NULL, .cur = cur};
+ return ret;
+}
+
+void aws_owning_cursor_clean_up(struct aws_owning_cursor *cursor) {
+ aws_string_destroy(cursor->string);
+ cursor->string = NULL;
+ cursor->cur.ptr = NULL;
+ cursor->cur.len = 0;
+}
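+
+/*
+ * Illustrative usage sketch (not part of the upstream sources, guarded by a
+ * hypothetical AWS_SDKUTILS_USAGE_SKETCH macro) for the templated-string
+ * resolver defined above. The callback signature is inferred from the call
+ * site; see the aws_endpoints_template_resolve_fn typedef for the
+ * authoritative form.
+ */
+#ifdef AWS_SDKUTILS_USAGE_SKETCH
+static int s_sketch_resolve(struct aws_byte_cursor template_cur, void *user_data, struct aws_owning_cursor *out) {
+    (void)template_cur;
+    (void)user_data;
+    /* Resolve every "{...}" template to a fixed value. */
+    *out = aws_endpoints_non_owning_cursor_create(aws_byte_cursor_from_c_str("us-east-1"));
+    return AWS_OP_SUCCESS;
+}
+
+static void s_sketch_resolve_templated_string(struct aws_allocator *allocator) {
+    struct aws_byte_buf out_buf;
+    struct aws_byte_cursor input = aws_byte_cursor_from_c_str("https://service.{region}.amazonaws.com");
+    if (aws_byte_buf_init_from_resolved_templated_string(
+            allocator, &out_buf, input, s_sketch_resolve, NULL, false /* is_json */) == AWS_OP_SUCCESS) {
+        /* out_buf now holds "https://service.us-east-1.amazonaws.com" */
+        aws_byte_buf_clean_up(&out_buf);
+    }
+}
+#endif /* AWS_SDKUTILS_USAGE_SKETCH */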
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c b/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
new file mode 100644
index 0000000000..0ff758606f
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/partitions.c
@@ -0,0 +1,283 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/array_list.h>
+#include <aws/common/byte_buf.h>
+#include <aws/common/hash_table.h>
+#include <aws/common/json.h>
+#include <aws/common/ref_count.h>
+#include <aws/common/string.h>
+#include <aws/sdkutils/partitions.h>
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/private/endpoints_util.h>
+
+static struct aws_byte_cursor s_supported_version = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("1.0");
+
+struct aws_byte_cursor aws_partitions_get_supported_version(void) {
+ return s_supported_version;
+}
+
+static void s_partitions_config_destroy(void *data) {
+ if (data == NULL) {
+ return;
+ }
+
+ struct aws_partitions_config *partitions = data;
+
+ aws_json_value_destroy(partitions->json_root);
+ aws_string_destroy(partitions->version);
+ aws_hash_table_clean_up(&partitions->region_to_partition_info);
+ aws_mem_release(partitions->allocator, partitions);
+}
+
+struct region_merge_wrapper {
+ struct aws_json_value *outputs_node;
+ struct aws_json_value *merge_node;
+};
+
+static int s_on_region_merge(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+
+ struct region_merge_wrapper *merge = user_data;
+
+ /*
+     * Note: the latest partitions file includes a description on every region.
+     * This would result in a separate record being created for every region, since
+     * any override on a region creates a new record that is a merge of the
+     * partition default and the override.
+     * The description is not used by the endpoints rule engine, so let's ignore it
+     * during the merge for now to avoid creating numerous records that all carry
+     * the same data.
+     * This decision can be revisited later if we decide to extend partitions
+     * parsing to other use cases.
+ */
+ if (aws_byte_cursor_eq_c_str(key, "description")) {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (merge->merge_node == NULL) {
+ merge->merge_node = aws_json_value_duplicate(merge->outputs_node);
+ }
+
+ /*
+     * Note: it's valid for a region to add a new field to the default partition outputs
+     * instead of overriding an existing one, so only delete the previous value if it exists.
+ */
+ if (aws_json_value_has_key(merge->merge_node, *key) && aws_json_value_remove_from_object(merge->merge_node, *key)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to remove previous partition value.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ if (aws_json_value_add_to_object(merge->merge_node, *key, aws_json_value_duplicate(value))) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to overwrite partition data.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+struct partition_parse_wrapper {
+ struct aws_partitions_config *partitions;
+ struct aws_json_value *outputs_node;
+ struct aws_string *outputs_str;
+};
+
+static int s_on_region_element(
+ const struct aws_byte_cursor *key,
+ const struct aws_json_value *value,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+
+ struct aws_partition_info *partition_info = NULL;
+ struct partition_parse_wrapper *wrapper = user_data;
+
+ struct region_merge_wrapper merge = {
+ .outputs_node = wrapper->outputs_node,
+ .merge_node = NULL,
+ };
+
+ if (aws_json_const_iterate_object(value, s_on_region_merge, &merge)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partitions.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ if (merge.merge_node != NULL) {
+ partition_info = aws_partition_info_new(wrapper->partitions->allocator, *key);
+ partition_info->info = aws_string_new_from_json(wrapper->partitions->allocator, merge.merge_node);
+ aws_json_value_destroy(merge.merge_node);
+ } else {
+ partition_info = aws_partition_info_new(wrapper->partitions->allocator, *key);
+ partition_info->info = wrapper->outputs_str;
+ partition_info->is_copy = true;
+ }
+
+ if (aws_hash_table_put(
+ &wrapper->partitions->region_to_partition_info, &partition_info->name, partition_info, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ if (partition_info != NULL) {
+ aws_partition_info_destroy(partition_info);
+ }
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+}
+
+static int s_on_partition_element(
+ size_t idx,
+ const struct aws_json_value *partition_node,
+ bool *out_should_continue,
+ void *user_data) {
+ (void)out_should_continue;
+ (void)idx;
+
+ struct aws_partitions_config *partitions = user_data;
+
+ struct aws_byte_cursor id_cur;
+ struct aws_json_value *id_node = aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("id"));
+ if (id_node == NULL || aws_json_value_get_string(id_node, &id_cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract id of partition.");
+ goto on_error;
+ }
+
+ struct aws_json_value *outputs_node =
+ aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("outputs"));
+ if (outputs_node == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract outputs of partition.");
+ goto on_error;
+ }
+
+ struct aws_partition_info *partition_info = aws_partition_info_new(partitions->allocator, id_cur);
+ partition_info->info = aws_string_new_from_json(partitions->allocator, outputs_node);
+
+ if (partition_info->info == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info.");
+ goto on_error;
+ }
+
+ if (aws_hash_table_put(&partitions->region_to_partition_info, &partition_info->name, partition_info, NULL)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to add partition info.");
+ goto on_error;
+ }
+
+ struct partition_parse_wrapper wrapper = {
+ .outputs_node = outputs_node, .outputs_str = partition_info->info, .partitions = partitions};
+
+ struct aws_json_value *regions_node =
+ aws_json_value_get_from_object(partition_node, aws_byte_cursor_from_c_str("regions"));
+ if (regions_node != NULL && aws_json_const_iterate_object(regions_node, s_on_region_element, &wrapper)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse regions.");
+ goto on_error;
+ }
+
+ return AWS_OP_SUCCESS;
+
+on_error:
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+}
+
+static int s_init_partitions_config_from_json(
+ struct aws_allocator *allocator,
+ struct aws_partitions_config *partitions,
+ struct aws_byte_cursor partitions_cur) {
+
+ struct aws_json_value *root = aws_json_value_new_from_string(allocator, partitions_cur);
+
+ if (root == NULL) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_ENDPOINTS_PARSING, "Failed to parse provided string as json.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ partitions->json_root = root;
+
+ struct aws_byte_cursor version_cur;
+ struct aws_json_value *version_node = aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("version"));
+ if (version_node == NULL || aws_json_value_get_string(version_node, &version_cur)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to extract version.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED);
+ }
+
+#ifdef ENDPOINTS_VERSION_CHECK /* TODO: samples are currently inconsistent with versions. skip check for now */
+    if (!aws_byte_cursor_eq(&version_cur, &s_supported_version)) {
+        AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Unsupported partitions version.");
+        return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED);
+ }
+#endif
+
+ struct aws_json_value *partitions_node =
+ aws_json_value_get_from_object(root, aws_byte_cursor_from_c_str("partitions"));
+ if (partitions_node == NULL || aws_json_const_iterate_array(partitions_node, s_on_partition_element, partitions)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to parse partitions.");
+ return aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ }
+
+ return AWS_OP_SUCCESS;
+}
+
+static void s_callback_partition_info_destroy(void *data) {
+ struct aws_partition_info *info = data;
+ aws_partition_info_destroy(info);
+}
+
+struct aws_partitions_config *aws_partitions_config_new_from_string(
+ struct aws_allocator *allocator,
+ struct aws_byte_cursor json) {
+
+ AWS_PRECONDITION(allocator);
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&json));
+
+ struct aws_partitions_config *partitions = aws_mem_calloc(allocator, 1, sizeof(struct aws_partitions_config));
+ partitions->allocator = allocator;
+
+ if (aws_hash_table_init(
+ &partitions->region_to_partition_info,
+ allocator,
+ 20,
+ aws_hash_byte_cursor_ptr,
+ aws_endpoints_byte_cursor_eq,
+ NULL,
+ s_callback_partition_info_destroy)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info map.");
+ aws_raise_error(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED);
+ goto on_error;
+ }
+
+ if (s_init_partitions_config_from_json(allocator, partitions, json)) {
+ AWS_LOGF_ERROR(AWS_LS_SDKUTILS_PARTITIONS_PARSING, "Failed to init partition info from json.");
+ goto on_error;
+ }
+
+ aws_ref_count_init(&partitions->ref_count, partitions, s_partitions_config_destroy);
+
+ return partitions;
+
+on_error:
+ s_partitions_config_destroy(partitions);
+ return NULL;
+}
+
+struct aws_partitions_config *aws_partitions_config_acquire(struct aws_partitions_config *partitions) {
+ AWS_PRECONDITION(partitions);
+ if (partitions) {
+ aws_ref_count_acquire(&partitions->ref_count);
+ }
+ return partitions;
+}
+
+struct aws_partitions_config *aws_partitions_config_release(struct aws_partitions_config *partitions) {
+ if (partitions) {
+ aws_ref_count_release(&partitions->ref_count);
+ }
+ return NULL;
+}
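+
+/*
+ * Illustrative usage sketch (not part of the upstream sources, guarded by a
+ * hypothetical AWS_SDKUTILS_USAGE_SKETCH macro): load a minimal partitions
+ * document and release it. The JSON shape mirrors the fields parsed above
+ * ("version", "partitions", "id", "outputs", "regions"); the library is
+ * expected to have been initialized via aws_sdkutils_library_init().
+ */
+#ifdef AWS_SDKUTILS_USAGE_SKETCH
+static void s_sketch_load_partitions(struct aws_allocator *allocator) {
+    const char *doc =
+        "{\"version\":\"1.0\",\"partitions\":[{\"id\":\"aws\","
+        "\"outputs\":{\"dnsSuffix\":\"amazonaws.com\"},\"regions\":{\"us-east-1\":{}}}]}";
+
+    struct aws_partitions_config *partitions =
+        aws_partitions_config_new_from_string(allocator, aws_byte_cursor_from_c_str(doc));
+    if (partitions != NULL) {
+        /* ... look up regions via partitions->region_to_partition_info ... */
+        aws_partitions_config_release(partitions);
+    }
+}
+#endif /* AWS_SDKUTILS_USAGE_SKETCH */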
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c b/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
new file mode 100644
index 0000000000..0687c5ea7e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/resource_name.c
@@ -0,0 +1,108 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/sdkutils/resource_name.h>
+
+#define ARN_SPLIT_COUNT ((size_t)5)
+#define ARN_PARTS_COUNT ((size_t)6)
+
+static const char ARN_DELIMETER[] = ":";
+static const char ARN_DELIMETER_CHAR = ':';
+
+static const size_t DELIMETER_LEN = 8; /* strlen("arn:::::") */
+
+int aws_resource_name_init_from_cur(struct aws_resource_name *arn, const struct aws_byte_cursor *input) {
+ struct aws_byte_cursor arn_parts[ARN_PARTS_COUNT];
+ struct aws_array_list arn_part_list;
+ aws_array_list_init_static(&arn_part_list, arn_parts, ARN_PARTS_COUNT, sizeof(struct aws_byte_cursor));
+ if (aws_byte_cursor_split_on_char_n(input, ARN_DELIMETER_CHAR, ARN_SPLIT_COUNT, &arn_part_list)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+
+ struct aws_byte_cursor *arn_prefix;
+ if (aws_array_list_get_at_ptr(&arn_part_list, (void **)&arn_prefix, 0) ||
+ !aws_byte_cursor_eq_c_str(arn_prefix, "arn")) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->partition, 1)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->service, 2)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->region, 3)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->account_id, 4)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ if (aws_array_list_get_at(&arn_part_list, &arn->resource_id, 5)) {
+ return aws_raise_error(AWS_ERROR_MALFORMED_INPUT_STRING);
+ }
+ return AWS_OP_SUCCESS;
+}
+
+int aws_resource_name_length(const struct aws_resource_name *arn, size_t *size) {
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
+
+ *size = arn->partition.len + arn->region.len + arn->service.len + arn->account_id.len + arn->resource_id.len +
+ DELIMETER_LEN;
+
+ return AWS_OP_SUCCESS;
+}
+
+int aws_byte_buf_append_resource_name(struct aws_byte_buf *buf, const struct aws_resource_name *arn) {
+ AWS_PRECONDITION(aws_byte_buf_is_valid(buf));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->partition));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->service));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->region));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->account_id));
+ AWS_PRECONDITION(aws_byte_cursor_is_valid(&arn->resource_id));
+
+ const struct aws_byte_cursor prefix = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL("arn:");
+ const struct aws_byte_cursor colon_cur = AWS_BYTE_CUR_INIT_FROM_STRING_LITERAL(ARN_DELIMETER);
+
+ if (aws_byte_buf_append(buf, &prefix)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &arn->partition)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->service)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->region)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->account_id)) {
+ return aws_raise_error(aws_last_error());
+ }
+ if (aws_byte_buf_append(buf, &colon_cur)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ if (aws_byte_buf_append(buf, &arn->resource_id)) {
+ return aws_raise_error(aws_last_error());
+ }
+
+ AWS_POSTCONDITION(aws_byte_buf_is_valid(buf));
+ return AWS_OP_SUCCESS;
+}
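+
+/*
+ * Illustrative usage sketch (not part of the upstream sources, guarded by a
+ * hypothetical AWS_SDKUTILS_USAGE_SKETCH macro): parse an ARN of the form
+ * "arn:partition:service:region:account-id:resource" and serialize it back.
+ */
+#ifdef AWS_SDKUTILS_USAGE_SKETCH
+static int s_sketch_roundtrip_arn(struct aws_allocator *allocator) {
+    struct aws_byte_cursor input =
+        aws_byte_cursor_from_c_str("arn:aws:sqs:us-east-1:123456789012:my-queue");
+    struct aws_resource_name arn;
+    if (aws_resource_name_init_from_cur(&arn, &input)) {
+        return AWS_OP_ERR;
+    }
+
+    size_t required_len = 0;
+    aws_resource_name_length(&arn, &required_len);
+
+    struct aws_byte_buf serialized;
+    if (aws_byte_buf_init(&serialized, allocator, required_len)) {
+        return AWS_OP_ERR;
+    }
+    int result = aws_byte_buf_append_resource_name(&serialized, &arn); /* rebuilds the original ARN */
+    aws_byte_buf_clean_up(&serialized);
+    return result;
+}
+#endif /* AWS_SDKUTILS_USAGE_SKETCH */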
diff --git a/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c b/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
new file mode 100644
index 0000000000..2fb102f43e
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/source/sdkutils.c
@@ -0,0 +1,67 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/sdkutils/private/endpoints_types_impl.h>
+#include <aws/sdkutils/sdkutils.h>
+
+/* clang-format off */
+static struct aws_error_info s_errors[] = {
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_GENERAL, "General error in SDK Utility library", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARSE_FATAL, "Parser encountered a fatal error", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARSE_RECOVERABLE, "Parser encountered an error, but recovered", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_UNSUPPORTED_RULESET, "Ruleset version not supported", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_PARSE_FAILED, "Ruleset parsing failed", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_INIT_FAILED, "Endpoints eval failed to initialize", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RESOLVE_FAILED, "Unexpected eval error", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_EMPTY_RULESET, "Ruleset has no rules", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_ENDPOINTS_RULESET_EXHAUSTED, "Ruleset was exhausted before finding a matching rule", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARTITIONS_UNSUPPORTED, "Partitions version not supported.", "aws-c-sdkutils"),
+ AWS_DEFINE_ERROR_INFO(AWS_ERROR_SDKUTILS_PARTITIONS_PARSE_FAILED, "Partitions parsing failed.", "aws-c-sdkutils"),
+};
+/* clang-format on */
+
+static struct aws_error_info_list s_sdkutils_error_info = {
+ .error_list = s_errors,
+ .count = sizeof(s_errors) / sizeof(struct aws_error_info),
+};
+
+static struct aws_log_subject_info s_log_subject_infos[] = {
+ DEFINE_LOG_SUBJECT_INFO(
+ AWS_LS_SDKUTILS_GENERAL,
+ "SDKUtils",
+ "Subject for SDK utility logging that defies categorization."),
+ DEFINE_LOG_SUBJECT_INFO(AWS_LS_SDKUTILS_PROFILE, "AWSProfile", "Subject for AWS Profile parser and utilities"),
+};
+
+static struct aws_log_subject_info_list s_sdkutils_log_subjects = {
+ .subject_list = s_log_subject_infos,
+ .count = AWS_ARRAY_SIZE(s_log_subject_infos),
+};
+
+static int s_library_init_count = 0;
+
+void aws_sdkutils_library_init(struct aws_allocator *allocator) {
+ if (s_library_init_count++ != 0) {
+ return;
+ }
+
+ aws_common_library_init(allocator);
+
+ aws_register_error_info(&s_sdkutils_error_info);
+ aws_register_log_subject_info_list(&s_sdkutils_log_subjects);
+
+ aws_endpoints_rule_engine_init();
+}
+
+void aws_sdkutils_library_clean_up(void) {
+ if (--s_library_init_count != 0) {
+ return;
+ }
+
+ aws_unregister_log_subject_info_list(&s_sdkutils_log_subjects);
+ aws_unregister_error_info(&s_sdkutils_error_info);
+
+ aws_common_library_clean_up();
+}
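+
+/*
+ * Illustrative usage sketch (not part of the upstream sources, guarded by a
+ * hypothetical AWS_SDKUTILS_USAGE_SKETCH macro): init and clean-up calls are
+ * counted, so every init must be paired with a clean-up.
+ */
+#ifdef AWS_SDKUTILS_USAGE_SKETCH
+static void s_sketch_library_lifetime(struct aws_allocator *allocator) {
+    aws_sdkutils_library_init(allocator);
+    /* ... use the profile / endpoints / partitions utilities ... */
+    aws_sdkutils_library_clean_up();
+}
+#endif /* AWS_SDKUTILS_USAGE_SKETCH */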
diff --git a/contrib/restricted/aws/aws-c-sdkutils/ya.make b/contrib/restricted/aws/aws-c-sdkutils/ya.make
new file mode 100644
index 0000000000..023ade1edb
--- /dev/null
+++ b/contrib/restricted/aws/aws-c-sdkutils/ya.make
@@ -0,0 +1,43 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(Apache-2.0)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.1.9)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-c-sdkutils/archive/v0.1.9.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-common
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-c-sdkutils/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_RUNTIME()
+
+CFLAGS(
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DHAVE_SYSCONF
+)
+
+SRCS(
+ source/aws_profile.c
+ source/endpoints_rule_engine.c
+ source/endpoints_ruleset.c
+ source/endpoints_standard_lib.c
+ source/endpoints_types_impl.c
+ source/endpoints_util.c
+ source/partitions.c
+ source/resource_name.c
+ source/sdkutils.c
+)
+
+END()
diff --git a/contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-arm64.txt b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..6c2e3fb3bc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,90 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-crt-cpp)
+target_compile_options(restricted-aws-aws-crt-cpp PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
+ -DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-crt-cpp PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/include
+)
+target_link_libraries(restricted-aws-aws-crt-cpp PUBLIC
+ contrib-libs-cxxsupp
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-s3
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-crt-cpp PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
+)
diff --git a/contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-x86_64.txt b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..6c2e3fb3bc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,90 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-crt-cpp)
+target_compile_options(restricted-aws-aws-crt-cpp PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
+ -DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-crt-cpp PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/include
+)
+target_link_libraries(restricted-aws-aws-crt-cpp PUBLIC
+ contrib-libs-cxxsupp
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-s3
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-crt-cpp PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
+)
diff --git a/contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-aarch64.txt b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..04fd641c30
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,91 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-crt-cpp)
+target_compile_options(restricted-aws-aws-crt-cpp PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
+ -DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-crt-cpp PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/include
+)
+target_link_libraries(restricted-aws-aws-crt-cpp PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-s3
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-crt-cpp PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
+)
diff --git a/contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-x86_64.txt b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..04fd641c30
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,91 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-crt-cpp)
+target_compile_options(restricted-aws-aws-crt-cpp PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
+ -DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-crt-cpp PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/include
+)
+target_link_libraries(restricted-aws-aws-crt-cpp PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-s3
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-crt-cpp PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
+)
diff --git a/contrib/restricted/aws/aws-crt-cpp/CMakeLists.txt b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.txt
new file mode 100644
index 0000000000..2dce3a77fe
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.txt
@@ -0,0 +1,19 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-aarch64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
+ include(CMakeLists.darwin-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+ include(CMakeLists.darwin-arm64.txt)
+elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
+ include(CMakeLists.windows-x86_64.txt)
+elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
+ include(CMakeLists.linux-x86_64.txt)
+endif()
diff --git a/contrib/restricted/aws/aws-crt-cpp/CMakeLists.windows-x86_64.txt b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..6c2e3fb3bc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,90 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_library(restricted-aws-aws-crt-cpp)
+target_compile_options(restricted-aws-aws-crt-cpp PRIVATE
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
+ -DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+ $<IF:$<CXX_COMPILER_ID:MSVC>,,-Wno-everything>
+)
+target_include_directories(restricted-aws-aws-crt-cpp PUBLIC
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/include
+)
+target_link_libraries(restricted-aws-aws-crt-cpp PUBLIC
+ contrib-libs-cxxsupp
+ restricted-aws-aws-c-auth
+ restricted-aws-aws-c-cal
+ restricted-aws-aws-c-common
+ restricted-aws-aws-c-event-stream
+ restricted-aws-aws-c-http
+ restricted-aws-aws-c-io
+ restricted-aws-aws-c-mqtt
+ restricted-aws-aws-c-s3
+ restricted-aws-aws-c-sdkutils
+)
+target_sources(restricted-aws-aws-crt-cpp PRIVATE
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
+ ${CMAKE_SOURCE_DIR}/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
+)
diff --git a/contrib/restricted/aws/aws-crt-cpp/CODE_OF_CONDUCT.md b/contrib/restricted/aws/aws-crt-cpp/CODE_OF_CONDUCT.md
new file mode 100644
index 0000000000..3b64466870
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CODE_OF_CONDUCT.md
@@ -0,0 +1,4 @@
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
diff --git a/contrib/restricted/aws/aws-crt-cpp/CONTRIBUTING.md b/contrib/restricted/aws/aws-crt-cpp/CONTRIBUTING.md
new file mode 100644
index 0000000000..666c37d6c0
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/CONTRIBUTING.md
@@ -0,0 +1,62 @@
+# Contributing Guidelines
+
+Thank you for your interest in contributing to our project. Whether it's a bug report, new feature, correction, or additional
+documentation, we greatly value feedback and contributions from our community.
+
+Please read through this document before submitting any issues or pull requests to ensure we have all the necessary
+information to effectively respond to your bug report or contribution.
+
+
+## Reporting Bugs/Feature Requests
+
+We welcome you to use the GitHub issue tracker to report bugs or suggest features.
+
+When filing an issue, please check [existing open](https://github.com/awslabs/aws-crt-cpp/issues), or [recently closed](https://github.com/awslabs/aws-crt-cpp/issues?utf8=%E2%9C%93&q=is%3Aissue%20is%3Aclosed%20), issues to make sure somebody else hasn't already
+reported the issue. Please try to include as much information as you can. Details like these are incredibly useful:
+
+* A reproducible test case or series of steps
+* The version of our code being used
+* Any modifications you've made relevant to the bug
+* Anything unusual about your environment or deployment
+
+
+## Contributing via Pull Requests
+Contributions via pull requests are much appreciated. Before sending us a pull request, please ensure that:
+
+1. You are working against the latest source on the *main* branch.
+2. You check existing open, and recently merged, pull requests to make sure someone else hasn't addressed the problem already.
+3. You open an issue to discuss any significant work - we would hate for your time to be wasted.
+
+To send us a pull request, please:
+
+1. Fork the repository.
+2. Modify the source; please focus on the specific change you are contributing. If you also reformat all the code, it will be hard for us to focus on your change.
+3. Ensure local tests pass.
+4. Commit to your fork using clear commit messages.
+5. Send us a pull request, answering any default questions in the pull request interface.
+6. Wait for a repository collaborator to look at your pull request, run the automated tests, and review. If additional changes or discussion is needed, a collaborator will get back to you, so please stay involved in the conversation.
+ * Note: pull requests from forks will not run the automated tests without collaborator involvement for security reasons. If you make a pull request and see that the tests are pending, this is normal and expected.
+
+GitHub provides additional documentation on [forking a repository](https://help.github.com/articles/fork-a-repo/) and
+[creating a pull request](https://help.github.com/articles/creating-a-pull-request/).
+
+
+## Finding contributions to work on
+Looking at the existing issues is a great way to find something to contribute to. As our projects, by default, use the default GitHub issue labels (enhancement/bug/duplicate/help wanted/invalid/question/wontfix), looking at any ['help wanted'](https://github.com/awslabs/aws-crt-cpp/labels/help%20wanted) issues is a great place to start.
+
+
+## Code of Conduct
+This project has adopted the [Amazon Open Source Code of Conduct](https://aws.github.io/code-of-conduct).
+For more information see the [Code of Conduct FAQ](https://aws.github.io/code-of-conduct-faq) or contact
+opensource-codeofconduct@amazon.com with any additional questions or comments.
+
+
+## Security issue notifications
+If you discover a potential security issue in this project, we ask that you notify AWS/Amazon Security via our [vulnerability reporting page](http://aws.amazon.com/security/vulnerability-reporting/). Please do **not** create a public GitHub issue.
+
+
+## Licensing
+
+See the [LICENSE](https://github.com/awslabs/aws-crt-cpp/blob/main/LICENSE) file for our project's licensing. We will ask you to confirm the licensing of your contribution.
+
+We may ask you to sign a [Contributor License Agreement (CLA)](http://en.wikipedia.org/wiki/Contributor_License_Agreement) for larger changes.
diff --git a/contrib/restricted/aws/aws-crt-cpp/LICENSE b/contrib/restricted/aws/aws-crt-cpp/LICENSE
new file mode 100644
index 0000000000..d645695673
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/contrib/restricted/aws/aws-crt-cpp/NOTICE b/contrib/restricted/aws/aws-crt-cpp/NOTICE
new file mode 100644
index 0000000000..8b820137a0
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/NOTICE
@@ -0,0 +1,3 @@
+AWS Crt Cpp
+Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+SPDX-License-Identifier: Apache-2.0.
diff --git a/contrib/restricted/aws/aws-crt-cpp/README.md b/contrib/restricted/aws/aws-crt-cpp/README.md
new file mode 100644
index 0000000000..ba5adf52e3
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/README.md
@@ -0,0 +1,144 @@
+## AWS Crt Cpp
+
+C++ wrapper around the aws-c-* libraries. Provides Cross-Platform Transport Protocols and SSL/TLS implementations for C++.
+
+### Documentation
+
+https://awslabs.github.io/aws-crt-cpp/
+
+### Currently Included:
+
+* aws-c-common: Cross-platform primitives and data structures.
+* aws-c-io: Cross-platform event-loops, non-blocking I/O, and TLS implementations.
+* aws-c-mqtt: MQTT client.
+* aws-c-auth: Auth signers, such as AWS SigV4
+* aws-c-http: HTTP/1.1 client and WebSockets (HTTP/2 coming soon)
+* aws-checksums: Cross-Platform HW accelerated CRC32c and CRC32 with fallback to efficient SW implementations.
+* aws-c-event-stream: C99 implementation of the vnd.amazon.event-stream content-type.
+
+More protocols and utilities are coming soon, so stay tuned.
+
+## Building
+
+The C99 libraries are already included for your convenience as submodules.
+You should perform a recursive clone `git clone --recursive` or initialize the submodules via
+`git submodule update --init`. These dependencies are compiled by CMake as part of the build process.
+
+If you want to manage these dependencies manually (e.g. you're using them in other projects), configure CMake with
+`-DBUILD_DEPS=OFF` and `-DCMAKE_PREFIX_PATH=<install>` pointing to the absolute path where you have them installed.
+
+### MSVC
+If you want to use a statically linked MSVCRT (/MT, /MTd), you can add `-DSTATIC_CRT=ON` to your cmake configuration.
+
+### Apple Silicon (aka M1) and Universal Binaries
+
+aws-crt-cpp supports both `arm64` and `x86_64` architectures.
+Configure cmake with `-DCMAKE_OSX_ARCHITECTURES=arm64` to target Apple silicon,
+or `-DCMAKE_OSX_ARCHITECTURES=x86_64` to target Intel.
+If you wish to create a [universal binary](https://developer.apple.com/documentation/apple-silicon/building-a-universal-macos-binary),
+you should use `lipo` to combine the `x86_64` and `arm64` binaries.
+For example: `lipo -create -output universal_app x86_app arm_app`
+
+You SHOULD NOT build for both architectures simultaneously via `-DCMAKE_OSX_ARCHITECTURES="arm64;x86_64"`.
+aws-crt-cpp has not been tested in this configuration.
+aws-crt-cpp's cmake configuration scripts are known to get confused by this,
+and will not enable optimizations that would benefit an independent `arm64` or `x86_64` build.
+
+### OpenSSL and LibCrypto (Unix only)
+
+If your application uses OpenSSL, configure with `-DUSE_OPENSSL=ON`.
+
+aws-crt-cpp does not use OpenSSL for TLS.
+On Apple and Windows devices, the OS's default TLS library is used.
+On Unix devices, [s2n-tls](https://github.com/aws/s2n-tls) is used.
+But s2n-tls uses libcrypto, the cryptography math library bundled with OpenSSL.
+To simplify the build process, the source code for s2n-tls and libcrypto is
+included as git submodules and built along with aws-crt-cpp.
+But if your application is also loading the system installation of OpenSSL
+(i.e. your application uses libcurl which uses libssl which uses libcrypto)
+there may be crashes as the application tries to use two different versions of libcrypto at once.
+
+Setting `-DUSE_OPENSSL=ON` will cause aws-crt-cpp to link against your system's existing `libcrypto`,
+instead of building its own copy.
+
+You can ignore all this on Windows and Apple platforms, where aws-crt-cpp uses the OS's default libraries for TLS and cryptography math.
+
+## Dependencies?
+
+There are no non-OS dependencies that AWS does not own, maintain, and ship.
+
+## Common Usage
+
+To do anything with IO, you'll need to create a few objects that will be used by the rest of the library.
+
+For example:
+
+````
+ Aws::Crt::LoadErrorStrings();
+````
+
+Will load error strings for debugging purposes. Since the C libraries use error codes, this will allow you to print the corresponding
+error string for each error code.
+
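+For example, a minimal sketch using `LastError()` and `ErrorDebugString()` from `Api.h` (printing to `stderr` is just an illustrative choice):
+
+````
+ /* LastError() returns 0 if no aws error has been raised on this thread yet. */
+ int lastError = Aws::Crt::LastError();
+ fprintf(stderr, "last CRT error: %s\n", Aws::Crt::ErrorDebugString(lastError));
+````
+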
+````
+ Aws::Crt::ApiHandle apiHandle;
+````
+This performs one-time static initialization of the library. You'll need it to do anything, so don't forget to create one.
+
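+The handle is also where logging and shutdown behavior are configured; a minimal sketch (the log level, the `stderr` destination, and the blocking shutdown are illustrative choices, not requirements):
+
+````
+ Aws::Crt::ApiHandle apiHandle;
+ apiHandle.InitializeLogging(Aws::Crt::LogLevel::Info, stderr);
+ apiHandle.SetShutdownBehavior(Aws::Crt::ApiHandleShutdownBehavior::Blocking);
+````
+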
+````
+ Aws::Crt::Io::EventLoopGroup eventLoopGroup(<number of threads you want>);
+````
+To use any of our APIs that perform IO you'll need at least one event-loop. An event-loop group is a collection of event-loops that
+protocol implementations will load balance across. If you won't have very many connections (say, no more than 100 or so), then you
+most likely only want 1 thread. In this case, you want to pass a single instance of this to every client or server implementation of a protocol
+you use in your application. In some advanced use cases, you may want to reserve a thread for different types of IO tasks. In that case, you can have an
+instance of this class for each reservation.
+
+````
+ Aws::Crt::Io::TlsContextOptions tlsCtxOptions =
+ Aws::Crt::Io::TlsContextOptions::InitClientWithMtls(certificatePath.c_str(), keyPath.c_str());
+ /*
+ * If we have a custom CA, set that up here.
+ */
+ if (!caFile.empty())
+ {
+ tlsCtxOptions.OverrideDefaultTrustStore(nullptr, caFile.c_str());
+ }
+
+ uint16_t port = 8883;
+ if (Io::TlsContextOptions::IsAlpnSupported())
+ {
+ /*
+ * Use ALPN to negotiate the mqtt protocol on a normal
+ * TLS port if possible.
+ */
+ tlsCtxOptions.SetAlpnList("x-amzn-mqtt-ca");
+ port = 443;
+ }
+
+ Aws::Crt::Io::TlsContext tlsCtx(tlsCtxOptions, Io::TlsMode::CLIENT);
+````
+
+If you plan on using TLS, you will need a TlsContext. These are NOT CHEAP, so use as few as possible to perform your task.
+If you're in client mode and not doing anything fancy (e.g. mutual TLS), then you can likely get away with using a single
+instance for the entire application.
+
+````
+Aws::Crt::Io::ClientBootstrap bootstrap(eventLoopGroup);
+````
+
+Lastly, you will need a client or server bootstrap to use a client or server protocol implementation. Since everything is
+non-blocking and event driven, this handles most of the "callback hell" inherent in the design. Assuming you aren't partitioning
+threads for particular use-cases, you can have a single instance of this that you pass to multiple clients.
+
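+Putting the pieces above together, a minimal client-side setup might look like the following sketch (the single event-loop thread is an illustrative choice):
+
+````
+ Aws::Crt::ApiHandle apiHandle;
+
+ /* One event-loop thread is usually enough for a modest number of connections. */
+ Aws::Crt::Io::EventLoopGroup eventLoopGroup(1);
+
+ /* A single bootstrap can be shared by every client protocol implementation. */
+ Aws::Crt::Io::ClientBootstrap bootstrap(eventLoopGroup);
+
+ /* Pass `bootstrap` (and a TlsContext, if needed) to the MQTT/HTTP clients you create. */
+````
+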
+## Mac-Only TLS Behavior
+
+Please note that on Mac, once a private key is used with a certificate, that certificate-key pair is imported into the Mac Keychain. All subsequent uses of that certificate will use the stored private key and ignore anything passed in programmatically. Beginning in v0.8.10, when a stored private key from the Keychain is used, the following will be logged at the "info" log level:
+
+```
+static: certificate has an existing certificate-key pair that was previously imported into the Keychain. Using key from Keychain instead of the one provided.
+```
+
+## License
+
+This library is licensed under the Apache 2.0 License.
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Allocator.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Allocator.h
new file mode 100644
index 0000000000..d0193a24b7
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Allocator.h
@@ -0,0 +1,47 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/common.h>
+#include <aws/crt/Exports.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ using Allocator = aws_allocator;
+
+ /**
+ * Each object from this library can use an explicit allocator.
+ * If you construct an object without specifying an allocator,
+ * then THIS allocator is used instead.
+ *
+ * You can customize this allocator when initializing
+ * \ref ApiHandle::ApiHandle(Allocator*) "ApiHandle".
+ */
+ AWS_CRT_CPP_API Allocator *ApiAllocator() noexcept;
+
+ /**
+ * Returns the default implementation of an Allocator.
+ *
+ * If you initialize \ref ApiHandle::ApiHandle(Allocator*) "ApiHandle"
+ * without specifying a custom allocator, then this implementation is used.
+ */
+ AWS_CRT_CPP_API Allocator *DefaultAllocatorImplementation() noexcept;
+
+ /**
+ * @deprecated Use DefaultAllocatorImplementation() instead.
+ * DefaultAllocator() is too easily confused with ApiAllocator().
+ */
+ AWS_CRT_CPP_API Allocator *DefaultAllocator() noexcept;
+
+ /**
+ * @deprecated Use ApiAllocator() instead, to avoid issues with delay-loaded DLLs.
+ * https://github.com/aws/aws-sdk-cpp/issues/1960
+ */
+ extern AWS_CRT_CPP_API Allocator *g_allocator;
+
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Api.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Api.h
new file mode 100644
index 0000000000..74fde424cf
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Api.h
@@ -0,0 +1,218 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Types.h>
+#include <aws/crt/crypto/HMAC.h>
+#include <aws/crt/crypto/Hash.h>
+#include <aws/crt/mqtt/Mqtt5Client.h>
+#include <aws/crt/mqtt/MqttClient.h>
+
+#include <aws/common/logging.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * Detail level control for logging output
+ */
+ enum class LogLevel
+ {
+ None = AWS_LL_NONE,
+ Fatal = AWS_LL_FATAL,
+ Error = AWS_LL_ERROR,
+ Warn = AWS_LL_WARN,
+ Info = AWS_LL_INFO,
+ Debug = AWS_LL_DEBUG,
+ Trace = AWS_LL_TRACE,
+
+ Count
+ };
+
+ /**
+ * Should the API Handle destructor block on all shutdown/thread completion logic or not?
+ */
+ enum class ApiHandleShutdownBehavior
+ {
+ Blocking,
+ NonBlocking
+ };
+
+ /**
+ * A singleton object representing the init/cleanup state of the entire CRT. It's invalid to have more than one
+ * active simultaneously and it's also invalid to use CRT functionality without one active.
+ */
+ class AWS_CRT_CPP_API ApiHandle
+ {
+ public:
+ /**
+ * Customize the ApiAllocator(), which will be used by any objects
+ * constructed without an explicit allocator.
+ */
+ ApiHandle(Allocator *allocator) noexcept;
+ ApiHandle() noexcept;
+ ~ApiHandle();
+ ApiHandle(const ApiHandle &) = delete;
+ ApiHandle(ApiHandle &&) = delete;
+ ApiHandle &operator=(const ApiHandle &) = delete;
+ ApiHandle &operator=(ApiHandle &&) = delete;
+
+ /**
+ * Initialize logging in awscrt.
+ * @param level: Display messages of this importance and higher. LogLevel::None will disable
+ * logging.
+ * @param filename: Logging destination, a file path from the disk.
+ */
+ void InitializeLogging(LogLevel level, const char *filename);
+
+ /**
+ * Initialize logging in awscrt.
+ * @param level: Display messages of this importance and higher. LogLevel::None will disable
+ * logging.
+ * @param fp: The FILE object for logging destination.
+ */
+ void InitializeLogging(LogLevel level, FILE *fp);
+
+ /**
+ * Configures the shutdown behavior of the api handle instance
+ * @param behavior desired shutdown behavior
+ */
+ void SetShutdownBehavior(ApiHandleShutdownBehavior behavior);
+
+ /**
+ * BYO_CRYPTO: set callback for creating MD5 hashes.
+ * If using BYO_CRYPTO, you must call this.
+ */
+ void SetBYOCryptoNewMD5Callback(Crypto::CreateHashCallback &&callback);
+
+ /**
+ * BYO_CRYPTO: set callback for creating SHA256 hashes.
+ * If using BYO_CRYPTO, you must call this.
+ */
+ void SetBYOCryptoNewSHA256Callback(Crypto::CreateHashCallback &&callback);
+
+ /**
+ * BYO_CRYPTO: set callback for creating Streaming SHA256 HMAC objects.
+ * If using BYO_CRYPTO, you must call this.
+ */
+ void SetBYOCryptoNewSHA256HMACCallback(Crypto::CreateHMACCallback &&callback);
+
+ /**
+ * BYO_CRYPTO: set callback for creating a ClientTlsChannelHandler.
+ * If using BYO_CRYPTO, you must call this prior to creating any client channels in the
+ * application.
+ */
+ void SetBYOCryptoClientTlsCallback(Io::NewClientTlsHandlerCallback &&callback);
+
+ /**
+ * BYO_CRYPTO: set callbacks for the TlsContext.
+ * If using BYO_CRYPTO, you need to call this function prior to creating a TlsContext.
+ *
+ * @param newCallback Create custom implementation object, to be stored inside TlsContext.
+ * Return nullptr if failure occurs.
+ * @param deleteCallback Destroy object that was created by newCallback.
+ * @param alpnCallback Return whether ALPN is supported.
+ */
+ void SetBYOCryptoTlsContextCallbacks(
+ Io::NewTlsContextImplCallback &&newCallback,
+ Io::DeleteTlsContextImplCallback &&deleteCallback,
+ Io::IsTlsAlpnSupportedCallback &&alpnCallback);
+
+ /// @private
+ static const Io::NewTlsContextImplCallback &GetBYOCryptoNewTlsContextImplCallback();
+ /// @private
+ static const Io::DeleteTlsContextImplCallback &GetBYOCryptoDeleteTlsContextImplCallback();
+ /// @private
+ static const Io::IsTlsAlpnSupportedCallback &GetBYOCryptoIsTlsAlpnSupportedCallback();
+
+ /**
+ * Gets the static default ClientBootstrap, creating it if necessary.
+ *
+ * This default will be used when a ClientBootstrap is not explicitly passed but is needed
+ * to allow the process to function. An example of this would be in the MQTT connection creation workflow.
+ * The default ClientBootstrap will use the default EventLoopGroup and HostResolver, creating them if
+ * necessary.
+ *
+ * The default ClientBootstrap will be automatically managed and released by the API handle when its
+ * resources are being freed, not requiring any manual memory management.
+ *
+ * @return ClientBootstrap* A pointer to the static default ClientBootstrap
+ */
+ static Io::ClientBootstrap *GetOrCreateStaticDefaultClientBootstrap();
+
+ /**
+ * Gets the static default EventLoopGroup, creating it if necessary.
+ *
+ * This default will be used when a EventLoopGroup is not explicitly passed but is needed
+ * to allow the process to function. An example of this would be in the MQTT connection creation workflow.
+ *
+ * The EventLoopGroup will automatically pick a default number of threads based on the system. You can
+ * manually adjust the number of threads being used by creating a EventLoopGroup and passing it through
+ * the SetDefaultEventLoopGroup function.
+ *
+ * The default EventLoopGroup will be automatically managed and released by the API handle when its
+ * resources are being freed, not requiring any manual memory management.
+ *
+ * @return EventLoopGroup* A pointer to the static default EventLoopGroup
+ */
+ static Io::EventLoopGroup *GetOrCreateStaticDefaultEventLoopGroup();
+
+ /**
+ * Gets the static default HostResolver, creating it if necessary.
+ *
+ * This default will be used when a HostResolver is not explicitly passed but is needed
+ * to allow the process to function. An example of this would be in the MQTT connection creation workflow.
+ *
+ * The HostResolver will be set to have a maximum of 8 entries by default. You can
+ * manually adjust the maximum number of entries being used by creating a HostResolver and passing it
+ * through the SetDefaultEventLoopGroup function.
+ *
+ * The default HostResolver will be automatically managed and released by the API handle when its
+ * resources are being freed, not requiring any manual memory management.
+ *
+ * @return HostResolver* A pointer to the static default HostResolver
+ */
+ static Io::HostResolver *GetOrCreateStaticDefaultHostResolver();
+
+ private:
+ void InitializeLoggingCommon(struct aws_logger_standard_options &options);
+
+ aws_logger m_logger;
+
+ ApiHandleShutdownBehavior m_shutdownBehavior;
+
+ static Io::ClientBootstrap *s_static_bootstrap;
+ static std::mutex s_lock_client_bootstrap;
+ static void ReleaseStaticDefaultClientBootstrap();
+
+ static Io::EventLoopGroup *s_static_event_loop_group;
+ static std::mutex s_lock_event_loop_group;
+ static void ReleaseStaticDefaultEventLoopGroup();
+
+ static int s_host_resolver_default_max_hosts;
+ static Io::HostResolver *s_static_default_host_resolver;
+ static std::mutex s_lock_default_host_resolver;
+ static void ReleaseStaticDefaultHostResolver();
+ };
+
+ /**
+ * Gets a string description of a CRT error code
+ * @param error error code to get a descriptive string for
+ * @return a string description of the error code
+ */
+ AWS_CRT_CPP_API const char *ErrorDebugString(int error) noexcept;
+
+ /**
+ * @return the value of the last aws error on the current thread. Return 0 if no aws-error raised before.
+ */
+ AWS_CRT_CPP_API int LastError() noexcept;
+
+ /**
+ * @return the value of the last aws error on the current thread. Return AWS_ERROR_UNKNOWN, if no aws-error
+ * raised before.
+ */
+ AWS_CRT_CPP_API int LastErrorOrUnknown() noexcept;
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Config.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Config.h
new file mode 100644
index 0000000000..99d1aae4c6
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Config.h
@@ -0,0 +1,11 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#define AWS_CRT_CPP_VERSION "0.19.8"
+#define AWS_CRT_CPP_VERSION_MAJOR 0
+#define AWS_CRT_CPP_VERSION_MINOR 19
+#define AWS_CRT_CPP_VERSION_PATCH 8
+#define AWS_CRT_CPP_GIT_HASH ""
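The version macros above lend themselves to a simple version report; a minimal, self-contained sketch using only the macros defined in this header:

```
#include <aws/crt/Config.h>
#include <cstdio>

int main()
{
    /* Report the aws-crt-cpp version this translation unit was compiled against. */
    std::printf(
        "aws-crt-cpp %s (%d.%d.%d)\n",
        AWS_CRT_CPP_VERSION,
        AWS_CRT_CPP_VERSION_MAJOR,
        AWS_CRT_CPP_VERSION_MINOR,
        AWS_CRT_CPP_VERSION_PATCH);
    return 0;
}
```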
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/DateTime.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/DateTime.h
new file mode 100644
index 0000000000..1861d1620e
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/DateTime.h
@@ -0,0 +1,198 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Exports.h>
+
+#include <aws/crt/Types.h>
+
+#include <aws/common/date_time.h>
+
+#include <chrono>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ enum class DateFormat
+ {
+ RFC822 = AWS_DATE_FORMAT_RFC822,
+ ISO_8601 = AWS_DATE_FORMAT_ISO_8601,
+ AutoDetect = AWS_DATE_FORMAT_AUTO_DETECT,
+ };
+
+ enum class Month
+ {
+ January = AWS_DATE_MONTH_JANUARY,
+ February = AWS_DATE_MONTH_FEBRUARY,
+ March = AWS_DATE_MONTH_MARCH,
+ April = AWS_DATE_MONTH_APRIL,
+ May = AWS_DATE_MONTH_MAY,
+ June = AWS_DATE_MONTH_JUNE,
+ July = AWS_DATE_MONTH_JULY,
+ August = AWS_DATE_MONTH_AUGUST,
+ September = AWS_DATE_MONTH_SEPTEMBER,
+ October = AWS_DATE_MONTH_OCTOBER,
+ November = AWS_DATE_MONTH_NOVEMBER,
+ December = AWS_DATE_MONTH_DECEMBER,
+ };
+
+ enum class DayOfWeek
+ {
+ Sunday = AWS_DATE_DAY_OF_WEEK_SUNDAY,
+ Monday = AWS_DATE_DAY_OF_WEEK_MONDAY,
+ Tuesday = AWS_DATE_DAY_OF_WEEK_TUESDAY,
+ Wednesday = AWS_DATE_DAY_OF_WEEK_WEDNESDAY,
+ Thursday = AWS_DATE_DAY_OF_WEEK_THURSDAY,
+ Friday = AWS_DATE_DAY_OF_WEEK_FRIDAY,
+ Saturday = AWS_DATE_DAY_OF_WEEK_SATURDAY,
+ };
+
+ class AWS_CRT_CPP_API DateTime final
+ {
+ public:
+ /**
+ * Initializes time point to epoch
+ */
+ DateTime() noexcept;
+
+ /**
+ * Initializes time point to any other arbitrary timepoint
+ */
+ DateTime(const std::chrono::system_clock::time_point &timepointToAssign) noexcept;
+
+ /**
+ * Initializes time point to milliseconds since epoch
+ */
+ DateTime(uint64_t millisSinceEpoch) noexcept;
+
+ /**
+ * Initializes time point to epoch time in seconds.millis
+ */
+ DateTime(double epoch_millis) noexcept;
+
+ /**
+ * Initializes time point to value represented by timestamp and format.
+ */
+ DateTime(const char *timestamp, DateFormat format) noexcept;
+
+ bool operator==(const DateTime &other) const noexcept;
+ bool operator<(const DateTime &other) const noexcept;
+ bool operator>(const DateTime &other) const noexcept;
+ bool operator!=(const DateTime &other) const noexcept;
+ bool operator<=(const DateTime &other) const noexcept;
+ bool operator>=(const DateTime &other) const noexcept;
+
+ DateTime operator+(const std::chrono::milliseconds &a) const noexcept;
+ DateTime operator-(const std::chrono::milliseconds &a) const noexcept;
+
+ /**
+ * Assign from seconds.millis since epoch.
+ */
+ DateTime &operator=(double secondsSinceEpoch) noexcept;
+
+ /**
+ * Assign from millis since epoch.
+ */
+ DateTime &operator=(uint64_t millisSinceEpoch) noexcept;
+
+ /**
+ * Assign from another time_point
+ */
+ DateTime &operator=(const std::chrono::system_clock::time_point &timepointToAssign) noexcept;
+
+ /**
+ * Assign from an ISO8601 or RFC822 formatted string
+ */
+ DateTime &operator=(const char *timestamp) noexcept;
+
+ explicit operator bool() const noexcept;
+ int GetLastError() const noexcept;
+
+ /**
+ * Convert dateTime to local time string using predefined format.
+ */
+ bool ToLocalTimeString(DateFormat format, ByteBuf &outputBuf) const noexcept;
+
+ /**
+ * Convert dateTime to GMT time string using predefined format.
+ */
+ bool ToGmtString(DateFormat format, ByteBuf &outputBuf) const noexcept;
+
+ /**
+ * Get the representation of this datetime as seconds.milliseconds since epoch
+ */
+ double SecondsWithMSPrecision() const noexcept;
+
+ /**
+ * Milliseconds since epoch of this datetime.
+ */
+ uint64_t Millis() const noexcept;
+
+ /**
+ * In the likely case this class doesn't do everything you need to do, here's a copy of the time_point
+ * structure. Have fun.
+ */
+ std::chrono::system_clock::time_point UnderlyingTimestamp() const noexcept;
+
+ /**
+ * Get the Year portion of this dateTime. localTime if true, return local time, otherwise return UTC
+ */
+ uint16_t GetYear(bool localTime = false) const noexcept;
+
+ /**
+ * Get the Month portion of this dateTime. localTime if true, return local time, otherwise return UTC
+ */
+ Month GetMonth(bool localTime = false) const noexcept;
+
+ /**
+ * Get the Day of the Month portion of this dateTime. localTime if true, return local time, otherwise return
+ * UTC
+ */
+ uint8_t GetDay(bool localTime = false) const noexcept;
+
+ /**
+ * Get the Day of the Week portion of this dateTime. localTime if true, return local time, otherwise return
+ * UTC
+ */
+ DayOfWeek GetDayOfWeek(bool localTime = false) const noexcept;
+
+ /**
+ * Get the Hour portion of this dateTime. localTime if true, return local time, otherwise return UTC
+ */
+ uint8_t GetHour(bool localTime = false) const noexcept;
+
+ /**
+ * Get the Minute portion of this dateTime. localTime if true, return local time, otherwise return UTC
+ */
+ uint8_t GetMinute(bool localTime = false) const noexcept;
+
+ /**
+ * Get the Second portion of this dateTime. localTime if true, return local time, otherwise return UTC
+ */
+ uint8_t GetSecond(bool localTime = false) const noexcept;
+
+ /**
+ * Get whether or not this dateTime is in Daylight savings time. localTime if true, return local time,
+ * otherwise return UTC
+ */
+ bool IsDST(bool localTime = false) const noexcept;
+
+ /**
+ * Get an instance of DateTime representing this very instant.
+ */
+ static DateTime Now() noexcept;
+
+ /**
+ * Computes the difference between two DateTime instances and returns the difference
+ * in milliseconds.
+ */
+ std::chrono::milliseconds operator-(const DateTime &other) const noexcept;
+
+ private:
+ aws_date_time m_date_time;
+ bool m_good;
+ };
+ } // namespace Crt
+} // namespace Aws
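A minimal usage sketch of the `DateTime` API declared above (the timestamp string is arbitrary; only members shown in this header are used):

```
#include <aws/crt/DateTime.h>

#include <chrono>
#include <iostream>

int main()
{
    /* Current time, plus simple arithmetic via std::chrono. */
    Aws::Crt::DateTime now = Aws::Crt::DateTime::Now();
    Aws::Crt::DateTime later = now + std::chrono::milliseconds(1500);

    std::cout << "millis since epoch: " << now.Millis() << "\n";
    std::cout << "difference in ms:   " << (later - now).count() << "\n";

    /* Parsing; operator bool() reports whether the last operation succeeded. */
    Aws::Crt::DateTime parsed("2023-12-03T13:33:55Z", Aws::Crt::DateFormat::ISO_8601);
    if (parsed)
    {
        std::cout << "parsed year: " << parsed.GetYear() << "\n";
    }
    return 0;
}
```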
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Exports.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Exports.h
new file mode 100644
index 0000000000..bd171dc736
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Exports.h
@@ -0,0 +1,39 @@
+#pragma once
+
+/*
+ *Copyright 2010-2017 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ *Licensed under the Apache License, Version 2.0 (the "License").
+ *You may not use this file except in compliance with the License.
+ *A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#if defined(USE_WINDOWS_DLL_SEMANTICS) || defined(WIN32)
+# ifdef _MSC_VER
+# pragma warning(disable : 4251)
+# endif // _MSC_VER
+# ifdef AWS_CRT_CPP_USE_IMPORT_EXPORT
+# ifdef AWS_CRT_CPP_EXPORTS
+# define AWS_CRT_CPP_API __declspec(dllexport)
+# else
+# define AWS_CRT_CPP_API __declspec(dllimport)
+# endif /* AWS_CRT_CPP_API */
+# else
+# define AWS_CRT_CPP_API
+# endif // AWS_CRT_CPP_USE_IMPORT_EXPORT
+
+#else // defined (USE_WINDOWS_DLL_SEMANTICS) || defined (WIN32)
+# if ((__GNUC__ >= 4) || defined(__clang__)) && defined(AWS_CRT_CPP_USE_IMPORT_EXPORT) && \
+ defined(AWS_CRT_CPP_EXPORTS)
+# define AWS_CRT_CPP_API __attribute__((visibility("default")))
+# else
+# define AWS_CRT_CPP_API
+# endif // __GNUC__ >= 4 || defined(__clang__)
+#endif
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/ImdsClient.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/ImdsClient.h
new file mode 100644
index 0000000000..c73e2bf27c
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/ImdsClient.h
@@ -0,0 +1,386 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/DateTime.h>
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+#include <functional>
+
+struct aws_credentials;
+struct aws_imds_client;
+struct aws_imds_instance_info;
+struct aws_imds_iam_profile;
+
+namespace Aws
+{
+
+ namespace Crt
+ {
+
+ namespace Io
+ {
+ class ClientBootstrap;
+ }
+
+ namespace Auth
+ {
+ class Credentials;
+ }
+
+ namespace Imds
+ {
+
+ struct AWS_CRT_CPP_API ImdsClientConfig
+ {
+ ImdsClientConfig() : Bootstrap(nullptr) {}
+
+ /**
+ * Connection bootstrap to use to create the http connection required to
+ * query resource from the Ec2 instance metadata service
+ *
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /* Should add retry strategy support once that is available */
+ };
+
+ /**
+ * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instancedata-data-categories.html
+ */
+ struct AWS_CRT_CPP_API IamProfileView
+ {
+ DateTime lastUpdated;
+ StringView instanceProfileArn;
+ StringView instanceProfileId;
+ };
+
+ /**
+ * A convenient class for you to persist data from IamProfileView, which has StringView members.
+ */
+ struct AWS_CRT_CPP_API IamProfile
+ {
+ IamProfile() {}
+ IamProfile(const IamProfileView &other);
+
+ IamProfile &operator=(const IamProfileView &other);
+
+ DateTime lastUpdated;
+ String instanceProfileArn;
+ String instanceProfileId;
+ };
+
+ /**
+ * Block of per-instance EC2-specific data
+ *
+ * https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/instance-identity-documents.html
+ */
+ struct AWS_CRT_CPP_API InstanceInfoView
+ {
+ /* an array of StringView */
+ Vector<StringView> marketplaceProductCodes;
+ StringView availabilityZone;
+ StringView privateIp;
+ StringView version;
+ StringView instanceId;
+ /* an array of StringView */
+ Vector<StringView> billingProducts;
+ StringView instanceType;
+ StringView accountId;
+ StringView imageId;
+ DateTime pendingTime;
+ StringView architecture;
+ StringView kernelId;
+ StringView ramdiskId;
+ StringView region;
+ };
+
+ /**
+ * A convenient class for you to persist data from InstanceInfoView, which has StringView members.
+ */
+ struct AWS_CRT_CPP_API InstanceInfo
+ {
+ InstanceInfo() {}
+ InstanceInfo(const InstanceInfoView &other);
+
+ InstanceInfo &operator=(const InstanceInfoView &other);
+
+ /* an array of String */
+ Vector<String> marketplaceProductCodes;
+ String availabilityZone;
+ String privateIp;
+ String version;
+ String instanceId;
+ /* an array of String */
+ Vector<String> billingProducts;
+ String instanceType;
+ String accountId;
+ String imageId;
+ DateTime pendingTime;
+ String architecture;
+ String kernelId;
+ String ramdiskId;
+ String region;
+ };
+
+ using OnResourceAcquired = std::function<void(const StringView &resource, int errorCode, void *userData)>;
+ using OnVectorResourceAcquired =
+ std::function<void(const Vector<StringView> &resource, int errorCode, void *userData)>;
+ using OnCredentialsAcquired =
+ std::function<void(const Auth::Credentials &credentials, int errorCode, void *userData)>;
+ using OnIamProfileAcquired =
+ std::function<void(const IamProfileView &iamProfile, int errorCode, void *userData)>;
+ using OnInstanceInfoAcquired =
+ std::function<void(const InstanceInfoView &instanceInfo, int errorCode, void *userData)>;
+
+ class AWS_CRT_CPP_API ImdsClient
+ {
+ public:
+ ImdsClient(const ImdsClientConfig &config, Allocator *allocator = ApiAllocator()) noexcept;
+
+ ~ImdsClient();
+
+ ImdsClient(const ImdsClient &) = delete;
+ ImdsClient(ImdsClient &&) = delete;
+ ImdsClient &operator=(const ImdsClient &) = delete;
+ ImdsClient &operator=(ImdsClient &&) = delete;
+
+ aws_imds_client *GetUnderlyingHandle() { return m_client; }
+
+ /**
+ * Queries a generic resource (string) from the ec2 instance metadata document
+ *
+ * @param resourcePath path of the resource to query
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetResource(const StringView &resourcePath, OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the ami id of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetAmiId(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the ami launch index of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetAmiLaunchIndex(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the ami manifest path of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetAmiManifestPath(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the list of ancestor ami ids of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetAncestorAmiIds(OnVectorResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the instance-action of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetInstanceAction(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the instance id of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetInstanceId(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the instance type of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetInstanceType(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the mac address of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetMacAddress(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the private ip address of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetPrivateIpAddress(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the availability zone of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetAvailabilityZone(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the product codes of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetProductCodes(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the public key of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetPublicKey(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the ramdisk id of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetRamDiskId(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the reservation id of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetReservationId(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the list of the security groups of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetSecurityGroups(OnVectorResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the list of block device mappings of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetBlockDeviceMapping(OnVectorResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the attached iam role of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetAttachedIamRole(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets temporary credentials based on the attached iam role of the ec2 instance
+ *
+ * @param iamRoleName iam role name to get temporary credentials through
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetCredentials(const StringView &iamRoleName, OnCredentialsAcquired callback, void *userData);
+
+ /**
+ * Gets the iam profile information of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetIamProfile(OnIamProfileAcquired callback, void *userData);
+
+ /**
+ * Gets the user data of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetUserData(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the signature of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetInstanceSignature(OnResourceAcquired callback, void *userData);
+
+ /**
+ * Gets the instance information data block of the ec2 instance from the instance metadata document
+ *
+ * @param callback callback function to invoke on query success or failure
+ * @param userData opaque data to invoke the completion callback with
+ * @return AWS_OP_SUCCESS if the query was successfully started, AWS_OP_ERR otherwise
+ */
+ int GetInstanceInfo(OnInstanceInfoAcquired callback, void *userData);
+
+ private:
+ static void s_onResourceAcquired(const aws_byte_buf *resource, int errorCode, void *userData);
+
+ static void s_onVectorResourceAcquired(const aws_array_list *array, int errorCode, void *userData);
+
+ static void s_onCredentialsAcquired(const aws_credentials *credentials, int errorCode, void *userData);
+
+ static void s_onIamProfileAcquired(
+ const aws_imds_iam_profile *iamProfileInfo,
+ int errorCode,
+ void *userData);
+
+ static void s_onInstanceInfoAcquired(
+ const aws_imds_instance_info *instanceInfo,
+ int errorCode,
+ void *userData);
+
+ aws_imds_client *m_client;
+ Allocator *m_allocator;
+ };
+
+ } // namespace Imds
+ } // namespace Crt
+} // namespace Aws
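A minimal sketch of driving the asynchronous `ImdsClient` declared above (it only returns meaningful data on an EC2 instance; the `std::promise` synchronization and the treatment of error code 0 as success are assumptions of this sketch, not requirements of the API):

```
#include <aws/crt/Api.h>
#include <aws/crt/ImdsClient.h>

#include <future>
#include <iostream>
#include <string>

int main()
{
    Aws::Crt::ApiHandle apiHandle;

    /* Bootstrap == nullptr -> the static default ClientBootstrap is used. */
    Aws::Crt::Imds::ImdsClientConfig config;
    Aws::Crt::Imds::ImdsClient client(config);

    std::promise<void> done;
    auto onInstanceId = [&done](const Aws::Crt::StringView &resource, int errorCode, void *) {
        if (errorCode == 0)
        {
            std::cout << "instance-id: " << std::string(resource.data(), resource.size()) << "\n";
        }
        else
        {
            std::cerr << "IMDS query failed: " << Aws::Crt::ErrorDebugString(errorCode) << "\n";
        }
        done.set_value();
    };

    /* GetInstanceId() only starts the query; the callback delivers the result. */
    if (client.GetInstanceId(onInstanceId, nullptr) == AWS_OP_SUCCESS)
    {
        done.get_future().wait();
    }
    return 0;
}
```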
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/JsonObject.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/JsonObject.h
new file mode 100644
index 0000000000..feca11eb4f
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/JsonObject.h
@@ -0,0 +1,406 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/Types.h>
+
+namespace Aws
+{
+ struct cJSON;
+
+ namespace Crt
+ {
+ class JsonView;
+ /**
+ * JSON DOM manipulation class.
+ * To read or serialize use @ref View function.
+ */
+ class AWS_CRT_CPP_API JsonObject
+ {
+ public:
+ /**
+ * Constructs empty JSON DOM.
+ */
+ JsonObject();
+
+ /**
+ * Constructs a JSON DOM by parsing the input string.
+ */
+ JsonObject(const String &value);
+
+ /**
+ * Performs a deep copy of the JSON DOM parameter.
+ * Prefer using a @ref JsonView if copying is not needed.
+ */
+ JsonObject(const JsonObject &value);
+
+ /**
+ * Moves the ownership of the internal JSON DOM.
+ * No copying is performed.
+ */
+ JsonObject(JsonObject &&value) noexcept;
+
+ ~JsonObject();
+
+ /**
+ * Performs a deep copy of the JSON DOM parameter.
+ */
+ JsonObject &operator=(const JsonObject &other);
+
+ /**
+ * Moves the ownership of the internal JSON DOM of the parameter to the current object.
+ * No copying is performed.
+ * A DOM currently owned by the object will be freed prior to copying.
+ * @warning This will result in invalidating any outstanding views of the current DOM. However, views
+ * to the moved-from DOM would still be valid.
+ */
+ JsonObject &operator=(JsonObject &&other) noexcept;
+
+ bool operator==(const JsonObject &other) const;
+ bool operator!=(const JsonObject &other) const;
+
+ /**
+ * Adds a string to the top level of this node with key.
+ */
+ JsonObject &WithString(const String &key, const String &value);
+ JsonObject &WithString(const char *key, const String &value);
+
+ /**
+ * Converts the current JSON node to a string.
+ */
+ JsonObject &AsString(const String &value);
+
+ /**
+ * Adds a bool value with key to the top level of this node.
+ */
+ JsonObject &WithBool(const String &key, bool value);
+ JsonObject &WithBool(const char *key, bool value);
+
+ /**
+ * Converts the current JSON node to a bool.
+ */
+ JsonObject &AsBool(bool value);
+
+ /**
+ * Adds an integer value at key at the top level of this node.
+ */
+ JsonObject &WithInteger(const String &key, int value);
+ JsonObject &WithInteger(const char *key, int value);
+
+ /**
+ * Converts the current JSON node to an integer.
+ */
+ JsonObject &AsInteger(int value);
+
+ /**
+ * Adds a 64-bit integer value at key to the top level of this node.
+ */
+ JsonObject &WithInt64(const String &key, int64_t value);
+ JsonObject &WithInt64(const char *key, int64_t value);
+
+ /**
+ * Converts the current JSON node to a 64-bit integer.
+ */
+ JsonObject &AsInt64(int64_t value);
+
+ /**
+ * Adds a double value at key at the top level of this node.
+ */
+ JsonObject &WithDouble(const String &key, double value);
+ JsonObject &WithDouble(const char *key, double value);
+
+ /**
+ * Converts the current JSON node to a double.
+ */
+ JsonObject &AsDouble(double value);
+
+ /**
+ * Adds an array of strings to the top level of this node at key.
+ */
+ JsonObject &WithArray(const String &key, const Vector<String> &array);
+ JsonObject &WithArray(const char *key, const Vector<String> &array);
+
+ /**
+ * Adds an array of arbitrary JSON objects to the top level of this node at key.
+ * The values in the array parameter will be deep-copied.
+ */
+ JsonObject &WithArray(const String &key, const Vector<JsonObject> &array);
+
+ /**
+ * Adds an array of arbitrary JSON objects to the top level of this node at key.
+ * The values in the array parameter will be moved-from.
+ */
+ JsonObject &WithArray(const String &key, Vector<JsonObject> &&array);
+
+ /**
+ * Converts the current JSON node to an array whose values are deep-copied from the array parameter.
+ */
+ JsonObject &AsArray(const Vector<JsonObject> &array);
+
+ /**
+ * Converts the current JSON node to an array whose values are moved from the array parameter.
+ */
+ JsonObject &AsArray(Vector<JsonObject> &&array);
+
+ /**
+ * Sets the current JSON node as null.
+ */
+ JsonObject &AsNull();
+
+ /**
+ * Adds a JSON object to the top level of this node at key.
+ * The object parameter is deep-copied.
+ */
+ JsonObject &WithObject(const String &key, const JsonObject &value);
+ JsonObject &WithObject(const char *key, const JsonObject &value);
+
+ /**
+ * Adds a JSON object to the top level of this node at key.
+ */
+ JsonObject &WithObject(const String &key, JsonObject &&value);
+ JsonObject &WithObject(const char *key, JsonObject &&value);
+
+ /**
+ * Converts the current JSON node to a JSON object by deep-copying the parameter.
+ */
+ JsonObject &AsObject(const JsonObject &value);
+
+ /**
+ * Converts the current JSON node to a JSON object by moving from the parameter.
+ */
+ JsonObject &AsObject(JsonObject &&value);
+
+ /**
+ * Returns true if the last parse request was successful. If this returns false,
+ * you can call GetErrorMessage() to find the cause.
+ */
+ inline bool WasParseSuccessful() const { return m_wasParseSuccessful; }
+
+ /**
+ * Returns the last error message from a failed parse attempt. Returns empty string if no error.
+ */
+ inline const String &GetErrorMessage() const { return m_errorMessage; }
+
+ /**
+ * Creates a view from the current root JSON node.
+ */
+ JsonView View() const;
+
+ private:
+ void Destroy();
+ JsonObject(cJSON *value);
+ cJSON *m_value;
+ bool m_wasParseSuccessful;
+ String m_errorMessage;
+ friend class JsonView;
+ };
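+
+ // Usage sketch (illustrative only; uses only APIs declared in this header):
+ //
+ //   JsonObject doc;
+ //   doc.WithString("name", "example").WithInteger("count", 3);
+ //   String compact = doc.View().WriteCompact();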
+
+ /**
+ * Provides a read-only view of an existing JsonObject. This allows lightweight copying without making deep
+ * copies of the JsonObject.
+ * Note: This class does not extend the lifetime of the given JsonObject. It's your responsibility to ensure
+ * the lifetime of the JsonObject is extended beyond the lifetime of its view.
+ */
+ class AWS_CRT_CPP_API JsonView
+ {
+ public:
+ /* constructors */
+ JsonView();
+ JsonView(const JsonObject &val);
+ JsonView &operator=(const JsonObject &val);
+
+ /**
+ * Gets a string from this node by its key.
+ */
+ String GetString(const String &key) const;
+ /**
+ * Gets a string from this node by its key.
+ */
+ String GetString(const char *key) const;
+
+ /**
+ * Returns the value of this node as a string.
+ * The behavior is undefined if the node is _not_ of type string.
+ */
+ String AsString() const;
+
+ /**
+ * Gets a boolean value from this node by its key.
+ */
+ bool GetBool(const String &key) const;
+ /**
+ * Gets a boolean value from this node by its key.
+ */
+ bool GetBool(const char *key) const;
+
+ /**
+ * Returns the value of this node as a boolean.
+ */
+ bool AsBool() const;
+
+ /**
+ * Gets an integer value from this node by its key.
+ * The integer is of the same size as an int on the machine.
+ */
+ int GetInteger(const String &key) const;
+ /**
+ * Gets an integer value from this node by its key.
+ * The integer is of the same size as an int on the machine.
+ */
+ int GetInteger(const char *key) const;
+
+ /**
+ * Returns the value of this node as an int.
+ */
+ int AsInteger() const;
+
+ /**
+ * Gets a 64-bit integer value from this node by its key.
+ * The value is 64-bit regardless of the platform/machine.
+ */
+ int64_t GetInt64(const String &key) const;
+ /**
+ * Gets a 64-bit integer value from this node by its key.
+ * The value is 64-bit regardless of the platform/machine.
+ */
+ int64_t GetInt64(const char *key) const;
+
+ /**
+ * Returns the value of this node as a 64-bit integer.
+ */
+ int64_t AsInt64() const;
+
+ /**
+ * Gets a double precision floating-point value from this node by its key.
+ */
+ double GetDouble(const String &key) const;
+ /**
+ * Gets a double precision floating-point value from this node by its key.
+ */
+ double GetDouble(const char *key) const;
+
+ /**
+ * Returns the value of this node as a double precision floating-point.
+ */
+ double AsDouble() const;
+
+ /**
+ * Gets an array of JsonView objects from this node by its key.
+ */
+ Vector<JsonView> GetArray(const String &key) const;
+ /**
+ * Gets an array of JsonView objects from this node by its key.
+ */
+ Vector<JsonView> GetArray(const char *key) const;
+
+ /**
+ * Returns the value of this node as an array of JsonView objects.
+ */
+ Vector<JsonView> AsArray() const;
+
+ /**
+ * Gets a JsonView object from this node by its key.
+ */
+ JsonView GetJsonObject(const String &key) const;
+ /**
+ * Gets a JsonView object from this node by its key.
+ */
+ JsonView GetJsonObject(const char *key) const;
+
+ JsonObject GetJsonObjectCopy(const String &key) const;
+
+ JsonObject GetJsonObjectCopy(const char *key) const;
+
+ /**
+ * Returns the value of this node as a JsonView object.
+ */
+ JsonView AsObject() const;
+
+ /**
+ * Reads all json objects at the top level of this node (does not traverse the tree any further)
+ * along with their keys.
+ */
+ Map<String, JsonView> GetAllObjects() const;
+
+ /**
+ * Tests whether a value exists at the current node level for the given key.
+ * Returns true if a value has been found and its value is not null, false otherwise.
+ */
+ bool ValueExists(const String &key) const;
+ /**
+ * Tests whether a value exists at the current node level for the given key.
+ * Returns true if a value has been found and its value is not null, false otherwise.
+ */
+ bool ValueExists(const char *key) const;
+
+ /**
+ * Tests whether a key exists at the current node level.
+ */
+ bool KeyExists(const String &key) const;
+ /**
+ * Tests whether a key exists at the current node level.
+ */
+ bool KeyExists(const char *key) const;
+
+ /**
+ * Tests whether the current value is a JSON object.
+ */
+ bool IsObject() const;
+
+ /**
+ * Tests whether the current value is a boolean.
+ */
+ bool IsBool() const;
+
+ /**
+ * Tests whether the current value is a string.
+ */
+ bool IsString() const;
+
+ /**
+ * Tests whether the current value is an int or int64_t.
+ * Returns false if the value is floating-point.
+ */
+ bool IsIntegerType() const;
+
+ /**
+ * Tests whether the current value is a floating-point.
+ */
+ bool IsFloatingPointType() const;
+
+ /**
+ * Tests whether the current value is a JSON array.
+ */
+ bool IsListType() const;
+
+ /**
+ * Tests whether the current value is NULL.
+ */
+ bool IsNull() const;
+
+ /**
+ * Writes the current JSON view without whitespace characters starting at the current level to a string.
+ * @param treatAsObject if the current value is empty, writes out '{}' rather than an empty string.
+ */
+ String WriteCompact(bool treatAsObject = true) const;
+
+ /**
+ * Writes the current JSON view to a string in a human friendly format.
+ * @param treatAsObject if the current value is empty, writes out '{}' rather than an empty string.
+ */
+ String WriteReadable(bool treatAsObject = true) const;
+
+ /**
+ * Creates a deep copy of the JSON value rooted in the current JSON view.
+ */
+ JsonObject Materialize() const;
+
+ private:
+ JsonView(cJSON *val);
+ JsonView &operator=(cJSON *val);
+ cJSON *m_value;
+ };
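+
+ // Usage sketch (illustrative only; uses only APIs declared in this header):
+ //
+ //   JsonObject doc(String("{\"name\":\"example\"}"));
+ //   if (doc.WasParseSuccessful())
+ //   {
+ //       String name = doc.View().GetString("name");
+ //   }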
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Optional.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Optional.h
new file mode 100644
index 0000000000..5acc232557
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Optional.h
@@ -0,0 +1,203 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <utility>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * Custom implementation of an Option type, since std::optional requires C++17.
+ * @tparam T type of the optional value
+ */
+ template <typename T> class Optional
+ {
+ public:
+ Optional() : m_value(nullptr) {}
+ Optional(const T &val)
+ {
+ new (m_storage) T(val);
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+
+ Optional(T &&val)
+ {
+ new (m_storage) T(std::forward<T>(val));
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+
+ ~Optional()
+ {
+ if (m_value)
+ {
+ m_value->~T();
+ }
+ }
+
+ template <typename U = T> Optional &operator=(U &&u)
+ {
+ if (m_value)
+ {
+ *m_value = std::forward<U>(u);
+ return *this;
+ }
+
+ new (m_storage) T(std::forward<U>(u));
+ m_value = reinterpret_cast<T *>(m_storage);
+
+ return *this;
+ }
+
+ Optional(const Optional<T> &other)
+ {
+ if (other.m_value)
+ {
+ new (m_storage) T(*other.m_value);
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+ else
+ {
+ m_value = nullptr;
+ }
+ }
+
+ Optional(Optional<T> &&other)
+ {
+ if (other.m_value)
+ {
+ new (m_storage) T(std::forward<T>(*other.m_value));
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+ else
+ {
+ m_value = nullptr;
+ }
+ }
+
+ Optional &operator=(const Optional &other)
+ {
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ if (m_value)
+ {
+ if (other.m_value)
+ {
+ *m_value = *other.m_value;
+ }
+ else
+ {
+ m_value->~T();
+ m_value = nullptr;
+ }
+
+ return *this;
+ }
+
+ if (other.m_value)
+ {
+ new (m_storage) T(*other.m_value);
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+
+ return *this;
+ }
+
+ template <typename U = T> Optional<T> &operator=(const Optional<U> &other)
+ {
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ if (m_value)
+ {
+ if (other.m_value)
+ {
+ *m_value = *other.m_value;
+ }
+ else
+ {
+ m_value->~T();
+ m_value = nullptr;
+ }
+
+ return *this;
+ }
+
+ if (other.m_value)
+ {
+ new (m_storage) T(*other.m_value);
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+
+ return *this;
+ }
+
+ template <typename U = T> Optional<T> &operator=(Optional<U> &&other)
+ {
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ if (m_value)
+ {
+ if (other.m_value)
+ {
+ *m_value = std::forward<U>(*other.m_value);
+ }
+ else
+ {
+ m_value->~T();
+ m_value = nullptr;
+ }
+
+ return *this;
+ }
+
+ if (other.m_value)
+ {
+ new (m_storage) T(std::forward<U>(*other.m_value));
+ m_value = reinterpret_cast<T *>(m_storage);
+ }
+
+ return *this;
+ }
+
+ const T *operator->() const { return m_value; }
+ T *operator->() { return m_value; }
+ const T &operator*() const & { return *m_value; }
+ T &operator*() & { return *m_value; }
+ const T &&operator*() const && { return std::move(*m_value); }
+ T &&operator*() && { return std::move(*m_value); }
+
+ explicit operator bool() const noexcept { return m_value != nullptr; }
+ bool has_value() const noexcept { return m_value != nullptr; }
+
+ T &value() & { return *m_value; }
+ const T &value() const & { return *m_value; }
+
+ T &&value() && { return std::move(*m_value); }
+ const T &&value() const && { return std::move(*m_value); }
+
+ void reset()
+ {
+ if (m_value)
+ {
+ m_value->~T();
+ m_value = nullptr;
+ }
+ }
+
+ private:
+ alignas(T) char m_storage[sizeof(T)];
+ T *m_value;
+ };
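+
+ // Usage sketch (illustrative only; mirrors the std::optional subset implemented above):
+ //
+ //   Optional<int> maybe;              // disengaged
+ //   maybe = 42;                       // constructs the value in the internal storage
+ //   if (maybe.has_value()) { int v = *maybe; }
+ //   maybe.reset();                    // destroys the contained value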
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/RefCounted.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/RefCounted.h
new file mode 100644
index 0000000000..d9ec11d818
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/RefCounted.h
@@ -0,0 +1,68 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/common/assert.h>
+#include <memory>
+#include <mutex>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * Inherit from RefCounted to allow reference-counting from C code,
+ * which will keep your C++ object alive as long as the count is non-zero.
+ *
+ * A class must inherit from RefCounted and std::enable_shared_from_this.
+ * Your class must always be placed inside a shared_ptr (do not create on
+ * the stack, or keep on the heap as a raw pointer).
+ *
+ * Whenever the reference count goes from 0 to 1 a shared_ptr is created
+ * internally to keep this object alive. Whenever the reference count
+ * goes from 1 to 0 the internal shared_ptr is reset, allowing this object
+ * to be destroyed.
+ */
+ template <class T> class RefCounted
+ {
+ protected:
+ RefCounted() {}
+ ~RefCounted() {}
+
+ void AcquireRef()
+ {
+ m_mutex.lock();
+ if (m_count++ == 0)
+ {
+ m_strongPtr = static_cast<T *>(this)->shared_from_this();
+ }
+ m_mutex.unlock();
+ }
+
+ void ReleaseRef()
+ {
+ // Move contents of m_strongPtr to a temp so that this
+ // object can't be destroyed until the function exits.
+ std::shared_ptr<T> tmpStrongPtr;
+
+ m_mutex.lock();
+ AWS_ASSERT(m_count > 0 && "refcount has gone negative");
+ if (m_count-- == 1)
+ {
+ std::swap(m_strongPtr, tmpStrongPtr);
+ }
+ m_mutex.unlock();
+ }
+
+ private:
+ RefCounted(const RefCounted &) = delete;
+ RefCounted &operator=(const RefCounted &) = delete;
+
+ size_t m_count = 0;
+ std::shared_ptr<T> m_strongPtr;
+ std::mutex m_mutex;
+ };
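+
+ // Usage sketch (illustrative only; `MyResource` is a placeholder type):
+ //
+ //   class MyResource : public std::enable_shared_from_this<MyResource>,
+ //                      public RefCounted<MyResource>
+ //   {
+ //       // Member code calls AcquireRef() before handing `this` to C code and
+ //       // arranges for ReleaseRef() to run from the C completion callback.
+ //   };
+ //
+ //   auto resource = std::make_shared<MyResource>();   // must live in a shared_ptr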
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StlAllocator.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StlAllocator.h
new file mode 100644
index 0000000000..13ec6ff68e
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StlAllocator.h
@@ -0,0 +1,63 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Allocator.h>
+
+#include <memory>
+#include <type_traits>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * Stateful allocator variant that uses an underlying CRT allocator
+ * @tparam T type that allocator can allocate
+ */
+ template <typename T> class StlAllocator : public std::allocator<T>
+ {
+ public:
+ using Base = std::allocator<T>;
+
+ StlAllocator() noexcept : Base() { m_allocator = ApiAllocator(); }
+
+ StlAllocator(Allocator *allocator) noexcept : Base() { m_allocator = allocator; }
+
+ StlAllocator(const StlAllocator<T> &a) noexcept : Base(a) { m_allocator = a.m_allocator; }
+
+ template <class U> StlAllocator(const StlAllocator<U> &a) noexcept : Base(a)
+ {
+ m_allocator = a.m_allocator;
+ }
+
+ ~StlAllocator() {}
+
+ using size_type = std::size_t;
+
+ template <typename U> struct rebind
+ {
+ typedef StlAllocator<U> other;
+ };
+
+ using RawPointer = typename std::allocator_traits<std::allocator<T>>::pointer;
+
+ RawPointer allocate(size_type n, const void *hint = nullptr)
+ {
+ (void)hint;
+ AWS_ASSERT(m_allocator);
+ return static_cast<RawPointer>(aws_mem_acquire(m_allocator, n * sizeof(T)));
+ }
+
+ void deallocate(RawPointer p, size_type)
+ {
+ AWS_ASSERT(m_allocator);
+ aws_mem_release(m_allocator, p);
+ }
+
+ Allocator *m_allocator;
+ };
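+
+ // Usage sketch (illustrative only; the default constructor picks up ApiAllocator(),
+ // as in the implementation above):
+ //
+ //   StlAllocator<int> alloc;                            // wraps ApiAllocator()
+ //   std::vector<int, StlAllocator<int>> values(alloc);
+ //   values.push_back(1);                                // memory comes from the CRT allocator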
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringUtils.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringUtils.h
new file mode 100644
index 0000000000..7ab98c2b0f
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringUtils.h
@@ -0,0 +1,21 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Exports.h>
+
+#include <stddef.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * C-string hash function
+ * @param str string to hash
+ * @return hash code of the string
+ */
+ size_t AWS_CRT_CPP_API HashString(const char *str) noexcept;
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringView.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringView.h
new file mode 100644
index 0000000000..b8c6c66881
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/StringView.h
@@ -0,0 +1,864 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+/**
+ * To keep ABI compatibility, we use CRT's own string view implementation even for C++17.
+ */
+
+#include <algorithm>
+#include <cassert>
+#include <iterator>
+#include <limits>
+#include <stddef.h>
+#include <type_traits>
+
+#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+# include <string_view>
+#endif
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * Custom string view implementation in order to meet C++11 baseline
+ * @tparam CharT
+ * @tparam Traits
+ */
+ template <typename CharT, typename Traits = std::char_traits<CharT>> class basic_string_view
+ {
+ public:
+ // types
+ using traits_type = Traits;
+ using value_type = CharT;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+ using const_iterator = const value_type *;
+ using iterator = const_iterator;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using reverse_iterator = const_reverse_iterator;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ static constexpr size_type npos = static_cast<size_type>(-1);
+
+ // constructors and assignment
+
+ constexpr basic_string_view() noexcept : m_size{0}, m_data{nullptr} {}
+
+ constexpr basic_string_view(const basic_string_view &) noexcept = default;
+
+ constexpr basic_string_view(const CharT *s) noexcept : m_size{traits_type::length(s)}, m_data{s} {}
+
+ constexpr basic_string_view(const CharT *s, size_type count) noexcept : m_size{count}, m_data{s} {}
+
+ basic_string_view &operator=(const basic_string_view &) noexcept = default;
+
+#if __cplusplus >= 201703L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201703L)
+ constexpr basic_string_view(const std::basic_string_view<CharT, Traits> &other) noexcept
+ : m_size(other.size()), m_data(other.data())
+ {
+ }
+
+ basic_string_view &operator=(const std::basic_string_view<CharT, Traits> &other) noexcept
+ {
+ m_data = other.data();
+ m_size = other.size();
+ return *this;
+ }
+#endif
+ // iterators
+
+ constexpr const_iterator begin() const noexcept { return this->m_data; }
+
+ constexpr const_iterator end() const noexcept { return this->m_data + this->m_size; }
+
+ constexpr const_iterator cbegin() const noexcept { return this->m_data; }
+
+ constexpr const_iterator cend() const noexcept { return this->m_data + this->m_size; }
+
+ constexpr const_reverse_iterator rbegin() const noexcept { return const_reverse_iterator(this->end()); }
+
+ constexpr const_reverse_iterator rend() const noexcept { return const_reverse_iterator(this->begin()); }
+
+ constexpr const_reverse_iterator crbegin() const noexcept { return const_reverse_iterator(this->end()); }
+
+ constexpr const_reverse_iterator crend() const noexcept { return const_reverse_iterator(this->begin()); }
+
+ constexpr size_type size() const noexcept { return this->m_size; }
+
+ constexpr size_type length() const noexcept { return this->m_size; }
+
+ constexpr size_type max_size() const noexcept { return (std::numeric_limits<size_type>::max)(); }
+
+ constexpr bool empty() const noexcept { return this->m_size == 0; }
+
+ // element accessors
+
+ const_reference operator[](size_type pos) const noexcept
+ {
+ assert(pos < m_size);
+ return *(this->m_data + pos);
+ }
+
+ const_reference at(size_type pos) const
+ {
+ assert(pos < m_size);
+ return *(this->m_data + pos);
+ }
+
+ const_reference front() const noexcept
+ {
+ assert(m_size > 0);
+ return *this->m_data;
+ }
+
+ const_reference back() const noexcept
+ {
+ assert(m_size > 0);
+ return *(this->m_data + this->m_size - 1);
+ }
+
+ constexpr const_pointer data() const noexcept { return this->m_data; }
+
+ // modifiers
+ void remove_prefix(size_type n) noexcept
+ {
+ assert(this->m_size >= n);
+ this->m_data += n;
+ this->m_size -= n;
+ }
+
+ void remove_suffix(size_type n) noexcept { this->m_size -= n; }
+
+ void swap(basic_string_view &other) noexcept
+ {
+ auto tmp = *this;
+ *this = other;
+ other = tmp;
+ }
+
+ // string operations
+ size_type copy(CharT *s, size_type n, size_type pos = 0) const
+ {
+ assert(pos <= size());
+ const size_type copyLen = (std::min)(n, m_size - pos);
+ traits_type::copy(s, data() + pos, copyLen);
+ return copyLen;
+ }
+
+ basic_string_view substr(size_type pos = 0, size_type n = npos) const noexcept(false)
+ {
+ assert(pos <= size());
+ const size_type copyLen = (std::min)(n, m_size - pos);
+ return basic_string_view{m_data + pos, copyLen};
+ }
+
+ int compare(const basic_string_view &s) const noexcept
+ {
+ const size_type compareLen = (std::min)(this->m_size, s.m_size);
+ int ret = traits_type::compare(this->m_data, s.m_data, compareLen);
+ if (ret == 0)
+ {
+ ret = _s_compare(this->m_size, s.m_size);
+ }
+ return ret;
+ }
+
+ constexpr int compare(size_type pos1, size_type n1, const basic_string_view &s) const
+ {
+ return this->substr(pos1, n1).compare(s);
+ }
+
+ constexpr int compare(
+ size_type pos1,
+ size_type n1,
+ const basic_string_view &s,
+ size_type pos2,
+ size_type n2) const
+ {
+ return this->substr(pos1, n1).compare(s.substr(pos2, n2));
+ }
+
+ constexpr int compare(const CharT *s) const noexcept { return this->compare(basic_string_view{s}); }
+
+ constexpr int compare(size_type pos1, size_type n1, const CharT *s) const
+ {
+ return this->substr(pos1, n1).compare(basic_string_view{s});
+ }
+
+ constexpr int compare(size_type pos1, size_type n1, const CharT *s, size_type n2) const noexcept(false)
+ {
+ return this->substr(pos1, n1).compare(basic_string_view(s, n2));
+ }
+
+ constexpr bool starts_with(const basic_string_view &other) const noexcept
+ {
+ return this->substr(0, other.size()) == other;
+ }
+
+ constexpr bool starts_with(CharT c) const noexcept
+ {
+ return !this->empty() && traits_type::eq(this->front(), c);
+ }
+
+ constexpr bool starts_with(const CharT *s) const noexcept
+ {
+ return this->starts_with(basic_string_view(s));
+ }
+
+ constexpr bool ends_with(const basic_string_view &other) const noexcept
+ {
+ return this->m_size >= other.m_size && this->compare(this->m_size - other.m_size, npos, other) == 0;
+ }
+
+ constexpr bool ends_with(CharT c) const noexcept
+ {
+ return !this->empty() && traits_type::eq(this->back(), c);
+ }
+
+ constexpr bool ends_with(const CharT *s) const noexcept { return this->ends_with(basic_string_view(s)); }
+
+ // find utilities
+ constexpr size_type find(const basic_string_view &s, size_type pos = 0) const noexcept
+ {
+ return this->find(s.m_data, pos, s.m_size);
+ }
+
+ size_type find(CharT c, size_type pos = 0) const noexcept
+ {
+ if (pos >= m_size)
+ {
+ return npos;
+ }
+ const CharT *r = Traits::find(m_data + pos, m_size - pos, c);
+ if (r == nullptr)
+ {
+ return npos;
+ }
+ return static_cast<size_type>(r - m_data);
+ }
+
+ size_type find(const CharT *s, size_type pos, size_type n) const noexcept
+ {
+ if (n && !s)
+ {
+ return npos;
+ }
+
+ if (pos > m_size)
+ {
+ return npos;
+ }
+
+ if (n == 0)
+ {
+ return pos;
+ }
+
+ const CharT *r = _s_search_substr(m_data + pos, m_data + m_size, s, s + n);
+
+ if (r == m_data + m_size)
+ {
+ return npos;
+ }
+ return static_cast<size_type>(r - m_data);
+ }
+
+ constexpr size_type find(const CharT *s, size_type pos = 0) const noexcept
+ {
+ return this->find(s, pos, traits_type::length(s));
+ }
+
+ size_type rfind(basic_string_view s, size_type pos = npos) const noexcept
+ {
+ if (s.m_size && !s.m_data)
+ {
+ return npos;
+ }
+ return this->rfind(s.m_data, pos, s.m_size);
+ }
+
+ size_type rfind(CharT c, size_type pos = npos) const noexcept
+ {
+ if (m_size <= 0)
+ {
+ return npos;
+ }
+
+ if (pos < m_size)
+ {
+ ++pos;
+ }
+ else
+ {
+ pos = m_size;
+ }
+
+ for (const CharT *ptr = m_data + pos; ptr != m_data;)
+ {
+ if (Traits::eq(*--ptr, c))
+ {
+ return static_cast<size_type>(ptr - m_data);
+ }
+ }
+ return npos;
+ }
+
+ size_type rfind(const CharT *s, size_type pos, size_type n) const noexcept
+ {
+ if (n && !s)
+ {
+ return npos;
+ }
+
+ pos = (std::min)(pos, m_size);
+ if (n < m_size - pos)
+ {
+ pos += n;
+ }
+ else
+ {
+ pos = m_size;
+ }
+ const CharT *r = _s_find_end(m_data, m_data + pos, s, s + n);
+ if (n > 0 && r == m_data + pos)
+ {
+ return npos;
+ }
+ return static_cast<size_type>(r - m_data);
+ }
+
+ constexpr size_type rfind(const CharT *s, size_type pos = npos) const noexcept
+ {
+ return this->rfind(s, pos, traits_type::length(s));
+ }
+
+ constexpr size_type find_first_of(basic_string_view s, size_type pos = 0) const noexcept
+ {
+ return this->find_first_of(s.m_data, pos, s.m_size);
+ }
+
+ constexpr size_type find_first_of(CharT c, size_type pos = 0) const noexcept { return this->find(c, pos); }
+
+ size_type find_first_of(const CharT *s, size_type pos, size_type n) const noexcept
+ {
+ if (pos >= m_size || !n || !s)
+ {
+ return npos;
+ }
+
+ const CharT *r = _s_find_first_of_ce(m_data + pos, m_data + m_size, s, s + n);
+
+ if (r == m_data + m_size)
+ {
+ return npos;
+ }
+
+ return static_cast<size_type>(r - m_data);
+ }
+
+ constexpr size_type find_first_of(const CharT *s, size_type pos = 0) const noexcept
+ {
+ return this->find_first_of(s, pos, traits_type::length(s));
+ }
+
+ constexpr size_type find_last_of(basic_string_view s, size_type pos = npos) const noexcept
+ {
+ return this->find_last_of(s.m_data, pos, s.m_size);
+ }
+
+ constexpr size_type find_last_of(CharT c, size_type pos = npos) const noexcept
+ {
+ return this->rfind(c, pos);
+ }
+
+ size_type find_last_of(const CharT *s, size_type pos, size_type n) const noexcept
+ {
+ if (!n || s == nullptr)
+ {
+ return npos;
+ }
+
+ if (pos < m_size)
+ {
+ ++pos;
+ }
+ else
+ {
+ pos = m_size;
+ }
+
+ for (const CharT *ptr = m_data + pos; ptr != m_data;)
+ {
+ const CharT *r = Traits::find(s, n, *--ptr);
+ if (r)
+ {
+ return static_cast<size_type>(ptr - m_data);
+ }
+ }
+
+ return npos;
+ }
+
+ constexpr size_type find_last_of(const CharT *s, size_type pos = npos) const noexcept
+ {
+ return this->find_last_of(s, pos, traits_type::length(s));
+ }
+
+ size_type find_first_not_of(basic_string_view s, size_type pos = 0) const noexcept
+ {
+ if (s.m_size && !s.m_data)
+ {
+ return npos;
+ }
+ return this->find_first_not_of(s.m_data, pos, s.m_size);
+ }
+
+ size_type find_first_not_of(CharT c, size_type pos = 0) const noexcept
+ {
+ if (!m_data || pos >= m_size)
+ {
+ return npos;
+ }
+
+ const CharT *pend = m_data + m_size;
+ for (const CharT *ptr = m_data + pos; ptr != pend; ++ptr)
+ {
+ if (!Traits::eq(*ptr, c))
+ {
+ return static_cast<size_type>(ptr - m_data);
+ }
+ }
+
+ return npos;
+ }
+
+ size_type find_first_not_of(const CharT *s, size_type pos, size_type n) const noexcept
+ {
+ if (n && s == nullptr)
+ {
+ return npos;
+ }
+
+ if (m_data == nullptr || pos >= m_size)
+ {
+ return npos;
+ }
+
+ const CharT *pend = m_data + m_size;
+ for (const CharT *ptr = m_data + pos; ptr != pend; ++ptr)
+ {
+ if (Traits::find(s, n, *ptr) == 0)
+ {
+ return static_cast<size_type>(ptr - m_data);
+ }
+ }
+
+ return npos;
+ }
+
+ constexpr size_type find_first_not_of(const CharT *s, size_type pos = 0) const noexcept
+ {
+ return this->find_first_not_of(s, pos, traits_type::length(s));
+ }
+
+ size_type find_last_not_of(basic_string_view s, size_type pos = npos) const noexcept
+ {
+ if (s.m_size && !s.m_data)
+ {
+ return npos;
+ }
+ return this->find_last_not_of(s.m_data, pos, s.m_size);
+ }
+
+ size_type find_last_not_of(CharT c, size_type pos = npos) const noexcept
+ {
+ if (pos < m_size)
+ {
+ ++pos;
+ }
+ else
+ {
+ pos = m_size;
+ }
+
+ for (const CharT *ptr = m_data + pos; ptr != m_data;)
+ {
+ if (!Traits::eq(*--ptr, c))
+ {
+ return static_cast<size_type>(ptr - m_data);
+ }
+ }
+ return npos;
+ }
+
+ size_type find_last_not_of(const CharT *s, size_type pos, size_type n) const noexcept
+ {
+ if (n && !s)
+ {
+ return npos;
+ }
+
+ if (pos < m_size)
+ {
+ ++pos;
+ }
+ else
+ {
+ pos = m_size;
+ }
+
+ for (const CharT *ptr = m_data + pos; ptr != m_data;)
+ {
+ if (Traits::find(s, n, *--ptr) == 0)
+ {
+ return static_cast<size_type>(ptr - m_data);
+ }
+ }
+ return npos;
+ }
+
+ constexpr size_type find_last_not_of(const CharT *s, size_type pos = npos) const noexcept
+ {
+ return this->find_last_not_of(s, pos, traits_type::length(s));
+ }
+
+ private:
+ static int _s_compare(size_type n1, size_type n2) noexcept
+ {
+ const difference_type diff = n1 - n2;
+
+ if (diff > (std::numeric_limits<int>::max)())
+ {
+ return (std::numeric_limits<int>::max)();
+ }
+
+ if (diff < (std::numeric_limits<int>::min)())
+ {
+ return (std::numeric_limits<int>::min)();
+ }
+
+ return static_cast<int>(diff);
+ }
+
+ static const CharT *_s_search_substr(
+ const CharT *first1,
+ const CharT *last1,
+ const CharT *first2,
+ const CharT *last2)
+ {
+ const ptrdiff_t length2 = last2 - first2;
+ if (length2 == 0)
+ {
+ return first1;
+ }
+
+ ptrdiff_t length1 = last1 - first1;
+ if (length1 < length2)
+ {
+ return last1;
+ }
+
+ while (true)
+ {
+ length1 = last1 - first1;
+ if (length1 < length2)
+ {
+ return last1;
+ }
+
+ first1 = Traits::find(first1, length1 - length2 + 1, *first2);
+ if (first1 == 0)
+ {
+ return last1;
+ }
+
+ if (Traits::compare(first1, first2, length2) == 0)
+ {
+ return first1;
+ }
+
+ ++first1;
+ }
+ }
+
+ static const CharT *_s_find_end(
+ const CharT *first1,
+ const CharT *last1,
+ const CharT *first2,
+ const CharT *last2)
+ {
+ const CharT *r = last1;
+ if (first2 == last2)
+ {
+ return r;
+ }
+
+ while (true)
+ {
+ while (true)
+ {
+ if (first1 == last1)
+ {
+ return r;
+ }
+ if (Traits::eq(*first1, *first2))
+ {
+ break;
+ }
+ ++first1;
+ }
+
+ const CharT *m1 = first1;
+ const CharT *m2 = first2;
+ while (true)
+ {
+ if (++m2 == last2)
+ {
+ r = first1;
+ ++first1;
+ break;
+ }
+ if (++m1 == last1)
+ {
+ return r;
+ }
+ if (!Traits::eq(*m1, *m2))
+ {
+ ++first1;
+ break;
+ }
+ }
+ }
+ }
+
+ static const CharT *_s_find_first_of_ce(
+ const CharT *first1,
+ const CharT *last1,
+ const CharT *first2,
+ const CharT *last2)
+ {
+ for (; first1 != last1; ++first1)
+ {
+ for (const CharT *ptr = first2; ptr != last2; ++ptr)
+ {
+ if (Traits::eq(*first1, *ptr))
+ {
+ return first1;
+ }
+ }
+ }
+ return last1;
+ }
+
+ size_type m_size;
+ const CharT *m_data;
+ };
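+
+ // Usage sketch (illustrative only): a non-owning window over existing character data.
+ //
+ //   basic_string_view<char> view("hello world");
+ //   auto word = view.substr(6);                                     // "world"
+ //   bool found = view.find("world") != basic_string_view<char>::npos;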
+
+ // operator ==
+ template <class CharT, class Traits>
+ bool operator==(
+ const basic_string_view<CharT, Traits> &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return (lhs.size() != rhs.size()) ? false : lhs.compare(rhs) == 0;
+ }
+
+ template <class CharT, class Traits>
+ bool operator==(
+ const basic_string_view<CharT, Traits> &lhs,
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &rhs) noexcept
+ {
+ return (lhs.size() != rhs.size()) ? false : lhs.compare(rhs) == 0;
+ }
+
+ template <class CharT, class Traits>
+ bool operator==(
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return (lhs.size() != rhs.size()) ? false : lhs.compare(rhs) == 0;
+ }
+
+ // operator !=
+ template <class CharT, class Traits>
+ bool operator!=(
+ const basic_string_view<CharT, Traits> &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return (lhs.size() != rhs.size()) ? true : lhs.compare(rhs) != 0;
+ }
+
+ template <class CharT, class Traits>
+ bool operator!=(
+ const basic_string_view<CharT, Traits> &lhs,
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &rhs) noexcept
+ {
+ return (lhs.size() != rhs.size()) ? true : lhs.compare(rhs) != 0;
+ }
+
+ template <class CharT, class Traits>
+ bool operator!=(
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return (lhs.size() != rhs.size()) ? true : lhs.compare(rhs) != 0;
+ }
+
+ // operator <
+ template <class CharT, class Traits>
+ bool operator<(
+ const basic_string_view<CharT, Traits> &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) < 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator<(
+ const basic_string_view<CharT, Traits> &lhs,
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &rhs) noexcept
+ {
+ return lhs.compare(rhs) < 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator<(
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) < 0;
+ }
+
+ // operator >
+ template <class CharT, class Traits>
+ constexpr bool operator>(
+ const basic_string_view<CharT, Traits> &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) > 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator>(
+ const basic_string_view<CharT, Traits> &lhs,
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &rhs) noexcept
+ {
+ return lhs.compare(rhs) > 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator>(
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) > 0;
+ }
+
+ // operator <=
+ template <class CharT, class Traits>
+ constexpr bool operator<=(
+ const basic_string_view<CharT, Traits> &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) <= 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator<=(
+ const basic_string_view<CharT, Traits> &lhs,
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &rhs) noexcept
+ {
+ return lhs.compare(rhs) <= 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator<=(
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) <= 0;
+ }
+
+ // operator >=
+ template <class CharT, class Traits>
+ constexpr bool operator>=(
+ const basic_string_view<CharT, Traits> &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) >= 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator>=(
+ const basic_string_view<CharT, Traits> &lhs,
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &rhs) noexcept
+ {
+ return lhs.compare(rhs) >= 0;
+ }
+
+ template <class CharT, class Traits>
+ constexpr bool operator>=(
+ typename std::common_type<basic_string_view<CharT, Traits>>::type &lhs,
+ const basic_string_view<CharT, Traits> &rhs) noexcept
+ {
+ return lhs.compare(rhs) >= 0;
+ }
+
+ typedef basic_string_view<char> string_view;
+ typedef basic_string_view<char16_t> u16string_view;
+ typedef basic_string_view<char32_t> u32string_view;
+ typedef basic_string_view<wchar_t> wstring_view;
+
+ inline namespace literals
+ {
+ inline namespace string_view_literals
+ {
+ inline basic_string_view<char> operator"" _sv(const char *s, size_t length) noexcept
+ {
+ return basic_string_view<char>(s, length);
+ }
+
+ inline basic_string_view<wchar_t> operator"" _sv(const wchar_t *s, size_t length) noexcept
+ {
+ return basic_string_view<wchar_t>(s, length);
+ }
+
+ inline basic_string_view<char16_t> operator"" _sv(const char16_t *s, size_t length) noexcept
+ {
+ return basic_string_view<char16_t>(s, length);
+ }
+
+ inline basic_string_view<char32_t> operator"" _sv(const char32_t *s, size_t length) noexcept
+ {
+ return basic_string_view<char32_t>(s, length);
+ }
+ } // namespace string_view_literals
+
+ } // namespace literals
+
+ using StringView = string_view;
+ } // namespace Crt
+} // namespace Aws
+
+// hash
+namespace std
+{
+ template <class CharT, class Traits> struct hash<Aws::Crt::basic_string_view<CharT, Traits>>
+ {
+ size_t operator()(const Aws::Crt::basic_string_view<CharT, Traits> &val) const noexcept;
+ };
+
+ template <class CharT, class Traits>
+ size_t hash<Aws::Crt::basic_string_view<CharT, Traits>>::operator()(
+ const Aws::Crt::basic_string_view<CharT, Traits> &val) const noexcept
+ {
+ auto str = std::basic_string<CharT, Traits>(val.data(), val.size());
+ return std::hash<std::basic_string<CharT, Traits>>()(str);
+ }
+} // namespace std
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Types.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Types.h
new file mode 100644
index 0000000000..3972aa7aff
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/Types.h
@@ -0,0 +1,165 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/common.h>
+#include <aws/crt/Exports.h>
+#include <aws/crt/Optional.h>
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/StringView.h>
+#include <aws/io/socket.h>
+#include <aws/mqtt/mqtt.h>
+#include <functional>
+#include <list>
+#include <map>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+struct aws_byte_buf;
+struct aws_byte_cursor;
+struct aws_socket_options;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ using ByteBuf = aws_byte_buf;
+ using ByteCursor = aws_byte_cursor;
+
+ namespace Io
+ {
+ using IStream = std::basic_istream<char, std::char_traits<char>>;
+ } // namespace Io
+
+ namespace Mqtt
+ {
+ using QOS = aws_mqtt_qos;
+ using ReturnCode = aws_mqtt_connect_return_code;
+ } // namespace Mqtt
+
+ template <typename T> class StlAllocator;
+ using String = std::basic_string<char, std::char_traits<char>, StlAllocator<char>>;
+ using StringStream = std::basic_stringstream<char, std::char_traits<char>, StlAllocator<char>>;
+ template <typename K, typename V> using Map = std::map<K, V, std::less<K>, StlAllocator<std::pair<const K, V>>>;
+ template <typename K, typename V>
+ using UnorderedMap =
+ std::unordered_map<K, V, std::hash<K>, std::equal_to<K>, StlAllocator<std::pair<const K, V>>>;
+ template <typename K, typename V>
+ using MultiMap = std::multimap<K, V, std::less<K>, StlAllocator<std::pair<const K, V>>>;
+ template <typename T> using Vector = std::vector<T, StlAllocator<T>>;
+ template <typename T> using List = std::list<T, StlAllocator<T>>;
+
+ AWS_CRT_CPP_API ByteBuf ByteBufFromCString(const char *str) noexcept;
+ AWS_CRT_CPP_API ByteBuf ByteBufFromEmptyArray(const uint8_t *array, size_t len) noexcept;
+ AWS_CRT_CPP_API ByteBuf ByteBufFromArray(const uint8_t *array, size_t capacity) noexcept;
+ AWS_CRT_CPP_API ByteBuf ByteBufNewCopy(Allocator *alloc, const uint8_t *array, size_t len);
+ AWS_CRT_CPP_API void ByteBufDelete(ByteBuf &);
+
+ AWS_CRT_CPP_API ByteCursor ByteCursorFromCString(const char *str) noexcept;
+ AWS_CRT_CPP_API ByteCursor ByteCursorFromString(const Crt::String &str) noexcept;
+ AWS_CRT_CPP_API ByteCursor ByteCursorFromStringView(const Crt::StringView &str) noexcept;
+ AWS_CRT_CPP_API ByteCursor ByteCursorFromByteBuf(const ByteBuf &) noexcept;
+ AWS_CRT_CPP_API ByteCursor ByteCursorFromArray(const uint8_t *array, size_t len) noexcept;
+
+ AWS_CRT_CPP_API Vector<uint8_t> Base64Decode(const String &decode);
+ AWS_CRT_CPP_API String Base64Encode(const Vector<uint8_t> &encode);
+
+ template <typename RawType, typename TargetType> using TypeConvertor = std::function<TargetType(RawType)>;
+
+ /**
+ * Template function to convert an aws_array_list of RawType to a C++ like Vector of TargetType.
+ * A conversion function should be provided to do the type conversion
+ */
+ template <typename RawType, typename TargetType>
+ Vector<TargetType> ArrayListToVector(const aws_array_list *array, TypeConvertor<RawType, TargetType> conv)
+ {
+ Vector<TargetType> v;
+ size_t cnt = aws_array_list_length(array);
+ for (size_t i = 0; i < cnt; i++)
+ {
+ RawType t;
+ aws_array_list_get_at(array, &t, i);
+ v.emplace_back(conv(t));
+ }
+ return v;
+ }
+
+ /**
+ * Template function to convert an aws_array_list of RawType to a C++ like Vector of TargetType.
+ * This template assumes a direct constructor: TargetType(RawType) is available
+ */
+ template <typename RawType, typename TargetType>
+ Vector<TargetType> ArrayListToVector(const aws_array_list *array)
+ {
+ Vector<TargetType> v;
+ size_t cnt = aws_array_list_length(array);
+ for (size_t i = 0; i < cnt; i++)
+ {
+ RawType t;
+ aws_array_list_get_at(array, &t, i);
+ v.emplace_back(TargetType(t));
+ }
+ return v;
+ }
+
+ /**
+ * Template function to convert an aws_array_list of Type to a C++ like Vector of Type.
+ */
+ template <typename Type> Vector<Type> ArrayListToVector(const aws_array_list *array)
+ {
+ Vector<Type> v;
+ size_t cnt = aws_array_list_length(array);
+ for (size_t i = 0; i < cnt; i++)
+ {
+ Type t;
+ aws_array_list_get_at(array, &t, i);
+ v.emplace_back(t);
+ }
+ return v;
+ }
+
+ AWS_CRT_CPP_API inline StringView ByteCursorToStringView(const ByteCursor &bc)
+ {
+ return StringView(reinterpret_cast<char *>(bc.ptr), bc.len);
+ }
+
+ AWS_CRT_CPP_API inline ByteCursor StringViewToByteCursor(const StringView &sv)
+ {
+ ByteCursor bc;
+ bc.ptr = (uint8_t *)(sv.data());
+ bc.len = sv.size();
+ return bc;
+ }
+
+ template <typename T> void Delete(T *t, Allocator *allocator)
+ {
+ t->~T();
+ aws_mem_release(allocator, t);
+ }
+
+ template <typename T, typename... Args> T *New(Allocator *allocator, Args &&...args)
+ {
+ T *t = reinterpret_cast<T *>(aws_mem_acquire(allocator, sizeof(T)));
+ if (!t)
+ return nullptr;
+ return new (t) T(std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args> std::shared_ptr<T> MakeShared(Allocator *allocator, Args &&...args)
+ {
+ T *t = reinterpret_cast<T *>(aws_mem_acquire(allocator, sizeof(T)));
+ if (!t)
+ return nullptr;
+ new (t) T(std::forward<Args>(args)...);
+
+ return std::shared_ptr<T>(t, [allocator](T *obj) { Delete(obj, allocator); });
+ }
+
+ template <typename T> using ScopedResource = std::unique_ptr<T, std::function<void(T *)>>;
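+
+ // Usage sketch (illustrative only; `MyType` is a placeholder type and ApiAllocator()
+ // comes from aws/crt/Allocator.h, included via StlAllocator.h):
+ //
+ //   Allocator *alloc = ApiAllocator();
+ //   MyType *raw = New<MyType>(alloc /*, constructor args */);
+ //   Delete(raw, alloc);
+ //   auto shared = MakeShared<MyType>(alloc);   // released back through Delete()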
+
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/UUID.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/UUID.h
new file mode 100644
index 0000000000..5ca53221cc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/UUID.h
@@ -0,0 +1,42 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/Types.h>
+
+#include <aws/common/uuid.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ /**
+ * Utility class for creating UUIDs and serializing them to a string
+ */
+ class AWS_CRT_CPP_API UUID final
+ {
+ public:
+ UUID() noexcept;
+ UUID(const String &str) noexcept;
+
+ UUID &operator=(const String &str) noexcept;
+
+ bool operator==(const UUID &other) noexcept;
+ bool operator!=(const UUID &other) noexcept;
+ operator String() const;
+ operator ByteBuf() const noexcept;
+
+ inline operator bool() const noexcept { return m_good; }
+
+ int GetLastError() const noexcept;
+
+ String ToString() const;
+
+ private:
+ aws_uuid m_uuid;
+ bool m_good;
+ };
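+
+ // Usage sketch (illustrative only):
+ //
+ //   UUID id;
+ //   if (id)                         // validity check via operator bool
+ //   {
+ //       String text = id.ToString();
+ //   }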
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Credentials.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Credentials.h
new file mode 100644
index 0000000000..48181d1ce0
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Credentials.h
@@ -0,0 +1,585 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/crt/io/TlsOptions.h>
+
+#include <chrono>
+#include <functional>
+
+struct aws_credentials;
+struct aws_credentials_provider;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ class ClientBootstrap;
+ }
+
+ namespace Http
+ {
+ class HttpClientConnectionProxyOptions;
+ }
+
+ namespace Auth
+ {
+ /**
+ * A class to hold the basic components necessary for various AWS authentication protocols.
+ */
+ class AWS_CRT_CPP_API Credentials
+ {
+ public:
+ Credentials(const aws_credentials *credentials) noexcept;
+ Credentials(
+ ByteCursor access_key_id,
+ ByteCursor secret_access_key,
+ ByteCursor session_token,
+ uint64_t expiration_timepoint_in_seconds,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Creates new anonymous Credentials.
+ * Use anonymous Credentials when you want to skip signing.
+ * @param allocator memory allocator to use
+ */
+ Credentials(Allocator *allocator = ApiAllocator()) noexcept;
+
+ ~Credentials();
+
+ Credentials(const Credentials &) = delete;
+ Credentials(Credentials &&) = delete;
+ Credentials &operator=(const Credentials &) = delete;
+ Credentials &operator=(Credentials &&) = delete;
+
+ /**
+ * Gets the value of the access key component of aws credentials
+ */
+ ByteCursor GetAccessKeyId() const noexcept;
+
+ /**
+ * Gets the value of the secret access key component of aws credentials
+ */
+ ByteCursor GetSecretAccessKey() const noexcept;
+
+ /**
+ * Gets the value of the session token of aws credentials
+ */
+ ByteCursor GetSessionToken() const noexcept;
+
+ /**
+ * Gets the expiration timestamp for the credentials, or UINT64_MAX if no expiration
+ */
+ uint64_t GetExpirationTimepointInSeconds() const noexcept;
+
+ /**
+ * Validity check - returns true if the instance is valid, false otherwise
+ */
+ explicit operator bool() const noexcept;
+
+ /**
+ * Returns the underlying credentials implementation.
+ */
+ const aws_credentials *GetUnderlyingHandle() const noexcept { return m_credentials; }
+
+ private:
+ const aws_credentials *m_credentials;
+ };
+
+ /**
+ * Callback invoked by credentials providers when resolution succeeds (credentials will be non-null)
+ * or fails (credentials will be null)
+ */
+ using OnCredentialsResolved = std::function<void(std::shared_ptr<Credentials>, int errorCode)>;
+
+ /**
+ * Invoked when the native delegate credentials provider needs to fetch a credential.
+ */
+ using GetCredentialsHandler = std::function<std::shared_ptr<Credentials>()>;
+
+ /**
+ * Base interface for all credentials providers. Credentials providers are objects that
+ * retrieve AWS credentials from some source.
+ */
+ class AWS_CRT_CPP_API ICredentialsProvider : public std::enable_shared_from_this<ICredentialsProvider>
+ {
+ public:
+ virtual ~ICredentialsProvider() = default;
+
+ /**
+ * Asynchronous method to query for AWS credentials based on the internal provider implementation.
+ */
+ virtual bool GetCredentials(const OnCredentialsResolved &onCredentialsResolved) const = 0;
+
+ /**
+ * Returns the underlying credentials provider implementation. Support for credentials providers
+ * not based on a C implementation is theoretically possible, but requires some re-implementation to
+ * support provider chains and caching (whose implementations rely on links to C implementation
+ * providers)
+ */
+ virtual aws_credentials_provider *GetUnderlyingHandle() const noexcept = 0;
+
+ /**
+ * Validity check method
+ */
+ virtual bool IsValid() const noexcept = 0;
+ };
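+
+ // Usage sketch (illustrative only; `provider` stands for any
+ // std::shared_ptr<ICredentialsProvider> obtained from a factory declared below):
+ //
+ //   provider->GetCredentials(
+ //       [](std::shared_ptr<Credentials> credentials, int errorCode)
+ //       {
+ //           if (credentials) { auto keyId = credentials->GetAccessKeyId(); }
+ //       });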
+
+ /**
+ * Configuration options for the static credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderStaticConfig
+ {
+ CredentialsProviderStaticConfig()
+ {
+ AWS_ZERO_STRUCT(AccessKeyId);
+ AWS_ZERO_STRUCT(SecretAccessKey);
+ AWS_ZERO_STRUCT(SessionToken);
+ }
+
+ /**
+ * The value of the access key component for the provider's static aws credentials
+ */
+ ByteCursor AccessKeyId;
+
+ /**
+ * The value of the secret access key component for the provider's static aws credentials
+ */
+ ByteCursor SecretAccessKey;
+
+ /**
+ * The value of the session token for the provider's static aws credentials
+ */
+ ByteCursor SessionToken;
+ };
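+
+ // Usage sketch (illustrative only; the key material literals are placeholders):
+ //
+ //   CredentialsProviderStaticConfig config;
+ //   config.AccessKeyId = ByteCursorFromCString("AKIDEXAMPLE");
+ //   config.SecretAccessKey = ByteCursorFromCString("secretExample");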
+
+ /**
+ * Configuration options for the profile credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderProfileConfig
+ {
+ CredentialsProviderProfileConfig() : Bootstrap(nullptr), TlsContext(nullptr)
+ {
+ AWS_ZERO_STRUCT(ProfileNameOverride);
+ AWS_ZERO_STRUCT(ConfigFileNameOverride);
+ AWS_ZERO_STRUCT(CredentialsFileNameOverride);
+ }
+
+ /**
+ * Override profile name to use (instead of default) when the provider sources credentials
+ */
+ ByteCursor ProfileNameOverride;
+
+ /**
+ * Override file path (instead of '~/.aws/config') for the aws config file to use during
+ * credential sourcing
+ */
+ ByteCursor ConfigFileNameOverride;
+
+ /**
+ * Override file path (instead of '~/.aws/credentials') for the aws credentials file to use during
+ * credential sourcing
+ */
+ ByteCursor CredentialsFileNameOverride;
+
+ /**
+ * Connection bootstrap to use for any network connections made while sourcing credentials.
+ * (for example, a profile that uses assume-role will need to query STS).
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /**
+ * Client TLS context to use for any secure network connections made while sourcing credentials
+ * (for example, a profile that uses assume-role will need to query STS).
+ *
+ * If a TLS context is needed, and you did not pass one in, it will be created automatically.
+ * However, you are encouraged to pass in a shared one since these are expensive objects.
+ * If using BYO_CRYPTO, you must provide the TLS context since it cannot be created automatically.
+ */
+ Io::TlsContext *TlsContext;
+ };
+
+ /**
+ * Configuration options for the Ec2 instance metadata service credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderImdsConfig
+ {
+ CredentialsProviderImdsConfig() : Bootstrap(nullptr) {}
+
+ /**
+ * Connection bootstrap to use to create the http connection required to
+ * query credentials from the Ec2 instance metadata service
+ *
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+ };
+
+ /**
+ * Configuration options for a chain-of-responsibility-based credentials provider.
+ * This provider works by traversing the chain and returning the first positive
+ * result.
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderChainConfig
+ {
+ CredentialsProviderChainConfig() : Providers() {}
+
+ /**
+ * The sequence of providers that make up the chain.
+ */
+ Vector<std::shared_ptr<ICredentialsProvider>> Providers;
+ };
+
+ /**
+ * Configuration options for a provider that caches the results of another provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderCachedConfig
+ {
+ CredentialsProviderCachedConfig() : Provider(), CachedCredentialTTL() {}
+
+ /**
+ * The provider to cache credentials from
+ */
+ std::shared_ptr<ICredentialsProvider> Provider;
+
+ /**
+ * How long a cached credential set will be used for
+ */
+ std::chrono::milliseconds CachedCredentialTTL;
+ };
+
+ /**
+ * Configuration options for a provider that implements a cached provider chain
+ * based on the AWS SDK defaults:
+ *
+ * Cache-Of(Environment -> Profile -> IMDS)
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderChainDefaultConfig
+ {
+ CredentialsProviderChainDefaultConfig() : Bootstrap(nullptr), TlsContext(nullptr) {}
+
+ /**
+ * Connection bootstrap to use for any network connections made while sourcing credentials.
+ *
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /**
+ * Client TLS context to use for any secure network connections made while sourcing credentials.
+ *
+ * If not provided the default chain will construct a new one, but these
+ * are expensive objects so you are encouraged to pass in a shared one.
+ * Must be provided if using BYO_CRYPTO.
+ */
+ Io::TlsContext *TlsContext;
+ };
+
+ /**
+ * Configuration options for the X509 credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderX509Config
+ {
+ CredentialsProviderX509Config()
+ : Bootstrap(nullptr), TlsOptions(), ThingName(), RoleAlias(), Endpoint(), ProxyOptions()
+ {
+ }
+
+ /**
+ * Connection bootstrap to use to create the http connection required to
+ * query credentials from the x509 provider
+ *
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /* TLS connection options that have been initialized with your x509 certificate and private key */
+ Io::TlsConnectionOptions TlsOptions;
+
+ /* IoT thing name you registered with AWS IoT for your device; it will be used in the http request header */
+ String ThingName;
+
+ /* IoT role alias you created with AWS IoT for your IAM role; it will be used in the http request path */
+ String RoleAlias;
+
+ /**
+ * AWS account-specific endpoint that can be acquired using the AWS CLI by following the instructions from the demo.
+ * Example: c2sakl5huz0afv.credentials.iot.us-east-1.amazonaws.com
+ *
+ * This is a different endpoint from the IoT data MQTT broker endpoint.
+ */
+ String Endpoint;
+
+ /**
+ * (Optional) Http proxy configuration for the http request that fetches credentials
+ */
+ Optional<Http::HttpClientConnectionProxyOptions> ProxyOptions;
+ };
+
+ /**
+ * Configuration options for the delegate credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderDelegateConfig
+ {
+ /* Handler to provide credentials */
+ GetCredentialsHandler Handler;
+ };
+
+ /**
+ * A pair defining an identity provider and a valid login token sourced from it.
+ */
+ struct AWS_CRT_CPP_API CognitoLoginPair
+ {
+
+ /**
+ * Name of an identity provider
+ */
+ String IdentityProviderName;
+
+ /**
+ * Valid login token sourced from the identity provider
+ */
+ String IdentityProviderToken;
+ };
+
+ /**
+ * Configuration options for the Cognito credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderCognitoConfig
+ {
+ CredentialsProviderCognitoConfig();
+
+ /**
+ * Cognito service regional endpoint to source credentials from.
+ */
+ String Endpoint;
+
+ /**
+ * Cognito identity to fetch credentials relative to.
+ */
+ String Identity;
+
+ /**
+ * Optional set of identity provider token pairs to allow for authenticated identity access.
+ */
+ Optional<Vector<CognitoLoginPair>> Logins;
+
+ /**
+ * Optional ARN of the role to be assumed when multiple roles were received in the token from the
+ * identity provider.
+ */
+ Optional<String> CustomRoleArn;
+
+ /**
+ * Connection bootstrap to use to create the http connection required to
+ * query credentials from the cognito provider
+ *
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /**
+ * TLS configuration for secure socket connections.
+ */
+ Io::TlsContext TlsCtx;
+
+ /**
+ * (Optional) Http proxy configuration for the http request that fetches credentials
+ */
+ Optional<Http::HttpClientConnectionProxyOptions> ProxyOptions;
+ };
+
+ /**
+ * Configuration options for the STS credentials provider
+ */
+ struct AWS_CRT_CPP_API CredentialsProviderSTSConfig
+ {
+ CredentialsProviderSTSConfig();
+
+ /**
+ * Credentials provider to be used to sign the requests made to STS to fetch credentials.
+ */
+ std::shared_ptr<ICredentialsProvider> Provider;
+
+ /**
+ * ARN of the role to assume when fetching credentials
+ */
+ String RoleArn;
+
+ /**
+ * Assumed role session identifier to be associated with the sourced credentials
+ */
+ String SessionName;
+
+ /**
+ * How long sourced credentials should remain valid for, in seconds. 900 is the minimum allowed value.
+ */
+ uint16_t DurationSeconds;
+
+ /**
+ * Connection bootstrap to use to create the http connection required to
+ * query credentials from the STS provider
+ *
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /**
+ * TLS configuration for secure socket connections.
+ */
+ Io::TlsContext TlsCtx;
+
+ /**
+ * (Optional) Http proxy configuration for the http request that fetches credentials
+ */
+ Optional<Http::HttpClientConnectionProxyOptions> ProxyOptions;
+ };
+
+ /**
+ * Simple credentials provider implementation that wraps one of the internal C-based implementations.
+ *
+ * Contains a set of static factory methods for building each supported provider, as well as one for the
+ * default provider chain.
+ */
+ class AWS_CRT_CPP_API CredentialsProvider : public ICredentialsProvider
+ {
+ public:
+ CredentialsProvider(aws_credentials_provider *provider, Allocator *allocator = ApiAllocator()) noexcept;
+
+ virtual ~CredentialsProvider();
+
+ CredentialsProvider(const CredentialsProvider &) = delete;
+ CredentialsProvider(CredentialsProvider &&) = delete;
+ CredentialsProvider &operator=(const CredentialsProvider &) = delete;
+ CredentialsProvider &operator=(CredentialsProvider &&) = delete;
+
+ /**
+ * Asynchronous method to query for AWS credentials based on the internal provider implementation.
+ */
+ virtual bool GetCredentials(const OnCredentialsResolved &onCredentialsResolved) const override;
+
+ /**
+ * Returns the underlying credentials provider implementation.
+ */
+ virtual aws_credentials_provider *GetUnderlyingHandle() const noexcept override { return m_provider; }
+
+ /**
+ * Validity check method
+ */
+ virtual bool IsValid() const noexcept override { return m_provider != nullptr; }
+
+ /*
+ * Factory methods for all of the basic credentials provider types
+ */
+
+ /**
+ * Creates a provider that returns a fixed set of credentials
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderStatic(
+ const CredentialsProviderStaticConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+             * Creates an anonymous provider that has anonymous credentials.
+             * Use anonymous credentials when you want to skip signing.
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderAnonymous(
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that returns credentials sourced from environment variables
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderEnvironment(
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that returns credentials sourced from config files
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderProfile(
+ const CredentialsProviderProfileConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+             * Creates a provider that returns credentials sourced from the EC2 instance metadata service
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderImds(
+ const CredentialsProviderImdsConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that sources credentials by querying a series of providers and
+ * returning the first valid credential set encountered
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderChain(
+ const CredentialsProviderChainConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /*
+ * Creates a provider that puts a simple time-based cache in front of its queries
+ * to a subordinate provider.
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderCached(
+ const CredentialsProviderCachedConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates the SDK-standard default credentials provider which is a cache-fronted chain of:
+ *
+ * Environment -> Profile -> IMDS/ECS
+ *
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderChainDefault(
+ const CredentialsProviderChainDefaultConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that sources credentials from the IoT X509 provider service
+ *
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderX509(
+ const CredentialsProviderX509Config &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that sources credentials from the provided function.
+ *
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderDelegate(
+ const CredentialsProviderDelegateConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that sources credentials from the Cognito Identity service
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderCognito(
+ const CredentialsProviderCognitoConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a provider that sources credentials from STS
+ */
+ static std::shared_ptr<ICredentialsProvider> CreateCredentialsProviderSTS(
+ const CredentialsProviderSTSConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ private:
+ static void s_onCredentialsResolved(aws_credentials *credentials, int error_code, void *user_data);
+
+ Allocator *m_allocator;
+ aws_credentials_provider *m_provider;
+ };
+ } // namespace Auth
+ } // namespace Crt
+} // namespace Aws
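
A minimal usage sketch for the CredentialsProvider factories above, assuming that a default-constructed CredentialsProviderChainDefaultConfig falls back to the static default ClientBootstrap, that OnCredentialsResolved is a std::function taking the resolved Credentials and an aws error code, and that <aws/crt/Api.h> provides ApiHandle for CRT initialization:

#include <aws/crt/Api.h>
#include <aws/crt/auth/Credentials.h>

#include <condition_variable>
#include <mutex>

int main()
{
    Aws::Crt::ApiHandle apiHandle; // initializes the CRT libraries

    // Default chain: Environment -> Profile -> IMDS/ECS, cache-fronted (see the comment above).
    Aws::Crt::Auth::CredentialsProviderChainDefaultConfig config;
    auto provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config);
    if (!provider)
    {
        return 1;
    }

    std::mutex lock;
    std::condition_variable signal;
    bool done = false;
    int resolvedError = 0;

    // GetCredentials is asynchronous; the callback fires on an event-loop thread.
    provider->GetCredentials(
        [&](std::shared_ptr<Aws::Crt::Auth::Credentials> credentials, int errorCode) {
            std::lock_guard<std::mutex> guard(lock);
            resolvedError = errorCode; // credentials is null when errorCode != AWS_ERROR_SUCCESS
            (void)credentials;
            done = true;
            signal.notify_one();
        });

    std::unique_lock<std::mutex> waiter(lock);
    signal.wait(waiter, [&] { return done; });
    return resolvedError == 0 ? 0 : 1;
}
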
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Signing.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Signing.h
new file mode 100644
index 0000000000..5723f00e2b
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Signing.h
@@ -0,0 +1,99 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+
+#include <aws/auth/signing_config.h>
+
+#include <functional>
+#include <memory>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+ class HttpRequest;
+ }
+
+ namespace Auth
+ {
+ /**
+ * RTTI indicator for signing configuration. We currently only support a single type (AWS), but
+ * we could expand to others in the future if needed.
+ */
+ enum class SigningConfigType
+ {
+ Aws = AWS_SIGNING_CONFIG_AWS
+ };
+
+ /**
+             * HTTP signing callback. The second parameter is an aws error code; the signing was successful
+ * iff the error code is AWS_ERROR_SUCCESS.
+ */
+ using OnHttpRequestSigningComplete =
+ std::function<void(const std::shared_ptr<Aws::Crt::Http::HttpRequest> &, int)>;
+
+ /**
+ * Base class for all different signing configurations. Type functions as a
+ * primitive RTTI for downcasting.
+ */
+ class AWS_CRT_CPP_API ISigningConfig
+ {
+ public:
+ ISigningConfig() = default;
+ ISigningConfig(const ISigningConfig &) = delete;
+ ISigningConfig(ISigningConfig &&) = delete;
+ ISigningConfig &operator=(const ISigningConfig &) = delete;
+ ISigningConfig &operator=(ISigningConfig &&) = delete;
+
+ virtual ~ISigningConfig() = default;
+
+ /**
+ * RTTI query for the SigningConfig hierarchy
+ * @return the type of signing configuration
+ */
+ virtual SigningConfigType GetType(void) const = 0;
+ };
+
+ /**
+ * Abstract base for all http request signers. Asynchronous interface. Intended to
+ * be a tight wrapper around aws-c-* signer implementations.
+ */
+ class AWS_CRT_CPP_API IHttpRequestSigner
+ {
+ public:
+ IHttpRequestSigner() = default;
+ IHttpRequestSigner(const IHttpRequestSigner &) = delete;
+ IHttpRequestSigner(IHttpRequestSigner &&) = delete;
+ IHttpRequestSigner &operator=(const IHttpRequestSigner &) = delete;
+ IHttpRequestSigner &operator=(IHttpRequestSigner &&) = delete;
+
+ virtual ~IHttpRequestSigner() = default;
+
+ /**
+ * Signs an http request based on the signing implementation and supplied configuration
+ * @param request http request to sign
+ * @param config base signing configuration. Actual type should match the configuration expected
+ * by the signer implementation
+ * @param completionCallback completion function to invoke when signing has completed or failed
+ * @return true if the signing process was kicked off, false if there was a synchronous failure.
+ */
+ virtual bool SignRequest(
+ const std::shared_ptr<Aws::Crt::Http::HttpRequest> &request,
+ const ISigningConfig &config,
+ const OnHttpRequestSigningComplete &completionCallback) = 0;
+
+ /**
+ * @return Whether or not the signer is in a valid state
+ */
+ virtual bool IsValid() const = 0;
+ };
+
+ } // namespace Auth
+ } // namespace Crt
+} // namespace Aws
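
A minimal sketch of implementing the IHttpRequestSigner interface above: a pass-through signer that reports success without modifying the request. Only declarations from this header are used; 0 is assumed to be AWS_ERROR_SUCCESS.

#include <aws/crt/auth/Signing.h>

class NoOpRequestSigner final : public Aws::Crt::Auth::IHttpRequestSigner
{
  public:
    bool SignRequest(
        const std::shared_ptr<Aws::Crt::Http::HttpRequest> &request,
        const Aws::Crt::Auth::ISigningConfig &config,
        const Aws::Crt::Auth::OnHttpRequestSigningComplete &completionCallback) override
    {
        (void)config;
        // A real signer would add auth headers or query params here before completing.
        completionCallback(request, 0 /* AWS_ERROR_SUCCESS */);
        return true; // signing was "kicked off" (and, here, completed synchronously)
    }

    bool IsValid() const override { return true; }
};
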
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Sigv4Signing.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Sigv4Signing.h
new file mode 100644
index 0000000000..e902b1363f
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/auth/Sigv4Signing.h
@@ -0,0 +1,352 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+
+#include <aws/crt/DateTime.h>
+#include <aws/crt/Types.h>
+#include <aws/crt/auth/Signing.h>
+
+struct aws_signing_config_aws;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Auth
+ {
+ class Credentials;
+ class ICredentialsProvider;
+
+ /**
+ * Enumeration indicating what version of the AWS signing process we should use.
+ */
+ enum class SigningAlgorithm
+ {
+ /**
+ * Standard AWS Sigv4 signing using a symmetric secret, per
+ * https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html
+ */
+ SigV4 = AWS_SIGNING_ALGORITHM_V4,
+
+ /**
+ * A variant of AWS Sigv4 signing that uses ecdsa signatures based on an ECC key, rather than relying on
+ * a shared secret.
+ */
+ SigV4A = AWS_SIGNING_ALGORITHM_V4_ASYMMETRIC,
+ };
+
+ /**
+ * What kind of AWS signature should be computed?
+ */
+ enum class SignatureType
+ {
+ /**
+ * A signature for a full http request should be computed, with header updates applied to the signing
+ * result.
+ */
+ HttpRequestViaHeaders = AWS_ST_HTTP_REQUEST_HEADERS,
+
+ /**
+ * A signature for a full http request should be computed, with query param updates applied to the
+ * signing result.
+ */
+ HttpRequestViaQueryParams = AWS_ST_HTTP_REQUEST_QUERY_PARAMS,
+
+ /**
+ * Compute a signature for a payload chunk.
+ */
+ HttpRequestChunk = AWS_ST_HTTP_REQUEST_CHUNK,
+
+ /**
+ * Compute a signature for an event stream event.
+ *
+ * This option is not yet supported.
+ */
+ HttpRequestEvent = AWS_ST_HTTP_REQUEST_EVENT,
+ };
+
+ /**
+ * A collection of signed body constants. Some are specific to certain
+ * signature types, while others are just there to save time (empty sha, for example).
+ */
+ namespace SignedBodyValue
+ {
+ /**
+ * The SHA-256 of an empty string:
+ * 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+ * For use with `Aws::Crt::Auth::AwsSigningConfig.SetSignedBodyValue()`.
+ */
+ AWS_CRT_CPP_API const char *EmptySha256Str();
+
+ /**
+ * 'UNSIGNED-PAYLOAD'
+ * For use with `Aws::Crt::Auth::AwsSigningConfig.SetSignedBodyValue()`.
+ */
+ AWS_CRT_CPP_API const char *UnsignedPayloadStr();
+
+ /**
+ * 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
+ * For use with `Aws::Crt::Auth::AwsSigningConfig.SetSignedBodyValue()`.
+ */
+ AWS_CRT_CPP_API const char *StreamingAws4HmacSha256PayloadStr();
+ /**
+ * 'STREAMING-AWS4-HMAC-SHA256-EVENTS'
+ * For use with `Aws::Crt::Auth::AwsSigningConfig.SetSignedBodyValue()`.
+ */
+ AWS_CRT_CPP_API const char *StreamingAws4HmacSha256EventsStr();
+
+ /** @deprecated to avoid issues with /DELAYLOAD on Windows. */
+ AWS_CRT_CPP_API extern const char *UnsignedPayload;
+ /** @deprecated to avoid issues with /DELAYLOAD on Windows. */
+ AWS_CRT_CPP_API extern const char *EmptySha256;
+ /** @deprecated to avoid issues with /DELAYLOAD on Windows. */
+ AWS_CRT_CPP_API extern const char *StreamingAws4HmacSha256Payload;
+ /** @deprecated to avoid issues with /DELAYLOAD on Windows. */
+ AWS_CRT_CPP_API extern const char *StreamingAws4HmacSha256Events;
+ } // namespace SignedBodyValue
+
+ /**
+ * Controls if signing adds a header containing the canonical request's body value
+ */
+ enum class SignedBodyHeaderType
+ {
+ /**
+ * Do not add a header
+ */
+ None = AWS_SBHT_NONE,
+
+ /**
+ * Add the "x-amz-content-sha256" header with the canonical request's body value
+ */
+ XAmzContentSha256 = AWS_SBHT_X_AMZ_CONTENT_SHA256,
+ };
+
+ using ShouldSignHeaderCb = bool (*)(const Crt::ByteCursor *, void *);
+
+ /**
+ * Wrapper around the configuration structure specific to the AWS
+ * Sigv4 signing process
+ */
+ class AWS_CRT_CPP_API AwsSigningConfig : public ISigningConfig
+ {
+ public:
+ AwsSigningConfig(Allocator *allocator = ApiAllocator());
+ virtual ~AwsSigningConfig();
+
+ virtual SigningConfigType GetType() const noexcept override { return SigningConfigType::Aws; }
+
+ /**
+ * @return the signing process we want to invoke
+ */
+ SigningAlgorithm GetSigningAlgorithm() const noexcept;
+
+ /**
+ * Sets the signing process we want to invoke
+ */
+ void SetSigningAlgorithm(SigningAlgorithm algorithm) noexcept;
+
+ /**
+ * @return the type of signature we want to calculate
+ */
+ SignatureType GetSignatureType() const noexcept;
+
+ /**
+ * Sets the type of signature we want to calculate
+ */
+ void SetSignatureType(SignatureType signatureType) noexcept;
+
+ /**
+ * @return the AWS region to sign against
+ */
+ const Crt::String &GetRegion() const noexcept;
+
+ /**
+ * Sets the AWS region to sign against
+ */
+ void SetRegion(const Crt::String &region) noexcept;
+
+ /**
+ * @return the (signing) name of the AWS service to sign a request for
+ */
+ const Crt::String &GetService() const noexcept;
+
+ /**
+ * Sets the (signing) name of the AWS service to sign a request for
+ */
+ void SetService(const Crt::String &service) noexcept;
+
+ /**
+ * @return the timestamp to use during the signing process.
+ */
+ DateTime GetSigningTimepoint() const noexcept;
+
+ /**
+ * Sets the timestamp to use during the signing process.
+ */
+ void SetSigningTimepoint(const DateTime &date) noexcept;
+
+ /*
+ * We assume the uri will be encoded once in preparation for transmission. Certain services
+ * do not decode before checking signature, requiring us to actually double-encode the uri in the
+ * canonical request in order to pass a signature check.
+ */
+
+ /**
+ * @return whether or not the signing process should perform a uri encode step before creating the
+ * canonical request.
+ */
+ bool GetUseDoubleUriEncode() const noexcept;
+
+ /**
+ * Sets whether or not the signing process should perform a uri encode step before creating the
+ * canonical request.
+ */
+ void SetUseDoubleUriEncode(bool useDoubleUriEncode) noexcept;
+
+ /**
+ * @return whether or not the uri paths should be normalized when building the canonical request
+ */
+ bool GetShouldNormalizeUriPath() const noexcept;
+
+ /**
+ * Sets whether or not the uri paths should be normalized when building the canonical request
+ */
+ void SetShouldNormalizeUriPath(bool shouldNormalizeUriPath) noexcept;
+
+ /**
+ * @return whether or not to omit the session token during signing. Only set to true when performing
+ * a websocket handshake with IoT Core.
+ */
+ bool GetOmitSessionToken() const noexcept;
+
+ /**
+ * Sets whether or not to omit the session token during signing. Only set to true when performing
+ * a websocket handshake with IoT Core.
+ */
+ void SetOmitSessionToken(bool omitSessionToken) noexcept;
+
+ /**
+ * @return the ShouldSignHeadersCb from the underlying config.
+ */
+ ShouldSignHeaderCb GetShouldSignHeaderCallback() const noexcept;
+
+ /**
+ * Sets a callback invoked during the signing process for white-listing headers that can be signed.
+ * If you do not set this, all headers will be signed.
+ */
+ void SetShouldSignHeaderCallback(ShouldSignHeaderCb shouldSignHeaderCb) noexcept;
+
+ /**
+ * @return the should_sign_header_ud from the underlying config.
+ */
+ void *GetShouldSignHeaderUserData() const noexcept;
+
+ /**
+             * Sets the userData that will be passed back to the ShouldSignHeaderCb callback function.
+ */
+ void SetShouldSignHeaderUserData(void *userData) noexcept;
+
+ /**
+ * @return the string used as the canonical request's body value.
+             * If the string is empty, a value will be calculated from the payload during signing.
+ */
+ const Crt::String &GetSignedBodyValue() const noexcept;
+
+ /**
+ * Sets the string to use as the canonical request's body value.
+ * If an empty string is set (the default), a value will be calculated from the payload during signing.
+ * Typically, this is the SHA-256 of the (request/chunk/event) payload, written as lowercase hex.
+ * If this has been precalculated, it can be set here.
+ * Special values used by certain services can also be set (see Aws::Crt::Auth::SignedBodyValue).
+ */
+ void SetSignedBodyValue(const Crt::String &signedBodyValue) noexcept;
+
+ /**
+ * @return the name of the header to add that stores the signed body value
+ */
+ SignedBodyHeaderType GetSignedBodyHeader() const noexcept;
+
+ /**
+ * Sets the name of the header to add that stores the signed body value
+ */
+ void SetSignedBodyHeader(SignedBodyHeaderType signedBodyHeader) noexcept;
+
+ /**
+ * @return (Query param signing only) Gets the amount of time, in seconds, the (pre)signed URI will be
+ * good for
+ */
+ uint64_t GetExpirationInSeconds() const noexcept;
+
+ /**
+ * (Query param signing only) Sets the amount of time, in seconds, the (pre)signed URI will be good for
+ */
+ void SetExpirationInSeconds(uint64_t expirationInSeconds) noexcept;
+
+ /*
+ * For Sigv4 signing, either the credentials provider or the credentials must be set.
+ * Credentials, if set, takes precedence over the provider.
+ */
+
+ /**
+ * @return the credentials provider to use for signing.
+ */
+ const std::shared_ptr<ICredentialsProvider> &GetCredentialsProvider() const noexcept;
+
+ /**
+ * Set the credentials provider to use for signing.
+ */
+ void SetCredentialsProvider(const std::shared_ptr<ICredentialsProvider> &credsProvider) noexcept;
+
+ /**
+ * @return the credentials to use for signing.
+ */
+ const std::shared_ptr<Credentials> &GetCredentials() const noexcept;
+
+ /**
+ * Set the credentials to use for signing.
+ */
+ void SetCredentials(const std::shared_ptr<Credentials> &credentials) noexcept;
+
+ /// @private
+ const struct aws_signing_config_aws *GetUnderlyingHandle() const noexcept;
+
+ private:
+ Allocator *m_allocator;
+ std::shared_ptr<ICredentialsProvider> m_credentialsProvider;
+ std::shared_ptr<Credentials> m_credentials;
+ struct aws_signing_config_aws m_config;
+ Crt::String m_signingRegion;
+ Crt::String m_serviceName;
+ Crt::String m_signedBodyValue;
+ };
+
+ /**
+         * Http request signer that performs Aws Sigv4 signing. Expects the signing configuration to be an
+         * instance of AwsSigningConfig.
+ */
+ class AWS_CRT_CPP_API Sigv4HttpRequestSigner : public IHttpRequestSigner
+ {
+ public:
+ Sigv4HttpRequestSigner(Allocator *allocator = ApiAllocator());
+ virtual ~Sigv4HttpRequestSigner() = default;
+
+ bool IsValid() const override { return true; }
+
+ /**
+ * Signs an http request with AWS-auth sigv4. OnCompletionCallback will be invoked upon completion.
+ */
+ virtual bool SignRequest(
+ const std::shared_ptr<Aws::Crt::Http::HttpRequest> &request,
+ const ISigningConfig &config,
+ const OnHttpRequestSigningComplete &completionCallback) override;
+
+ private:
+ Allocator *m_allocator;
+ };
+ } // namespace Auth
+ } // namespace Crt
+} // namespace Aws
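
A sketch of signing a request with the declarations above. The HttpRequest setters (SetMethod/SetPath), the <aws/crt/http/HttpRequestResponse.h> include path, and DateTime::Now() are assumptions not shown in this header; the AwsSigningConfig and Sigv4HttpRequestSigner calls follow the declarations above.

#include <aws/crt/Types.h>
#include <aws/crt/auth/Credentials.h>
#include <aws/crt/auth/Sigv4Signing.h>
#include <aws/crt/http/HttpRequestResponse.h> // assumed location of HttpRequest

void SignExampleRequest(const std::shared_ptr<Aws::Crt::Auth::ICredentialsProvider> &provider)
{
    auto request = std::make_shared<Aws::Crt::Http::HttpRequest>();
    request->SetMethod(Aws::Crt::ByteCursorFromCString("GET")); // assumed setter
    request->SetPath(Aws::Crt::ByteCursorFromCString("/"));     // assumed setter

    Aws::Crt::Auth::AwsSigningConfig config;
    config.SetSigningAlgorithm(Aws::Crt::Auth::SigningAlgorithm::SigV4);
    config.SetSignatureType(Aws::Crt::Auth::SignatureType::HttpRequestViaHeaders);
    config.SetRegion("us-east-1");
    config.SetService("s3");
    config.SetSigningTimepoint(Aws::Crt::DateTime::Now()); // assumed DateTime::Now()
    config.SetCredentialsProvider(provider);
    // For unsigned payloads, SetSignedBodyValue(SignedBodyValue::UnsignedPayloadStr()) could be used.

    Aws::Crt::Auth::Sigv4HttpRequestSigner signer;
    signer.SignRequest(
        request,
        config,
        [](const std::shared_ptr<Aws::Crt::Http::HttpRequest> &signedRequest, int errorCode) {
            // On errorCode == AWS_ERROR_SUCCESS the request now carries the generated auth headers.
            (void)signedRequest;
            (void)errorCode;
        });
}
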
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/HMAC.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/HMAC.h
new file mode 100644
index 0000000000..e4452f6bc5
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/HMAC.h
@@ -0,0 +1,150 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/cal/hmac.h>
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+
+struct aws_hmac;
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Crypto
+ {
+ static const size_t SHA256_HMAC_DIGEST_SIZE = 32;
+
+ /**
+ * Computes a SHA256 HMAC with secret over input, and writes the digest to output. If truncateTo is
+ * non-zero, the digest will be truncated to the value of truncateTo. Returns true on success. If this
+ * function fails, Aws::Crt::LastError() will contain the error that occurred. Unless you're using
+ * 'truncateTo', output should have a minimum capacity of SHA256_HMAC_DIGEST_SIZE.
+ */
+ bool AWS_CRT_CPP_API ComputeSHA256HMAC(
+ Allocator *allocator,
+ const ByteCursor &secret,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo = 0) noexcept;
+
+ /**
+ * Computes a SHA256 HMAC using the default allocator with secret over input, and writes the digest to
+ * output. If truncateTo is non-zero, the digest will be truncated to the value of truncateTo. Returns true
+ * on success. If this function fails, Aws::Crt::LastError() will contain the error that occurred. Unless
+ * you're using 'truncateTo', output should have a minimum capacity of SHA256_HMAC_DIGEST_SIZE.
+ */
+ bool AWS_CRT_CPP_API ComputeSHA256HMAC(
+ const ByteCursor &secret,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo = 0) noexcept;
+ /**
+ * Streaming HMAC object. The typical use case is for computing the HMAC of an object that is too large to
+ * load into memory. You can call Update() multiple times as you load chunks of data into memory. When
+ * you're finished simply call Digest(). After Digest() is called, this object is no longer usable.
+ */
+ class AWS_CRT_CPP_API HMAC final
+ {
+ public:
+ ~HMAC();
+ HMAC(const HMAC &) = delete;
+ HMAC &operator=(const HMAC &) = delete;
+ HMAC(HMAC &&toMove);
+ HMAC &operator=(HMAC &&toMove);
+
+ /**
+ * Returns true if the instance is in a valid state, false otherwise.
+ */
+ inline operator bool() const noexcept { return m_good; }
+
+ /**
+ * Returns the value of the last aws error encountered by operations on this instance.
+ */
+ inline int LastError() const noexcept { return m_lastError; }
+
+ /**
+ * Creates an instance of a Streaming SHA256 HMAC.
+ */
+ static HMAC CreateSHA256HMAC(Allocator *allocator, const ByteCursor &secret) noexcept;
+
+ /**
+ * Creates an instance of a Streaming SHA256 HMAC using the Default Allocator.
+ */
+ static HMAC CreateSHA256HMAC(const ByteCursor &secret) noexcept;
+
+ /**
+ * Updates the running HMAC object with data in toHMAC. Returns true on success. Call
+ * LastError() for the reason this call failed.
+ */
+ bool Update(const ByteCursor &toHMAC) noexcept;
+
+ /**
+ * Finishes the running HMAC operation and writes the digest into output. The available capacity of
+ * output must be large enough for the digest. See: SHA256_DIGEST_SIZE and MD5_DIGEST_SIZE for size
+                 * hints. 'truncateTo' is for when you want truncated output (e.g. you only want the first 16 bytes of a
+                 * SHA256 digest). Returns true on success. Call LastError() for the reason this call failed.
+ */
+ bool Digest(ByteBuf &output, size_t truncateTo = 0) noexcept;
+
+ private:
+ HMAC(aws_hmac *hmac) noexcept;
+ HMAC() = delete;
+
+ aws_hmac *m_hmac;
+ bool m_good;
+ int m_lastError;
+ };
+
+ /**
+ * BYO_CRYPTO: Base class for custom HMAC implementations.
+ *
+ * If using BYO_CRYPTO, you must define concrete implementations for the required HMAC algorithms
+ * and set their creation callbacks via functions like ApiHandle.SetBYOCryptoNewSHA256HMACCallback().
+ */
+ class AWS_CRT_CPP_API ByoHMAC
+ {
+ public:
+ virtual ~ByoHMAC() = default;
+
+ /** @private
+             * This is called by the framework. If you're trying to create instances of this class manually,
+             * please don't. But if you do, look at the other factory functions for reference.
+ */
+ aws_hmac *SeatForCInterop(const std::shared_ptr<ByoHMAC> &selfRef);
+
+ protected:
+ ByoHMAC(size_t digestSize, const ByteCursor &secret, Allocator *allocator = ApiAllocator());
+
+ /**
+ * Updates the running HMAC with to_hash.
+ * This can be called multiple times.
+ * Raise an AWS error and return false to indicate failure.
+ */
+ virtual bool UpdateInternal(const ByteCursor &toHash) noexcept = 0;
+
+ /**
+ * Complete the HMAC computation and write the final digest to output.
+             * This cannot be called more than once.
+ * If truncate_to is something other than 0, the output must be truncated to that number of bytes.
+ * Raise an AWS error and return false to indicate failure.
+ */
+ virtual bool DigestInternal(ByteBuf &output, size_t truncateTo = 0) noexcept = 0;
+
+ private:
+ static void s_Destroy(struct aws_hmac *hmac);
+ static int s_Update(struct aws_hmac *hmac, const struct aws_byte_cursor *buf);
+ static int s_Finalize(struct aws_hmac *hmac, struct aws_byte_buf *out);
+
+ static aws_hmac_vtable s_Vtable;
+ aws_hmac m_hmacValue;
+ std::shared_ptr<ByoHMAC> m_selfReference;
+ };
+
+ using CreateHMACCallback =
+ std::function<std::shared_ptr<ByoHMAC>(size_t digestSize, const ByteCursor &secret, Allocator *)>;
+
+ } // namespace Crypto
+ } // namespace Crt
+} // namespace Aws
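
A sketch of the one-shot HMAC function above, computing a SHA256 HMAC into a stack-backed buffer. ByteCursorFromCString and ByteBufFromEmptyArray are assumed helpers from <aws/crt/Types.h>, and the CRT is assumed to have been initialized (e.g. via an ApiHandle) before this runs.

#include <aws/crt/Types.h>
#include <aws/crt/crypto/HMAC.h>

bool ComputeExampleHmac()
{
    uint8_t digestStorage[Aws::Crt::Crypto::SHA256_HMAC_DIGEST_SIZE];
    Aws::Crt::ByteBuf digest = Aws::Crt::ByteBufFromEmptyArray(digestStorage, sizeof(digestStorage));

    Aws::Crt::ByteCursor secret = Aws::Crt::ByteCursorFromCString("my-secret-key");
    Aws::Crt::ByteCursor input = Aws::Crt::ByteCursorFromCString("message to authenticate");

    // Default-allocator overload declared above; on success the digest bytes land in `digest`.
    return Aws::Crt::Crypto::ComputeSHA256HMAC(secret, input, digest);
}
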
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/Hash.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/Hash.h
new file mode 100644
index 0000000000..42b9a3ed34
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/crypto/Hash.h
@@ -0,0 +1,168 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+
+#include <aws/cal/hash.h>
+
+struct aws_hash;
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Crypto
+ {
+ static const size_t SHA256_DIGEST_SIZE = 32;
+ static const size_t MD5_DIGEST_SIZE = 16;
+
+ /**
+ * Computes a SHA256 Hash over input, and writes the digest to output. If truncateTo is non-zero, the digest
+ * will be truncated to the value of truncateTo. Returns true on success. If this function fails,
+ * Aws::Crt::LastError() will contain the error that occurred. Unless you're using 'truncateTo', output
+ * should have a minimum capacity of SHA256_DIGEST_SIZE.
+ */
+ bool AWS_CRT_CPP_API ComputeSHA256(
+ Allocator *allocator,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo = 0) noexcept;
+
+ /**
+ * Computes a SHA256 Hash using the default allocator over input, and writes the digest to output. If
+ * truncateTo is non-zero, the digest will be truncated to the value of truncateTo. Returns true on success.
+ * If this function fails, Aws::Crt::LastError() will contain the error that occurred. Unless you're using
+ * 'truncateTo', output should have a minimum capacity of SHA256_DIGEST_SIZE.
+ */
+ bool AWS_CRT_CPP_API
+ ComputeSHA256(const ByteCursor &input, ByteBuf &output, size_t truncateTo = 0) noexcept;
+
+ /**
+ * Computes a MD5 Hash over input, and writes the digest to output. If truncateTo is non-zero, the digest
+ * will be truncated to the value of truncateTo. Returns true on success. If this function fails,
+ * Aws::Crt::LastError() will contain the error that occurred. Unless you're using 'truncateTo',
+ * output should have a minimum capacity of MD5_DIGEST_SIZE.
+ */
+ bool AWS_CRT_CPP_API ComputeMD5(
+ Allocator *allocator,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo = 0) noexcept;
+
+ /**
+ * Computes a MD5 Hash using the default allocator over input, and writes the digest to output. If
+ * truncateTo is non-zero, the digest will be truncated to the value of truncateTo. Returns true on success.
+ * If this function fails, Aws::Crt::LastError() will contain the error that occurred. Unless you're using
+ * 'truncateTo', output should have a minimum capacity of MD5_DIGEST_SIZE.
+ */
+ bool AWS_CRT_CPP_API ComputeMD5(const ByteCursor &input, ByteBuf &output, size_t truncateTo = 0) noexcept;
+
+ /**
+ * Streaming Hash object. The typical use case is for computing the hash of an object that is too large to
+ * load into memory. You can call Update() multiple times as you load chunks of data into memory. When
+ * you're finished simply call Digest(). After Digest() is called, this object is no longer usable.
+ */
+ class AWS_CRT_CPP_API Hash final
+ {
+ public:
+ ~Hash();
+ Hash(const Hash &) = delete;
+ Hash &operator=(const Hash &) = delete;
+ Hash(Hash &&toMove);
+ Hash &operator=(Hash &&toMove);
+
+ /**
+ * Returns true if the instance is in a valid state, false otherwise.
+ */
+ inline operator bool() const noexcept { return m_good; }
+
+ /**
+ * Returns the value of the last aws error encountered by operations on this instance.
+ */
+ inline int LastError() const noexcept { return m_lastError; }
+
+ /**
+ * Creates an instance of a Streaming SHA256 Hash.
+ */
+ static Hash CreateSHA256(Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Creates an instance of a Streaming MD5 Hash.
+ */
+ static Hash CreateMD5(Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Updates the running hash object with data in toHash. Returns true on success. Call
+ * LastError() for the reason this call failed.
+ */
+ bool Update(const ByteCursor &toHash) noexcept;
+
+ /**
+ * Finishes the running hash operation and writes the digest into output. The available capacity of
+ * output must be large enough for the digest. See: SHA256_DIGEST_SIZE and MD5_DIGEST_SIZE for size
+                 * hints. 'truncateTo' is for when you want truncated output (e.g. you only want the first 16 bytes of a
+                 * SHA256 digest). Returns true on success. Call LastError() for the reason this call failed.
+ */
+ bool Digest(ByteBuf &output, size_t truncateTo = 0) noexcept;
+
+ private:
+ Hash(aws_hash *hash) noexcept;
+ Hash() = delete;
+
+ aws_hash *m_hash;
+ bool m_good;
+ int m_lastError;
+ };
+
+ /**
+ * BYO_CRYPTO: Base class for custom hash implementations.
+ *
+ * If using BYO_CRYPTO, you must define concrete implementations for the required hash algorithms
+ * and set their creation callbacks via functions like ApiHandle.SetBYOCryptoNewMD5Callback().
+ */
+ class AWS_CRT_CPP_API ByoHash
+ {
+ public:
+ virtual ~ByoHash();
+
+ /** @private
+             * This is called by the framework. If you're trying to create instances of this class manually,
+             * please don't. But if you do, look at the other factory functions for reference.
+ */
+ aws_hash *SeatForCInterop(const std::shared_ptr<ByoHash> &selfRef);
+
+ protected:
+ ByoHash(size_t digestSize, Allocator *allocator = ApiAllocator());
+
+ /**
+ * Update the running hash with to_hash.
+ * This can be called multiple times.
+ * Raise an AWS error and return false to indicate failure.
+ */
+ virtual bool UpdateInternal(const ByteCursor &toHash) noexcept = 0;
+
+ /**
+ * Complete the hash computation and write the final digest to output.
+             * This cannot be called more than once.
+ * If truncate_to is something other than 0, the output must be truncated to that number of bytes.
+ * Raise an AWS error and return false to indicate failure.
+ */
+ virtual bool DigestInternal(ByteBuf &output, size_t truncateTo = 0) noexcept = 0;
+
+ private:
+ static void s_Destroy(struct aws_hash *hash);
+ static int s_Update(struct aws_hash *hash, const struct aws_byte_cursor *buf);
+ static int s_Finalize(struct aws_hash *hash, struct aws_byte_buf *out);
+
+ static aws_hash_vtable s_Vtable;
+ aws_hash m_hashValue;
+ std::shared_ptr<ByoHash> m_selfReference;
+ };
+
+ using CreateHashCallback = std::function<std::shared_ptr<ByoHash>(size_t digestSize, Allocator *)>;
+
+ } // namespace Crypto
+ } // namespace Crt
+} // namespace Aws
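
A sketch of the streaming Hash object above, hashing two chunks and finalizing into a stack-backed buffer. ByteCursorFromCString and ByteBufFromEmptyArray are assumed helpers from <aws/crt/Types.h>, and the CRT is assumed to be initialized.

#include <aws/crt/Types.h>
#include <aws/crt/crypto/Hash.h>

bool HashTwoChunks(const char *chunk1, const char *chunk2)
{
    Aws::Crt::Crypto::Hash sha256 = Aws::Crt::Crypto::Hash::CreateSHA256();
    if (!sha256)
    {
        return false; // sha256.LastError() holds the failure reason
    }

    // Feed the data in as many Update() calls as needed.
    if (!sha256.Update(Aws::Crt::ByteCursorFromCString(chunk1)) ||
        !sha256.Update(Aws::Crt::ByteCursorFromCString(chunk2)))
    {
        return false;
    }

    uint8_t digestStorage[Aws::Crt::Crypto::SHA256_DIGEST_SIZE];
    Aws::Crt::ByteBuf digest = Aws::Crt::ByteBufFromEmptyArray(digestStorage, sizeof(digestStorage));

    // After Digest() the Hash object is no longer usable.
    return sha256.Digest(digest);
}
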
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/endpoints/RuleEngine.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/endpoints/RuleEngine.h
new file mode 100644
index 0000000000..84baff71d9
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/endpoints/RuleEngine.h
@@ -0,0 +1,155 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Types.h>
+
+struct aws_endpoints_rule_engine;
+struct aws_endpoints_request_context;
+struct aws_endpoints_resolved_endpoint;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Endpoints
+ {
+ /*
+             * Request context used for endpoint resolution. Parameters are added via AddString()/AddBoolean();
+             * only string and boolean values are supported. Adding a parameter several times with the same
+             * name will overwrite previous values.
+ */
+ class AWS_CRT_CPP_API RequestContext final
+ {
+ public:
+ RequestContext(Allocator *allocator = ApiAllocator()) noexcept;
+ ~RequestContext();
+
+ /* TODO: move/copy semantics */
+ RequestContext(const RequestContext &) = delete;
+ RequestContext &operator=(const RequestContext &) = delete;
+ RequestContext(RequestContext &&) = delete;
+ RequestContext &operator=(RequestContext &&) = delete;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept { return m_requestContext != nullptr; }
+
+ /*
+             * Add a string parameter.
+             * Returns true if added successfully and false otherwise.
+             * Aws::Crt::LastError() can be used to retrieve the failure error code.
+ */
+ bool AddString(const ByteCursor &name, const ByteCursor &value);
+
+ /*
+             * Add a boolean parameter.
+             * Returns true if added successfully and false otherwise.
+             * Aws::Crt::LastError() can be used to retrieve the failure error code.
+ */
+ bool AddBoolean(const ByteCursor &name, bool value);
+
+ /// @private
+ aws_endpoints_request_context *GetNativeHandle() const noexcept { return m_requestContext; }
+
+ private:
+ Allocator *m_allocator;
+ aws_endpoints_request_context *m_requestContext;
+ };
+
+ /*
+ * Outcome of Endpoint Resolution.
+ * Outcome can be either endpoint (IsEndpoint) or error (IsError).
+             * An endpoint outcome means that the engine was able to resolve the context to
+             * an endpoint, and the outcome can have the following fields defined:
+ * - Url (required) - resolved url
+ * - Headers (optional) - additional headers to be included with request
+ * - Properties (optional) - custom list of properties associated
+ * with request (json blob to be interpreted by the caller.)
+ *
+             * An error outcome means that the context could not be resolved to an endpoint.
+             * The outcome will have the following field:
+ * - Error (required) - error message providing more info on why
+ * endpoint could not be resolved.
+ */
+ class AWS_CRT_CPP_API ResolutionOutcome final
+ {
+ public:
+ ~ResolutionOutcome();
+
+ /* TODO: move/copy semantics */
+ ResolutionOutcome(const ResolutionOutcome &) = delete;
+ ResolutionOutcome &operator=(const ResolutionOutcome &) = delete;
+ ResolutionOutcome(ResolutionOutcome &&toMove) noexcept;
+ ResolutionOutcome &operator=(ResolutionOutcome &&);
+
+ bool IsEndpoint() const noexcept;
+ bool IsError() const noexcept;
+
+ /*
+ * Endpoint properties.
+                 * Note: the following fields are None if the outcome is an error.
+ * Headers and Properties are optional and could also be None.
+ */
+ Optional<StringView> GetUrl() const;
+ Optional<StringView> GetProperties() const;
+ Optional<UnorderedMap<StringView, Vector<StringView>>> GetHeaders() const;
+
+ /*
+ * Error properties.
+                 * Note: this field is None if the outcome is not an error.
+ */
+ Optional<StringView> GetError() const;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept { return m_resolvedEndpoint != nullptr; }
+
+ /// @private For use by rule engine.
+ ResolutionOutcome(aws_endpoints_resolved_endpoint *impl);
+
+ private:
+ aws_endpoints_resolved_endpoint *m_resolvedEndpoint;
+ };
+
+ /**
+ * Endpoints Rule Engine.
+ */
+ class AWS_CRT_CPP_API RuleEngine final
+ {
+ public:
+ RuleEngine(
+ const ByteCursor &rulesetCursor,
+ const ByteCursor &partitionsCursor,
+ Allocator *allocator = ApiAllocator()) noexcept;
+ ~RuleEngine();
+
+ RuleEngine(const RuleEngine &) = delete;
+ RuleEngine &operator=(const RuleEngine &) = delete;
+ RuleEngine(RuleEngine &&) = delete;
+ RuleEngine &operator=(RuleEngine &&) = delete;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept { return m_ruleEngine != nullptr; }
+
+ /*
+ * Resolves rules against the provided context.
+             * If successful, the return value will contain the resolution outcome.
+             * If not, the return value will be None and Aws::Crt::LastError() can be
+             * used to retrieve the CRT error code.
+ */
+ Optional<ResolutionOutcome> Resolve(const RequestContext &context) const;
+
+ private:
+ aws_endpoints_rule_engine *m_ruleEngine;
+ };
+ } // namespace Endpoints
+ } // namespace Crt
+} // namespace Aws
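
A sketch of resolving an endpoint with the RuleEngine above. The ruleset and partitions JSON documents, and the "Region"/"UseFIPS" parameter names, are assumed inputs (they come from a service's endpoint rules, which are not part of this header); the calls themselves follow the declarations above.

#include <aws/crt/Types.h>
#include <aws/crt/endpoints/RuleEngine.h>

Aws::Crt::Optional<Aws::Crt::String> ResolveEndpointUrl(const char *rulesetJson, const char *partitionsJson)
{
    Aws::Crt::Endpoints::RuleEngine engine(
        Aws::Crt::ByteCursorFromCString(rulesetJson), Aws::Crt::ByteCursorFromCString(partitionsJson));
    if (!engine)
    {
        return {};
    }

    Aws::Crt::Endpoints::RequestContext context;
    context.AddString(Aws::Crt::ByteCursorFromCString("Region"), Aws::Crt::ByteCursorFromCString("us-east-1"));
    context.AddBoolean(Aws::Crt::ByteCursorFromCString("UseFIPS"), false);

    auto outcome = engine.Resolve(context);
    if (!outcome.has_value() || !outcome->IsEndpoint())
    {
        return {}; // resolution failed or resolved to an error outcome
    }

    auto url = outcome->GetUrl();
    if (!url.has_value())
    {
        return {};
    }
    return Aws::Crt::String(url->data(), url->size());
}
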
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/external/cJSON.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/external/cJSON.h
new file mode 100644
index 0000000000..ee10f0225d
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/external/cJSON.h
@@ -0,0 +1,309 @@
+/*
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+/** MODIFICATIONS:
+ * valueInt was moved up to improve alignment.
+ * Wrap all symbols in the Aws namespace as a short-term collision resolution
+ * Replace strcpy() with strncpy()
+ *
+ * Modifications licensed under:
+ *
+ * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+#ifndef cJSON__h
+#define cJSON__h
+
+namespace Aws {
+
+#if !defined(__WINDOWS__) && (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
+#define __WINDOWS__
+#endif
+
+#ifdef __WINDOWS__
+
+/* When compiling for windows, we specify a specific calling convention to avoid issues where we are being called from a project with a different default calling convention. For windows you have 3 define options:
+
+CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever dllexport symbols
+CJSON_EXPORT_SYMBOLS - Define this on library build when you want to dllexport symbols (default)
+CJSON_IMPORT_SYMBOLS - Define this if you want to dllimport symbol
+
+For *nix builds that support visibility attribute, you can define similar behavior by
+
+setting default visibility to hidden by adding
+-fvisibility=hidden (for gcc)
+or
+-xldscope=hidden (for sun cc)
+to CFLAGS
+
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way CJSON_EXPORT_SYMBOLS does
+
+*/
+
+#define CJSON_CDECL __cdecl
+#define CJSON_STDCALL __stdcall
+
+/* export symbols by default, this is necessary for copy pasting the C and header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && !defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_EXPORT_SYMBOLS
+#endif
+
+#if defined(CJSON_HIDE_SYMBOLS)
+#define CJSON_PUBLIC(type) type CJSON_STDCALL
+#elif defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
+#elif defined(CJSON_IMPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
+#endif
+#else /* !__WINDOWS__ */
+#define CJSON_CDECL
+#define CJSON_STDCALL
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined (__SUNPRO_C)) && defined(CJSON_API_VISIBILITY)
+#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
+#else
+#define CJSON_PUBLIC(type) type
+#endif
+#endif
+
+/* project version */
+#define CJSON_VERSION_MAJOR 1
+#define CJSON_VERSION_MINOR 7
+#define CJSON_VERSION_PATCH 14
+
+#include <stddef.h>
+
+/* cJSON Types: */
+#define cJSON_Invalid (0)
+#define cJSON_False (1 << 0)
+#define cJSON_True (1 << 1)
+#define cJSON_NULL (1 << 2)
+#define cJSON_Number (1 << 3)
+#define cJSON_String (1 << 4)
+#define cJSON_Array (1 << 5)
+#define cJSON_Object (1 << 6)
+#define cJSON_Raw (1 << 7) /* raw json */
+
+#define cJSON_IsReference 256
+#define cJSON_StringIsConst 512
+
+/* The cJSON structure: */
+typedef struct cJSON
+{
+ /* next/prev allow you to walk array/object chains. Alternatively, use GetArraySize/GetArrayItem/GetObjectItem */
+ struct cJSON *next;
+ struct cJSON *prev;
+ /* An array or object item will have a child pointer pointing to a chain of the items in the array/object. */
+ struct cJSON *child;
+
+ /* The type of the item, as above. */
+ int type;
+
+ /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead */
+ int valueint;
+ /* The item's string, if type==cJSON_String and type == cJSON_Raw */
+ char *valuestring;
+ /* The item's number, if type==cJSON_Number */
+ double valuedouble;
+
+ /* The item's name string, if this item is the child of, or is in the list of subitems of an object. */
+ char *string;
+} cJSON;
+
+typedef struct cJSON_Hooks
+{
+ /* malloc/free are CDECL on Windows regardless of the default calling convention of the compiler, so ensure the hooks allow passing those functions directly. */
+ void *(CJSON_CDECL *malloc_fn)(size_t sz);
+ void (CJSON_CDECL *free_fn)(void *ptr);
+} cJSON_Hooks;
+
+typedef int cJSON_bool;
+
+/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse them.
+ * This is to prevent stack overflows. */
+#ifndef CJSON_NESTING_LIMIT
+#define CJSON_NESTING_LIMIT 1000
+#endif
+
+/* returns the version of cJSON as a string */
+CJSON_PUBLIC(const char*) cJSON_Version(void);
+
+/* Supply malloc, realloc and free functions to cJSON */
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks);
+
+/* Memory Management: the caller is always responsible to free the results from all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is cJSON_PrintPreallocated, where the caller has full responsibility of the buffer. */
+/* Supply a block of JSON, and this returns a cJSON object you can interrogate. */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length);
+/* ParseWithOpts allows you to require (and check) that the JSON is null terminated, and to retrieve the pointer to the final byte parsed. */
+/* If you supply a ptr in return_parse_end and parsing fails, then return_parse_end will contain a pointer to the error so will match cJSON_GetErrorPtr(). */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated);
+
+/* Render a cJSON entity to text for transfer/storage. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
+/* Render a cJSON entity to text for transfer/storage without any formatting. */
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
+/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess at the final size. guessing well reduces reallocation. fmt=0 gives unformatted, =1 gives formatted */
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
+/* Render a cJSON entity to text using a buffer already allocated in memory with given length. Returns 1 on success and 0 on failure. */
+/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will use, so to be safe allocate 5 bytes more than you actually need */
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format);
+/* Delete a cJSON entity and all subentities. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
+
+/* Returns the number of items in an array (or object). */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
+/* Retrieve item number "index" from array "array". Returns NULL if unsuccessful. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
+/* Get item "string" from object. Case insensitive. */
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string);
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string);
+/* For analysing failed parses. This returns a pointer to the parse error. You'll probably need to look a few chars back to make sense of it. Defined when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
+
+/* Check item type and return its value */
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item);
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item);
+
+/* These functions check the type of an item */
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item);
+
+/* These calls create a cJSON item of the appropriate type. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
+/* raw json */
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);
+
+/* Create a string where valuestring references a string so
+ * it will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
+/* Create an object/array that only references its elements so
+ * they will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
+
+/* These utilities create an Array of count items.
+ * The parameter count cannot be greater than the number of elements in the number array, otherwise array access will be out of bounds.*/
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count);
+
+/* Append item to the specified array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
+/* Use this when string is definitely const (i.e. a literal, or as good as), and will definitely survive the cJSON object.
+ * WARNING: When this function is used, make sure to always check that (item->type & cJSON_StringIsConst) is zero before
+ * writing to `item->string` */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
+/* Append reference to item to the specified array/object. Use this when you want to add an existing cJSON to a new cJSON, but don't want to corrupt your existing cJSON. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
+
+/* Remove/Detach items from Arrays/Objects. */
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+
+/* Update array items. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object,const char *string,cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,const char *string,cJSON *newitem);
+
+/* Duplicate a cJSON item */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in new memory that will
+ * need to be released. With recurse!=0, it will duplicate any children connected to the item.
+ * The item->next and ->prev pointers are always zero on return from Duplicate. */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL or invalid, they will be considered unequal.
+ * case_sensitive determines if object keys are treated case sensitive (1) or case insensitive (0) */
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive);
+
+/* Minify a string, removing blank characters (such as ' ', '\t', '\r', '\n') from it.
+ * The input pointer json cannot point to a read-only address area, such as a string constant,
+ * but should point to a readable and writable address area. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json);
+
+/* Helper functions for creating and adding items to an object at the same time.
+ * They return the added item or NULL on failure. */
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number);
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string);
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw);
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name);
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name);
+
+/* When assigning an integer value, it needs to be propagated to valuedouble too. */
+#define cJSON_SetIntValue(object, number) ((object) ? (object)->valueint = (object)->valuedouble = (number) : (number))
+/* helper for the cJSON_SetNumberValue macro */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
+#define cJSON_SetNumberValue(object, number) ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) : (number))
+/* Change the valuestring of a cJSON_String object, only takes effect when type of object is cJSON_String */
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring);
+
+/* Macro for iterating over an array or object */
+#define cJSON_ArrayForEach(element, array) for(element = (array != NULL) ? (array)->child : NULL; element != NULL; element = element->next)
+
+/* malloc/free objects using the malloc/free functions that have been set with cJSON_InitHooks */
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
+CJSON_PUBLIC(void) cJSON_free(void *object);
+
+}
+
+#endif
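
A small sketch of the Aws-namespaced cJSON API above: parse a document, read one string member, and free it. Only functions declared in this header are used.

#include <aws/crt/external/cJSON.h>

#include <stdio.h>

static void PrintRegion(void)
{
    const char *json = "{\"region\":\"us-east-1\",\"useFips\":false}";

    Aws::cJSON *root = Aws::cJSON_Parse(json);
    if (root == NULL)
    {
        printf("parse error near: %s\n", Aws::cJSON_GetErrorPtr());
        return;
    }

    Aws::cJSON *region = Aws::cJSON_GetObjectItemCaseSensitive(root, "region");
    if (Aws::cJSON_IsString(region) && region->valuestring != NULL)
    {
        printf("region: %s\n", region->valuestring);
    }

    // Everything returned by cJSON_Parse must be released with cJSON_Delete.
    Aws::cJSON_Delete(root);
}
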
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnection.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnection.h
new file mode 100644
index 0000000000..1ae79cefd8
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnection.h
@@ -0,0 +1,514 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/http/connection.h>
+#include <aws/http/proxy.h>
+#include <aws/http/request_response.h>
+
+#include <aws/crt/Types.h>
+#include <aws/crt/io/Bootstrap.h>
+#include <aws/crt/io/SocketOptions.h>
+#include <aws/crt/io/TlsOptions.h>
+
+#include <functional>
+#include <memory>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ class ClientBootstrap;
+ }
+
+ namespace Http
+ {
+ class HttpClientConnection;
+ class HttpStream;
+ class HttpClientStream;
+ class HttpRequest;
+ class HttpProxyStrategy;
+ using HttpHeader = aws_http_header;
+
+ /**
+ * Invoked upon connection setup, whether it was successful or not. If the connection was
+ * successfully established, `connection` will be valid and errorCode will be AWS_ERROR_SUCCESS.
+ * Upon an error, `connection` will not be valid, and errorCode will contain the cause of the connection
+ * failure.
+ */
+ using OnConnectionSetup =
+ std::function<void(const std::shared_ptr<HttpClientConnection> &connection, int errorCode)>;
+
+ /**
+ * Invoked upon connection shutdown. `connection` will always be a valid pointer. `errorCode` will specify
+ * shutdown reason. A graceful connection close will set `errorCode` to AWS_ERROR_SUCCESS.
+ * Internally, the connection pointer will be unreferenced immediately after this call; if you took a
+ * reference to it in OnConnectionSetup(), you'll need to release your reference before the underlying
+ * memory is released. If you never took a reference to it, the resources for the connection will be
+ * immediately released after completion of this callback.
+ */
+ using OnConnectionShutdown = std::function<void(HttpClientConnection &connection, int errorCode)>;
+
+ /**
+             * Called as headers are received from the peer. `headersArray` will contain the header values
+             * read from the wire. The number of entries in `headersArray` is specified in `headersCount`.
+ *
+ * Keep in mind that this function will likely be called multiple times until all headers are received.
+ *
+ * On HttpStream, this function must be set.
+ */
+ using OnIncomingHeaders = std::function<void(
+ HttpStream &stream,
+ enum aws_http_header_block headerBlock,
+ const HttpHeader *headersArray,
+ std::size_t headersCount)>;
+
+ /**
+ * Invoked when the headers portion of the message has been completely received. `hasBody` will indicate
+ * if there is an incoming body.
+ *
+ * On HttpStream, this function can be empty.
+ */
+ using OnIncomingHeadersBlockDone =
+ std::function<void(HttpStream &stream, enum aws_http_header_block block)>;
+
+ /**
+ * Invoked as chunks of the body are read. `data` contains the data read from the wire. If chunked encoding
+ * was used, it will already be decoded (TBD).
+ *
+ * On HttpStream, this function can be empty if you are not expecting a body (e.g. a HEAD request).
+ */
+ using OnIncomingBody = std::function<void(HttpStream &stream, const ByteCursor &data)>;
+
+ /**
+ * Invoked upon completion of the stream. This means the request has been sent and a completed response
+ * has been received (in client mode), or the request has been received and the response has been completed.
+ *
+ * In H2, this will mean RST_STREAM state has been reached for the stream.
+ *
+ * On HttpStream, this function must be set.
+ */
+ using OnStreamComplete = std::function<void(HttpStream &stream, int errorCode)>;
+
+ /**
+ * POD structure used for setting up an Http Request
+ */
+ struct AWS_CRT_CPP_API HttpRequestOptions
+ {
+ /**
+ * The actual http request
+ */
+ HttpRequest *request;
+
+ /**
+ * See `OnIncomingHeaders` for more info. This value must be set.
+ */
+ OnIncomingHeaders onIncomingHeaders;
+ OnIncomingHeadersBlockDone onIncomingHeadersBlockDone;
+
+ /**
+ * See `OnIncomingBody` for more info. This value can be empty if you will not be receiving a body.
+ */
+ OnIncomingBody onIncomingBody;
+
+ /**
+ * See `OnStreamComplete` for more info. This value can be empty.
+ */
+ OnStreamComplete onStreamComplete;
+ };
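+
+            /*
+             * A minimal sketch of filling out HttpRequestOptions with lambdas, assuming `request`
+             * is an already-built HttpRequest (see HttpRequestResponse.h) and `responseBody` is a
+             * caller-owned string that outlives the stream:
+             *
+             *   Aws::Crt::Http::HttpRequestOptions requestOptions;
+             *   requestOptions.request = &request;
+             *   requestOptions.onIncomingHeaders = [](Aws::Crt::Http::HttpStream &,
+             *                                         enum aws_http_header_block,
+             *                                         const Aws::Crt::Http::HttpHeader *headersArray,
+             *                                         std::size_t headersCount) {
+             *       (void)headersArray; (void)headersCount; // inspect response headers here
+             *   };
+             *   requestOptions.onIncomingBody = [&responseBody](Aws::Crt::Http::HttpStream &,
+             *                                                   const Aws::Crt::ByteCursor &data) {
+             *       responseBody.append((const char *)data.ptr, data.len);
+             *   };
+             *   requestOptions.onStreamComplete = [](Aws::Crt::Http::HttpStream &, int errorCode) {
+             *       (void)errorCode; // the request/response exchange is finished
+             *   };
+             */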
+
+ /**
+ * Represents a single http message exchange (request/response) or in H2, it can also represent
+ * a PUSH_PROMISE followed by the accompanying Response.
+ */
+ class AWS_CRT_CPP_API HttpStream : public std::enable_shared_from_this<HttpStream>
+ {
+ public:
+ virtual ~HttpStream();
+ HttpStream(const HttpStream &) = delete;
+ HttpStream(HttpStream &&) = delete;
+ HttpStream &operator=(const HttpStream &) = delete;
+ HttpStream &operator=(HttpStream &&) = delete;
+
+ /**
+ * Get the underlying connection for the stream.
+ */
+ HttpClientConnection &GetConnection() const noexcept;
+
+ /**
+                 * @return request's Http Response Code. Requires response headers to have been processed first.
+ */
+ virtual int GetResponseStatusCode() const noexcept = 0;
+
+ /**
+ * Updates the read window on the connection. In Http 1.1 this relieves TCP back pressure, in H2
+ * this will trigger two WINDOW_UPDATE frames, one for the connection and one for the stream.
+ *
+                 * You do not need to call this unless `ManualWindowManagement` was enabled on the connection
+                 * options. See `OnIncomingBody` and `HttpClientConnectionOptions::ManualWindowManagement`.
+ *
+ * `incrementSize` is the amount to update the read window by.
+ */
+ void UpdateWindow(std::size_t incrementSize) noexcept;
+
+ protected:
+ aws_http_stream *m_stream;
+ std::shared_ptr<HttpClientConnection> m_connection;
+ HttpStream(const std::shared_ptr<HttpClientConnection> &connection) noexcept;
+
+ private:
+ OnIncomingHeaders m_onIncomingHeaders;
+ OnIncomingHeadersBlockDone m_onIncomingHeadersBlockDone;
+ OnIncomingBody m_onIncomingBody;
+ OnStreamComplete m_onStreamComplete;
+
+ static int s_onIncomingHeaders(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block headerBlock,
+ const struct aws_http_header *headerArray,
+ size_t numHeaders,
+ void *userData) noexcept;
+ static int s_onIncomingHeaderBlockDone(
+ struct aws_http_stream *stream,
+ enum aws_http_header_block headerBlock,
+ void *userData) noexcept;
+ static int s_onIncomingBody(
+ struct aws_http_stream *stream,
+ const struct aws_byte_cursor *data,
+ void *userData) noexcept;
+ static void s_onStreamComplete(struct aws_http_stream *stream, int errorCode, void *userData) noexcept;
+
+ friend class HttpClientConnection;
+ };
+
+ struct ClientStreamCallbackData
+ {
+ ClientStreamCallbackData() : allocator(nullptr), stream(nullptr) {}
+ Allocator *allocator;
+ std::shared_ptr<HttpStream> stream;
+ };
+
+ /**
+ * Subclass that represents an http client's view of an HttpStream.
+ */
+ class AWS_CRT_CPP_API HttpClientStream final : public HttpStream
+ {
+ public:
+ ~HttpClientStream();
+ HttpClientStream(const HttpClientStream &) = delete;
+ HttpClientStream(HttpClientStream &&) = delete;
+ HttpClientStream &operator=(const HttpClientStream &) = delete;
+ HttpClientStream &operator=(HttpClientStream &&) = delete;
+
+ /**
+                 * If this stream was initiated as a request, assuming the headers of the response have been
+                 * received, this value contains the Http Response Code.
+ */
+ virtual int GetResponseStatusCode() const noexcept override;
+
+ /**
+ * Activates the request's outgoing stream processing.
+ *
+ * Returns true on success, false otherwise.
+ */
+ bool Activate() noexcept;
+
+ private:
+ HttpClientStream(const std::shared_ptr<HttpClientConnection> &connection) noexcept;
+
+ ClientStreamCallbackData m_callbackData;
+ friend class HttpClientConnection;
+ };
+
+ /**
+ * @deprecated enum that designates what kind of authentication, if any, to use when connecting to a
+ * proxy server.
+ *
+             * Here for backwards compatibility. Has been superseded by proxy strategies.
+ */
+ enum class AwsHttpProxyAuthenticationType
+ {
+ None,
+ Basic,
+ };
+
+ /**
+ * Mirror of aws_http_proxy_connection_type enum. Indicates the basic http proxy behavior of the
+ * proxy we're connecting to.
+ */
+ enum class AwsHttpProxyConnectionType
+ {
+ /**
+ * Deprecated, but 0-valued for backwards compatibility
+ *
+ * If tls options are provided (for the main connection) then treat the proxy as a tunneling proxy
+ * If tls options are not provided (for the main connection), then treat the proxy as a forwarding
+ * proxy
+ */
+ Legacy = AWS_HPCT_HTTP_LEGACY,
+
+ /**
+ * Use the proxy to forward http requests. Attempting to use both this mode and TLS to the destination
+ * is a configuration error.
+ */
+ Forwarding = AWS_HPCT_HTTP_FORWARD,
+
+ /**
+ * Use the proxy to establish an http connection via a CONNECT request to the proxy. Works for both
+ * plaintext and tls connections.
+ */
+ Tunneling = AWS_HPCT_HTTP_TUNNEL,
+ };
+
+ /**
+ * Configuration structure that holds all proxy-related http connection options
+ */
+ class AWS_CRT_CPP_API HttpClientConnectionProxyOptions
+ {
+ public:
+ HttpClientConnectionProxyOptions();
+ HttpClientConnectionProxyOptions(const HttpClientConnectionProxyOptions &rhs) = default;
+ HttpClientConnectionProxyOptions(HttpClientConnectionProxyOptions &&rhs) = default;
+
+ HttpClientConnectionProxyOptions &operator=(const HttpClientConnectionProxyOptions &rhs) = default;
+ HttpClientConnectionProxyOptions &operator=(HttpClientConnectionProxyOptions &&rhs) = default;
+
+ ~HttpClientConnectionProxyOptions() = default;
+
+ /**
+ * Intended for internal use only. Initializes the C proxy configuration structure,
+ * aws_http_proxy_options, from an HttpClientConnectionProxyOptions instance.
+ *
+ * @param raw_options - output parameter containing low level proxy options to be passed to the C
+ * interface
+ *
+ */
+ void InitializeRawProxyOptions(struct aws_http_proxy_options &raw_options) const;
+
+ /**
+ * The name of the proxy server to connect through.
+ * Required.
+ */
+ String HostName;
+
+ /**
+ * The port of the proxy server to connect to.
+ * Required.
+ */
+ uint16_t Port;
+
+ /**
+ * Sets the TLS options for the connection to the proxy.
+ * Optional.
+ */
+ Optional<Io::TlsConnectionOptions> TlsOptions;
+
+ /**
+ * What kind of proxy connection to make
+ */
+ AwsHttpProxyConnectionType ProxyConnectionType;
+
+ /**
+ * Proxy strategy to use while negotiating the connection. Use null for no additional
+ * steps.
+ */
+ std::shared_ptr<HttpProxyStrategy> ProxyStrategy;
+
+ /**
+ * @deprecated What kind of authentication approach to use when connecting to the proxy
+ * Replaced by proxy strategy
+ *
+ * Backwards compatibility achieved by invoking CreateBasicHttpProxyStrategy if
+ * (1) ProxyStrategy is null
+ * (2) AuthType is AwsHttpProxyAuthenticationType::Basic
+ */
+ AwsHttpProxyAuthenticationType AuthType;
+
+ /**
+ * @deprecated The username to use if connecting to the proxy via basic authentication
+ * Replaced by using the result of CreateBasicHttpProxyStrategy()
+ */
+ String BasicAuthUsername;
+
+ /**
+ * @deprecated The password to use if connecting to the proxy via basic authentication
+ * Replaced by using the result of CreateBasicHttpProxyStrategy()
+ */
+ String BasicAuthPassword;
+ };
+
+ /**
+ * Configuration structure holding all options relating to http connection establishment
+ */
+ class AWS_CRT_CPP_API HttpClientConnectionOptions
+ {
+ public:
+ HttpClientConnectionOptions();
+ HttpClientConnectionOptions(const HttpClientConnectionOptions &rhs) = default;
+ HttpClientConnectionOptions(HttpClientConnectionOptions &&rhs) = default;
+
+ ~HttpClientConnectionOptions() = default;
+
+ HttpClientConnectionOptions &operator=(const HttpClientConnectionOptions &rhs) = default;
+ HttpClientConnectionOptions &operator=(HttpClientConnectionOptions &&rhs) = default;
+
+ /**
+ * The client bootstrap to use for setting up and tearing down connections.
+ * Note: If null, then the default ClientBootstrap is used
+ * (see Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap)
+ */
+ Io::ClientBootstrap *Bootstrap;
+
+ /**
+ * The TCP read window allowed for Http 1.1 connections and Initial Windows for H2 connections.
+ */
+ size_t InitialWindowSize;
+
+ /**
+ * The callback invoked on connection establishment, whether success or failure.
+ * See `OnConnectionSetup` for more info.
+ * Required.
+ */
+ OnConnectionSetup OnConnectionSetupCallback;
+
+ /**
+ * The callback invoked on connection shutdown.
+ * See `OnConnectionShutdown` for more info.
+ * Required.
+ */
+ OnConnectionShutdown OnConnectionShutdownCallback;
+
+ /**
+ * The name of the http server to connect to.
+ * Required.
+ */
+ String HostName;
+
+ /**
+ * The port of the http server to connect to.
+ * Required.
+ */
+ uint16_t Port;
+
+ /**
+ * The socket options of the connection.
+ * Required.
+ */
+ Io::SocketOptions SocketOptions;
+
+ /**
+ * The TLS options for the http connection.
+ * Optional.
+ */
+ Optional<Io::TlsConnectionOptions> TlsOptions;
+
+ /**
+ * The proxy options for the http connection.
+ * Optional.
+ */
+ Optional<HttpClientConnectionProxyOptions> ProxyOptions;
+
+ /**
+ * If set to true, then the TCP read back pressure mechanism will be enabled. You should
+ * only use this if you're allowing http response body data to escape the callbacks. E.g. you're
+ * putting the data into a queue for another thread to process and need to make sure the memory
+ * usage is bounded. If this is enabled, you must call HttpStream::UpdateWindow() for every
+ * byte read from the OnIncomingBody callback.
+ */
+ bool ManualWindowManagement;
+ };
+
+ enum class HttpVersion
+ {
+ Unknown = AWS_HTTP_VERSION_UNKNOWN,
+ Http1_0 = AWS_HTTP_VERSION_1_0,
+ Http1_1 = AWS_HTTP_VERSION_1_1,
+ Http2 = AWS_HTTP_VERSION_2,
+ };
+
+ /**
+ * Represents a connection from a Http Client to a Server.
+ */
+ class AWS_CRT_CPP_API HttpClientConnection : public std::enable_shared_from_this<HttpClientConnection>
+ {
+ public:
+ virtual ~HttpClientConnection() = default;
+ HttpClientConnection(const HttpClientConnection &) = delete;
+ HttpClientConnection(HttpClientConnection &&) = delete;
+ HttpClientConnection &operator=(const HttpClientConnection &) = delete;
+ HttpClientConnection &operator=(HttpClientConnection &&) = delete;
+
+ /**
+ * Make a new client initiated request on this connection.
+ *
+ * If you take a reference to the return value, the memory and resources for the connection
+ * and stream will not be cleaned up until you release it. You can however, release the reference
+ * as soon as you don't need it anymore. The internal reference count ensures the resources will
+ * not be freed until the stream is completed.
+ *
+ * Returns an instance of HttpStream upon success and nullptr on failure.
+ *
+ * You must call HttpClientStream::Activate() to begin outgoing processing of the stream.
+ */
+ std::shared_ptr<HttpClientStream> NewClientStream(const HttpRequestOptions &requestOptions) noexcept;
+
+ /**
+ * @return true unless the connection is closed or closing.
+ */
+ bool IsOpen() const noexcept;
+
+ /**
+ * Initiate a shutdown of the connection. Sometimes, connections are persistent and you want
+ * to close them before shutting down your application or whatever is consuming this interface.
+ *
+ * Assuming `OnConnectionShutdown` has not already been invoked, it will be invoked as a result of this
+ * call.
+ */
+ void Close() noexcept;
+
+ /**
+ * @return protocol version the connection used
+ */
+ HttpVersion GetVersion() noexcept;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_lastError; }
+
+ /**
+                 * Create a new Https connection using the host name, port, socket options and (optional) TLS
+                 * options supplied in `connectionOptions`. If no TLS options are set, plain-text http will be used.
+                 *
+                 * Returns true on success, and false on failure. If false is returned, `OnConnectionSetupCallback`
+                 * will not be invoked. On success, `OnConnectionSetupCallback` will be called with either a valid
+                 * connection or an errorCode.
+ */
+ static bool CreateConnection(
+ const HttpClientConnectionOptions &connectionOptions,
+ Allocator *allocator) noexcept;
+
+ protected:
+ HttpClientConnection(aws_http_connection *m_connection, Allocator *allocator) noexcept;
+ aws_http_connection *m_connection;
+
+ private:
+ Allocator *m_allocator;
+ int m_lastError;
+
+ static void s_onClientConnectionSetup(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) noexcept;
+ static void s_onClientConnectionShutdown(
+ struct aws_http_connection *connection,
+ int error_code,
+ void *user_data) noexcept;
+ };
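+
+            /*
+             * A minimal end-to-end sketch of connecting and sending a request, assuming `bootstrap`
+             * (Io::ClientBootstrap), `socketOptions` (Io::SocketOptions) and `requestOptions` (the
+             * HttpRequestOptions sketch above) already exist, with error handling mostly elided:
+             *
+             *   std::promise<std::shared_ptr<Aws::Crt::Http::HttpClientConnection>> connectionPromise;
+             *
+             *   Aws::Crt::Http::HttpClientConnectionOptions options;
+             *   options.Bootstrap = &bootstrap;
+             *   options.HostName = "example.com";
+             *   options.Port = 80;
+             *   options.SocketOptions = socketOptions;
+             *   options.OnConnectionSetupCallback =
+             *       [&connectionPromise](const std::shared_ptr<Aws::Crt::Http::HttpClientConnection> &conn,
+             *                            int errorCode) {
+             *           connectionPromise.set_value(errorCode == AWS_ERROR_SUCCESS ? conn : nullptr);
+             *       };
+             *   options.OnConnectionShutdownCallback = [](Aws::Crt::Http::HttpClientConnection &, int) {};
+             *
+             *   if (Aws::Crt::Http::HttpClientConnection::CreateConnection(options, Aws::Crt::ApiAllocator()))
+             *   {
+             *       auto connection = connectionPromise.get_future().get();
+             *       if (connection)
+             *       {
+             *           if (auto stream = connection->NewClientStream(requestOptions))
+             *           {
+             *               stream->Activate(); // begin outgoing processing of the stream
+             *           }
+             *       }
+             *   }
+             */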
+
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnectionManager.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnectionManager.h
new file mode 100644
index 0000000000..0ecd9b48d7
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpConnectionManager.h
@@ -0,0 +1,127 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/http/HttpConnection.h>
+
+#include <atomic>
+#include <condition_variable>
+#include <future>
+#include <mutex>
+
+struct aws_http_connection_manager;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+ /**
+ * Invoked when a connection from the pool is available. If a connection was successfully obtained
+ * the connection shared_ptr can be seated into your own copy of connection. If it failed, errorCode
+ * will be non-zero.
+ */
+ using OnClientConnectionAvailable =
+ std::function<void(std::shared_ptr<HttpClientConnection>, int errorCode)>;
+
+ /**
+ * Configuration struct containing all options related to connection manager behavior
+ */
+ class AWS_CRT_CPP_API HttpClientConnectionManagerOptions
+ {
+ public:
+ HttpClientConnectionManagerOptions() noexcept;
+ HttpClientConnectionManagerOptions(const HttpClientConnectionManagerOptions &rhs) = default;
+ HttpClientConnectionManagerOptions(HttpClientConnectionManagerOptions &&rhs) = default;
+
+ HttpClientConnectionManagerOptions &operator=(const HttpClientConnectionManagerOptions &rhs) = default;
+ HttpClientConnectionManagerOptions &operator=(HttpClientConnectionManagerOptions &&rhs) = default;
+
+ /**
+ * The http connection options to use for each connection created by the manager
+ */
+ HttpClientConnectionOptions ConnectionOptions;
+
+ /**
+ * The maximum number of connections the manager is allowed to create/manage
+ */
+ size_t MaxConnections;
+
+                /** If set, InitiateShutdown() will return a future that will allow a user to block until the
+ * connection manager has completely released all resources. This isn't necessary during the normal
+ * flow of an application, but it is useful for scenarios, such as tests, that need deterministic
+ * shutdown ordering. Be aware, if you use this anywhere other than the main thread, you will most
+ * likely cause a deadlock. If this is set, you MUST call InitiateShutdown() before releasing your last
+ * reference to the connection manager.
+ */
+ bool EnableBlockingShutdown;
+ };
+
+ /**
+ * Manages a pool of connections to a specific endpoint using the same socket and tls options.
+ */
+ class AWS_CRT_CPP_API HttpClientConnectionManager final
+ : public std::enable_shared_from_this<HttpClientConnectionManager>
+ {
+ public:
+ ~HttpClientConnectionManager();
+
+ /**
+ * Acquires a connection from the pool. onClientConnectionAvailable will be invoked upon an available
+ * connection. Returns true if the connection request was successfully queued, returns false if it
+ * failed. On failure, onClientConnectionAvailable will not be invoked. After receiving a connection, it
+ * will automatically be cleaned up when your last reference to the shared_ptr is released.
+ *
+ * @param onClientConnectionAvailable callback to invoke when a connection becomes available or the
+ * acquisition attempt terminates
+ * @return true if the acquisition was successfully kicked off, false otherwise (no callback)
+ */
+ bool AcquireConnection(const OnClientConnectionAvailable &onClientConnectionAvailable) noexcept;
+
+ /**
+ * Starts shutdown of the connection manager. Returns a future to the connection manager's shutdown
+                 * process. If EnableBlockingShutdown was enabled on the connection manager options, calling get() on
+ * the returned future will block until the last connection is released. If the option is not set, get()
+ * will immediately return.
+ * @return future which will complete when shutdown has completed
+ */
+ std::future<void> InitiateShutdown() noexcept;
+
+ /**
+ * Factory function for connection managers
+ *
+ * @param connectionManagerOptions connection manager configuration data
+ * @param allocator allocator to use
+ * @return a new connection manager instance
+ */
+ static std::shared_ptr<HttpClientConnectionManager> NewClientConnectionManager(
+ const HttpClientConnectionManagerOptions &connectionManagerOptions,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ private:
+ HttpClientConnectionManager(
+ const HttpClientConnectionManagerOptions &options,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ Allocator *m_allocator;
+
+ aws_http_connection_manager *m_connectionManager;
+
+ HttpClientConnectionManagerOptions m_options;
+ std::promise<void> m_shutdownPromise;
+ std::atomic<bool> m_releaseInvoked;
+
+ static void s_onConnectionSetup(
+ aws_http_connection *connection,
+ int errorCode,
+ void *userData) noexcept;
+
+ static void s_shutdownCompleted(void *userData) noexcept;
+
+ friend class ManagedConnection;
+ };
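+
+            /*
+             * A minimal sketch of pooling connections through the manager, assuming
+             * `connectionOptions` is a fully populated HttpClientConnectionOptions for the target
+             * endpoint:
+             *
+             *   Aws::Crt::Http::HttpClientConnectionManagerOptions managerOptions;
+             *   managerOptions.ConnectionOptions = connectionOptions;
+             *   managerOptions.MaxConnections = 8;
+             *
+             *   auto manager = Aws::Crt::Http::HttpClientConnectionManager::NewClientConnectionManager(
+             *       managerOptions);
+             *
+             *   manager->AcquireConnection(
+             *       [](std::shared_ptr<Aws::Crt::Http::HttpClientConnection> connection, int errorCode) {
+             *           if (errorCode == 0 && connection)
+             *           {
+             *               // use the connection; it returns to the pool once the last copy of
+             *               // this shared_ptr is released
+             *           }
+             *       });
+             */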
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpProxyStrategy.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpProxyStrategy.h
new file mode 100644
index 0000000000..bf72490693
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpProxyStrategy.h
@@ -0,0 +1,116 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Types.h>
+
+#include <memory>
+
+struct aws_http_proxy_strategy;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+ enum class AwsHttpProxyConnectionType;
+
+ /**
+ * Configuration for a proxy strategy that performs basic authentication
+ */
+ struct AWS_CRT_CPP_API HttpProxyStrategyBasicAuthConfig
+ {
+ HttpProxyStrategyBasicAuthConfig();
+
+ /**
+ * Basic auth can be applied either to forwarding or tunneling proxy connections, but we need
+ * to know the type ahead of time
+ */
+ AwsHttpProxyConnectionType ConnectionType;
+
+ /**
+ * Username to apply to the basic authentication process
+ */
+ String Username;
+
+ /**
+ * Password to apply to the basic authentication process
+ */
+ String Password;
+ };
+
+ using KerberosGetTokenFunction = std::function<bool(String &)>;
+ using NtlmGetTokenFunction = std::function<bool(const String &, String &)>;
+
+ /**
+ * Configuration for a proxy strategy that attempts to use kerberos and ntlm, based on authentication
+ * failure feedback from the proxy's responses to CONNECT attempts. The kerberos/ntlm callbacks are
+             * currently synchronous but may be invoked from within event loop threads. This is not optimal,
+             * but transitioning to fully async has not been needed yet.
+             *
+             * The adaptive strategy will skip an authentication method whose callbacks are not supplied, so you
+ * can use this for purely kerberos or ntlm as well.
+ */
+ struct AWS_CRT_CPP_API HttpProxyStrategyAdaptiveConfig
+ {
+ HttpProxyStrategyAdaptiveConfig() : KerberosGetToken(), NtlmGetCredential(), NtlmGetToken() {}
+
+ /**
+ * User-supplied callback for fetching kerberos tokens
+ */
+ KerberosGetTokenFunction KerberosGetToken;
+
+ /**
+ * User-supplied callback for fetching an ntlm credential
+ */
+ KerberosGetTokenFunction NtlmGetCredential;
+
+ /**
+ * User-supplied callback for fetching an ntlm token
+ */
+ NtlmGetTokenFunction NtlmGetToken;
+ };
+
+ /**
+ * Wrapper class for a C-level proxy strategy - an object that allows the user to transform or modify
+ * the authentication logic when connecting to a proxy.
+ */
+ class AWS_CRT_CPP_API HttpProxyStrategy
+ {
+ public:
+ HttpProxyStrategy(struct aws_http_proxy_strategy *strategy);
+ virtual ~HttpProxyStrategy();
+
+ /// @private
+ struct aws_http_proxy_strategy *GetUnderlyingHandle() const noexcept { return m_strategy; }
+
+ /**
+ * Creates a proxy strategy that performs basic authentication
+ * @param config basic authentication configuration options
+ * @param allocator allocator to use
+ * @return a new basic authentication proxy strategy
+ */
+ static std::shared_ptr<HttpProxyStrategy> CreateBasicHttpProxyStrategy(
+ const HttpProxyStrategyBasicAuthConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Creates a proxy strategy that, depending on configuration, can attempt kerberos and/or ntlm
+ * authentication when connecting to the proxy
+ * @param config the adaptive strategy configuration options
+ * @param allocator allocator to use
+ * @return a new adaptive proxy strategy
+ */
+ static std::shared_ptr<HttpProxyStrategy> CreateAdaptiveHttpProxyStrategy(
+ const HttpProxyStrategyAdaptiveConfig &config,
+ Allocator *allocator = ApiAllocator());
+
+ protected:
+ struct aws_http_proxy_strategy *m_strategy;
+ };
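+
+            /*
+             * A minimal sketch of configuring basic proxy authentication and attaching it to the
+             * proxy options declared in HttpConnection.h (host, port and credentials are
+             * placeholders):
+             *
+             *   Aws::Crt::Http::HttpProxyStrategyBasicAuthConfig basicConfig;
+             *   basicConfig.ConnectionType = Aws::Crt::Http::AwsHttpProxyConnectionType::Tunneling;
+             *   basicConfig.Username = "proxy-user";
+             *   basicConfig.Password = "proxy-password";
+             *
+             *   Aws::Crt::Http::HttpClientConnectionProxyOptions proxyOptions;
+             *   proxyOptions.HostName = "proxy.example.com";
+             *   proxyOptions.Port = 3128;
+             *   proxyOptions.ProxyConnectionType = Aws::Crt::Http::AwsHttpProxyConnectionType::Tunneling;
+             *   proxyOptions.ProxyStrategy =
+             *       Aws::Crt::Http::HttpProxyStrategy::CreateBasicHttpProxyStrategy(basicConfig);
+             */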
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpRequestResponse.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpRequestResponse.h
new file mode 100644
index 0000000000..bf305e8b99
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/http/HttpRequestResponse.h
@@ -0,0 +1,160 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+#include <aws/crt/io/Stream.h>
+
+struct aws_http_header;
+struct aws_http_message;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt
+ {
+ class MqttConnection;
+ }
+ namespace Mqtt5
+ {
+ class Mqtt5Client;
+ }
+ namespace Http
+ {
+ using HttpHeader = aws_http_header;
+
+ /**
+ * Base class representing a mutable http request or response.
+ */
+ class AWS_CRT_CPP_API HttpMessage
+ {
+ public:
+ virtual ~HttpMessage();
+
+ HttpMessage(const HttpMessage &) = delete;
+ HttpMessage(HttpMessage &&) = delete;
+ HttpMessage &operator=(const HttpMessage &) = delete;
+ HttpMessage &operator=(HttpMessage &&) = delete;
+
+ /**
+ * Gets the input stream representing the message body
+ */
+ std::shared_ptr<Aws::Crt::Io::InputStream> GetBody() const noexcept;
+
+ /**
+ * Sets the input stream representing the message body
+ * @param body the input stream representing the message body
+ * @return success/failure
+ */
+ bool SetBody(const std::shared_ptr<Aws::Crt::Io::IStream> &body) noexcept;
+
+ /**
+ * Sets the input stream representing the message body
+ * @param body the input stream representing the message body
+ * @return success/failure
+ */
+ bool SetBody(const std::shared_ptr<Aws::Crt::Io::InputStream> &body) noexcept;
+
+ /**
+ * Gets the number of headers contained in this request
+ * @return the number of headers contained in this request
+ */
+ size_t GetHeaderCount() const noexcept;
+
+ /**
+ * Gets a particular header in the request
+ * @param index index of the header to fetch
+ * @return an option containing the requested header if the index is in bounds
+ */
+ Optional<HttpHeader> GetHeader(size_t index) const noexcept;
+
+ /**
+ * Adds a header to the request
+ * @param header header to add
+ * @return success/failure
+ */
+ bool AddHeader(const HttpHeader &header) noexcept;
+
+ /**
+ * Removes a header from the request
+ * @param index index of the header to remove
+ * @return success/failure
+ */
+ bool EraseHeader(size_t index) noexcept;
+
+ /**
+ * @return true/false if the underlying object is valid
+ */
+ operator bool() const noexcept { return m_message != nullptr; }
+
+ /// @private
+ struct aws_http_message *GetUnderlyingMessage() const noexcept { return m_message; }
+
+ protected:
+ HttpMessage(Allocator *allocator, struct aws_http_message *message) noexcept;
+
+ Allocator *m_allocator;
+ struct aws_http_message *m_message;
+ std::shared_ptr<Aws::Crt::Io::InputStream> m_bodyStream;
+ };
+
+ /**
+ * Class representing a mutable http request.
+ */
+ class AWS_CRT_CPP_API HttpRequest : public HttpMessage
+ {
+ friend class Mqtt::MqttConnection;
+ friend class Mqtt5::Mqtt5Client;
+
+ public:
+ HttpRequest(Allocator *allocator = ApiAllocator());
+
+ /**
+ * @return the value of the Http method associated with this request
+ */
+ Optional<ByteCursor> GetMethod() const noexcept;
+
+ /**
+ * Sets the value of the Http method associated with this request
+ */
+ bool SetMethod(ByteCursor method) noexcept;
+
+ /**
+ * @return the value of the URI-path associated with this request
+ */
+ Optional<ByteCursor> GetPath() const noexcept;
+
+ /**
+ * Sets the value of the URI-path associated with this request
+ */
+ bool SetPath(ByteCursor path) noexcept;
+
+ protected:
+ HttpRequest(Allocator *allocator, struct aws_http_message *message);
+ };
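+
+            /*
+             * A minimal sketch of building a request, assuming Aws::Crt::ByteCursorFromCString()
+             * from Types.h and the C helper aws_byte_cursor_from_c_str() are available, and that
+             * `bodyStream` is a std::shared_ptr<Aws::Crt::Io::IStream>; the path and header values
+             * are placeholders:
+             *
+             *   Aws::Crt::Http::HttpRequest request;
+             *   request.SetMethod(Aws::Crt::ByteCursorFromCString("GET"));
+             *   request.SetPath(Aws::Crt::ByteCursorFromCString("/index.html"));
+             *
+             *   Aws::Crt::Http::HttpHeader hostHeader = {};
+             *   hostHeader.name = aws_byte_cursor_from_c_str("host");
+             *   hostHeader.value = aws_byte_cursor_from_c_str("example.com");
+             *   request.AddHeader(hostHeader);
+             *
+             *   request.SetBody(bodyStream); // optional; only for requests that carry a body
+             */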
+
+ /**
+ * Class representing a mutable http response.
+ */
+ class AWS_CRT_CPP_API HttpResponse : public HttpMessage
+ {
+ public:
+ HttpResponse(Allocator *allocator = ApiAllocator());
+
+ /**
+ * @return the integral Http response code associated with this response
+ */
+ Optional<int> GetResponseCode() const noexcept;
+
+ /**
+ * Sets the integral Http response code associated with this response
+ */
+ bool SetResponseCode(int response) noexcept;
+ };
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Bootstrap.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Bootstrap.h
new file mode 100644
index 0000000000..e1175f83ab
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Bootstrap.h
@@ -0,0 +1,104 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+#include <aws/crt/io/EventLoopGroup.h>
+#include <aws/crt/io/HostResolver.h>
+
+#include <aws/io/channel_bootstrap.h>
+#include <aws/io/host_resolver.h>
+
+#include <future>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ using OnClientBootstrapShutdownComplete = std::function<void()>;
+
+ /**
+ * A ClientBootstrap handles creation and setup of socket connections
+ * to specific endpoints.
+ *
+ * Note that ClientBootstrap may not clean up all its behind-the-scenes
+ * resources immediately upon destruction. If you need to know when
+ * behind-the-scenes shutdown is complete, use SetShutdownCompleteCallback()
+ * or EnableBlockingShutdown() (only safe on main thread).
+ */
+ class AWS_CRT_CPP_API ClientBootstrap final
+ {
+ public:
+ /**
+ * @param elGroup: EventLoopGroup to use.
+ * @param resolver: DNS host resolver to use.
+ * @param allocator memory allocator to use
+ */
+ ClientBootstrap(
+ EventLoopGroup &elGroup,
+ HostResolver &resolver,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Uses the default EventLoopGroup and HostResolver.
+ * See Aws::Crt::ApiHandle::GetOrCreateStaticDefaultEventLoopGroup
+ * and Aws::Crt::ApiHandle::GetOrCreateStaticDefaultHostResolver
+ */
+ ClientBootstrap(Allocator *allocator = ApiAllocator()) noexcept;
+
+ ~ClientBootstrap();
+ ClientBootstrap(const ClientBootstrap &) = delete;
+ ClientBootstrap &operator=(const ClientBootstrap &) = delete;
+ ClientBootstrap(ClientBootstrap &&) = delete;
+ ClientBootstrap &operator=(ClientBootstrap &&) = delete;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept;
+
+ /**
+ * Set function to invoke when ClientBootstrap's behind-the-scenes
+ * resources finish shutting down. This function may be invoked
+ * on any thread. Shutdown begins when the ClientBootstrap's
+ * destructor runs.
+ */
+ void SetShutdownCompleteCallback(OnClientBootstrapShutdownComplete callback);
+
+ /**
+ * Force the ClientBootstrap's destructor to block until all
+ * behind-the-scenes resources finish shutting down.
+ *
+ * This isn't necessary during the normal flow of an application,
+ * but it is useful for scenarios, such as tests, that need deterministic
+ * shutdown ordering. Be aware, if you use this anywhere other
+ * than the main thread, YOU WILL MOST LIKELY CAUSE A DEADLOCK.
+ *
+ * Use SetShutdownCompleteCallback() for a thread-safe way to
+ * know when shutdown is complete.
+ */
+ void EnableBlockingShutdown() noexcept;
+
+ /// @private
+ aws_client_bootstrap *GetUnderlyingHandle() const noexcept;
+
+ private:
+ aws_client_bootstrap *m_bootstrap;
+ int m_lastError;
+ std::unique_ptr<class ClientBootstrapCallbackData> m_callbackData;
+ std::future<void> m_shutdownFuture;
+ bool m_enableBlockingShutdown;
+ };
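+
+            /*
+             * A minimal sketch of wiring an EventLoopGroup and DefaultHostResolver (both declared
+             * in this directory) into a ClientBootstrap; error handling is reduced to a validity
+             * check:
+             *
+             *   Aws::Crt::Io::EventLoopGroup eventLoopGroup;   // default: one event loop per processor
+             *   Aws::Crt::Io::DefaultHostResolver resolver(eventLoopGroup, 8, 30);
+             *   Aws::Crt::Io::ClientBootstrap bootstrap(eventLoopGroup, resolver);
+             *
+             *   if (!bootstrap)
+             *   {
+             *       // inspect bootstrap.LastError()
+             *   }
+             *   bootstrap.SetShutdownCompleteCallback([]() {
+             *       // behind-the-scenes resources are fully released at this point
+             *   });
+             */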
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/ChannelHandler.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/ChannelHandler.h
new file mode 100644
index 0000000000..7f5c8ecd99
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/ChannelHandler.h
@@ -0,0 +1,238 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+#include <aws/crt/Types.h>
+#include <aws/io/channel.h>
+
+#include <chrono>
+#include <cstddef>
+
+struct aws_array_list;
+struct aws_io_message;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ enum class ChannelDirection
+ {
+ Read,
+ Write,
+ };
+
+ enum class MessageType
+ {
+ ApplicationData,
+ };
+
+ enum class TaskStatus
+ {
+ RunReady,
+ Canceled,
+ };
+
+ /**
+ * Wrapper for aws-c-io channel handlers. The semantics are identical as the functions on
+ * aws_channel_handler.
+ *
+ * All virtual calls are made from the same thread (the channel's thread).
+ */
+ class AWS_CRT_CPP_API ChannelHandler
+ {
+ public:
+ virtual ~ChannelHandler() = default;
+
+ ChannelHandler(const ChannelHandler &) = delete;
+ ChannelHandler &operator=(const ChannelHandler &) = delete;
+
+ protected:
+ /**
+ * Called by the channel when a message is available for processing in the read direction. It is your
+ * responsibility to call aws_mem_release(message->allocator, message); on message when you are finished
+ * with it.
+ *
+ * Also keep in mind that your slot's internal window has been decremented. You'll want to call
+ * aws_channel_slot_increment_read_window() at some point in the future if you want to keep receiving
+ * data.
+ *
+ * @return AWS_OP_SUCCESS if the message is being processed.
+ * If the message cannot be processed raise an error and return AWS_OP_ERR
+ * and do NOT release the message, it will be released by the caller.
+ */
+ virtual int ProcessReadMessage(struct aws_io_message *message) = 0;
+
+ /**
+ * Called by the channel when a message is available for processing in the write direction. It is your
+ * responsibility to call aws_mem_release(message->allocator, message); on message when you are finished
+ * with it.
+ *
+ * @return AWS_OP_SUCCESS if the message is being processed.
+ * If the message cannot be processed raise an error and return AWS_OP_ERR
+ * and do NOT release the message, it will be released by the caller.
+ */
+ virtual int ProcessWriteMessage(struct aws_io_message *message) = 0;
+
+ /**
+ * Called by the channel when a downstream handler has issued a window increment. You'll want to update
+ * your internal state and likely propagate a window increment message of your own by calling
+ * IncrementUpstreamReadWindow()
+ *
+ * @return AWS_OP_SUCCESS if successful.
+ * Otherwise, raise an error and return AWS_OP_ERR.
+ */
+ virtual int IncrementReadWindow(size_t size) = 0;
+
+ /**
+ * The channel calls shutdown on all handlers twice, once to shut down reading, and once to shut down
+ * writing. Shutdown always begins with the left-most handler, and proceeds to the right with dir set to
+ * ChannelDirection::Read. Then shutdown is called on handlers from right to left with dir set to
+ * ChannelDirection::Write.
+ *
+ * The shutdown process does not need to complete immediately and may rely on scheduled tasks.
+ * The handler MUST call OnShutdownComplete() when it is finished,
+ * which propagates shutdown to the next handler. If 'freeScarceResourcesImmediately' is true,
+ * then resources vulnerable to denial-of-service attacks (such as sockets and file handles)
+                 * must be closed immediately before the shutdown process completes.
+ */
+ virtual void ProcessShutdown(
+ ChannelDirection dir,
+ int errorCode,
+ bool freeScarceResourcesImmediately) = 0;
+
+ /**
+ * Called by the channel when the handler is added to a slot, to get the initial window size.
+ */
+ virtual size_t InitialWindowSize() = 0;
+
+ /**
+ * Called by the channel anytime a handler is added or removed, provides a hint for downstream
+ * handlers to avoid message fragmentation due to message overhead.
+ */
+ virtual size_t MessageOverhead() = 0;
+
+ /**
+ * Directs the channel handler to reset all of the internal statistics it tracks about itself.
+ */
+ virtual void ResetStatistics(){};
+
+ /**
+ * Adds a pointer to the handler's internal statistics (if they exist) to a list of statistics
+ * structures associated with the channel's handler chain.
+ */
+ virtual void GatherStatistics(struct aws_array_list *) {}
+
+ public:
+ /// @private
+ struct aws_channel_handler *SeatForCInterop(const std::shared_ptr<ChannelHandler> &selfRef);
+
+ /**
+ * Return whether the caller is on the same thread as the handler's channel.
+ */
+ bool ChannelsThreadIsCallersThread() const;
+
+ /**
+ * Initiate a shutdown of the handler's channel.
+ *
+ * If the channel is already shutting down, this call has no effect.
+ */
+ void ShutDownChannel(int errorCode);
+
+ /**
+ * Schedule a task to run on the next "tick" of the event loop.
+ * If the channel is completely shut down, the task will run with the 'Canceled' status.
+ */
+ void ScheduleTask(std::function<void(TaskStatus)> &&task);
+
+ /**
+ * Schedule a task to run after a desired length of time has passed.
+ * The task will run with the 'Canceled' status if the channel completes shutdown
+ * before that length of time elapses.
+ */
+ void ScheduleTask(std::function<void(TaskStatus)> &&task, std::chrono::nanoseconds run_in);
+
+ protected:
+ ChannelHandler(Allocator *allocator = ApiAllocator());
+
+ /**
+ * Acquire an aws_io_message from the channel's pool.
+ */
+ struct aws_io_message *AcquireMessageFromPool(MessageType messageType, size_t sizeHint);
+
+ /**
+ * Convenience function that invokes AcquireMessageFromPool(),
+ * asking for the largest reasonable DATA message that can be sent in the write direction,
+ * with upstream overhead accounted for.
+ */
+ struct aws_io_message *AcquireMaxSizeMessageForWrite();
+
+ /**
+ * Send a message in the read or write direction.
+ * Returns true if message successfully sent.
+ * If false is returned, you must release the message yourself.
+ */
+ bool SendMessage(struct aws_io_message *message, ChannelDirection direction);
+
+ /**
+ * Issue a window update notification upstream.
+ * Returns true if successful.
+ */
+ bool IncrementUpstreamReadWindow(size_t windowUpdateSize);
+
+ /**
+ * Must be called by a handler once they have finished their shutdown in the 'dir' direction.
+ * Propagates the shutdown process to the next handler in the channel.
+ */
+ void OnShutdownComplete(ChannelDirection direction, int errorCode, bool freeScarceResourcesImmediately);
+
+ /**
+ * Fetches the downstream read window.
+ * This gives you the information necessary to honor the read window.
+ * If you call send_message() and it exceeds this window, the message will be rejected.
+ */
+ size_t DownstreamReadWindow() const;
+
+ /**
+ * Fetches the current overhead of upstream handlers.
+ * This provides a hint to avoid fragmentation if you care.
+ */
+ size_t UpstreamMessageOverhead() const;
+
+ struct aws_channel_slot *GetSlot() const;
+
+ struct aws_channel_handler m_handler;
+ Allocator *m_allocator;
+
+ private:
+ std::shared_ptr<ChannelHandler> m_selfReference;
+ static struct aws_channel_handler_vtable s_vtable;
+
+ static void s_Destroy(struct aws_channel_handler *handler);
+ static int s_ProcessReadMessage(
+ struct aws_channel_handler *,
+ struct aws_channel_slot *,
+ struct aws_io_message *);
+ static int s_ProcessWriteMessage(
+ struct aws_channel_handler *,
+ struct aws_channel_slot *,
+ struct aws_io_message *);
+ static int s_IncrementReadWindow(struct aws_channel_handler *, struct aws_channel_slot *, size_t size);
+ static int s_ProcessShutdown(
+ struct aws_channel_handler *,
+ struct aws_channel_slot *,
+ enum aws_channel_direction,
+ int errorCode,
+ bool freeScarceResourcesImmediately);
+ static size_t s_InitialWindowSize(struct aws_channel_handler *);
+ static size_t s_MessageOverhead(struct aws_channel_handler *);
+ static void s_ResetStatistics(struct aws_channel_handler *);
+ static void s_GatherStatistics(struct aws_channel_handler *, struct aws_array_list *statsList);
+ };
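+
+            /*
+             * A minimal sketch of a pass-through handler: it forwards every message in the same
+             * direction and mirrors read-window increments upstream. Real processing, error
+             * handling and statistics are elided:
+             *
+             *   class PassThroughHandler final : public Aws::Crt::Io::ChannelHandler
+             *   {
+             *     protected:
+             *       int ProcessReadMessage(struct aws_io_message *message) override
+             *       {
+             *           return SendMessage(message, Aws::Crt::Io::ChannelDirection::Read)
+             *                      ? AWS_OP_SUCCESS : AWS_OP_ERR;
+             *       }
+             *       int ProcessWriteMessage(struct aws_io_message *message) override
+             *       {
+             *           return SendMessage(message, Aws::Crt::Io::ChannelDirection::Write)
+             *                      ? AWS_OP_SUCCESS : AWS_OP_ERR;
+             *       }
+             *       int IncrementReadWindow(size_t size) override
+             *       {
+             *           return IncrementUpstreamReadWindow(size) ? AWS_OP_SUCCESS : AWS_OP_ERR;
+             *       }
+             *       void ProcessShutdown(Aws::Crt::Io::ChannelDirection dir, int errorCode, bool freeScarce) override
+             *       {
+             *           OnShutdownComplete(dir, errorCode, freeScarce); // required to keep shutdown moving
+             *       }
+             *       size_t InitialWindowSize() override { return SIZE_MAX; } // SIZE_MAX from <cstdint>
+             *       size_t MessageOverhead() override { return 0; }
+             *   };
+             */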
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/EventLoopGroup.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/EventLoopGroup.h
new file mode 100644
index 0000000000..0ef904f285
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/EventLoopGroup.h
@@ -0,0 +1,74 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Types.h>
+
+#include <aws/io/event_loop.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ /**
+ * A collection of event loops.
+ *
+ * An event-loop is a thread for doing async work, such as I/O. Classes that need to do async work will ask
+ * the EventLoopGroup for an event-loop to use.
+ *
+             * The number of threads used depends on your use-case. If you
+             * have a maximum of less than a few hundred connections, 1 thread is the ideal
+             * threadCount.
+ *
+ * There should only be one instance of an EventLoopGroup per application and it
+ * should be passed to all network clients. One exception to this is if you
+ * want to peg different types of IO to different threads. In that case, you
+ * may want to have one event loop group dedicated to one IO activity and another
+ * dedicated to another type.
+ */
+ class AWS_CRT_CPP_API EventLoopGroup final
+ {
+ public:
+ /**
+ * @param threadCount: The number of event-loops to create, default will be 0, which will create one for
+ * each processor on the machine.
+ * @param allocator memory allocator to use.
+ */
+ EventLoopGroup(uint16_t threadCount = 0, Allocator *allocator = ApiAllocator()) noexcept;
+ /**
+ * @param cpuGroup: The CPU group (e.g. NUMA nodes) that all hardware threads are pinned to.
+ * @param threadCount: The number of event-loops to create, default will be 0, which will create one for
+ * each processor on the machine.
+ * @param allocator memory allocator to use.
+ */
+ EventLoopGroup(uint16_t cpuGroup, uint16_t threadCount, Allocator *allocator = ApiAllocator()) noexcept;
+ ~EventLoopGroup();
+ EventLoopGroup(const EventLoopGroup &) = delete;
+ EventLoopGroup(EventLoopGroup &&) noexcept;
+ EventLoopGroup &operator=(const EventLoopGroup &) = delete;
+ EventLoopGroup &operator=(EventLoopGroup &&) noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const;
+
+ /// @private
+ aws_event_loop_group *GetUnderlyingHandle() noexcept;
+
+ private:
+ aws_event_loop_group *m_eventLoopGroup;
+ int m_lastError;
+ };
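+
+            /*
+             * A minimal sketch: a default group with one event loop per processor, and a group
+             * whose 4 threads are pinned to CPU group 0 (for example, the first NUMA node):
+             *
+             *   Aws::Crt::Io::EventLoopGroup defaultGroup;       // threadCount defaults to 0
+             *   Aws::Crt::Io::EventLoopGroup pinnedGroup(0, 4);  // cpuGroup 0, 4 event loops
+             *   if (!pinnedGroup)
+             *   {
+             *       // inspect pinnedGroup.LastError()
+             *   }
+             */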
+ } // namespace Io
+
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/HostResolver.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/HostResolver.h
new file mode 100644
index 0000000000..dac0e60237
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/HostResolver.h
@@ -0,0 +1,123 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Types.h>
+
+#include <aws/io/host_resolver.h>
+
+#include <functional>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ class EventLoopGroup;
+ class HostResolver;
+
+ using HostAddress = aws_host_address;
+
+ /**
+             * Invoked upon resolution of an address. You do not own the memory pointed to in addresses; if you persist
+ * the data, copy it first. If errorCode is AWS_ERROR_SUCCESS, the operation succeeded. Otherwise, the
+ * operation failed.
+ */
+ using OnHostResolved =
+ std::function<void(HostResolver &resolver, const Vector<HostAddress> &addresses, int errorCode)>;
+
+ /**
+ * Simple interface for DNS name lookup implementations
+ */
+ class AWS_CRT_CPP_API HostResolver
+ {
+ public:
+ virtual ~HostResolver();
+ virtual bool ResolveHost(const String &host, const OnHostResolved &onResolved) noexcept = 0;
+
+ /// @private
+ virtual aws_host_resolver *GetUnderlyingHandle() noexcept = 0;
+ /// @private
+ virtual aws_host_resolution_config *GetConfig() noexcept = 0;
+ };
+
+ /**
+ * A wrapper around the CRT default host resolution system that uses getaddrinfo() farmed off
+ * to separate threads in order to resolve names.
+ */
+ class AWS_CRT_CPP_API DefaultHostResolver final : public HostResolver
+ {
+ public:
+ /**
+ * Resolves DNS addresses.
+ *
+ * @param elGroup: EventLoopGroup to use.
+ * @param maxHosts: the number of unique hosts to maintain in the cache.
+ * @param maxTTL: how long to keep an address in the cache before evicting it.
+ * @param allocator memory allocator to use.
+ */
+ DefaultHostResolver(
+ EventLoopGroup &elGroup,
+ size_t maxHosts,
+ size_t maxTTL,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Resolves DNS addresses using the default EventLoopGroup.
+ *
+ * For more information on the default EventLoopGroup see
+ * Aws::Crt::ApiHandle::GetOrCreateStaticDefaultEventLoopGroup
+ *
+ * @param maxHosts: the number of unique hosts to maintain in the cache.
+ * @param maxTTL: how long to keep an address in the cache before evicting it.
+ * @param allocator memory allocator to use.
+ */
+ DefaultHostResolver(size_t maxHosts, size_t maxTTL, Allocator *allocator = ApiAllocator()) noexcept;
+
+ ~DefaultHostResolver();
+ DefaultHostResolver(const DefaultHostResolver &) = delete;
+ DefaultHostResolver &operator=(const DefaultHostResolver &) = delete;
+ DefaultHostResolver(DefaultHostResolver &&) = delete;
+ DefaultHostResolver &operator=(DefaultHostResolver &&) = delete;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept { return m_initialized; }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return aws_last_error(); }
+
+ /**
+ * Kicks off an asynchronous resolution of host. onResolved will be invoked upon completion of the
+ * resolution.
+                 * @return false if the resolution was not attempted; true if onResolved will be
+                 * called with the result.
+ */
+ bool ResolveHost(const String &host, const OnHostResolved &onResolved) noexcept override;
+
+ /// @private
+ aws_host_resolver *GetUnderlyingHandle() noexcept override { return m_resolver; }
+ /// @private
+ aws_host_resolution_config *GetConfig() noexcept override { return &m_config; }
+
+ private:
+ aws_host_resolver *m_resolver;
+ aws_host_resolution_config m_config;
+ Allocator *m_allocator;
+ bool m_initialized;
+
+ static void s_onHostResolved(
+ struct aws_host_resolver *resolver,
+ const struct aws_string *host_name,
+ int err_code,
+ const struct aws_array_list *host_addresses,
+ void *user_data);
+ };
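+
+            /*
+             * A minimal sketch of an asynchronous lookup, assuming `eventLoopGroup` is an
+             * Aws::Crt::Io::EventLoopGroup that outlives the resolver (maxTTL is assumed to be in
+             * seconds):
+             *
+             *   Aws::Crt::Io::DefaultHostResolver resolver(eventLoopGroup, 8, 30);
+             *
+             *   resolver.ResolveHost(
+             *       "example.com",
+             *       [](Aws::Crt::Io::HostResolver &,
+             *          const Aws::Crt::Vector<Aws::Crt::Io::HostAddress> &addresses,
+             *          int errorCode) {
+             *           if (errorCode == AWS_ERROR_SUCCESS)
+             *           {
+             *               // copy anything you need out of `addresses` before returning
+             *           }
+             *       });
+             */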
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Pkcs11.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Pkcs11.h
new file mode 100644
index 0000000000..7f10baad83
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Pkcs11.h
@@ -0,0 +1,116 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Types.h>
+
+struct aws_pkcs11_lib;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ /**
+ * Handle to a loaded PKCS#11 library.
+ *
+ * For most use cases, a single instance of Pkcs11Lib should be used for the
+ * lifetime of your application.
+ */
+ class AWS_CRT_CPP_API Pkcs11Lib
+ {
+ public:
+ /**
+ * Controls how Pkcs11Lib calls `C_Initialize()` and `C_Finalize()`
+ * on the PKCS#11 library.
+ */
+ enum class InitializeFinalizeBehavior
+ {
+ /**
+ * Default behavior that accommodates most use cases.
+ *
+ * `C_Initialize()` is called on creation, and "already-initialized"
+ * errors are ignored. `C_Finalize()` is never called, just in case
+ * another part of your application is still using the PKCS#11 library.
+ */
+ Default,
+
+ /**
+ * Skip calling `C_Initialize()` and `C_Finalize()`.
+ *
+ * Use this if your application has already initialized the PKCS#11 library, and
+ * you do not want `C_Initialize()` called again.
+ */
+ Omit,
+
+ /**
+ * `C_Initialize()` is called on creation and `C_Finalize()` is
+ * called on cleanup.
+ *
+                     * If `C_Initialize()` reports that it's already initialized, this is
+ * treated as an error. Use this if you need perfect cleanup (ex: running
+ * valgrind with --leak-check).
+ */
+ Strict,
+ };
+
+ /**
+ * Load and initialize a PKCS#11 library.
+ *
+ * `C_Initialize()` and `C_Finalize()` are called on the PKCS#11
+ * library in the InitializeFinalizeBehavior::Default way.
+ *
+ * @param filename Name or path of PKCS#11 library file to load (UTF-8).
+ * Pass an empty string if your application already has PKCS#11 symbols linked in.
+ *
+ * @param allocator Memory allocator to use.
+ *
+ * @return If successful a `shared_ptr` containing the Pkcs11Lib is returned.
+ * If unsuccessful the `shared_ptr` will be empty, and Aws::Crt::LastError()
+ * will contain the error that occurred.
+ */
+ static std::shared_ptr<Pkcs11Lib> Create(const String &filename, Allocator *allocator = ApiAllocator());
+
+ /**
+ * Load a PKCS#11 library, specifying how `C_Initialize()` and `C_Finalize()` will be called.
+ *
+ * @param filename Name or path of PKCS#11 library file to load (UTF-8).
+ * Pass an empty string if your application already has PKCS#11 symbols linked in.
+ *
+ * @param initializeFinalizeBehavior Specifies how `C_Initialize()` and
+ * `C_Finalize()` will be called on the
+ * PKCS#11 library.
+ * @param allocator Memory allocator to use.
+ *
+ * @return If successful a `shared_ptr` containing the Pkcs11Lib is returned.
+ * If unsuccessful the `shared_ptr` will be empty, and Aws::Crt::LastError()
+ * will contain the error that occurred.
+ */
+ static std::shared_ptr<Pkcs11Lib> Create(
+ const String &filename,
+ InitializeFinalizeBehavior initializeFinalizeBehavior,
+ Allocator *allocator = ApiAllocator());
+
+ ~Pkcs11Lib();
+
+ /// @private
+ aws_pkcs11_lib *GetNativeHandle() { return impl; }
+
+ /// @private Use Create(...), this constructor is for internal use only
+ explicit Pkcs11Lib(aws_pkcs11_lib &impl);
+
+ private:
+ // no copy/move
+ Pkcs11Lib(const Pkcs11Lib &) = delete;
+ Pkcs11Lib(Pkcs11Lib &&) = delete;
+ Pkcs11Lib &operator=(const Pkcs11Lib &) = delete;
+ Pkcs11Lib &operator=(Pkcs11Lib &&) = delete;
+
+ aws_pkcs11_lib *impl = nullptr;
+ };
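+
+            /*
+             * A minimal sketch of loading a PKCS#11 module (the path is a placeholder) and checking
+             * the result:
+             *
+             *   auto pkcs11Lib = Aws::Crt::Io::Pkcs11Lib::Create("/usr/lib/softhsm/libsofthsm2.so");
+             *   if (!pkcs11Lib)
+             *   {
+             *       // Aws::Crt::LastError() holds the reason the library failed to load
+             *   }
+             *
+             *   // If another component already initialized the module, skip C_Initialize()/C_Finalize():
+             *   auto preInitialized = Aws::Crt::Io::Pkcs11Lib::Create(
+             *       "/usr/lib/softhsm/libsofthsm2.so",
+             *       Aws::Crt::Io::Pkcs11Lib::InitializeFinalizeBehavior::Omit);
+             */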
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/SocketOptions.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/SocketOptions.h
new file mode 100644
index 0000000000..9f31250dde
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/SocketOptions.h
@@ -0,0 +1,157 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+
+#include <aws/io/socket.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ enum class SocketType
+ {
+ /**
+ * A streaming socket sends reliable messages over a two-way connection.
+ * This means TCP when used with IPV4/6, and Unix domain sockets, when used with
+ * AWS_SOCKET_LOCAL
+ */
+ Stream = AWS_SOCKET_STREAM,
+
+ /**
+ * A datagram socket is connectionless and sends unreliable messages.
+ * This means UDP when used with IPV4/6.
+ * LOCAL sockets are not compatible with DGRAM.
+ */
+ Dgram = AWS_SOCKET_DGRAM,
+ };
+
+ enum class SocketDomain
+ {
+ IPv4 = AWS_SOCKET_IPV4,
+ IPv6 = AWS_SOCKET_IPV6,
+ /**
+ * Unix domain sockets (or at least something like them)
+ */
+ Local = AWS_SOCKET_LOCAL,
+ };
+
+ /**
+ * Socket configuration options
+ */
+ class AWS_CRT_CPP_API SocketOptions
+ {
+ public:
+ SocketOptions();
+ SocketOptions(const SocketOptions &rhs) = default;
+ SocketOptions(SocketOptions &&rhs) = default;
+
+ SocketOptions &operator=(const SocketOptions &rhs) = default;
+ SocketOptions &operator=(SocketOptions &&rhs) = default;
+
+ /**
+ * Set socket type
+ * @param type: SocketType object.
+ */
+ void SetSocketType(SocketType type) { options.type = (enum aws_socket_type)type; }
+
+ /**
+ * @return the type of socket to use
+ */
+ SocketType GetSocketType() const { return (SocketType)options.type; }
+
+ /**
+ * Set socket domain
+ * @param domain: SocketDomain object.
+ */
+ void SetSocketDomain(SocketDomain domain) { options.domain = (enum aws_socket_domain)domain; }
+
+ /**
+ * @return the domain type to use with the socket
+ */
+ SocketDomain GetSocketDomain() const { return (SocketDomain)options.domain; }
+
+ /**
+ * Set connection timeout
+ * @param timeout: connection timeout in milliseconds.
+ */
+ void SetConnectTimeoutMs(uint32_t timeout) { options.connect_timeout_ms = timeout; }
+
+ /**
+ * @return the connection timeout in milliseconds to use with the socket
+ */
+ uint32_t GetConnectTimeoutMs() const { return options.connect_timeout_ms; }
+
+ /**
+ * Set keep alive interval seconds.
+ * @param keepAliveInterval: Duration, in seconds, between keepalive probes. If 0, then a default value
+ * is used.
+ */
+ void SetKeepAliveIntervalSec(uint16_t keepAliveInterval)
+ {
+ options.keep_alive_interval_sec = keepAliveInterval;
+ }
+
+ /**
+ * @return the (tcp) keep alive interval to use with the socket, in seconds
+ */
+ uint16_t GetKeepAliveIntervalSec() const { return options.keep_alive_interval_sec; }
+
+ /**
+ * Set keep alive time out seconds.
+ * @param keepAliveTimeout: interval, in seconds, that a connection must be idle for before keep alive
+ * probes begin to get sent out
+ */
+ void SetKeepAliveTimeoutSec(uint16_t keepAliveTimeout)
+ {
+ options.keep_alive_timeout_sec = keepAliveTimeout;
+ }
+
+ /**
+ * @return interval, in seconds, that a connection must be idle for before keep alive probes begin
+ * to get sent out
+ */
+ uint16_t GetKeepAliveTimeoutSec() const { return options.keep_alive_timeout_sec; }
+
+ /**
+ * Set keep alive max failed probes.
+ * @param maxProbes: The number of keepalive probes allowed to fail before a connection is considered
+ * lost.
+ */
+ void SetKeepAliveMaxFailedProbes(uint16_t maxProbes)
+ {
+ options.keep_alive_max_failed_probes = maxProbes;
+ }
+
+ /**
+ * @return number of keepalive probes allowed to fail before a connection is considered lost.
+ */
+ uint16_t GetKeepAliveMaxFailedProbes() const { return options.keep_alive_max_failed_probes; }
+
+ /**
+ * Set keep alive option.
+ * @param keepAlive: True, periodically transmit keepalive messages for detecting a disconnected peer.
+ */
+ void SetKeepAlive(bool keepAlive) { options.keepalive = keepAlive; }
+
+ /**
+ * @return true/false if the socket implementation should use TCP keepalive
+ */
+ bool GetKeepAlive() const { return options.keepalive; }
+
+ /// @private
+ aws_socket_options &GetImpl() { return options; }
+ /// @private
+ const aws_socket_options &GetImpl() const { return options; }
+
+ private:
+ aws_socket_options options;
+ };
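+
+            /*
+             * A minimal sketch of a typical TCP configuration with keepalive probes enabled (the
+             * timeouts are placeholders):
+             *
+             *   Aws::Crt::Io::SocketOptions socketOptions;
+             *   socketOptions.SetSocketType(Aws::Crt::Io::SocketType::Stream);
+             *   socketOptions.SetSocketDomain(Aws::Crt::Io::SocketDomain::IPv4);
+             *   socketOptions.SetConnectTimeoutMs(3000);
+             *   socketOptions.SetKeepAlive(true);
+             *   socketOptions.SetKeepAliveTimeoutSec(30);       // idle time before probing starts
+             *   socketOptions.SetKeepAliveIntervalSec(15);      // time between probes
+             *   socketOptions.SetKeepAliveMaxFailedProbes(3);   // failures before the peer is considered lost
+             */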
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Stream.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Stream.h
new file mode 100644
index 0000000000..cf6c49fa3e
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Stream.h
@@ -0,0 +1,173 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Exports.h>
+#include <aws/crt/RefCounted.h>
+#include <aws/crt/Types.h>
+#include <aws/io/stream.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ using StreamStatus = aws_stream_status;
+
+ /**
+ * @deprecated Use int64_t instead for offsets in public APIs.
+ */
+ using OffsetType = aws_off_t;
+
+ /**
+ * Controls the direction to seek from
+ */
+ enum class StreamSeekBasis
+ {
+ Begin = AWS_SSB_BEGIN,
+ End = AWS_SSB_END,
+ };
+
+ /***
+ * Interface for building an Object oriented stream that will be honored by the CRT's low-level
+ * aws_input_stream interface. To use, create a subclass of InputStream and define the abstract
+ * functions.
+ */
+ class AWS_CRT_CPP_API InputStream : public std::enable_shared_from_this<InputStream>,
+ public RefCounted<InputStream>
+ {
+ public:
+ virtual ~InputStream();
+
+ InputStream(const InputStream &) = delete;
+ InputStream &operator=(const InputStream &) = delete;
+ InputStream(InputStream &&) = delete;
+ InputStream &operator=(InputStream &&) = delete;
+
+ explicit operator bool() const noexcept { return IsValid(); }
+
+ /**
+ * @return true/false if this object is in a valid state
+ */
+ virtual bool IsValid() const noexcept = 0;
+
+ /// @private
+ aws_input_stream *GetUnderlyingStream() noexcept { return &m_underlying_stream; }
+
+ /**
+ * Reads data from the stream into a buffer
+ * @param dest buffer to add the read data into
+ * @return success/failure
+ */
+ bool Read(ByteBuf &dest) { return aws_input_stream_read(&m_underlying_stream, &dest) == 0; }
+
+ /**
+ * Moves the head of the stream to a new location
+ * @param offset how far to move, in bytes
+ * @param seekBasis what direction to move the head of stream
+ * @return success/failure
+ */
+ bool Seek(int64_t offset, StreamSeekBasis seekBasis)
+ {
+ return aws_input_stream_seek(&m_underlying_stream, offset, (aws_stream_seek_basis)seekBasis) == 0;
+ }
+
+ /**
+ * Gets the stream's current status
+ * @param status output parameter for the stream's status
+ * @return success/failure
+ */
+ bool GetStatus(StreamStatus &status)
+ {
+ return aws_input_stream_get_status(&m_underlying_stream, &status) == 0;
+ }
+
+ /**
+ * Gets the stream's length. Some streams may not be able to answer this.
+ * @param length output parameter for the length of the stream
+ * @return success/failure
+ */
+ bool GetLength(int64_t &length)
+ {
+ return aws_input_stream_get_length(&m_underlying_stream, &length) == 0;
+ }
+
+ protected:
+ Allocator *m_allocator;
+ aws_input_stream m_underlying_stream;
+
+ InputStream(Aws::Crt::Allocator *allocator = ApiAllocator());
+
+ /***
+ * Read up to buffer::capacity - buffer::len bytes into buffer::buffer.
+ * Increment buffer::len by the amount you read in.
+ *
+ * @return true if nothing went wrong.
+ * Return true even if you read 0 bytes because the end-of-file has been reached.
+ * Return true even if you read 0 bytes because data is not currently available.
+ *
+ * Return false if an actual failure condition occurs;
+ * in that case you SHOULD also raise an error via aws_raise_error().
+ */
+ virtual bool ReadImpl(ByteBuf &buffer) noexcept = 0;
+
+ /**
+ * @return the current status of the stream.
+ */
+ virtual StreamStatus GetStatusImpl() const noexcept = 0;
+
+ /**
+ * @return the total length of the available data for the stream, or -1 if not available.
+ */
+ virtual int64_t GetLengthImpl() const noexcept = 0;
+
+ /**
+ * Seeks the stream to offset bytes, relative to seekBasis.
+ *
+ * It is expected that, if seeking to the beginning of a stream,
+ * all errors are cleared if possible.
+ *
+ * @return true on success, false otherwise. You SHOULD raise an error via aws_raise_error()
+ * if a failure occurs.
+ */
+ virtual bool SeekImpl(int64_t offset, StreamSeekBasis seekBasis) noexcept = 0;
+
+ private:
+ static int s_Seek(aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis);
+ static int s_Read(aws_input_stream *stream, aws_byte_buf *dest);
+ static int s_GetStatus(aws_input_stream *stream, aws_stream_status *status);
+ static int s_GetLength(struct aws_input_stream *stream, int64_t *out_length);
+ static void s_Acquire(aws_input_stream *stream);
+ static void s_Release(aws_input_stream *stream);
+
+ static aws_input_stream_vtable s_vtable;
+ };
+
+ /***
+ * Implementation of Aws::Crt::Io::InputStream that wraps a std::istream (Aws::Crt::Io::IStream).
+ */
+ class AWS_CRT_CPP_API StdIOStreamInputStream : public InputStream
+ {
+ public:
+ StdIOStreamInputStream(
+ std::shared_ptr<Aws::Crt::Io::IStream> stream,
+ Aws::Crt::Allocator *allocator = ApiAllocator()) noexcept;
+
+ bool IsValid() const noexcept override;
+
+ protected:
+ bool ReadImpl(ByteBuf &buffer) noexcept override;
+ StreamStatus GetStatusImpl() const noexcept override;
+ int64_t GetLengthImpl() const noexcept override;
+ bool SeekImpl(OffsetType offsetType, StreamSeekBasis seekBasis) noexcept override;
+
+ private:
+ std::shared_ptr<Aws::Crt::Io::IStream> m_stream;
+ };
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
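A minimal sketch of using the StdIOStreamInputStream declared above, assuming Aws::Crt::Io::IStream aliases std::basic_istream<char> (per aws/crt/Types.h) so a std::istringstream can be passed directly:

    // Hypothetical sketch: wrap an in-memory std::istream and read it through the
    // CRT-facing InputStream API shown above.
    #include <aws/crt/io/Stream.h>
    #include <memory>
    #include <sstream>

    bool ReadSome(Aws::Crt::ByteBuf &dest)
    {
        auto source = std::make_shared<std::istringstream>("hello, crt");
        Aws::Crt::Io::StdIOStreamInputStream stream(source);
        if (!stream.IsValid())
        {
            return false;
        }
        // Read() appends up to (dest.capacity - dest.len) bytes into dest.
        return stream.Read(dest);
    }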
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/TlsOptions.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/TlsOptions.h
new file mode 100644
index 0000000000..afb543a92a
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/TlsOptions.h
@@ -0,0 +1,453 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Types.h>
+#include <aws/crt/io/ChannelHandler.h>
+#include <aws/io/tls_channel_handler.h>
+
+#include <functional>
+#include <memory>
+
+struct aws_tls_ctx_options;
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ class Pkcs11Lib;
+ class TlsContextPkcs11Options;
+
+ enum class TlsMode
+ {
+ CLIENT,
+ SERVER,
+ };
+
+ /**
+ * Top-level TLS configuration options. These options are used to create a context from which
+ * per-connection TLS contexts can be created.
+ */
+ class AWS_CRT_CPP_API TlsContextOptions
+ {
+ friend class TlsContext;
+
+ public:
+ TlsContextOptions() noexcept;
+ virtual ~TlsContextOptions();
+ TlsContextOptions(const TlsContextOptions &) noexcept = delete;
+ TlsContextOptions &operator=(const TlsContextOptions &) noexcept = delete;
+ TlsContextOptions(TlsContextOptions &&) noexcept;
+ TlsContextOptions &operator=(TlsContextOptions &&) noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return m_isInit; }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept;
+
+ /**
+ * Initializes TlsContextOptions with secure by default options, with
+ * no client certificates.
+ */
+ static TlsContextOptions InitDefaultClient(Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Initializes TlsContextOptions for mutual TLS (mTLS), with
+ * client certificate and private key. These are paths to a file on disk. These files
+ * must be in the PEM format.
+ *
+ * NOTE: This is unsupported on iOS.
+ *
+ * @param cert_path: Path to certificate file.
+ * @param pkey_path: Path to private key file.
+ * @param allocator Memory allocator to use.
+ */
+ static TlsContextOptions InitClientWithMtls(
+ const char *cert_path,
+ const char *pkey_path,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Initializes TlsContextOptions for mutual TLS (mTLS), with
+ * client certificate and private key. These are in memory buffers. These buffers
+ * must be in the PEM format.
+ *
+ * NOTE: This is unsupported on iOS.
+ *
+ * @param cert: Certificate contents in memory.
+ * @param pkey: Private key contents in memory.
+ * @param allocator Memory allocator to use.
+ */
+ static TlsContextOptions InitClientWithMtls(
+ const ByteCursor &cert,
+ const ByteCursor &pkey,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Initializes TlsContextOptions for mutual TLS (mTLS),
+ * using a PKCS#11 library for private key operations.
+ *
+ * NOTE: This only works on Unix devices.
+ *
+ * @param pkcs11Options PKCS#11 options
+ * @param allocator Memory allocator to use.
+ */
+ static TlsContextOptions InitClientWithMtlsPkcs11(
+ const TlsContextPkcs11Options &pkcs11Options,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Initializes TlsContextOptions for mutual TLS (mTLS), with
+ * client certificate and private key in the PKCS#12 format.
+ *
+ * NOTE: This only works on Apple devices.
+ *
+ * @param pkcs12_path: Path to PKCS #12 file. The file is loaded from disk and stored internally. It
+ * must remain in memory for the lifetime of the returned object.
+ * @param pkcs12_pwd: Password to PKCS #12 file. It must remain in memory for the lifetime of the
+ * returned object.
+ * @param allocator Memory allocator to use.
+ */
+ static TlsContextOptions InitClientWithMtlsPkcs12(
+ const char *pkcs12_path,
+ const char *pkcs12_pwd,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * @deprecated Custom keychain management is deprecated.
+ *
+ * By default the certificates and private keys are stored in the default keychain
+ * of the account of the process. If you instead wish to provide your own keychain
+ * for storing them, this makes the TlsContext use that keychain instead.
+ * NOTE: The password of your keychain must be empty.
+ *
+ * NOTE: This only works on macOS.
+ */
+ bool SetKeychainPath(ByteCursor &keychain_path) noexcept;
+
+ /**
+ * Initializes TlsContextOptions for mutual TLS (mTLS),
+ * using a client certificate in a Windows certificate store.
+ *
+ * NOTE: This only works on Windows.
+ *
+ * @param windowsCertStorePath Path to certificate in a Windows certificate store.
+ * The path must use backslashes and end with the certificate's thumbprint.
+ * Example: `CurrentUser\MY\A11F8A9B5DF5B98BA3508FBCA575D09570E0D2C6`
+ * @param allocator The memory allocator to use.
+ */
+ static TlsContextOptions InitClientWithMtlsSystemPath(
+ const char *windowsCertStorePath,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * @return true if ALPN is supported by the underlying security provider, false
+ * otherwise.
+ */
+ static bool IsAlpnSupported() noexcept;
+
+ /**
+ * Sets the list of ALPN protocols.
+ * @param alpnList: List of protocol names, delimited by ';'. This string must remain in memory for the
+ * lifetime of this object.
+ */
+ bool SetAlpnList(const char *alpnList) noexcept;
+
+ /**
+ * In client mode, this turns off X.509 validation. Don't do this unless you're testing.
+ * It's much better to just override the default trust store and pass the self-signed
+ * certificate as the caFile argument.
+ *
+ * In server mode, this defaults to false. If you want to support mutual TLS from the server,
+ * you'll want to set this to true.
+ */
+ void SetVerifyPeer(bool verifyPeer) noexcept;
+
+ /**
+ * Sets the minimum TLS version allowed.
+ * @param minimumTlsVersion: The minimum TLS version.
+ */
+ void SetMinimumTlsVersion(aws_tls_versions minimumTlsVersion);
+
+ /**
+ * Sets the preferred TLS Cipher List
+ * @param cipher_pref: The preferred TLS cipher list.
+ */
+ void SetTlsCipherPreference(aws_tls_cipher_pref cipher_pref);
+
+ /**
+ * Overrides the default system trust store.
+ * @param caPath: Path to a directory containing trusted certificates, which will override the
+ * default trust store. Only useful on Unix-style systems where all anchors are stored in a directory
+ * (like /etc/ssl/certs). This string must remain in memory for the lifetime of this object.
+ * @param caFile: Path to file containing PEM armored chain of trusted CA certificates. This
+ * string must remain in memory for the lifetime of this object.
+ */
+ bool OverrideDefaultTrustStore(const char *caPath, const char *caFile) noexcept;
+
+ /**
+ * Overrides the default system trust store.
+ * @param ca: PEM armored chain of trusted CA certificates.
+ */
+ bool OverrideDefaultTrustStore(const ByteCursor &ca) noexcept;
+
+ /// @private
+ const aws_tls_ctx_options *GetUnderlyingHandle() const noexcept { return &m_options; }
+
+ private:
+ aws_tls_ctx_options m_options;
+ bool m_isInit;
+ };
+
+ /**
+ * Options for TLS, when using a PKCS#11 library for private key operations.
+ *
+ * @see TlsContextOptions::InitClientWithMtlsPkcs11()
+ */
+ class AWS_CRT_CPP_API TlsContextPkcs11Options final
+ {
+ public:
+ /**
+ * @param pkcs11Lib use this PKCS#11 library
+ * @param allocator Memory allocator to use.
+ */
+ TlsContextPkcs11Options(
+ const std::shared_ptr<Pkcs11Lib> &pkcs11Lib,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Use this PIN to log the user into the PKCS#11 token.
+ * Leave unspecified to log into a token with a "protected authentication path".
+ *
+ * @param pin PIN
+ */
+ void SetUserPin(const String &pin) noexcept;
+
+ /**
+ * Specify the slot ID containing a PKCS#11 token.
+ * If not specified, the token will be chosen based on other criteria (such as token label).
+ *
+ * @param id slot ID
+ */
+ void SetSlotId(const uint64_t id) noexcept;
+
+ /**
+ * Specify the label of the PKCS#11 token to use.
+ * If not specified, the token will be chosen based on other criteria (such as slot ID).
+ *
+ * @param label label of token
+ */
+ void SetTokenLabel(const String &label) noexcept;
+
+ /**
+ * Specify the label of the private key object on the PKCS#11 token.
+ * If not specified, the key will be chosen based on other criteria
+ * (such as being the only available private key on the token).
+ *
+ * @param label label of private key object
+ */
+ void SetPrivateKeyObjectLabel(const String &label) noexcept;
+
+ /**
+ * Use this X.509 certificate (file on disk).
+ * The certificate may be specified by other means instead (ex: SetCertificateFileContents())
+ *
+ * @param path path to PEM-formatted certificate file on disk.
+ */
+ void SetCertificateFilePath(const String &path) noexcept;
+
+ /**
+ * Use this X.509 certificate (contents in memory).
+ * The certificate may be specified by other means instead (ex: SetCertificateFilePath())
+ *
+ * @param contents contents of PEM-formatted certificate file.
+ */
+ void SetCertificateFileContents(const String &contents) noexcept;
+
+ /// @private
+ aws_tls_ctx_pkcs11_options GetUnderlyingHandle() const noexcept;
+
+ private:
+ std::shared_ptr<Pkcs11Lib> m_pkcs11Lib;
+ Optional<uint64_t> m_slotId;
+ Optional<String> m_userPin;
+ Optional<String> m_tokenLabel;
+ Optional<String> m_privateKeyObjectLabel;
+ Optional<String> m_certificateFilePath;
+ Optional<String> m_certificateFileContents;
+ };
+
+ /**
+ * Options specific to a single connection.
+ */
+ class AWS_CRT_CPP_API TlsConnectionOptions final
+ {
+ public:
+ TlsConnectionOptions() noexcept;
+ ~TlsConnectionOptions();
+ TlsConnectionOptions(const TlsConnectionOptions &) noexcept;
+ TlsConnectionOptions &operator=(const TlsConnectionOptions &) noexcept;
+ TlsConnectionOptions(TlsConnectionOptions &&options) noexcept;
+ TlsConnectionOptions &operator=(TlsConnectionOptions &&options) noexcept;
+
+ /**
+ * Sets SNI extension, and also the name used for X.509 validation. serverName is copied.
+ *
+ * @return true if the copy succeeded, or false otherwise.
+ */
+ bool SetServerName(ByteCursor &serverName) noexcept;
+
+ /**
+ * Sets list of protocols (semi-colon delimited in priority order) used for ALPN extension.
+ * alpnList is copied.
+ *
+ * @return true if the copy succeeded, or false otherwise.
+ */
+ bool SetAlpnList(const char *alpnList) noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return isValid(); }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_lastError; }
+
+ /// @private
+ const aws_tls_connection_options *GetUnderlyingHandle() const noexcept
+ {
+ return &m_tls_connection_options;
+ }
+
+ private:
+ bool isValid() const noexcept { return m_isInit; }
+
+ TlsConnectionOptions(aws_tls_ctx *ctx, Allocator *allocator) noexcept;
+ aws_tls_connection_options m_tls_connection_options;
+ aws_allocator *m_allocator;
+ int m_lastError;
+ bool m_isInit;
+
+ friend class TlsContext;
+ };
+
+ /**
+ * Stateful context for TLS with a given configuration. Per-connection TLS "contexts"
+ * (TlsConnectionOptions) are instantiated from this as needed.
+ */
+ class AWS_CRT_CPP_API TlsContext final
+ {
+ public:
+ TlsContext() noexcept;
+ TlsContext(TlsContextOptions &options, TlsMode mode, Allocator *allocator = ApiAllocator()) noexcept;
+ ~TlsContext() = default;
+ TlsContext(const TlsContext &) noexcept = default;
+ TlsContext &operator=(const TlsContext &) noexcept = default;
+ TlsContext(TlsContext &&) noexcept = default;
+ TlsContext &operator=(TlsContext &&) noexcept = default;
+
+ /**
+ * @return a new connection-specific TLS context that can be configured with per-connection options
+ * (server name, peer verification, etc...)
+ */
+ TlsConnectionOptions NewConnectionOptions() const noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return isValid(); }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int GetInitializationError() const noexcept { return m_initializationError; }
+
+ /// @private
+ aws_tls_ctx *GetUnderlyingHandle() const noexcept { return m_ctx.get(); }
+
+ private:
+ bool isValid() const noexcept { return m_ctx && m_initializationError == AWS_ERROR_SUCCESS; }
+
+ std::shared_ptr<aws_tls_ctx> m_ctx;
+ int m_initializationError;
+ };
+
+ using NewTlsContextImplCallback = std::function<void *(TlsContextOptions &, TlsMode, Allocator *)>;
+ using DeleteTlsContextImplCallback = std::function<void(void *)>;
+ using IsTlsAlpnSupportedCallback = std::function<bool()>;
+
+ /**
+ * BYO_CRYPTO: TLS channel-handler base class.
+ */
+ class AWS_CRT_CPP_API TlsChannelHandler : public ChannelHandler
+ {
+ public:
+ virtual ~TlsChannelHandler();
+
+ /**
+ * @return negotiated protocol (or empty string if no agreed upon protocol)
+ */
+ virtual String GetProtocol() const = 0;
+
+ protected:
+ TlsChannelHandler(
+ struct aws_channel_slot *slot,
+ const struct aws_tls_connection_options &options,
+ Allocator *allocator = ApiAllocator());
+
+ /**
+ * Invoke this function from inside your handler after TLS negotiation completes. errorCode ==
+ * AWS_ERROR_SUCCESS or 0 means the session was successfully established and the connection should
+ * continue on.
+ */
+ void CompleteTlsNegotiation(int errorCode);
+
+ private:
+ aws_tls_on_negotiation_result_fn *m_OnNegotiationResult;
+ void *m_userData;
+
+ aws_byte_buf m_protocolByteBuf;
+ friend aws_byte_buf(::aws_tls_handler_protocol)(aws_channel_handler *);
+ };
+
+ /**
+ * BYO_CRYPTO: Client TLS channel-handler base class.
+ *
+ * If using BYO_CRYPTO, you must define a concrete implementation
+ * and set its creation callback via ApiHandle.SetBYOCryptoClientTlsCallback().
+ */
+ class AWS_CRT_CPP_API ClientTlsChannelHandler : public TlsChannelHandler
+ {
+ public:
+ /**
+ * Initiates the TLS session negotiation. This is called by the common runtime when it's time to start
+ * a new session.
+ */
+ virtual void StartNegotiation() = 0;
+
+ protected:
+ ClientTlsChannelHandler(
+ struct aws_channel_slot *slot,
+ const struct aws_tls_connection_options &options,
+ Allocator *allocator = ApiAllocator());
+ };
+
+ using NewClientTlsHandlerCallback = std::function<std::shared_ptr<ClientTlsChannelHandler>(
+ struct aws_channel_slot *slot,
+ const struct aws_tls_connection_options &options,
+ Allocator *allocator)>;
+
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
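A minimal sketch showing how the classes above compose (context options -> context -> per-connection options); the certificate/key paths and server name are placeholders, and ByteCursorFromCString is assumed from aws/crt/Types.h:

    // Hypothetical sketch: build a client TlsContext with mTLS and derive per-connection options.
    #include <aws/crt/io/TlsOptions.h>

    Aws::Crt::Io::TlsConnectionOptions MakeTlsConnectionOptions()
    {
        using namespace Aws::Crt::Io;

        // Placeholder paths to PEM-formatted certificate and private key files.
        TlsContextOptions ctxOptions =
            TlsContextOptions::InitClientWithMtls("/path/to/cert.pem", "/path/to/key.pem");
        if (!ctxOptions)
        {
            return TlsConnectionOptions();   // ctxOptions.LastError() carries the aws error code
        }

        TlsContext ctx(ctxOptions, TlsMode::CLIENT);
        if (!ctx)
        {
            return TlsConnectionOptions();   // ctx.GetInitializationError() carries the aws error code
        }

        TlsConnectionOptions connOptions = ctx.NewConnectionOptions();
        auto serverName = Aws::Crt::ByteCursorFromCString("example.com");
        connOptions.SetServerName(serverName);   // SNI and X.509 validation name
        return connOptions;
    }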
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Uri.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Uri.h
new file mode 100644
index 0000000000..80833f2b12
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/io/Uri.h
@@ -0,0 +1,102 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Types.h>
+
+#include <aws/io/uri.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ /**
+ * Contains a URI used for networking application protocols. This type is copyable and movable.
+ */
+ class AWS_CRT_CPP_API Uri final
+ {
+ public:
+ Uri() noexcept;
+ ~Uri();
+
+ /**
+ * Parses `cursor` as a URI. Upon failure the bool() operator will return false and LastError()
+ * will contain the errorCode.
+ */
+ Uri(const ByteCursor &cursor, Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Builds a URI from `builderOptions`. Upon failure the bool() operator will return false and
+ * LastError() will contain the errorCode.
+ */
+ Uri(aws_uri_builder_options &builderOptions, Allocator *allocator = ApiAllocator()) noexcept;
+
+ Uri(const Uri &);
+ Uri &operator=(const Uri &);
+ Uri(Uri &&uri) noexcept;
+ Uri &operator=(Uri &&) noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept { return m_isInit; }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_lastError; }
+
+ /**
+ * @return the scheme portion of the URI if present (e.g. https, http, ftp, etc.)
+ */
+ ByteCursor GetScheme() const noexcept;
+
+ /**
+ * @return the authority portion of the URI if present. This will contain the host name and port if
+ * specified.
+ */
+ ByteCursor GetAuthority() const noexcept;
+
+ /**
+ * @return the path portion of the URI. If no path was present, this will be set to '/'.
+ */
+ ByteCursor GetPath() const noexcept;
+
+ /**
+ * @return the query string portion of the URI if present.
+ */
+ ByteCursor GetQueryString() const noexcept;
+
+ /**
+ * @return the host name portion of the authority. (port will not be in this value).
+ */
+ ByteCursor GetHostName() const noexcept;
+
+ /**
+ * @return the port portion of the authority if a port was specified. If it was not, this will
+ * be set to 0. In that case, it is your responsibility to determine the correct port
+ * based on the protocol you're using.
+ */
+ uint16_t GetPort() const noexcept;
+
+ /** @return the path and query portion of the URI. In the case of HTTP, this is likely the value for the
+ * URI parameter.
+ */
+ ByteCursor GetPathAndQuery() const noexcept;
+
+ /**
+ * @return The full URI as it was passed to or parsed from the constructors.
+ */
+ ByteCursor GetFullUri() const noexcept;
+
+ private:
+ aws_uri m_uri;
+ int m_lastError;
+ bool m_isInit;
+ };
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
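A minimal parsing sketch for the Uri class above; the URI string is a placeholder and ByteCursorFromCString is assumed from aws/crt/Types.h:

    // Hypothetical sketch: parse a URI and read back host, port, and path.
    #include <aws/crt/io/Uri.h>
    #include <cstdio>

    void DumpUri()
    {
        auto cursor = Aws::Crt::ByteCursorFromCString("https://example.com:8443/index.html?x=1");
        Aws::Crt::Io::Uri uri(cursor);
        if (!uri)
        {
            std::fprintf(stderr, "parse failed, aws error %d\n", uri.LastError());
            return;
        }

        Aws::Crt::ByteCursor host = uri.GetHostName();
        Aws::Crt::ByteCursor path = uri.GetPath();
        std::printf(
            "host=%.*s port=%u path=%.*s\n",
            (int)host.len, (const char *)host.ptr,
            (unsigned)uri.GetPort(),
            (int)path.len, (const char *)path.ptr);
    }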
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Client.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Client.h
new file mode 100644
index 0000000000..9968920197
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Client.h
@@ -0,0 +1,770 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/crt/mqtt/Mqtt5Types.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt5
+ {
+ class ConnectPacket;
+ class ConnAckPacket;
+ class DisconnectPacket;
+ class Mqtt5Client;
+ class Mqtt5ClientOptions;
+ class NegotiatedSettings;
+ class PublishResult;
+ class PublishPacket;
+ class PubAckPacket;
+ class SubscribePacket;
+ class SubAckPacket;
+ class UnsubscribePacket;
+ class UnSubAckPacket;
+
+ struct AWS_CRT_CPP_API ReconnectOptions
+ {
+ /**
+ * Controls how the reconnect delay is modified in order to smooth out the distribution of reconnection
+ * attempt timepoints for a large set of reconnecting clients.
+ */
+ JitterMode m_reconnectMode;
+
+ /**
+ * Minimum amount of time to wait to reconnect after a disconnect. Exponential backoff is performed
+ * with jitter after each connection failure.
+ */
+ uint64_t m_minReconnectDelayMs;
+
+ /**
+ * Maximum amount of time to wait to reconnect after a disconnect. Exponential backoff is performed
+ * with jitter after each connection failure.
+ */
+ uint64_t m_maxReconnectDelayMs;
+
+ /**
+ * Amount of time that must elapse with an established connection before the reconnect delay is reset to
+ * the minimum. This helps alleviate bandwidth-waste in fast reconnect cycles due to permission failures
+ * on operations.
+ */
+ uint64_t m_minConnectedTimeToResetReconnectDelayMs;
+ };
+
+ /* Simple statistics about the current state of the client's queue of operations */
+ struct AWS_CRT_CPP_API Mqtt5ClientOperationStatistics
+ {
+ /*
+ * total number of operations submitted to the client that have not yet been completed. Unacked
+ * operations are a subset of this.
+ */
+ uint64_t incompleteOperationCount;
+
+ /*
+ * total packet size of operations submitted to the client that have not yet been completed. Unacked
+ * operations are a subset of this.
+ */
+ uint64_t incompleteOperationSize;
+
+ /*
+ * total number of operations that have been sent to the server and are waiting for a corresponding ACK
+ * before they can be completed.
+ */
+ uint64_t unackedOperationCount;
+
+ /*
+ * total packet size of operations that have been sent to the server and are waiting for a corresponding
+ * ACK before they can be completed.
+ */
+ uint64_t unackedOperationSize;
+ };
+
+ /**
+ * The data returned when AttemptingConnect is invoked in the LifecycleEvents callback.
+ * Currently empty, but may be used in the future for passing additional data.
+ */
+ struct AWS_CRT_CPP_API OnAttemptingConnectEventData
+ {
+ OnAttemptingConnectEventData() {}
+ };
+
+ /**
+ * The data returned when OnConnectionFailure is invoked in the LifecycleEvents callback.
+ */
+ struct AWS_CRT_CPP_API OnConnectionFailureEventData
+ {
+ OnConnectionFailureEventData() : errorCode(AWS_ERROR_SUCCESS), connAckPacket(nullptr) {}
+
+ int errorCode;
+ std::shared_ptr<ConnAckPacket> connAckPacket;
+ };
+
+ /**
+ * The data returned when OnConnectionSuccess is invoked in the LifecycleEvents callback.
+ */
+ struct AWS_CRT_CPP_API OnConnectionSuccessEventData
+ {
+ OnConnectionSuccessEventData() : connAckPacket(nullptr), negotiatedSettings(nullptr) {}
+
+ std::shared_ptr<ConnAckPacket> connAckPacket;
+ std::shared_ptr<NegotiatedSettings> negotiatedSettings;
+ };
+
+ /**
+ * The data returned when OnDisconnect is invoked in the LifecycleEvents callback.
+ */
+ struct AWS_CRT_CPP_API OnDisconnectionEventData
+ {
+ OnDisconnectionEventData() : errorCode(AWS_ERROR_SUCCESS), disconnectPacket(nullptr) {}
+
+ int errorCode;
+ std::shared_ptr<DisconnectPacket> disconnectPacket;
+ };
+
+ /**
+ * The data returned when OnStopped is invoked in the LifecycleEvents callback.
+ * Currently empty, but may be used in the future for passing additional data.
+ */
+ struct AWS_CRT_CPP_API OnStoppedEventData
+ {
+ OnStoppedEventData() {}
+ };
+
+ /**
+ * The data returned when a publish is made to a topic the MQTT5 client is subscribed to.
+ */
+ struct AWS_CRT_CPP_API PublishReceivedEventData
+ {
+ PublishReceivedEventData() : publishPacket(nullptr) {}
+ std::shared_ptr<PublishPacket> publishPacket;
+ };
+
+ /**
+ * Type signature of the callback invoked when a connection succeeds.
+ * Mandatory event fields: client, connack_data, settings
+ */
+ using OnConnectionSuccessHandler = std::function<void(Mqtt5Client &, const OnConnectionSuccessEventData &)>;
+
+ /**
+ * Type signature of the callback invoked when a connection attempt fails.
+ */
+ using OnConnectionFailureHandler = std::function<void(Mqtt5Client &, const OnConnectionFailureEventData &)>;
+
+ /**
+ * Type signature of the callback invoked when the internal connection is shut down.
+ */
+ using OnDisconnectionHandler = std::function<void(Mqtt5Client &, const OnDisconnectionEventData &)>;
+
+ /**
+ * Type signature of the callback invoked when the client begins a connection attempt.
+ * Mandatory event fields: client
+ */
+ using OnAttemptingConnectHandler = std::function<void(Mqtt5Client &, const OnAttemptingConnectEventData &)>;
+
+ /**
+ * Type signature of the callback invoked when the client reaches the stopped state.
+ * Mandatory event fields: client
+ */
+ using OnStoppedHandler = std::function<void(Mqtt5Client &, const OnStoppedEventData &)>;
+
+ /**
+ * Type signature of the callback invoked when a disconnection completes.
+ *
+ */
+ using OnDisconnectCompletionHandler = std::function<void(std::shared_ptr<Mqtt5Client>, int)>;
+
+ /**
+ * Type signature of the callback invoked when a publish operation completes.
+ */
+ using OnPublishCompletionHandler =
+ std::function<void(std::shared_ptr<Mqtt5Client>, int, std::shared_ptr<PublishResult>)>;
+
+ /**
+ * Type signature of the callback invoked when a subscribe operation completes.
+ */
+ using OnSubscribeCompletionHandler =
+ std::function<void(std::shared_ptr<Mqtt5Client>, int, std::shared_ptr<SubAckPacket>)>;
+
+ /**
+ * Type signature of the callback invoked when an unsubscribe operation completes.
+ */
+ using OnUnsubscribeCompletionHandler =
+ std::function<void(std::shared_ptr<Mqtt5Client>, int, std::shared_ptr<UnSubAckPacket>)>;
+
+ /**
+ * Type signature of the callback invoked when a PUBLISH packet is received (OnMessageHandler).
+ */
+ using OnPublishReceivedHandler = std::function<void(Mqtt5Client &, const PublishReceivedEventData &)>;
+
+ /**
+ * Callback for users to invoke upon completion of the (presumably asynchronous) process initiated by the
+ * OnWebSocketHandshakeIntercept callback.
+ */
+ using OnWebSocketHandshakeInterceptComplete =
+ std::function<void(const std::shared_ptr<Http::HttpRequest> &, int)>;
+
+ /**
+ * Invoked during the websocket handshake to give users the opportunity to transform an HTTP request for
+ * purposes such as signing/authorization etc... Returning from this function does not continue the websocket
+ * handshake since some workflows may be asynchronous. To accommodate that, onComplete must be invoked upon
+ * completion of the signing process.
+ */
+ using OnWebSocketHandshakeIntercept =
+ std::function<void(std::shared_ptr<Http::HttpRequest>, const OnWebSocketHandshakeInterceptComplete &)>;
+
+ /**
+ * An MQTT5 client. This is a move-only type. Unless otherwise specified,
+ * all function arguments need only to live through the duration of the
+ * function call.
+ */
+ class AWS_CRT_CPP_API Mqtt5Client final : public std::enable_shared_from_this<Mqtt5Client>
+ {
+ public:
+ /**
+ * Factory function for an MQTT5 client.
+ *
+ * @param options: Mqtt5 Client Options
+ * @param allocator allocator to use
+ * @return a new mqtt5 client
+ */
+ static std::shared_ptr<Mqtt5Client> NewMqtt5Client(
+ const Mqtt5ClientOptions &options,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Get a shared pointer to the Mqtt5Client. Mqtt5Client inherits from enable_shared_from_this to help
+ * with memory safety.
+ *
+ * @return shared_ptr for the Mqtt5Client
+ */
+ std::shared_ptr<Mqtt5Client> getptr() { return shared_from_this(); }
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept;
+
+ /**
+ * Notifies the MQTT5 client that you want it to attempt to connect to the configured endpoint.
+ * The client will attempt to stay connected using the properties of the reconnect-related parameters
+ * from the client configuration.
+ *
+ * @return bool: true if the operation succeeds, otherwise false.
+ */
+ bool Start() const noexcept;
+
+ /**
+ * Notifies the MQTT5 client that you want it to transition to the stopped state, disconnecting any
+ * existing connection and stopping subsequent reconnect attempts.
+ *
+ * @return bool: true if the operation succeeds, otherwise false
+ */
+ bool Stop() noexcept;
+
+ /**
+ * Notifies the MQTT5 client that you want it to transition to the stopped state, disconnecting any
+ * existing connection and stopping subsequent reconnect attempts.
+ *
+ * @param disconnectOptions (optional) properties of a DISCONNECT packet to send as part of the shutdown
+ * process
+ *
+ * @return bool: true if the operation succeeds, otherwise false
+ */
+ bool Stop(std::shared_ptr<DisconnectPacket> disconnectOptions) noexcept;
+
+ /**
+ * Tells the client to attempt to send a PUBLISH packet
+ *
+ * @param publishOptions: packet PUBLISH to send to the server
+ * @param onPublishCompletionCallback: callback on publish complete, defaults to NULL
+ *
+ * @return true if the publish operation succeeds, otherwise false
+ */
+ bool Publish(
+ std::shared_ptr<PublishPacket> publishOptions,
+ OnPublishCompletionHandler onPublishCompletionCallback = NULL) noexcept;
+
+ /**
+ * Tells the client to attempt to subscribe to one or more topic filters.
+ *
+ * @param subscribeOptions: SUBSCRIBE packet to send to the server
+ * @param onSubscribeCompletionCallback: callback on subscribe complete, defaults to NULL
+ *
+ * @return true if the subscribe operation succeeds, otherwise false
+ */
+ bool Subscribe(
+ std::shared_ptr<SubscribePacket> subscribeOptions,
+ OnSubscribeCompletionHandler onSubscribeCompletionCallback = NULL) noexcept;
+
+ /**
+ * Tells the client to attempt to unsubscribe from one or more topic filters.
+ *
+ * @param unsubscribeOptions: UNSUBSCRIBE packet to send to the server
+ * @param onUnsubscribeCompletionCallback: callback on unsubscribe complete, defaults to NULL
+ *
+ * @return true if the unsubscribe operation succeeds, otherwise false
+ */
+ bool Unsubscribe(
+ std::shared_ptr<UnsubscribePacket> unsubscribeOptions,
+ OnUnsubscribeCompletionHandler onUnsubscribeCompletionCallback = NULL) noexcept;
+
+ /**
+ * Get the statistics about the current state of the client's queue of operations
+ *
+ * @return Mqtt5ClientOperationStatistics
+ */
+ const Mqtt5ClientOperationStatistics &GetOperationStatistics() noexcept;
+
+ virtual ~Mqtt5Client();
+
+ private:
+ Mqtt5Client(const Mqtt5ClientOptions &options, Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* Static Callbacks */
+ static void s_publishCompletionCallback(
+ enum aws_mqtt5_packet_type packet_type,
+ const void *packet,
+ int error_code,
+ void *complete_ctx);
+
+ static void s_subscribeCompletionCallback(
+ const struct aws_mqtt5_packet_suback_view *puback,
+ int error_code,
+ void *complete_ctx);
+
+ static void s_unsubscribeCompletionCallback(
+ const struct aws_mqtt5_packet_unsuback_view *puback,
+ int error_code,
+ void *complete_ctx);
+
+ static void s_lifeCycleEventCallback(const aws_mqtt5_client_lifecycle_event *event);
+
+ static void s_publishReceivedCallback(const aws_mqtt5_packet_publish_view *publish, void *user_data);
+
+ static void s_onWebsocketHandshake(
+ aws_http_message *rawRequest,
+ void *user_data,
+ aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn,
+ void *complete_ctx);
+
+ static void s_clientTerminationCompletion(void *complete_ctx);
+
+ /* The handler is set by client options */
+ OnWebSocketHandshakeIntercept websocketInterceptor;
+ /**
+ * Callback handler triggered when the client successfully establishes an MQTT connection
+ */
+ OnConnectionSuccessHandler onConnectionSuccess;
+
+ /**
+ * Callback handler triggered when the client fails to establish an MQTT connection
+ */
+ OnConnectionFailureHandler onConnectionFailure;
+
+ /**
+ * Callback handler triggered when the client's current MQTT connection is closed
+ */
+ OnDisconnectionHandler onDisconnection;
+
+ /**
+ * Callback handler triggered when the client reaches the "Stopped" state
+ */
+ OnStoppedHandler onStopped;
+
+ /**
+ * Callback handler triggered when the client begins an attempt to connect to the remote endpoint.
+ */
+ OnAttemptingConnectHandler onAttemptingConnect;
+
+ /**
+ * Callback handler triggered when an MQTT PUBLISH packet is received by the client
+ */
+ OnPublishReceivedHandler onPublishReceived;
+ aws_mqtt5_client *m_client;
+ Allocator *m_allocator;
+
+ Mqtt5ClientOperationStatistics m_operationStatistics;
+ std::condition_variable m_terminationCondition;
+ std::mutex m_terminationMutex;
+ bool m_terminationPredicate = false;
+ };
+
+ /**
+ * Configuration interface for mqtt5 clients
+ */
+ class AWS_CRT_CPP_API Mqtt5ClientOptions final
+ {
+
+ friend class Mqtt5Client;
+
+ public:
+ /**
+ * Default constructor of Mqtt5ClientOptions
+ */
+ Mqtt5ClientOptions(Crt::Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Sets host to connect to.
+ *
+ * @param hostname endpoint to connect to
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withHostName(Crt::String hostname);
+
+ /**
+ * Set port to connect to
+ *
+ * @param port port to connect to
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withPort(uint16_t port) noexcept;
+
+ /**
+ * Set the bootstrap for the MQTT5 client
+ *
+ * @param bootStrap bootstrap used for the MQTT5 client. For the default ClientBootstrap, see
+ * Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap.
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withBootstrap(Io::ClientBootstrap *bootStrap) noexcept;
+
+ /**
+ * Sets the aws socket options
+ *
+ * @param socketOptions Io::SocketOptions used to setup socket
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withSocketOptions(Io::SocketOptions socketOptions) noexcept;
+
+ /**
+ * Sets the TLS connection options
+ *
+ * @param tslOptions Io::TlsConnectionOptions
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withTlsConnectionOptions(const Io::TlsConnectionOptions &tslOptions) noexcept;
+
+ /**
+ * Sets http proxy options.
+ *
+ * @param proxyOptions http proxy configuration for connection establishment
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withHttpProxyOptions(
+ const Crt::Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept;
+
+ /**
+ * Sets the MQTT5 connection options
+ *
+ * @param packetConnect CONNECT packet options
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withConnectOptions(std::shared_ptr<ConnectPacket> packetConnect) noexcept;
+
+ /**
+ * Sets session behavior. Overrides how the MQTT5 client should behave with respect to MQTT sessions.
+ *
+ * @param sessionBehavior
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withSessionBehavior(ClientSessionBehaviorType sessionBehavior) noexcept;
+
+ /**
+ * Sets client extended validation and flow control, additional controls for client behavior with
+ * respect to operation validation and flow control; these checks go beyond the base MQTT5 spec to
+ * respect limits of specific MQTT brokers.
+ *
+ * @param clientExtendedValidationAndFlowControl
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withClientExtendedValidationAndFlowControl(
+ ClientExtendedValidationAndFlowControl clientExtendedValidationAndFlowControl) noexcept;
+
+ /**
+ * Sets OfflineQueueBehavior, controls how disconnects affect the queued and in-progress operations
+ * tracked by the client. Also controls how new operations are handled while the client is not
+ * connected. In particular, if the client is not connected, then any operation that would be failed
+ * on disconnect (according to these rules) will also be rejected.
+ *
+ * @param offlineQueueBehavior
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withOfflineQueueBehavior(
+ ClientOperationQueueBehaviorType offlineQueueBehavior) noexcept;
+
+ /**
+ * Sets the ReconnectOptions: the retry jitter mode, the minimum and maximum reconnect delay times,
+ * and the minimum connected time required before the reconnect delay is reset.
+ *
+ * @param reconnectOptions
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withReconnectOptions(ReconnectOptions reconnectOptions) noexcept;
+
+ /**
+ * Sets ping timeout (ms). Time interval to wait after sending a PINGREQ for a PINGRESP to arrive.
+ * If one does not arrive, the client will close the current connection.
+ *
+ * @param pingTimeoutMs
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withPingTimeoutMs(uint32_t pingTimeoutMs) noexcept;
+
+ /**
+ * Sets Connack Timeout (ms). Time interval to wait after sending a CONNECT request for a CONNACK
+ * to arrive. If one does not arrive, the connection will be shut down.
+ *
+ * @param connackTimeoutMs
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withConnackTimeoutMs(uint32_t connackTimeoutMs) noexcept;
+
+ /**
+ * Sets Operation Timeout(Seconds). Time interval to wait for an ack after sending a QoS 1+ PUBLISH,
+ * SUBSCRIBE, or UNSUBSCRIBE before failing the operation.
+ *
+ * @param ackTimeoutSeconds
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withAckTimeoutSeconds(uint32_t ackTimeoutSeconds) noexcept;
+
+ /**
+ * Sets the callback used to transform the websocket handshake HTTP request.
+ * This callback allows a custom transformation of the HTTP request that acts as the websocket
+ * handshake. Websockets will be used if this is set to a valid transformation callback. To use
+ * websockets but not perform a transformation, just set this as a trivial completion callback. If
+ * undefined, the connection will be made with direct MQTT.
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withWebsocketHandshakeTransformCallback(
+ OnWebSocketHandshakeIntercept callback) noexcept;
+
+ /**
+ * Sets the callback triggered when the client successfully establishes an MQTT connection
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withClientConnectionSuccessCallback(OnConnectionSuccessHandler callback) noexcept;
+
+ /**
+ * Sets the callback triggered when the client fails to establish an MQTT connection
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withClientConnectionFailureCallback(OnConnectionFailureHandler callback) noexcept;
+
+ /**
+ * Sets the callback triggered when the client's current MQTT connection is closed
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withClientDisconnectionCallback(OnDisconnectionHandler callback) noexcept;
+
+ /**
+ * Sets the callback triggered when the client reaches the "Stopped" state
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withClientStoppedCallback(OnStoppedHandler callback) noexcept;
+
+ /**
+ * Sets the callback triggered when the client begins an attempt to connect to the remote endpoint.
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withClientAttemptingConnectCallback(OnAttemptingConnectHandler callback) noexcept;
+
+ /**
+ * Sets the callback triggered when a PUBLISH packet is received by the client
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientOptions &withPublishReceivedCallback(OnPublishReceivedHandler callback) noexcept;
+
+ /**
+ * Initializes the C aws_mqtt5_client_options from Mqtt5ClientOptions. For internal use
+ *
+ * @param raw_options - output parameter containing low level client options to be passed to the C
+ * interface
+ *
+ */
+ bool initializeRawOptions(aws_mqtt5_client_options &raw_options) const noexcept;
+
+ virtual ~Mqtt5ClientOptions();
+ Mqtt5ClientOptions(const Mqtt5ClientOptions &) = delete;
+ Mqtt5ClientOptions(Mqtt5ClientOptions &&) = delete;
+ Mqtt5ClientOptions &operator=(const Mqtt5ClientOptions &) = delete;
+ Mqtt5ClientOptions &operator=(Mqtt5ClientOptions &&) = delete;
+
+ private:
+ /**
+ * This callback allows a custom transformation of the HTTP request that acts as the websocket
+ * handshake. Websockets will be used if this is set to a valid transformation callback. To use
+ * websockets but not perform a transformation, just set this as a trivial completion callback. If
+ * undefined, the connection will be made with direct MQTT.
+ */
+ OnWebSocketHandshakeIntercept websocketHandshakeTransform;
+
+ /**
+ * Callback handler triggered when the client successfully establishes an MQTT connection
+ */
+ OnConnectionSuccessHandler onConnectionSuccess;
+
+ /**
+ * Callback handler triggered when the client fails to establish an MQTT connection
+ */
+ OnConnectionFailureHandler onConnectionFailure;
+
+ /**
+ * Callback handler triggered when the client's current MQTT connection is closed
+ */
+ OnDisconnectionHandler onDisconnection;
+
+ /**
+ * Callback handler triggered when the client reaches the "Stopped" state
+ *
+ * @param Mqtt5Client: The shared client
+ */
+ OnStoppedHandler onStopped;
+
+ /**
+ * Callback handler triggered when the client begins an attempt to connect to the remote endpoint.
+ *
+ * @param Mqtt5Client: The shared client
+ */
+ OnAttemptingConnectHandler onAttemptingConnect;
+
+ /**
+ * Callback handler triggered when an MQTT PUBLISH packet is received by the client
+ *
+ * @param Mqtt5Client: The shared client
+ * @param PublishPacket: received Publish Packet
+ */
+ OnPublishReceivedHandler onPublishReceived;
+
+ /**
+ * Host name of the MQTT server to connect to.
+ */
+ Crt::String m_hostName;
+
+ /**
+ * Network port of the MQTT server to connect to.
+ */
+ uint16_t m_port;
+
+ /**
+ * Client bootstrap to use. In almost all cases, this can be left undefined.
+ */
+ Io::ClientBootstrap *m_bootstrap;
+
+ /**
+ * Controls socket properties of the underlying MQTT connections made by the client. Leave undefined to
+ * use defaults (no TCP keep alive, 10 second socket timeout).
+ */
+ Crt::Io::SocketOptions m_socketOptions;
+
+ /**
+ * TLS context for secure socket connections.
+ * If undefined, then a plaintext connection will be used.
+ */
+ Crt::Optional<Crt::Io::TlsConnectionOptions> m_tlsConnectionOptions;
+
+ /**
+ * Configures (tunneling) HTTP proxy usage when establishing MQTT connections
+ */
+ Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> m_proxyOptions;
+
+ /**
+ * All configurable options with respect to the CONNECT packet sent by the client, including the will.
+ * These connect properties will be used for every connection attempt made by the client.
+ */
+ std::shared_ptr<ConnectPacket> m_connectOptions;
+
+ /**
+ * Controls how the MQTT5 client should behave with respect to MQTT sessions.
+ */
+ ClientSessionBehaviorType m_sessionBehavior;
+
+ /**
+ * Additional controls for client behavior with respect to operation validation and flow control; these
+ * checks go beyond the base MQTT5 spec to respect limits of specific MQTT brokers.
+ */
+ ClientExtendedValidationAndFlowControl m_extendedValidationAndFlowControlOptions;
+
+ /**
+ * Controls how disconnects affect the queued and in-progress operations tracked by the client. Also
+ * controls how new operations are handled while the client is not connected. In particular, if the
+ * client is not connected, then any operation that would be failed on disconnect (according to these
+ * rules) will also be rejected.
+ */
+ ClientOperationQueueBehaviorType m_offlineQueueBehavior;
+
+ /**
+ * Reconnect options: retry jitter mode, minimum reconnect delay time and maximum reconnect delay time
+ */
+ ReconnectOptions m_reconnectionOptions;
+
+ /**
+ * Time interval to wait after sending a PINGREQ for a PINGRESP to arrive. If one does not arrive, the
+ * client will close the current connection.
+ */
+ uint32_t m_pingTimeoutMs;
+
+ /**
+ * Time interval to wait after sending a CONNECT request for a CONNACK to arrive. If one does not
+ * arrive, the connection will be shut down.
+ */
+ uint32_t m_connackTimeoutMs;
+
+ /**
+ * Time interval to wait for an ack after sending a QoS 1+ PUBLISH, SUBSCRIBE, or UNSUBSCRIBE before
+ * failing the operation.
+ */
+ uint32_t m_ackTimeoutSec;
+
+ /* Underlying Parameters */
+ Crt::Allocator *m_allocator;
+ aws_http_proxy_options m_httpProxyOptionsStorage;
+ aws_mqtt5_packet_connect_view m_packetConnectViewStorage;
+ };
+
+ } // namespace Mqtt5
+ } // namespace Crt
+} // namespace Aws
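A minimal sketch of wiring the options and client above together. It assumes an ApiHandle has already been created elsewhere, that ConnectPacket/PublishPacket come from Mqtt5Packets.h (added next in this change), that QOS and ByteCursorFromCString come from Mqtt5Types.h and Types.h respectively, and that the endpoint and topic are placeholders:

    // Hypothetical sketch: configure an Mqtt5Client, start it, and publish once connected.
    #include <aws/crt/mqtt/Mqtt5Client.h>
    #include <aws/crt/mqtt/Mqtt5Packets.h>

    std::shared_ptr<Aws::Crt::Mqtt5::Mqtt5Client> StartClient()
    {
        using namespace Aws::Crt::Mqtt5;

        Mqtt5ClientOptions options;
        options.withHostName("broker.example.com")
            .withPort(1883)
            .withClientConnectionSuccessCallback(
                [](Mqtt5Client &client, const OnConnectionSuccessEventData &)
                {
                    // Connected: publish a QoS 1 message to a placeholder topic.
                    auto publish = std::make_shared<PublishPacket>(
                        "sample/topic",
                        Aws::Crt::ByteCursorFromCString("hello"),
                        QOS::AWS_MQTT5_QOS_AT_LEAST_ONCE);
                    client.Publish(publish);
                });

        auto client = Mqtt5Client::NewMqtt5Client(options);
        if (client)
        {
            client->Start();   // begins connecting per the reconnect options
        }
        return client;
    }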
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Packets.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Packets.h
new file mode 100644
index 0000000000..9588d6e0ef
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Packets.h
@@ -0,0 +1,2286 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/mqtt/Mqtt5Client.h>
+#include <aws/crt/mqtt/Mqtt5Types.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt5
+ {
+
+ /**
+ * Data model for MQTT5 user properties.
+ *
+ * A user property is a name-value pair of UTF-8 strings that can be added to MQTT5 packets.
+ */
+ class AWS_CRT_CPP_API UserProperty
+ {
+ public:
+ UserProperty(Crt::String key, Crt::String value) noexcept;
+
+ const Crt::String &getName() const noexcept { return m_name; };
+ const Crt::String &getValue() const noexcept { return m_value; }
+
+ ~UserProperty() noexcept;
+ UserProperty(const UserProperty &toCopy) noexcept;
+ UserProperty(UserProperty &&toMove) noexcept;
+ UserProperty &operator=(const UserProperty &toCopy) noexcept;
+ UserProperty &operator=(UserProperty &&toMove) noexcept;
+
+ private:
+ Crt::String m_name;
+ Crt::String m_value;
+ };
+
+ class AWS_CRT_CPP_API IPacket
+ {
+ public:
+ virtual PacketType getType() = 0;
+ };
+
+ /**
+ * Data model of an [MQTT5
+ * PUBLISH](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901100) packet
+ */
+ class AWS_CRT_CPP_API PublishPacket : public IPacket
+ {
+ public:
+ PublishPacket(
+ const aws_mqtt5_packet_publish_view &raw_options,
+ Allocator *allocator = ApiAllocator()) noexcept;
+ PublishPacket(Allocator *allocator = ApiAllocator()) noexcept;
+ PublishPacket(
+ Crt::String topic,
+ ByteCursor payload,
+ Mqtt5::QOS qos,
+ Allocator *allocator = ApiAllocator()) noexcept;
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_PUBLISH; };
+
+ /**
+ * Sets the payload for the publish message.
+ *
+ * See [MQTT5 Publish
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901119)
+ *
+ * @param payload The payload for the publish message.
+ * @return The PublishPacket Object after setting the payload.
+ */
+ PublishPacket &withPayload(ByteCursor payload) noexcept;
+
+ /**
+ * Sets the MQTT quality of service level the message should be delivered with.
+ *
+ * See [MQTT5 QoS](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901103)
+ *
+ * @param packetQOS The MQTT quality of service level the message should be delivered with.
+ * @return The PublishPacket Object after setting the QOS.
+ */
+ PublishPacket &withQOS(Mqtt5::QOS packetQOS) noexcept;
+
+ /**
+ * Sets if this should be a retained message.
+ *
+ * See [MQTT5 Retain](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901104)
+ *
+ * @param retain if this is a retained message.
+ * @return The PublishPacket Object after setting the retain setting.
+ */
+ PublishPacket &withRetain(bool retain) noexcept;
+
+ /**
+ * Sets the topic this message should be published to.
+ * See [MQTT5 Topic Name](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901107)
+ *
+ * @param topic The topic this message should be published to.
+ * @return The PublishPacket Object after setting the topic.
+ */
+ PublishPacket &withTopic(Crt::String topic) noexcept;
+
+ /**
+ * Sets the property specifying the format of the payload data. The mqtt5 client does not enforce or use
+ * this value in a meaningful way.
+ *
+ * See [MQTT5 Payload Format
+ * Indicator](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901111)
+ *
+ * @param payloadFormat Property specifying the format of the payload data
+ * @return The PublishPacket Object after setting the payload format.
+ */
+ PublishPacket &withPayloadFormatIndicator(PayloadFormatIndicator payloadFormat) noexcept;
+
+ /**
+ * Sets the maximum amount of time allowed to elapse for message delivery before the server
+ * should instead delete the message (relative to a recipient).
+ *
+ * See [MQTT5 Message Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901112)
+ *
+ * @param second The maximum amount of time allowed to elapse for message delivery
+ * before the server should instead delete the message (relative to a recipient).
+ * @return The PublishPacket Object after setting the message expiry interval.
+ */
+ PublishPacket &withMessageExpiryIntervalSec(uint32_t second) noexcept;
+
+ /**
+ * Sets the opaque topic string intended to assist with request/response implementations. Not
+ * internally meaningful to MQTT5 or this client.
+ *
+ * See [MQTT5 Response
+ * Topic](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901114)
+ * @param responseTopic
+ * @return The PublishPacket Object after setting the response topic.
+ */
+ PublishPacket &withResponseTopic(ByteCursor responseTopic) noexcept;
+
+ /**
+ * Sets the opaque binary data used to correlate between publish messages, as a potential method for
+ * request-response implementation. Not internally meaningful to MQTT5.
+ *
+ * See [MQTT5 Correlation
+ * Data](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901115)
+ *
+ * @param correlationData Opaque binary data used to correlate between publish messages
+ * @return The PublishPacket Object after setting the correlation data.
+ */
+ PublishPacket &withCorrelationData(ByteCursor correlationData) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The PublishPacket Object after setting the user properties
+ */
+ PublishPacket &withUserProperties(const Vector<UserProperty> &userProperties) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The PublishPacket Object after setting the user properties
+ */
+ PublishPacket &withUserProperties(Vector<UserProperty> &&userProperties) noexcept;
+
+ /**
+ * Appends an MQTT5 user property to the back of the packet's user property vector/list
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param property the MQTT5 user property to append to the packet.
+ * @return The PublishPacket Object after setting the user property
+ */
+ PublishPacket &withUserProperty(UserProperty &&property) noexcept;
+
+ bool initializeRawOptions(aws_mqtt5_packet_publish_view &raw_options) noexcept;
+
+ /**
+ * The payload of the publish message.
+ *
+ * See [MQTT5 Publish
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901119)
+ *
+ * @return The payload of the publish message.
+ */
+ const ByteCursor &getPayload() const noexcept;
+
+ /**
+ * Sent publishes - The MQTT quality of service level this message should be delivered with.
+ *
+ * Received publishes - The MQTT quality of service level this message was delivered at.
+ *
+ * See [MQTT5 QoS](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901103)
+ *
+ * @return The MQTT quality of service associated with this PUBLISH packet.
+ */
+ Mqtt5::QOS getQOS() const noexcept;
+
+ /**
+ * True if this is a retained message, false otherwise.
+ *
+ * Always set on received publishes.
+ *
+ * See [MQTT5 Retain](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901104)
+ *
+ * @return True if this is a retained message, false otherwise.
+ */
+ bool getRetain() const noexcept;
+
+ /**
+ * Sent publishes - The topic this message should be published to.
+ *
+ * Received publishes - The topic this message was published to.
+ *
+ * See [MQTT5 Topic Name](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901107)
+ * @return The topic associated with this PUBLISH packet.
+ */
+ const Crt::String &getTopic() const noexcept;
+
+ /**
+ * Property specifying the format of the payload data. The mqtt5 client does not enforce or use this
+ * value in a meaningful way.
+ *
+ * See [MQTT5 Payload Format
+ * Indicator](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901111)
+ *
+ * @return Property specifying the format of the payload data.
+ */
+ const Crt::Optional<PayloadFormatIndicator> &getPayloadFormatIndicator() const noexcept;
+
+ /**
+ * Sent publishes - indicates the maximum amount of time allowed to elapse for message delivery before
+ * the server should instead delete the message (relative to a recipient).
+ *
+ * Received publishes - indicates the remaining amount of time (from the server's perspective) before
+ * the message would have been deleted relative to the subscribing client.
+ *
+ * If left null, indicates no expiration timeout.
+ *
+ * See [MQTT5 Message Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901112)
+ *
+ * @return The message expiry interval associated with this PUBLISH packet.
+ */
+ const Crt::Optional<uint32_t> &getMessageExpiryIntervalSec() const noexcept;
+
+ /**
+ * Opaque topic string intended to assist with request/response implementations. Not internally
+ * meaningful to MQTT5 or this client.
+ *
+ * See [MQTT5 Response
+ * Topic](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901114)
+ *
+ * @return ByteCursor to topic string intended to assist with request/response implementations.
+ */
+ const Crt::Optional<ByteCursor> &getResponseTopic() const noexcept;
+
+ /**
+ * Opaque binary data used to correlate between publish messages, as a potential method for
+ * request-response implementation. Not internally meaningful to MQTT5.
+ *
+ * See [MQTT5 Correlation
+ * Data](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901115)
+ *
+ * @return ByteCursor to opaque binary data used to correlate between publish messages.
+ */
+ const Crt::Optional<ByteCursor> &getCorrelationData() const noexcept;
+
+ /**
+ * Sent publishes - ignored
+ *
+ * Received publishes - the subscription identifiers of all the subscriptions this message matched.
+ *
+ * See [MQTT5 Subscription
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901117)
+ *
+ * @return the subscription identifiers of all the subscriptions this message matched.
+ */
+ const Crt::Vector<uint32_t> &getSubscriptionIdentifiers() const noexcept;
+
+ /**
+ * Property specifying the content type of the payload. Not internally meaningful to MQTT5.
+ *
+ * See [MQTT5 Content Type](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901118)
+ *
+ * @return ByteCursor to opaque binary data to the content type of the payload.
+ */
+ const Crt::Optional<ByteCursor> &getContentType() const noexcept;
+
+ /**
+ * List of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Crt::Vector<UserProperty> &getUserProperties() const noexcept;
+
+ virtual ~PublishPacket();
+ PublishPacket(const PublishPacket &) = delete;
+ PublishPacket(PublishPacket &&) noexcept = delete;
+ PublishPacket &operator=(const PublishPacket &) = delete;
+ PublishPacket &operator=(PublishPacket &&) noexcept = delete;
+
+ private:
+ Allocator *m_allocator;
+
+ /**
+ * The payload of the publish message.
+ *
+ * See [MQTT5 Publish
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901119)
+ */
+ ByteCursor m_payload;
+
+ /**
+ * Sent publishes - The MQTT quality of service level this message should be delivered with.
+ *
+ * Received publishes - The MQTT quality of service level this message was delivered at.
+ *
+ * See [MQTT5 QoS](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901103)
+ */
+ Mqtt5::QOS m_qos;
+
+ /**
+ * True if this is a retained message, false otherwise.
+ *
+ * Always set on received publishes; defaults to false.
+ *
+ * See [MQTT5 Retain](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901104)
+ */
+ bool m_retain;
+
+ /**
+ * Sent publishes - The topic this message should be published to.
+ *
+ * Received publishes - The topic this message was published to.
+ *
+ * See [MQTT5 Topic Name](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901107)
+ */
+ Crt::String m_topicName;
+
+ /**
+ * Property specifying the format of the payload data. The mqtt5 client does not enforce or use this
+ * value in a meaningful way.
+ *
+ * See [MQTT5 Payload Format
+ * Indicator](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901111)
+ */
+ Crt::Optional<PayloadFormatIndicator> m_payloadFormatIndicator;
+
+ /**
+ * Sent publishes - indicates the maximum amount of time allowed to elapse for message delivery before
+ * the server should instead delete the message (relative to a recipient).
+ *
+ * Received publishes - indicates the remaining amount of time (from the server's perspective) before
+ * the message would have been deleted relative to the subscribing client.
+ *
+ * If left undefined, indicates no expiration timeout.
+ *
+ * See [MQTT5 Message Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901112)
+ */
+ Crt::Optional<uint32_t> m_messageExpiryIntervalSec;
+
+ /**
+ * Opaque topic string intended to assist with request/response implementations. Not internally
+ * meaningful to MQTT5 or this client.
+ *
+ * See [MQTT5 Response
+ * Topic](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901114)
+ */
+ Crt::Optional<ByteCursor> m_responseTopic;
+
+ /**
+ * Opaque binary data used to correlate between publish messages, as a potential method for
+ * request-response implementation. Not internally meaningful to MQTT5.
+ *
+ * See [MQTT5 Correlation
+ * Data](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901115)
+ */
+ Crt::Optional<ByteCursor> m_correlationData;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // The following parameters are ignored when building publish operations
+ ///////////////////////////////////////////////////////////////////////////
+
+ /**
+ * Sent publishes - ignored
+ *
+ * Received publishes - the subscription identifiers of all the subscriptions this message matched.
+ *
+ * See [MQTT5 Subscription
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901117)
+ */
+ Crt::Vector<uint32_t> m_subscriptionIdentifiers;
+
+ /**
+ * Property specifying the content type of the payload. Not internally meaningful to MQTT5.
+ *
+ * See [MQTT5 Content Type](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901118)
+ */
+ Crt::Optional<ByteCursor> m_contentType;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Underlying data storage for internal use
+ ///////////////////////////////////////////////////////////////////////////
+ ByteBuf m_payloadStorage;
+ ByteBuf m_contentTypeStorage;
+ ByteBuf m_correlationDataStorage;
+ Crt::String m_responseTopicString;
+ struct aws_mqtt5_user_property *m_userPropertiesStorage;
+ };
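+
+ // --- Editorial illustration, not part of the upstream header: a minimal sketch of inspecting a
+ // --- received PUBLISH through the accessors declared above. The handler signature is assumed,
+ // --- and Crt::Optional is assumed to expose has_value()/value().
+ inline void ExampleInspectReceivedPublish(const PublishPacket &publish)
+ {
+     Mqtt5::QOS qos = publish.getQOS();             /* delivery QoS of the received message */
+     bool retained = publish.getRetain();           /* true if this was a retained message */
+     const Crt::String &topic = publish.getTopic(); /* topic the message was published to */
+
+     if (publish.getMessageExpiryIntervalSec().has_value())
+     {
+         /* time remaining (server's view) before the message would have been dropped */
+         uint32_t remainingSec = publish.getMessageExpiryIntervalSec().value();
+         (void)remainingSec;
+     }
+     for (const UserProperty &property : publish.getUserProperties())
+     {
+         (void)property; /* e.g. forward application-level key/value pairs */
+     }
+     (void)qos;
+     (void)retained;
+     (void)topic;
+ }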
+
+ /**
+ * Mqtt behavior settings that are dynamically negotiated as part of the CONNECT/CONNACK exchange.
+ *
+ * While you can infer all of these values from a combination of
+ * (1) defaults as specified in the mqtt5 spec
+ * (2) your CONNECT settings
+ * (3) the CONNACK from the broker
+ *
+ * the client instead does the combining for you and emits a NegotiatedSettings object with final,
+ * authoritative values.
+ *
+ * Negotiated settings are communicated with every successful connection establishment.
+ */
+ class AWS_CRT_CPP_API NegotiatedSettings
+ {
+ public:
+ NegotiatedSettings(
+ const aws_mqtt5_negotiated_settings &negotiated_settings,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * @return The maximum QoS allowed for publishes on this connection instance
+ */
+ Mqtt5::QOS getMaximumQOS() const noexcept;
+
+ /**
+ * @return The amount of time in seconds the server will retain the MQTT session after a disconnect.
+ */
+ uint32_t getSessionExpiryIntervalSec() const noexcept;
+
+ /**
+ * @return The number of in-flight QoS 1 and QoS 2 publications the server is willing to process
+ * concurrently.
+ */
+ uint16_t getReceiveMaximumFromServer() const noexcept;
+
+ /**
+ * @return The maximum packet size the server is willing to accept.
+ */
+ uint32_t getMaximumPacketSizeBytes() const noexcept;
+
+ /**
+ * The maximum amount of time in seconds between client packets. The client should use PINGREQs to
+ * ensure this limit is not breached. The server will disconnect the client for inactivity if no MQTT
+ * packet is received in a time interval equal to 1.5 x this value.
+ *
+ * @return The maximum amount of time in seconds between client packets.
+ */
+ uint16_t getServerKeepAlive() const noexcept;
+
+ /**
+ * @return Whether the server supports retained messages.
+ */
+ bool getRetainAvailable() const noexcept;
+
+ /**
+ * @return Whether the server supports wildcard subscriptions.
+ */
+ bool getWildcardSubscriptionsAvaliable() const noexcept;
+
+ /**
+ * @return Whether the server supports subscription identifiers
+ */
+ bool getSubscriptionIdentifiersAvaliable() const noexcept;
+
+ /**
+ * @return Whether the server supports shared subscriptions
+ */
+ bool getSharedSubscriptionsAvaliable() const noexcept;
+
+ /**
+ * @return Whether the client has rejoined an existing session.
+ */
+ bool getRejoinedSession() const noexcept;
+
+ /**
+ * The final client id in use by the newly-established connection. This will be the configured client
+ * id if one was given in the configuration; otherwise it will be the client id assigned by the server.
+ * Reconnection attempts will always use the auto-assigned client id, allowing the auto-assigned session
+ * to be resumed.
+ *
+ * @return The final client id in use by the newly-established connection
+ */
+ const Crt::String &getClientId() const noexcept;
+
+ virtual ~NegotiatedSettings(){};
+ NegotiatedSettings(const NegotiatedSettings &) = delete;
+ NegotiatedSettings(NegotiatedSettings &&) noexcept = delete;
+ NegotiatedSettings &operator=(const NegotiatedSettings &) = delete;
+ NegotiatedSettings &operator=(NegotiatedSettings &&) noexcept = delete;
+
+ private:
+ /**
+ * The maximum QoS allowed for publishes on this connection instance
+ */
+ Mqtt5::QOS m_maximumQOS;
+
+ /**
+ * The amount of time in seconds the server will retain the MQTT session after a disconnect.
+ */
+ uint32_t m_sessionExpiryIntervalSec;
+
+ /**
+ * The number of in-flight QoS 1 and QoS2 publications the server is willing to process concurrently.
+ */
+ uint16_t m_receiveMaximumFromServer;
+
+ /**
+ * The maximum packet size the server is willing to accept.
+ */
+ uint32_t m_maximumPacketSizeBytes;
+
+ /**
+ * The maximum amount of time in seconds between client packets. The client should use PINGREQs to
+ * ensure this limit is not breached. The server will disconnect the client for inactivity if no MQTT
+ * packet is received in a time interval equal to 1.5 x this value.
+ */
+ uint16_t m_serverKeepAliveSec;
+
+ /**
+ * Whether the server supports retained messages.
+ */
+ bool m_retainAvailable;
+
+ /**
+ * Whether the server supports wildcard subscriptions.
+ */
+ bool m_wildcardSubscriptionsAvaliable;
+
+ /**
+ * Whether the server supports subscription identifiers
+ */
+ bool m_subscriptionIdentifiersAvaliable;
+
+ /**
+ * Whether the server supports shared subscriptions
+ */
+ bool m_sharedSubscriptionsAvaliable;
+
+ /**
+ * Whether the client has rejoined an existing session.
+ */
+ bool m_rejoinedSession;
+
+ /**
+ * The final client id in use by the newly-established connection. This will be the configured client
+ * id if one was given in the configuration; otherwise it will be the client id assigned by the server.
+ * Reconnection attempts will always use the auto-assigned client id, allowing the auto-assigned session
+ * to be resumed.
+ */
+ Crt::String m_clientId;
+ };
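+
+ // --- Editorial illustration, not part of the upstream header: a sketch of reading the
+ // --- NegotiatedSettings delivered after a successful CONNECT/CONNACK exchange. The callback
+ // --- shape is assumed for illustration only.
+ inline void ExampleOnConnectionSuccess(const NegotiatedSettings &settings)
+ {
+     /* Final, authoritative values after combining client defaults, the CONNECT packet
+      * and the broker's CONNACK. */
+     Mqtt5::QOS maximumQos = settings.getMaximumQOS();
+     uint16_t serverKeepAliveSec = settings.getServerKeepAlive();
+     const Crt::String &clientId = settings.getClientId();
+
+     if (!settings.getRejoinedSession())
+     {
+         /* Fresh session: subscriptions from any previous session are gone and
+          * must be re-established by the application. */
+     }
+     (void)maximumQos;
+     (void)serverKeepAliveSec;
+     (void)clientId;
+ }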
+
+ /**
+ * Data model of an [MQTT5
+ * CONNECT](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901033) packet.
+ */
+ class AWS_CRT_CPP_API ConnectPacket : public IPacket
+ {
+ public:
+ /* Default constructor */
+ ConnectPacket(Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_CONNECT; };
+
+ /**
+ * Sets the maximum time interval, in seconds, that is permitted to elapse between the point at which
+ * the client finishes transmitting one MQTT packet and the point it starts sending the next. The
+ * client will use PINGREQ packets to maintain this property.
+ *
+ * If the responding CONNACK contains a keep alive property value, then that is the negotiated keep
+ * alive value. Otherwise, the keep alive sent by the client is the negotiated value.
+ *
+ * See [MQTT5 Keep Alive](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901045)
+ *
+ * NOTE: The keep alive interval, in seconds, must be larger than the pingTimeoutMs value set in the
+ * Mqtt5ClientOptions.
+ *
+ * @param keepAliveInteralSeconds the maximum time interval, in seconds, that is permitted to elapse
+ * between the point at which the client finishes transmitting one MQTT packet and the point it starts
+ * sending the next.
+ * @return The ConnectPacket Object after setting the keep alive interval.
+ */
+ ConnectPacket &withKeepAliveIntervalSec(uint16_t keepAliveInteralSeconds) noexcept;
+
+ /**
+ * Sets the unique string identifying the client to the server. Used to restore session state between
+ * connections.
+ *
+ * If left empty, the broker will auto-assign a unique client id. When reconnecting, the mqtt5 client
+ * will always use the auto-assigned client id.
+ *
+ * See [MQTT5 Client
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901059)
+ *
+ * @param clientId A unique string identifying the client to the server.
+ * @return The ConnectPacket Object after setting the client ID.
+ */
+ ConnectPacket &withClientId(Crt::String clientId) noexcept;
+
+ /**
+ * Sets the string value that the server may use for client authentication and authorization.
+ *
+ * See [MQTT5 User Name](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901071)
+ *
+ * @param username The string value that the server may use for client authentication and authorization.
+ * @return The ConnectPacket Object after setting the username.
+ */
+ ConnectPacket &withUserName(Crt::String username) noexcept;
+
+ /**
+ * Sets the opaque binary data that the server may use for client authentication and authorization.
+ *
+ * See [MQTT5 Password](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901072)
+ *
+ * @param password Opaque binary data that the server may use for client authentication and
+ * authorization.
+ * @return The ConnectPacket Object after setting the password.
+ */
+ ConnectPacket &withPassword(ByteCursor password) noexcept;
+
+ /**
+ * Sets the time interval, in seconds, that the client requests the server to persist this connection's
+ * MQTT session state for. Has no meaning if the client has not been configured to rejoin sessions.
+ * Must be non-zero in order to successfully rejoin a session.
+ *
+ * If the responding CONNACK contains a session expiry property value, then that is the negotiated
+ * session expiry value. Otherwise, the session expiry sent by the client is the negotiated value.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901048)
+ *
+ * @param sessionExpiryIntervalSeconds A time interval, in seconds, that the client requests the server
+ * to persist this connection's MQTT session state for.
+ * @return The ConnectPacket Object after setting the session expiry interval.
+ */
+ ConnectPacket &withSessionExpiryIntervalSec(uint32_t sessionExpiryIntervalSeconds) noexcept;
+
+ /**
+ * Sets whether to request that the server send response information in the subsequent CONNACK. This
+ * response information may be used to set up request-response implementations over MQTT, but doing so
+ * is outside the scope of the MQTT5 spec and client.
+ *
+ * See [MQTT5 Request Response
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901052)
+ *
+ * @param requestResponseInformation If true, requests that the server send response information in the
+ * subsequent CONNACK.
+ * @return The ConnectPacket Object after setting the request response information.
+ */
+ ConnectPacket &withRequestResponseInformation(bool requestResponseInformation) noexcept;
+
+ /**
+ * Sets whether requests that the server send additional diagnostic information (via response string or
+ * user properties) in DISCONNECT or CONNACK packets from the server.
+ *
+ * See [MQTT5 Request Problem
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901053)
+ *
+ * @param requestProblemInformation If true, requests that the server send additional diagnostic
+ * information (via response string or user properties) in DISCONNECT or CONNACK packets from the
+ * server.
+ * @return The ConnectPacket Object after setting the request problem information.
+ */
+ ConnectPacket &withRequestProblemInformation(bool requestProblemInformation) noexcept;
+
+ /**
+ * Sets the maximum number of in-flight QoS 1 and 2 messages the client is willing to handle. If
+ * omitted, then no limit is requested.
+ *
+ * See [MQTT5 Receive
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901049)
+ *
+ * @param receiveMaximum The maximum number of in-flight QoS 1 and 2 messages the client is willing to
+ * handle.
+ * @return The ConnectPacket Object after setting the receive maximum.
+ */
+ ConnectPacket &withReceiveMaximum(uint16_t receiveMaximum) noexcept;
+
+ /**
+ * Sets the maximum packet size the client is willing to handle. If
+ * omitted, then no limit beyond the natural limits of MQTT packet size is requested.
+ *
+ * See [MQTT5 Maximum Packet
+ * Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901050)
+ *
+ * @param maximumPacketSizeBytes The maximum packet size the client is willing to handle
+ * @return The ConnectPacket Object after setting the maximum packet size.
+ */
+ ConnectPacket &withMaximumPacketSizeBytes(uint32_t maximumPacketSizeBytes) noexcept;
+
+ /**
+ * Sets the time interval, in seconds, that the server should wait (for a session reconnection) before
+ * sending the will message associated with the connection's session. If omitted, the server
+ * will send the will when the associated session is destroyed. If the session is destroyed before a
+ * will delay interval has elapsed, then the will must be sent at the time of session destruction.
+ *
+ * See [MQTT5 Will Delay
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901062)
+ *
+ * @param willDelayIntervalSeconds A time interval, in seconds, that the server should wait (for a
+ * session reconnection) before sending the will message associated with the connection's session.
+ * @return The ConnectPacket Object after setting the will message delay interval.
+ */
+ ConnectPacket &withWillDelayIntervalSec(uint32_t willDelayIntervalSeconds) noexcept;
+
+ /**
+ * Sets the definition of a message to be published when the connection's session is destroyed by the
+ * server or when the will delay interval has elapsed, whichever comes first. If null, then nothing
+ * will be sent.
+ *
+ * See [MQTT5 Will](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901040)
+ *
+ * @param will The message to be published when the connection's session is destroyed by the server or
+ * when the will delay interval has elapsed, whichever comes first.
+ * @return The ConnectPacket Object after setting the will message.
+ */
+ ConnectPacket &withWill(std::shared_ptr<PublishPacket> will) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901054)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The ConnectPacket Object after setting the user properties.
+ */
+ ConnectPacket &withUserProperties(const Vector<UserProperty> &userProperties) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901054)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The ConnectPacket Object after setting the user properties.
+ */
+ ConnectPacket &withUserProperties(Vector<UserProperty> &&userProperties) noexcept;
+
+ /**
+ * Puts an MQTT5 user property at the back of the packet's user property list.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param property The MQTT5 user property to append to the packet.
+ * @return The ConnectPacket Object after adding the user property.
+ */
+ ConnectPacket &withUserProperty(UserProperty &&property) noexcept;
+
+ /********************************************
+ * Access Functions
+ ********************************************/
+
+ /**
+ * The maximum time interval, in seconds, that is permitted to elapse between the point at which the
+ * client finishes transmitting one MQTT packet and the point it starts sending the next. The client
+ * will use PINGREQ packets to maintain this property.
+ *
+ * If the responding CONNACK contains a keep alive property value, then that is the negotiated keep
+ * alive value. Otherwise, the keep alive sent by the client is the negotiated value.
+ *
+ * See [MQTT5 Keep Alive](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901045)
+ *
+ * @return The maximum time interval, in seconds, that is permitted to elapse between the point at which
+ * the client finishes transmitting one MQTT packet and the point it starts sending the next.
+ */
+ uint16_t getKeepAliveIntervalSec() const noexcept;
+
+ /**
+ * A unique string identifying the client to the server. Used to restore session state between
+ * connections.
+ *
+ * If left empty, the broker will auto-assign a unique client id. When reconnecting, the mqtt5 client
+ * will always use the auto-assigned client id.
+ *
+ * See [MQTT5 Client
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901059)
+ *
+ * @return A unique string identifying the client to the server.
+ */
+ const Crt::String &getClientId() const noexcept;
+
+ /**
+ * A string value that the server may use for client authentication and authorization.
+ *
+ * See [MQTT5 User Name](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901071)
+ *
+ * @return A string value that the server may use for client authentication and authorization.
+ */
+ const Crt::Optional<Crt::String> &getUsername() const noexcept;
+
+ /**
+ * Opaque binary data that the server may use for client authentication and authorization.
+ *
+ * See [MQTT5 Password](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901072)
+ *
+ * @return Opaque binary data that the server may use for client authentication and authorization.
+ */
+ const Crt::Optional<Crt::ByteCursor> &getPassword() const noexcept;
+
+ /**
+ * A time interval, in seconds, that the client requests the server to persist this connection's MQTT
+ * session state for. Has no meaning if the client has not been configured to rejoin sessions. Must be
+ * non-zero in order to successfully rejoin a session.
+ *
+ * If the responding CONNACK contains a session expiry property value, then that is the negotiated
+ * session expiry value. Otherwise, the session expiry sent by the client is the negotiated value.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901048)
+ *
+ * @return A time interval, in seconds, that the client requests the server to persist this connection's
+ * MQTT session state for.
+ */
+ const Crt::Optional<uint32_t> &getSessionExpiryIntervalSec() const noexcept;
+
+ /**
+ * If true, requests that the server send response information in the subsequent CONNACK. This response
+ * information may be used to set up request-response implementations over MQTT, but doing so is outside
+ * the scope of the MQTT5 spec and client.
+ *
+ * See [MQTT5 Request Response
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901052)
+ *
+ * @return If true, requests that the server send response information in the subsequent CONNACK.
+ */
+ const Crt::Optional<bool> &getRequestResponseInformation() const noexcept;
+
+ /**
+ * If true, requests that the server send additional diagnostic information (via response string or
+ * user properties) in DISCONNECT or CONNACK packets from the server.
+ *
+ * See [MQTT5 Request Problem
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901053)
+ *
+ * @return If true, requests that the server send additional diagnostic information (via response string
+ * or user properties) in DISCONNECT or CONNACK packets from the server.
+ */
+ const Crt::Optional<bool> &getRequestProblemInformation() const noexcept;
+
+ /**
+ * Notifies the server of the maximum number of in-flight QoS 1 and 2 messages the client is willing to
+ * handle. If omitted or null, then no limit is requested.
+ *
+ * See [MQTT5 Receive
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901049)
+ *
+ * @return The maximum number of in-flight QoS 1 and 2 messages the client is willing to handle.
+ */
+ const Crt::Optional<uint16_t> &getReceiveMaximum() const noexcept;
+
+ /**
+ * Notifies the server of the maximum packet size the client is willing to handle. If
+ * omitted or null, then no limit beyond the natural limits of MQTT packet size is requested.
+ *
+ * See [MQTT5 Maximum Packet
+ * Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901050)
+ *
+ * @return The maximum packet size the client is willing to handle
+ */
+ const Crt::Optional<uint32_t> &getMaximumPacketSizeBytes() const noexcept;
+
+ /**
+ * A time interval, in seconds, that the server should wait (for a session reconnection) before sending
+ * the will message associated with the connection's session. If omitted or null, the server will send
+ * the will when the associated session is destroyed. If the session is destroyed before a will delay
+ * interval has elapsed, then the will must be sent at the time of session destruction.
+ *
+ * See [MQTT5 Will Delay
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901062)
+ *
+ * @return A time interval, in seconds, that the server should wait (for a session reconnection) before
+ * sending the will message associated with the connection's session.
+ */
+ const Crt::Optional<uint32_t> &getWillDelayIntervalSec() const noexcept;
+
+ /**
+ * The definition of a message to be published when the connection's session is destroyed by the server
+ * or when the will delay interval has elapsed, whichever comes first. If null, then nothing will be
+ * sent.
+ *
+ * See [MQTT5 Will](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901040)
+ *
+ * @return The message to be published when the connection's session is destroyed by the server or when
+ * the will delay interval has elapsed, whichever comes first.
+ */
+ const Crt::Optional<std::shared_ptr<PublishPacket>> &getWill() const noexcept;
+
+ /**
+ * List of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901054)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Crt::Vector<UserProperty> &getUserProperties() const noexcept;
+
+ /**
+ * Intended for internal use only. Initializes the C aws_mqtt5_packet_connect_view
+ * from this ConnectPacket.
+ *
+ * @param raw_options - output parameter containing the low level connect options to be passed to the C client
+ * @param allocator - memory Allocator
+ *
+ */
+ bool initializeRawOptions(aws_mqtt5_packet_connect_view &raw_options, Allocator *allocator) noexcept;
+
+ virtual ~ConnectPacket();
+ ConnectPacket(const ConnectPacket &) = delete;
+ ConnectPacket(ConnectPacket &&) noexcept = delete;
+ ConnectPacket &operator=(const ConnectPacket &) = delete;
+ ConnectPacket &operator=(ConnectPacket &&) noexcept = delete;
+
+ private:
+ Allocator *m_allocator;
+
+ /**
+ * The maximum time interval, in seconds, that is permitted to elapse between the point at which the
+ * client finishes transmitting one MQTT packet and the point it starts sending the next. The client
+ * will use PINGREQ packets to maintain this property.
+ *
+ * If the responding CONNACK contains a keep alive property value, then that is the negotiated keep
+ * alive value. Otherwise, the keep alive sent by the client is the negotiated value.
+ *
+ * See [MQTT5 Keep Alive](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901045)
+ */
+ uint16_t m_keepAliveIntervalSec;
+
+ /**
+ * A unique string identifying the client to the server. Used to restore session state between
+ * connections.
+ *
+ * If left empty, the broker will auto-assign a unique client id. When reconnecting, the mqtt5 client
+ * will always use the auto-assigned client id.
+ *
+ * See [MQTT5 Client
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901059)
+ */
+ Crt::String m_clientId;
+
+ /**
+ * A string value that the server may use for client authentication and authorization.
+ *
+ * See [MQTT5 User Name](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901071)
+ */
+ Crt::Optional<Crt::String> m_username;
+
+ /**
+ * Opaque binary data that the server may use for client authentication and authorization.
+ *
+ * See [MQTT5 Password](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901072)
+ */
+ Crt::Optional<ByteCursor> m_password;
+
+ /**
+ * A time interval, in seconds, that the client requests the server to persist this connection's MQTT
+ * session state for. Has no meaning if the client has not been configured to rejoin sessions. Must be
+ * non-zero in order to successfully rejoin a session.
+ *
+ * If the responding CONNACK contains a session expiry property value, then that is the negotiated
+ * session expiry value. Otherwise, the session expiry sent by the client is the negotiated value.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901048)
+ */
+ Crt::Optional<uint32_t> m_sessionExpiryIntervalSec;
+
+ /**
+ * If set to true, requests that the server send response information in the subsequent CONNACK. This
+ * response information may be used to set up request-response implementations over MQTT, but doing so
+ * is outside the scope of the MQTT5 spec and client.
+ *
+ * See [MQTT5 Request Response
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901052)
+ */
+ Crt::Optional<bool> m_requestResponseInformation;
+
+ /**
+ * If set to true, requests that the server send additional diagnostic information (via response string
+ * or user properties) in DISCONNECT or CONNACK packets from the server.
+ *
+ * See [MQTT5 Request Problem
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901053)
+ */
+ Crt::Optional<bool> m_requestProblemInformation;
+
+ /**
+ * Notifies the server of the maximum number of in-flight QoS 1 and 2 messages the client is willing to
+ * handle. If omitted, then no limit is requested.
+ *
+ * See [MQTT5 Receive
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901049)
+ */
+ Crt::Optional<uint16_t> m_receiveMaximum;
+
+ /**
+ * Notifies the server of the maximum packet size the client is willing to handle. If
+ * omitted, then no limit beyond the natural limits of MQTT packet size is requested.
+ *
+ * See [MQTT5 Maximum Packet
+ * Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901050)
+ */
+ Crt::Optional<uint32_t> m_maximumPacketSizeBytes;
+
+ /**
+ * A time interval, in seconds, that the server should wait (for a session reconnection) before sending
+ * the will message associated with the connection's session. If omitted, the server will send the will
+ * when the associated session is destroyed. If the session is destroyed before a will delay interval
+ * has elapsed, then the will must be sent at the time of session destruction.
+ *
+ * See [MQTT5 Will Delay
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901062)
+ */
+ Crt::Optional<uint32_t> m_willDelayIntervalSeconds;
+
+ /**
+ * The definition of a message to be published when the connection's session is destroyed by the server
+ * or when the will delay interval has elapsed, whichever comes first. If undefined, then nothing will
+ * be sent.
+ *
+ * See [MQTT5 Will](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901040)
+ */
+ Crt::Optional<std::shared_ptr<PublishPacket>> m_will;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901054)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Underlying data storage for internal use
+ ///////////////////////////////////////////////////////////////////////////
+ struct aws_byte_cursor m_usernameCursor;
+ struct aws_byte_buf m_passowrdStorage;
+ struct aws_mqtt5_packet_publish_view m_willStorage;
+ struct aws_mqtt5_user_property *m_userPropertiesStorage;
+ uint8_t m_requestResponseInformationStorage;
+ uint8_t m_requestProblemInformationStorage;
+ };
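+
+ // --- Editorial illustration, not part of the upstream header: a sketch of building CONNECT
+ // --- options with the fluent setters declared above. All literal values are placeholders.
+ inline std::shared_ptr<ConnectPacket> ExampleMakeConnectOptions()
+ {
+     auto connect = std::make_shared<ConnectPacket>();
+     connect->withClientId("example-client-id") /* leave empty to let the broker assign one */
+         .withKeepAliveIntervalSec(60)          /* must be larger than the configured ping timeout */
+         .withSessionExpiryIntervalSec(3600);   /* request one hour of session persistence */
+     return connect;
+ }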
+
+ /**
+ * Data model of an [MQTT5
+ * CONNACK](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901074) packet.
+ */
+ class AWS_CRT_CPP_API ConnAckPacket : public IPacket
+ {
+ public:
+ ConnAckPacket(
+ const aws_mqtt5_packet_connack_view &packet,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_CONNACK; };
+
+ /**
+ * True if the client rejoined an existing session on the server, false otherwise.
+ *
+ * See [MQTT5 Session
+ * Present](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901078)
+ *
+ * @return True if the client rejoined an existing session on the server, false otherwise.
+ */
+ bool getSessionPresent() const noexcept;
+
+ /**
+ * Indicates either success or the reason for failure for the connection attempt.
+ *
+ * See [MQTT5 Connect Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901079)
+ *
+ * @return Code indicating either success or the reason for failure for the connection attempt.
+ */
+ ConnectReasonCode getReasonCode() const noexcept;
+
+ /**
+ * A time interval, in seconds, that the server will persist this connection's MQTT session state
+ * for. If present, this value overrides any session expiry specified in the preceding CONNECT packet.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901082)
+ *
+ * @return A time interval, in seconds, that the server will persist this connection's MQTT session
+ * state for.
+ */
+ const Crt::Optional<uint32_t> &getSessionExpiryInterval() const noexcept;
+
+ /**
+ * The maximum amount of in-flight QoS 1 or 2 messages that the server is willing to handle at once. If
+ * omitted or null, the limit is based on the valid MQTT packet id space (65535).
+ *
+ * See [MQTT5 Receive
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901083)
+ *
+ * @return The maximum amount of in-flight QoS 1 or 2 messages that the server is willing to handle at
+ * once.
+ */
+ const Crt::Optional<uint16_t> &getReceiveMaximum() const noexcept;
+
+ /**
+ * The maximum message delivery quality of service that the server will allow on this connection.
+ *
+ * See [MQTT5 Maximum QoS](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901084)
+ *
+ * @return The maximum message delivery quality of service that the server will allow on this
+ * connection.
+ */
+ const Crt::Optional<QOS> &getMaximumQOS() const noexcept;
+
+ /**
+ * Indicates whether the server supports retained messages. If null, retained messages are
+ * supported.
+ *
+ * See [MQTT5 Retain
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901085)
+ *
+ * @return Whether the server supports retained messages
+ */
+ const Crt::Optional<bool> &getRetainAvailable() const noexcept;
+
+ /**
+ * Specifies the maximum packet size, in bytes, that the server is willing to accept. If null, there
+ * is no limit beyond what is imposed by the MQTT spec itself.
+ *
+ * See [MQTT5 Maximum Packet
+ * Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901086)
+ *
+ * @return The maximum packet size, in bytes, that the server is willing to accept.
+ */
+ const Crt::Optional<uint32_t> &getMaximumPacketSize() const noexcept;
+
+ /**
+ * Specifies a client identifier assigned to this connection by the server. Only valid when the client
+ * id of the preceding CONNECT packet was left empty.
+ *
+ * See [MQTT5 Assigned Client
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901087)
+ *
+ * @return Client identifier assigned to this connection by the server
+ */
+ const Crt::Optional<String> &getAssignedClientIdentifier() const noexcept;
+
+ /**
+ * Specifies the maximum topic alias value that the server will accept from the client.
+ *
+ * See [MQTT5 Topic Alias
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901088)
+ *
+ * @return maximum topic alias
+ */
+ const Crt::Optional<uint16_t> getTopicAliasMaximum() const noexcept;
+
+ /**
+ * Additional diagnostic information about the result of the connection attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901089)
+ *
+ * @return Additional diagnostic information about the result of the connection attempt.
+ */
+ const Crt::Optional<String> &getReasonString() const noexcept;
+
+ /**
+ * List of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901090)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Vector<UserProperty> &getUserProperty() const noexcept;
+
+ /**
+ * Indicates whether the server supports wildcard subscriptions. If null, wildcard subscriptions
+ * are supported.
+ *
+ * See [MQTT5 Wildcard Subscriptions
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901091)
+ *
+ * @return Whether the server supports wildcard subscriptions.
+ */
+ const Crt::Optional<bool> &getWildcardSubscriptionsAvaliable() const noexcept;
+
+ /**
+ * Indicates whether the server supports subscription identifiers. If null, subscription identifiers
+ * are supported.
+ *
+ * See [MQTT5 Subscription Identifiers
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901092)
+ *
+ * @return whether the server supports subscription identifiers.
+ */
+ const Crt::Optional<bool> &getSubscriptionIdentifiersAvaliable() const noexcept;
+
+ /**
+ * Indicates whether the server supports shared subscription topic filters. If null, shared
+ * subscriptions are supported.
+ *
+ * See [MQTT5 Shared Subscriptions
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901093)
+ *
+ * @return whether the server supports shared subscription topic filters.
+ */
+ const Crt::Optional<bool> &getSharedSubscriptionsAvaliable() const noexcept;
+
+ /**
+ * Server-requested override of the keep alive interval, in seconds. If null, the keep alive value sent
+ * by the client should be used.
+ *
+ * See [MQTT5 Server Keep
+ * Alive](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901094)
+ *
+ * @return Server-requested override of the keep alive interval, in seconds
+ */
+ const Crt::Optional<uint16_t> &getServerKeepAlive() const noexcept;
+
+ /**
+ * A value that can be used in the creation of a response topic associated with this connection.
+ * MQTT5-based request/response is outside the purview of the MQTT5 spec and this client.
+ *
+ * See [MQTT5 Response
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901095)
+ *
+ * @return A value that can be used in the creation of a response topic associated with this connection.
+ */
+ const Crt::Optional<String> &getResponseInformation() const noexcept;
+
+ /**
+ * Property indicating an alternate server that the client may temporarily or permanently attempt
+ * to connect to instead of the configured endpoint. Will only be set if the reason code indicates
+ * another server may be used (ServerMoved, UseAnotherServer).
+ *
+ * See [MQTT5 Server
+ * Reference](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901096)
+ *
+ * @return Property indicating an alternate server that the client may temporarily or permanently
+ * attempt to connect to instead of the configured endpoint.
+ */
+ const Crt::Optional<String> &getServerReference() const noexcept;
+
+ virtual ~ConnAckPacket(){};
+ ConnAckPacket(const ConnAckPacket &) = delete;
+ ConnAckPacket(ConnAckPacket &&) noexcept = delete;
+ ConnAckPacket &operator=(const ConnAckPacket &) = delete;
+ ConnAckPacket &operator=(ConnAckPacket &&) noexcept = delete;
+
+ private:
+ /**
+ * True if the client rejoined an existing session on the server, false otherwise.
+ *
+ * See [MQTT5 Session
+ * Present](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901078)
+ */
+ bool m_sessionPresent;
+
+ /**
+ * Indicates either success or the reason for failure for the connection attempt.
+ *
+ * See [MQTT5 Connect Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901079)
+ */
+ ConnectReasonCode m_reasonCode;
+
+ /**
+ * A time interval, in seconds, that the server will persist this connection's MQTT session state
+ * for. If present, this value overrides any session expiry specified in the preceding CONNECT packet.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901082)
+ */
+ Crt::Optional<uint32_t> m_sessionExpiryInterval;
+
+ /**
+ * The maximum amount of in-flight QoS 1 or 2 messages that the server is willing to handle at once. If
+ * omitted, the limit is based on the valid MQTT packet id space (65535).
+ *
+ * See [MQTT5 Receive
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901083)
+ */
+ Crt::Optional<uint16_t> m_receiveMaximum;
+
+ /**
+ * The maximum message delivery quality of service that the server will allow on this connection.
+ *
+ * See [MQTT5 Maximum QoS](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901084)
+ */
+ Crt::Optional<QOS> m_maximumQOS;
+
+ /**
+ * Indicates whether the server supports retained messages. If undefined, retained messages are
+ * supported.
+ *
+ * See [MQTT5 Retain
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901085)
+ */
+ Crt::Optional<bool> m_retainAvailable;
+
+ /**
+ * Specifies the maximum packet size, in bytes, that the server is willing to accept. If undefined,
+ * there is no limit beyond what is imposed by the MQTT spec itself.
+ *
+ * See [MQTT5 Maximum Packet
+ * Size](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901086)
+ */
+ Crt::Optional<uint32_t> m_maximumPacketSize;
+
+ /**
+ * Specifies a client identifier assigned to this connection by the server. Only valid when the client
+ * id of the preceding CONNECT packet was left empty.
+ *
+ * See [MQTT5 Assigned Client
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901087)
+ */
+ Crt::Optional<String> m_assignedClientIdentifier;
+
+ /**
+ * Specifies the maximum topic alias value that the server will accept from the client.
+ *
+ * See [MQTT5 Topic Alias
+ * Maximum](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901088)
+ */
+ Crt::Optional<uint16_t> m_topicAliasMaximum;
+
+ /**
+ * Additional diagnostic information about the result of the connection attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901089)
+ */
+ Crt::Optional<String> m_reasonString;
+
+ /**
+ * Indicates whether the server supports wildcard subscriptions. If undefined, wildcard subscriptions
+ * are supported.
+ *
+ * See [MQTT5 Wildcard Subscriptions
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901091)
+ */
+ Crt::Optional<bool> m_wildcardSubscriptionsAvaliable;
+
+ /**
+ * Indicates whether the server supports subscription identifiers. If undefined, subscription
+ * identifiers are supported.
+ *
+ * See [MQTT5 Subscription Identifiers
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901092)
+ */
+ Crt::Optional<bool> m_subscriptionIdentifiersAvaliable;
+
+ /**
+ * Indicates whether the server supports shared subscription topic filters. If undefined, shared
+ * subscriptions are supported.
+ *
+ * See [MQTT5 Shared Subscriptions
+ * Available](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901093)
+ */
+ Crt::Optional<bool> m_sharedSubscriptionsAvaliable;
+
+ /**
+ * Server-requested override of the keep alive interval, in seconds. If undefined, the keep alive value
+ * sent by the client should be used.
+ *
+ * See [MQTT5 Server Keep
+ * Alive](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901094)
+ */
+ Crt::Optional<uint16_t> m_serverKeepAlive;
+
+ /**
+ * A value that can be used in the creation of a response topic associated with this connection.
+ * MQTT5-based request/response is outside the purview of the MQTT5 spec and this client.
+ *
+ * See [MQTT5 Response
+ * Information](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901095)
+ */
+ Crt::Optional<String> m_responseInformation;
+
+ /**
+ * Property indicating an alternate server that the client may temporarily or permanently attempt
+ * to connect to instead of the configured endpoint. Will only be set if the reason code indicates
+ * another server may be used (ServerMoved, UseAnotherServer).
+ *
+ * See [MQTT5 Server
+ * Reference](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901096)
+ */
+ Crt::Optional<String> m_serverReference;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901090)
+ */
+ Vector<UserProperty> m_userProperties;
+ };
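+
+ // --- Editorial illustration, not part of the upstream header: a sketch of examining a CONNACK.
+ // --- The handler signature is assumed; only accessors declared above are used.
+ inline void ExampleInspectConnAck(const ConnAckPacket &connack)
+ {
+     ConnectReasonCode reason = connack.getReasonCode(); /* success or failure reason */
+     bool sessionPresent = connack.getSessionPresent();  /* true if an existing session was rejoined */
+
+     /* When the CONNECT packet left the client id empty, the broker-assigned id is reported here. */
+     if (connack.getAssignedClientIdentifier().has_value())
+     {
+         const Crt::String &assignedId = connack.getAssignedClientIdentifier().value();
+         (void)assignedId;
+     }
+     (void)reason;
+     (void)sessionPresent;
+ }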
+
+ /**
+ * Data model of an [MQTT5
+ * DISCONNECT](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901205) packet.
+ */
+ class AWS_CRT_CPP_API DisconnectPacket : public IPacket
+ {
+ public:
+ DisconnectPacket(Allocator *allocator = ApiAllocator()) noexcept;
+ DisconnectPacket(
+ const aws_mqtt5_packet_disconnect_view &raw_options,
+ Allocator *allocator = ApiAllocator()) noexcept;
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_DISCONNECT; };
+
+ bool initializeRawOptions(aws_mqtt5_packet_disconnect_view &raw_options) noexcept;
+
+ /**
+ * Sets the value indicating the reason that the sender is closing the connection
+ *
+ * See [MQTT5 Disconnect Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208)
+ *
+ * @param reasonCode Value indicating the reason that the sender is closing the connection
+ * @return The DisconnectPacket Object after setting the reason code.
+ */
+ DisconnectPacket &withReasonCode(const DisconnectReasonCode reasonCode) noexcept;
+
+ /**
+ * Sets the change to the session expiry interval negotiated at connection time as part of the
+ * disconnect. Only valid for DISCONNECT packets sent from client to server. It is not valid to
+ * attempt to change session expiry from zero to a non-zero value.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901211)
+ *
+ * @param sessionExpiryIntervalSeconds The requested change, in seconds, to the session expiry interval
+ * @return The DisconnectPacket Object after setting the session expiry interval.
+ */
+ DisconnectPacket &withSessionExpiryIntervalSec(const uint32_t sessionExpiryIntervalSeconds) noexcept;
+
+ /**
+ * Sets the additional diagnostic information about the reason that the sender is closing the connection
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901212)
+ *
+ * @param reasonString Additional diagnostic information about the reason that the sender is closing the
+ * connection
+ * @return The DisconnectPacket Object after setting the reason string.
+ */
+ DisconnectPacket &withReasonString(Crt::String reasonString) noexcept;
+
+ /**
+ * Sets the property indicating an alternate server that the client may temporarily or permanently
+ * attempt to connect to instead of the configured endpoint. Will only be set if the reason code
+ * indicates another server may be used (ServerMoved, UseAnotherServer).
+ *
+ * See [MQTT5 Server
+ * Reference](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901214)
+ *
+ * @param serverReference Property indicating an alternate server that the client may temporarily or
+ * permanently attempt to connect to instead of the configured endpoint.
+ * @return The DisconnectPacket Object after setting the server reference.
+ */
+ DisconnectPacket &withServerReference(Crt::String serverReference) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901213)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The DisconnectPacket Object after setting the user properties.
+ */
+ DisconnectPacket &withUserProperties(const Vector<UserProperty> &userProperties) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901213)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The DisconnectPacket Object after setting the user properties.
+ */
+ DisconnectPacket &withUserProperties(Vector<UserProperty> &&userProperties) noexcept;
+
+ /**
+ * Puts an MQTT5 user property at the back of the packet's user property list.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param property The MQTT5 user property to append to the packet.
+ * @return The DisconnectPacket Object after adding the user property.
+ */
+ DisconnectPacket &withUserProperty(UserProperty &&property) noexcept;
+
+ /**
+ * Value indicating the reason that the sender is closing the connection
+ *
+ * See [MQTT5 Disconnect Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208)
+ *
+ * @return Value indicating the reason that the sender is closing the connection
+ */
+ DisconnectReasonCode getReasonCode() const noexcept;
+
+ /**
+ * A change to the session expiry interval negotiated at connection time as part of the disconnect. Only
+ * valid for DISCONNECT packets sent from client to server. It is not valid to attempt to change
+ * session expiry from zero to a non-zero value.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901211)
+ *
+ * @return A change to the session expiry interval negotiated at connection time as part of the
+ * disconnect.
+ */
+ const Crt::Optional<uint32_t> &getSessionExpiryIntervalSec() const noexcept;
+
+ /**
+ * Additional diagnostic information about the reason that the sender is closing the connection
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901212)
+ *
+ * @return Additional diagnostic information about the reason that the sender is closing the connection
+ */
+ const Crt::Optional<Crt::String> &getReasonString() const noexcept;
+
+ /**
+ * Property indicating an alternate server that the client may temporarily or permanently attempt
+ * to connect to instead of the configured endpoint. Will only be set if the reason code indicates
+ * another server may be used (ServerMoved, UseAnotherServer).
+ *
+ * See [MQTT5 Server
+ * Reference](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901214)
+ *
+ * @return Property indicating an alternate server that the client may temporarily or permanently
+ * attempt to connect to instead of the configured endpoint.
+ */
+ const Crt::Optional<Crt::String> &getServerReference() const noexcept;
+
+ /**
+ * List of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901213)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Crt::Vector<UserProperty> &getUserProperties() const noexcept;
+
+ virtual ~DisconnectPacket();
+ DisconnectPacket(const DisconnectPacket &) = delete;
+ DisconnectPacket(DisconnectPacket &&) noexcept = delete;
+ DisconnectPacket &operator=(const DisconnectPacket &) = delete;
+ DisconnectPacket &operator=(DisconnectPacket &&) noexcept = delete;
+
+ private:
+ Crt::Allocator *m_allocator;
+
+ /**
+ * Value indicating the reason that the sender is closing the connection
+ *
+ * See [MQTT5 Disconnect Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208)
+ */
+ DisconnectReasonCode m_reasonCode;
+
+ /**
+ * Requests a change to the session expiry interval negotiated at connection time as part of the
+ * disconnect. Only valid for DISCONNECT packets sent from client to server. It is not valid to
+ * attempt to change session expiry from zero to a non-zero value.
+ *
+ * See [MQTT5 Session Expiry
+ * Interval](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901211)
+ */
+ Crt::Optional<uint32_t> m_sessionExpiryIntervalSec;
+
+ /**
+ * Additional diagnostic information about the reason that the sender is closing the connection
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901212)
+ */
+ Crt::Optional<Crt::String> m_reasonString;
+
+ /**
+ * Property indicating an alternate server that the client may temporarily or permanently attempt
+ * to connect to instead of the configured endpoint. Will only be set if the reason code indicates
+ * another server may be used (ServerMoved, UseAnotherServer).
+ *
+ * See [MQTT5 Server
+ * Reference](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901214)
+ */
+ Crt::Optional<Crt::String> m_serverReference;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901213)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Underlying data storage for internal use
+ ///////////////////////////////////////////////////////////////////////////
+ struct aws_byte_cursor m_reasonStringCursor;
+ struct aws_byte_cursor m_serverReferenceCursor;
+ struct aws_mqtt5_user_property *m_userPropertiesStorage;
+ };
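+
+ // --- Editorial illustration, not part of the upstream header: a sketch of building a DISCONNECT
+ // --- for a client-initiated shutdown. No specific DisconnectReasonCode enumerator is assumed here;
+ // --- one could additionally be attached via withReasonCode(...).
+ inline std::shared_ptr<DisconnectPacket> ExampleMakeDisconnect()
+ {
+     auto disconnect = std::make_shared<DisconnectPacket>();
+     disconnect->withReasonString("client shutting down"); /* optional diagnostic text for the broker */
+     return disconnect;
+ }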
+
+ /**
+ * Data model of an [MQTT5
+ * PUBACK](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901121) packet
+ */
+ class AWS_CRT_CPP_API PubAckPacket : public IPacket
+ {
+ public:
+ PubAckPacket(
+ const aws_mqtt5_packet_puback_view &packet,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_PUBACK; };
+
+ /**
+ * Success indicator or failure reason for the associated PUBLISH packet.
+ *
+ * See [MQTT5 PUBACK Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124)
+ *
+ * @return Success indicator or failure reason for the associated PUBLISH packet.
+ */
+ PubAckReasonCode getReasonCode() const noexcept;
+
+ /**
+ * Additional diagnostic information about the result of the PUBLISH attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901127)
+ *
+ * @return Additional diagnostic information about the result of the PUBLISH attempt.
+ */
+ const Crt::Optional<Crt::String> &getReasonString() const noexcept;
+
+ /**
+ * List of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901128)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Crt::Vector<UserProperty> &getUserProperties() const noexcept;
+
+ virtual ~PubAckPacket(){};
+ PubAckPacket(const PubAckPacket &toCopy) noexcept = delete;
+ PubAckPacket(PubAckPacket &&toMove) noexcept = delete;
+ PubAckPacket &operator=(const PubAckPacket &toCopy) noexcept = delete;
+ PubAckPacket &operator=(PubAckPacket &&toMove) noexcept = delete;
+
+ private:
+ /**
+ * Success indicator or failure reason for the associated PUBLISH packet.
+ *
+ * See [MQTT5 PUBACK Reason
+ * Code](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124)
+ */
+ PubAckReasonCode m_reasonCode;
+
+ /**
+ * Additional diagnostic information about the result of the PUBLISH attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901127)
+ */
+ Crt::Optional<Crt::String> m_reasonString;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901128)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+ };
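+
+ // --- Editorial illustration, not part of the upstream header: a sketch of reading a PUBACK once
+ // --- one has been obtained (for QoS 1 publishes it arrives inside a PublishResult, see below).
+ inline void ExampleInspectPubAck(const PubAckPacket &puback)
+ {
+     PubAckReasonCode reason = puback.getReasonCode(); /* success indicator or failure reason */
+     if (puback.getReasonString().has_value())
+     {
+         const Crt::String &detail = puback.getReasonString().value(); /* broker-provided diagnostics */
+         (void)detail;
+     }
+     (void)reason;
+ }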
+
+ /**
+ * PublishResult returned through the onPublishCompletionCallback after Publish is called.
+ *
+ * Publish with QoS 0: Ack will be nullptr.
+ * Publish with QoS 1: Ack will contain a PubAckPacket.
+ */
+ class AWS_CRT_CPP_API PublishResult
+ {
+ public:
+ PublishResult(); // QoS 0 success
+ PublishResult(std::shared_ptr<PubAckPacket> puback); // Qos 1 success
+ PublishResult(int errorCode); // any failure
+
+ /**
+ * Gets whether the publish operation succeeded.
+ *
+ * @return true if the error code is 0 and the publish succeeded
+ */
+ bool wasSuccessful() const { return m_errorCode == 0; };
+
+ /**
+ * Get the error code value
+ *
+ * @return the error code
+ */
+ int getErrorCode() const { return m_errorCode; };
+
+ /**
+ * Gets the publish acknowledgement packet.
+ *
+ * @return a std::shared_ptr<IPacket> containing a PubAckPacket if the publish was sent with QoS 1,
+ * otherwise nullptr.
+ */
+ std::shared_ptr<IPacket> getAck() const { return m_ack; };
+
+ ~PublishResult() noexcept;
+ PublishResult(const PublishResult &toCopy) noexcept = delete;
+ PublishResult(PublishResult &&toMove) noexcept = delete;
+ PublishResult &operator=(const PublishResult &toCopy) noexcept = delete;
+ PublishResult &operator=(PublishResult &&toMove) noexcept = delete;
+
+ private:
+ std::shared_ptr<IPacket> m_ack;
+ int m_errorCode;
+ };
+
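+ /*
+ * Usage sketch (illustrative only, not part of this header): inspecting a PublishResult
+ * delivered to a publish-completion callback. The callback wiring itself belongs to the
+ * Mqtt5Client API and is assumed here; the handler name is a placeholder.
+ *
+ *   void handlePublishResult(const Aws::Crt::Mqtt5::PublishResult &result)
+ *   {
+ *       if (!result.wasSuccessful())
+ *       {
+ *           // result.getErrorCode() holds the aws error code
+ *           return;
+ *       }
+ *       // A QoS 1 publish carries a PUBACK; a QoS 0 publish returns a null ack.
+ *       if (auto ack = result.getAck())
+ *       {
+ *           auto puback = std::static_pointer_cast<Aws::Crt::Mqtt5::PubAckPacket>(ack);
+ *           (void)puback->getReasonCode();
+ *       }
+ *   }
+ */
+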
+ /**
+ * Configures a single subscription within a Subscribe operation
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ */
+ class AWS_CRT_CPP_API Subscription
+ {
+
+ public:
+ Subscription(Allocator *allocator = ApiAllocator());
+ Subscription(Crt::String topicFilter, Mqtt5::QOS qos, Allocator *allocator = ApiAllocator());
+
+ /**
+ * Sets topic filter to subscribe to
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ *
+ * @param topicFilter topic filter to subscribe to
+ * @return The Subscription Object after setting the topic filter.
+ */
+ Subscription &withTopicFilter(Crt::String topicFilter) noexcept;
+
+ /**
+ * Sets Maximum QoS on which the subscriber will accept publish messages. Negotiated QoS may be
+ * different.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ *
+ * @param QOS maximum QoS on which matching publish messages will be accepted
+ * @return The Subscription Object after setting the QoS.
+ */
+ Subscription &withQOS(Mqtt5::QOS QOS) noexcept;
+
+ /**
+ * Sets whether the server should avoid sending publishes to this client when this client was the one
+ * who sent the publish. Defaults to false.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ *
+ * @param noLocal true to suppress publishes that originated from this client
+ * @return The Subscription Object after setting the noLocal flag.
+ */
+ Subscription &withNoLocal(bool noLocal) noexcept;
+
+ /**
+ * Sets whether messages sent due to this subscription should keep the retain flag preserved on the
+ * message. Defaults to false.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ *
+ * @param retain true to preserve the retain flag on messages delivered for this subscription
+ * @return The Subscription Object after setting the retain-as-published flag.
+ */
+ Subscription &withRetain(bool retain) noexcept;
+
+ /**
+ * Sets whether retained messages on matching topics should be sent in reaction to this subscription.
+ * Defaults to RetainHandlingType.SendOnSubscribe.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ *
+ * @param retainHandlingType how retained messages should be handled for this subscription
+ * @return The Subscription Object after setting the retain handling type.
+ */
+ Subscription &withRetainHandlingType(RetainHandlingType retainHandlingType) noexcept;
+
+ bool initializeRawOptions(aws_mqtt5_subscription_view &raw_options) const noexcept;
+
+ virtual ~Subscription(){};
+ Subscription(const Subscription &) noexcept;
+ Subscription(Subscription &&) noexcept;
+ Subscription &operator=(const Subscription &) noexcept;
+ Subscription &operator=(Subscription &&) noexcept;
+
+ private:
+ Allocator *m_allocator;
+
+ /**
+ * Topic filter to subscribe to
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ */
+ Crt::String m_topicFilter;
+
+ /**
+ * Maximum QoS on which the subscriber will accept publish messages. Negotiated QoS may be different.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ */
+ Mqtt5::QOS m_qos;
+
+ /**
+ * Should the server not send publishes to a client when that client was the one who sent the publish?
+ * If undefined, this is assumed to be false.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ */
+ bool m_noLocal;
+
+ /**
+ * Should messages sent due to this subscription keep the retain flag preserved on the message? If
+ * undefined, this is assumed to be false.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ */
+ bool m_retain;
+
+ /**
+ * Should retained messages on matching topics be sent in reaction to this subscription? If undefined,
+ * this is assumed to be RetainHandlingType.SendOnSubscribe.
+ *
+ * See [MQTT5 Subscription
+ * Options](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169)
+ */
+ RetainHandlingType m_retainHnadlingType;
+ };
+
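+ /*
+ * Usage sketch (illustrative only, not part of this header): building a Subscription with
+ * the fluent setters above. The enumerator names (AWS_MQTT5_QOS_AT_LEAST_ONCE,
+ * AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE) come from the underlying aws-c-mqtt headers.
+ *
+ *   Aws::Crt::Mqtt5::Subscription subscription("sensors/+/temperature", AWS_MQTT5_QOS_AT_LEAST_ONCE);
+ *   subscription.withNoLocal(true)
+ *       .withRetain(false)
+ *       .withRetainHandlingType(AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE);
+ */
+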
+ /**
+ * Data model of an [MQTT5
+ * SUBSCRIBE](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901161) packet.
+ */
+ class AWS_CRT_CPP_API SubscribePacket : public IPacket
+ {
+ public:
+ SubscribePacket(Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_SUBSCRIBE; };
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The SubscribePacket Object after setting the user properties.
+ */
+ SubscribePacket &withUserProperties(const Vector<UserProperty> &userProperties) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The SubscribePacket Object after setting the user properties.
+ */
+ SubscribePacket &withUserProperties(Vector<UserProperty> &&userProperties) noexcept;
+
+ /**
+ * Appends an MQTT5 user property to the back of the packet's user property list.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param property MQTT5 user property to include with the packet.
+ * @return The SubscribePacket Object after setting the user property
+ */
+ SubscribePacket &withUserProperty(UserProperty &&property) noexcept;
+
+ /**
+ * Sets the value to associate with all subscriptions in this request. Publish packets that
+ * match a subscription in this request should include this identifier in the resulting message.
+ *
+ * See [MQTT5 Subscription
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901166)
+ *
+ * @param subscriptionIdentifier A positive integer to associate with all subscriptions in this request.
+ * @return The SubscribePacket Object after setting the subscription identifier.
+ */
+ SubscribePacket &withSubscriptionIdentifier(uint32_t subscriptionIdentifier) noexcept;
+
+ /**
+ * Sets a list of subscriptions within the SUBSCRIBE packet.
+ *
+ * @param subscriptions vector of subscriptions to add within the SUBSCRIBE packet.
+ *
+ * @return The SubscribePacket Object after setting the subscription.
+ */
+ SubscribePacket &withSubscriptions(const Vector<Subscription> &subscriptions) noexcept;
+
+ /**
+ * Sets a list of subscriptions within the SUBSCRIBE packet.
+ *
+ * @param subscriptions vector of subscriptions to add within the SUBSCRIBE packet.
+ *
+ * @return The SubscribePacket Object after setting the subscription.
+ */
+ SubscribePacket &withSubscriptions(Crt::Vector<Subscription> &&subscriptions) noexcept;
+
+ /**
+ * Sets a single subscription within the SUBSCRIBE packet.
+ *
+ * @param subscription The subscription to add within the SUBSCRIBE packet.
+ *
+ * @return The SubscribePacket Object after setting the subscription.
+ */
+ SubscribePacket &withSubscription(Subscription &&subscription) noexcept;
+
+ bool initializeRawOptions(aws_mqtt5_packet_subscribe_view &raw_options) noexcept;
+
+ virtual ~SubscribePacket();
+ SubscribePacket(const SubscribePacket &) noexcept = delete;
+ SubscribePacket(SubscribePacket &&) noexcept = delete;
+ SubscribePacket &operator=(const SubscribePacket &) noexcept = delete;
+ SubscribePacket &operator=(SubscribePacket &&) noexcept = delete;
+
+ private:
+ Allocator *m_allocator;
+
+ /**
+ * List of topic filter subscriptions that the client wishes to listen to
+ *
+ * See [MQTT5 Subscribe
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901168)
+ */
+ Crt::Vector<Subscription> m_subscriptions;
+
+ /**
+ * A positive integer to associate with all subscriptions in this request. Publish packets that match
+ * a subscription in this request should include this identifier in the resulting message.
+ *
+ * See [MQTT5 Subscription
+ * Identifier](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901166)
+ */
+ Crt::Optional<uint32_t> m_subscriptionIdentifier;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901167)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Underlying data storage for internal use
+ ///////////////////////////////////////////////////////////////////////////
+ struct aws_mqtt5_subscription_view *m_subscriptionViewStorage;
+ struct aws_mqtt5_user_property *m_userPropertiesStorage;
+ };
+
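+ /*
+ * Usage sketch (illustrative only, not part of this header): assembling a SUBSCRIBE packet
+ * from Subscription entries. Passing the packet to the client's Subscribe call is part of
+ * the Mqtt5Client API and is assumed here; the QoS enumerator comes from aws-c-mqtt.
+ *
+ *   auto subscribePacket = std::make_shared<Aws::Crt::Mqtt5::SubscribePacket>();
+ *   subscribePacket->withSubscription(
+ *                       Aws::Crt::Mqtt5::Subscription("devices/+/status", AWS_MQTT5_QOS_AT_LEAST_ONCE))
+ *       .withSubscriptionIdentifier(1);
+ */
+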
+ /**
+ * Data model of an [MQTT5
+ * SUBACK](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901171) packet.
+ */
+ class AWS_CRT_CPP_API SubAckPacket : public IPacket
+ {
+ public:
+ SubAckPacket(
+ const aws_mqtt5_packet_suback_view &packet,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_SUBACK; };
+
+ /**
+ * Returns additional diagnostic information about the result of the SUBSCRIBE attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901176)
+ *
+ * @return Additional diagnostic information about the result of the SUBSCRIBE attempt.
+ */
+ const Crt::Optional<Crt::String> &getReasonString() const noexcept;
+
+ /**
+ * Returns list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901177)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Crt::Vector<UserProperty> &getUserProperties() const noexcept;
+
+ /**
+ * Returns list of reason codes indicating the result of each individual subscription entry in the
+ * associated SUBSCRIBE packet.
+ *
+ * See [MQTT5 Suback
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901178)
+ *
+ * @return list of reason codes indicating the result of each individual subscription entry in the
+ * associated SUBSCRIBE packet.
+ */
+ const Crt::Vector<SubAckReasonCode> &getReasonCodes() const noexcept;
+
+ virtual ~SubAckPacket() { m_userProperties.clear(); };
+ SubAckPacket(const SubAckPacket &) noexcept = delete;
+ SubAckPacket(SubAckPacket &&) noexcept = delete;
+ SubAckPacket &operator=(const SubAckPacket &) noexcept = delete;
+ SubAckPacket &operator=(SubAckPacket &&) noexcept = delete;
+
+ private:
+ /**
+ * A list of reason codes indicating the result of each individual subscription entry in the
+ * associated SUBSCRIBE packet.
+ *
+ * See [MQTT5 Suback
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901178)
+ */
+ Crt::Vector<SubAckReasonCode> m_reasonCodes;
+
+ /**
+ * Additional diagnostic information about the result of the SUBSCRIBE attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901176)
+ */
+ Crt::Optional<Crt::String> m_reasonString;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901177)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+ };
+
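+ /*
+ * Usage sketch (illustrative only, not part of this header): checking the per-subscription
+ * results carried by a SUBACK. How the packet reaches user code (via the client's
+ * subscribe-completion callback) is assumed here; the function name is a placeholder.
+ *
+ *   void inspectSubAck(const Aws::Crt::Mqtt5::SubAckPacket &suback)
+ *   {
+ *       for (Aws::Crt::Mqtt5::SubAckReasonCode code : suback.getReasonCodes())
+ *       {
+ *           // Values below 0x80 report the granted QoS; 0x80 and above indicate failure.
+ *           (void)code;
+ *       }
+ *   }
+ */
+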
+ /**
+ * Data model of an [MQTT5
+ * UNSUBSCRIBE](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901179) packet.
+ */
+ class AWS_CRT_CPP_API UnsubscribePacket : public IPacket
+ {
+ public:
+ UnsubscribePacket(Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_UNSUBSCRIBE; };
+
+ /**
+ * Appends a topic filter that the client wishes to unsubscribe from.
+ *
+ * @param topicFilter topic filter that the client wishes to unsubscribe from
+ *
+ * @return The UnsubscribePacket Object after adding the topic filter.
+ */
+ UnsubscribePacket &withTopicFilter(Crt::String topicFilter) noexcept;
+
+ /**
+ * Sets the list of topic filters that the client wishes to unsubscribe from.
+ *
+ * @param topicFilters vector of topic filters that the client wishes to unsubscribe from
+ *
+ * @return The UnsubscribePacket Object after setting the topic filters.
+ */
+ UnsubscribePacket &withTopicFilters(Crt::Vector<String> topicFilters) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901184)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The UnsubscribePacket Object after setting the user properties.
+ */
+ UnsubscribePacket &withUserProperties(const Vector<UserProperty> &userProperties) noexcept;
+
+ /**
+ * Sets the list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901184)
+ *
+ * @param userProperties List of MQTT5 user properties included with the packet.
+ * @return The UnsubscribePacket Object after setting the user properties.
+ */
+ UnsubscribePacket &withUserProperties(Vector<UserProperty> &&userProperties) noexcept;
+
+ /**
+ * Appends an MQTT5 user property to the back of the packet's user property list.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901116)
+ *
+ * @param property MQTT5 user property to include with the packet.
+ * @return The UnsubscribePacket Object after setting the user property.
+ */
+ UnsubscribePacket &withUserProperty(UserProperty &&property) noexcept;
+
+ bool initializeRawOptions(aws_mqtt5_packet_unsubscribe_view &raw_options) noexcept;
+
+ virtual ~UnsubscribePacket();
+ UnsubscribePacket(const UnsubscribePacket &) noexcept = delete;
+ UnsubscribePacket(UnsubscribePacket &&) noexcept = delete;
+ UnsubscribePacket &operator=(const UnsubscribePacket &) noexcept = delete;
+ UnsubscribePacket &operator=(UnsubscribePacket &&) noexcept = delete;
+
+ private:
+ Allocator *m_allocator;
+
+ /**
+ * List of topic filters that the client wishes to unsubscribe from.
+ *
+ * See [MQTT5 Unsubscribe
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901185)
+ */
+ Crt::Vector<String> m_topicFilters;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901184)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+
+ ///////////////////////////////////////////////////////////////////////////
+ // Underlying data storage for internal use
+ ///////////////////////////////////////////////////////////////////////////
+ struct aws_array_list m_topicFiltersList;
+ struct aws_mqtt5_user_property *m_userPropertiesStorage;
+ };
+
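+ /*
+ * Usage sketch (illustrative only, not part of this header): building an UNSUBSCRIBE packet
+ * for two topic filters. Topic names are placeholders.
+ *
+ *   auto unsubscribePacket = std::make_shared<Aws::Crt::Mqtt5::UnsubscribePacket>();
+ *   unsubscribePacket->withTopicFilter("devices/+/status").withTopicFilter("sensors/#");
+ */
+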
+ /**
+ * Data model of an [MQTT5
+ * UNSUBACK](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901187) packet.
+ */
+ class AWS_CRT_CPP_API UnSubAckPacket : public IPacket
+ {
+ public:
+ UnSubAckPacket(
+ const aws_mqtt5_packet_unsuback_view &packet,
+ Allocator *allocator = ApiAllocator()) noexcept;
+
+ /* The packet type */
+ PacketType getType() override { return PacketType::AWS_MQTT5_PT_UNSUBACK; };
+
+ /**
+ * Returns additional diagnostic information about the result of the UNSUBSCRIBE attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901192)
+ *
+ * @return Additional diagnostic information about the result of the UNSUBSCRIBE attempt.
+ */
+ const Crt::Optional<Crt::String> &getReasonString() const noexcept;
+
+ /**
+ * Returns list of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901193)
+ *
+ * @return List of MQTT5 user properties included with the packet.
+ */
+ const Crt::Vector<UserProperty> &getUserProperties() const noexcept;
+
+ /**
+ * Returns a list of reason codes indicating the result of unsubscribing from each individual topic
+ * filter entry in the associated UNSUBSCRIBE packet.
+ *
+ * See [MQTT5 Unsuback
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901194)
+ *
+ * @return A list of reason codes indicating the result of unsubscribing from each individual topic
+ * filter entry in the associated UNSUBSCRIBE packet.
+ */
+ const Crt::Vector<UnSubAckReasonCode> &getReasonCodes() const noexcept;
+
+ virtual ~UnSubAckPacket() { m_userProperties.clear(); };
+ UnSubAckPacket(const UnSubAckPacket &) noexcept = delete;
+ UnSubAckPacket(UnSubAckPacket &&) noexcept = delete;
+ UnSubAckPacket &operator=(const UnSubAckPacket &) noexcept = delete;
+ UnSubAckPacket &operator=(UnSubAckPacket &&) noexcept = delete;
+
+ private:
+ /**
+ * Additional diagnostic information about the result of the UNSUBSCRIBE attempt.
+ *
+ * See [MQTT5 Reason
+ * String](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901192)
+ */
+ Crt::Optional<Crt::String> m_reasonString;
+
+ /**
+ * Set of MQTT5 user properties included with the packet.
+ *
+ * See [MQTT5 User
+ * Property](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901193)
+ */
+ Crt::Vector<UserProperty> m_userProperties;
+
+ /**
+ * A list of reason codes indicating the result of unsubscribing from each individual topic filter entry
+ * in the associated UNSUBSCRIBE packet.
+ *
+ * See [MQTT5 Unsuback
+ * Payload](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901194)
+ */
+ Crt::Vector<UnSubAckReasonCode> m_reasonCodes;
+ };
+
+ } // namespace Mqtt5
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Types.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Types.h
new file mode 100644
index 0000000000..b7aa4f3568
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/Mqtt5Types.h
@@ -0,0 +1,120 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/mqtt/v5/mqtt5_client.h>
+#include <aws/mqtt/v5/mqtt5_types.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt5
+ {
+ /**
+ * MQTT message delivery quality of service.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901234) encoding values.
+ */
+ using QOS = aws_mqtt5_qos;
+
+ /**
+ * Server return code for connect attempts.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901079) encoding values.
+ */
+ using ConnectReasonCode = aws_mqtt5_connect_reason_code;
+
+ /**
+ * Reason code inside DISCONNECT packets. Helps determine why a connection was terminated.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901208) encoding values.
+ */
+ using DisconnectReasonCode = aws_mqtt5_disconnect_reason_code;
+
+ /**
+ * Reason code inside PUBACK packets that indicates the result of the associated PUBLISH request.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901124) encoding values.
+ */
+ using PubAckReasonCode = aws_mqtt5_puback_reason_code;
+
+ /**
+ * Reason code inside SUBACK packet payloads that indicates the result of the corresponding subscription
+ * entry in the associated SUBSCRIBE packet.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901178) encoding values.
+ */
+ using SubAckReasonCode = aws_mqtt5_suback_reason_code;
+
+ /**
+ * Reason codes inside UNSUBACK packet payloads that specify the results for each topic filter in the
+ * associated UNSUBSCRIBE packet.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901194) encoding values.
+ */
+ using UnSubAckReasonCode = aws_mqtt5_unsuback_reason_code;
+
+ /**
+ * Controls how the MQTT5 client should behave with respect to MQTT sessions.
+ */
+ using ClientSessionBehaviorType = aws_mqtt5_client_session_behavior_type;
+
+ /**
+ * Additional controls for client behavior with respect to operation validation and flow control; these
+ * checks go beyond the MQTT5 spec to respect limits of specific MQTT brokers.
+ */
+ using ClientExtendedValidationAndFlowControl = aws_mqtt5_extended_validation_and_flow_control_options;
+
+ /**
+ * Controls how disconnects affect the queued and in-progress operations tracked by the client. Also
+ * controls how operations are handled while the client is not connected. In particular, if the client is
+ * not connected, then any operation that would be failed on disconnect (according to these rules) will be
+ * rejected.
+ */
+ using ClientOperationQueueBehaviorType = aws_mqtt5_client_operation_queue_behavior_type;
+
+ /**
+ * Controls how the reconnect delay is modified in order to smooth out the distribution of reconnection
+ * attempt timepoints for a large set of reconnecting clients.
+ *
+ * See [Exponential Backoff and
+ * Jitter](https://aws.amazon.com/blogs/architecture/exponential-backoff-and-jitter/)
+ */
+ using JitterMode = aws_exponential_backoff_jitter_mode;
+
+ /**
+ * Optional property describing a PUBLISH payload's format.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901111) encoding values.
+ */
+ using PayloadFormatIndicator = aws_mqtt5_payload_format_indicator;
+
+ /**
+ * Configures how retained messages should be handled when subscribing with a topic filter that matches
+ * topics with associated retained messages.
+ *
+ * Enum values match [MQTT5
+ * spec](https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901169) encoding values.
+ */
+ using RetainHandlingType = aws_mqtt5_retain_handling_type;
+
+ /**
+ * Type of mqtt packet.
+ * Enum values match mqtt spec encoding values.
+ *
+ * https://docs.oasis-open.org/mqtt/mqtt/v5.0/os/mqtt-v5.0-os.html#_Toc3901022
+ */
+ using PacketType = aws_mqtt5_packet_type;
+
+ } // namespace Mqtt5
+
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/MqttClient.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/MqttClient.h
new file mode 100644
index 0000000000..03293237cb
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/crt/mqtt/MqttClient.h
@@ -0,0 +1,532 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Exports.h>
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/Types.h>
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/crt/io/SocketOptions.h>
+#include <aws/crt/io/TlsOptions.h>
+
+#include <aws/mqtt/client.h>
+
+#include <atomic>
+#include <functional>
+#include <memory>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ class ClientBootstrap;
+ }
+
+ namespace Http
+ {
+ class HttpRequest;
+ }
+
+ namespace Mqtt
+ {
+ class MqttClient;
+ class MqttConnection;
+
+ /**
+ * Invoked upon connection loss.
+ */
+ using OnConnectionInterruptedHandler = std::function<void(MqttConnection &connection, int error)>;
+
+ /**
+ * Invoked upon connection resumption.
+ */
+ using OnConnectionResumedHandler =
+ std::function<void(MqttConnection &connection, ReturnCode connectCode, bool sessionPresent)>;
+
+ /**
+ * Invoked when a connack message is received, or an error occurs.
+ */
+ using OnConnectionCompletedHandler = std::function<
+ void(MqttConnection &connection, int errorCode, ReturnCode returnCode, bool sessionPresent)>;
+
+ /**
+ * Invoked when a suback message is received.
+ */
+ using OnSubAckHandler = std::function<
+ void(MqttConnection &connection, uint16_t packetId, const String &topic, QOS qos, int errorCode)>;
+
+ /**
+ * Invoked when a suback message for multiple topics is received.
+ */
+ using OnMultiSubAckHandler = std::function<void(
+ MqttConnection &connection,
+ uint16_t packetId,
+ const Vector<String> &topics,
+ QOS qos,
+ int errorCode)>;
+
+ /**
+ * Invoked when a disconnect message has been sent.
+ */
+ using OnDisconnectHandler = std::function<void(MqttConnection &connection)>;
+
+ /**
+ * Invoked upon receipt of a Publish message on a subscribed topic.
+ * @param connection The connection object
+ * @param topic The information channel to which the payload data was published.
+ * @param payload The payload data.
+ * @param dup DUP flag. If true, this might be re-delivery of an earlier
+ * attempt to send the message.
+ * @param qos Quality of Service used to deliver the message.
+ * @param retain Retain flag. If true, the message was sent as a result of
+ * a new subscription being made by the client.
+ */
+ using OnMessageReceivedHandler = std::function<void(
+ MqttConnection &connection,
+ const String &topic,
+ const ByteBuf &payload,
+ bool dup,
+ QOS qos,
+ bool retain)>;
+
+ /**
+ * @deprecated Use OnMessageReceivedHandler
+ */
+ using OnPublishReceivedHandler =
+ std::function<void(MqttConnection &connection, const String &topic, const ByteBuf &payload)>;
+
+ /**
+ * Invoked when an operation completes. For QoS 0, this is when the packet is passed to the tls
+ * layer. For QoS 1 (and 2, in theory) this is when the final ack packet is received from the server.
+ */
+ using OnOperationCompleteHandler =
+ std::function<void(MqttConnection &connection, uint16_t packetId, int errorCode)>;
+
+ /**
+ * Callback for users to invoke upon completion of the (presumably asynchronous) process initiated by the
+ * OnWebSocketHandshakeIntercept callback.
+ */
+ using OnWebSocketHandshakeInterceptComplete =
+ std::function<void(const std::shared_ptr<Http::HttpRequest> &, int errorCode)>;
+
+ /**
+ * Invoked during the websocket handshake to give users an opportunity to transform an http request for
+ * purposes such as signing/authorization. Returning from this function does not continue the websocket
+ * handshake, since some workflows may be asynchronous. To accommodate that, onComplete must be invoked
+ * upon completion of the signing process.
+ */
+ using OnWebSocketHandshakeIntercept = std::function<
+ void(std::shared_ptr<Http::HttpRequest> req, const OnWebSocketHandshakeInterceptComplete &onComplete)>;
+
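+ /*
+ * Usage sketch (illustrative only, not part of this header): a pass-through interceptor
+ * that performs no transformation and immediately signals completion. A real interceptor
+ * would typically sign the request, possibly asynchronously, before invoking onComplete.
+ * AWS_ERROR_SUCCESS comes from aws-c-common and equals 0.
+ *
+ *   Aws::Crt::Mqtt::OnWebSocketHandshakeIntercept interceptor =
+ *       [](std::shared_ptr<Aws::Crt::Http::HttpRequest> req,
+ *          const Aws::Crt::Mqtt::OnWebSocketHandshakeInterceptComplete &onComplete)
+ *       { onComplete(req, AWS_ERROR_SUCCESS); };
+ */
+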
+ /* Simple statistics about the current state of the client's queue of operations */
+ struct AWS_CRT_CPP_API MqttConnectionOperationStatistics
+ {
+ /*
+ * total number of operations submitted to the connection that have not yet been completed. Unacked
+ * operations are a subset of this.
+ */
+ uint64_t incompleteOperationCount;
+
+ /*
+ * total packet size of operations submitted to the connection that have not yet been completed. Unacked
+ * operations are a subset of this.
+ */
+ uint64_t incompleteOperationSize;
+
+ /*
+ * total number of operations that have been sent to the server and are waiting for a corresponding ACK
+ * before they can be completed.
+ */
+ uint64_t unackedOperationCount;
+
+ /*
+ * total packet size of operations that have been sent to the server and are waiting for a corresponding
+ * ACK before they can be completed.
+ */
+ uint64_t unackedOperationSize;
+ };
+
+ /**
+ * Represents a persistent Mqtt Connection. The memory is owned by MqttClient.
+ * To get a new instance of this class, see MqttClient::NewConnection. Unless
+ * specified all function arguments need only to live through the duration of the
+ * function call.
+ */
+ class AWS_CRT_CPP_API MqttConnection final
+ {
+ friend class MqttClient;
+
+ public:
+ ~MqttConnection();
+ MqttConnection(const MqttConnection &) = delete;
+ MqttConnection(MqttConnection &&) = delete;
+ MqttConnection &operator=(const MqttConnection &) = delete;
+ MqttConnection &operator=(MqttConnection &&) = delete;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept;
+
+ /**
+ * Sets LastWill for the connection.
+ * @param topic topic the will message should be published to
+ * @param qos QOS the will message should be published with
+ * @param retain true if the will publish should be treated as a retained publish
+ * @param payload payload of the will message
+ * @return success/failure in setting the will
+ */
+ bool SetWill(const char *topic, QOS qos, bool retain, const ByteBuf &payload) noexcept;
+
+ /**
+ * Sets login credentials for the connection. These must be set before the Connect call if they are to
+ * be used.
+ * @param userName user name to add to the MQTT CONNECT packet
+ * @param password password to add to the MQTT CONNECT packet
+ * @return success/failure
+ */
+ bool SetLogin(const char *userName, const char *password) noexcept;
+
+ /**
+ * @deprecated Sets websocket proxy options. Replaced by SetHttpProxyOptions.
+ */
+ bool SetWebsocketProxyOptions(const Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept;
+
+ /**
+ * Sets http proxy options. In order to use an http proxy with mqtt, either:
+ * (1) websockets are used, or
+ * (2) mqtt-over-tls is used and the ALPN list of the tls context contains a tag that resolves to mqtt.
+ *
+ * @param proxyOptions proxy configuration for making the mqtt connection
+ *
+ * @return success/failure
+ */
+ bool SetHttpProxyOptions(const Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept;
+
+ /**
+ * Customize time to wait between reconnect attempts.
+ * The time will start at min and multiply by 2 until max is reached.
+ * The time resets back to min after a successful connection.
+ * This function should only be called before Connect().
+ *
+ * @param min_seconds minimum time to wait before attempting a reconnect
+ * @param max_seconds maximum time to wait before attempting a reconnect
+ *
+ * @return success/failure
+ */
+ bool SetReconnectTimeout(uint64_t min_seconds, uint64_t max_seconds) noexcept;
+
+ /**
+ * Initiates the connection, OnConnectionCompleted will
+ * be invoked in an event-loop thread.
+ *
+ * @param clientId client identifier to use when establishing the mqtt connection
+ * @param cleanSession false to attempt to rejoin an existing session for the client id, true to skip
+ * and start with a new session
+ * @param keepAliveTimeSecs time interval to space mqtt pings apart by
+ * @param pingTimeoutMs timeout in milliseconds before the keep alive ping is considered to have failed
+ * @param protocolOperationTimeoutMs timeout in milliseconds to give up waiting for a response packet
+ * for an operation. Necessary due to throttling properties on certain server implementations that do
+ * not return an ACK for throttled operations.
+ *
+ * @return true if the connection attempt was successfully started (implying a callback will be invoked
+ * with the eventual result), false if it could not be started (no callback will happen)
+ */
+ bool Connect(
+ const char *clientId,
+ bool cleanSession,
+ uint16_t keepAliveTimeSecs = 0,
+ uint32_t pingTimeoutMs = 0,
+ uint32_t protocolOperationTimeoutMs = 0) noexcept;
+
+ /**
+ * Initiates disconnect, OnDisconnectHandler will be invoked in an event-loop thread.
+ * @return success/failure in initiating disconnect
+ */
+ bool Disconnect() noexcept;
+
+ /// @private
+ aws_mqtt_client_connection *GetUnderlyingConnection() noexcept;
+
+ /**
+ * Subscribes to topicFilter. OnMessageReceivedHandler will be invoked from an event-loop
+ * thread upon an incoming Publish message. OnSubAckHandler will be invoked
+ * upon receipt of a suback message.
+ *
+ * @param topicFilter topic filter to subscribe to
+ * @param qos maximum qos client is willing to receive matching messages on
+ * @param onMessage callback to invoke when a message is received based on matching this filter
+ * @param onSubAck callback to invoke with the server's response to the subscribe request
+ *
+ * @return packet id of the subscribe request, or 0 if the attempt failed synchronously
+ */
+ uint16_t Subscribe(
+ const char *topicFilter,
+ QOS qos,
+ OnMessageReceivedHandler &&onMessage,
+ OnSubAckHandler &&onSubAck) noexcept;
+
+ /**
+ * @deprecated Use alternate Subscribe()
+ */
+ uint16_t Subscribe(
+ const char *topicFilter,
+ QOS qos,
+ OnPublishReceivedHandler &&onPublish,
+ OnSubAckHandler &&onSubAck) noexcept;
+
+ /**
+ * Subscribes to multiple topicFilters. OnMessageReceivedHandler will be invoked from an event-loop
+ * thread upon an incoming Publish message. OnMultiSubAckHandler will be invoked
+ * upon receipt of a suback message.
+ *
+ * @param topicFilters list of pairs of topic filters and message callbacks to invoke on a matching
+ * publish
+ * @param qos maximum qos client is willing to receive matching messages on
+ * @param onOpComplete callback to invoke with the server's response to the subscribe request
+ *
+ * @return packet id of the subscribe request, or 0 if the attempt failed synchronously
+ */
+ uint16_t Subscribe(
+ const Vector<std::pair<const char *, OnMessageReceivedHandler>> &topicFilters,
+ QOS qos,
+ OnMultiSubAckHandler &&onOpComplete) noexcept;
+
+ /**
+ * @deprecated Use alternate Subscribe()
+ */
+ uint16_t Subscribe(
+ const Vector<std::pair<const char *, OnPublishReceivedHandler>> &topicFilters,
+ QOS qos,
+ OnMultiSubAckHandler &&onOpComplete) noexcept;
+
+ /**
+ * Installs a handler for all incoming publish messages, regardless of if Subscribe has been
+ * called on the topic.
+ *
+ * @param onMessage callback to invoke for all received messages
+ * @return success/failure
+ */
+ bool SetOnMessageHandler(OnMessageReceivedHandler &&onMessage) noexcept;
+
+ /**
+ * @deprecated Use alternate SetOnMessageHandler()
+ */
+ bool SetOnMessageHandler(OnPublishReceivedHandler &&onPublish) noexcept;
+
+ /**
+ * Unsubscribes from topicFilter. OnOperationCompleteHandler will be invoked upon receipt of
+ * an unsuback message.
+ *
+ * @param topicFilter topic filter to unsubscribe the session from
+ * @param onOpComplete callback to invoke on receipt of the server's UNSUBACK message
+ *
+ * @return packet id of the unsubscribe request, or 0 if the attempt failed synchronously
+ */
+ uint16_t Unsubscribe(const char *topicFilter, OnOperationCompleteHandler &&onOpComplete) noexcept;
+
+ /**
+ * Publishes to a topic.
+ *
+ * @param topic topic to publish to
+ * @param qos QOS to publish the message with
+ * @param retain should this message replace the current retained message of the topic?
+ * @param payload payload of the message
+ * @param onOpComplete completion callback to invoke when the operation is complete. If QoS is 0, then
+ * the callback is invoked when the message is passed to the tls handler, otherwise it's invoked
+ * on receipt of the final response from the server.
+ *
+ * @return packet id of the publish request, or 0 if the attempt failed synchronously
+ */
+ uint16_t Publish(
+ const char *topic,
+ QOS qos,
+ bool retain,
+ const ByteBuf &payload,
+ OnOperationCompleteHandler &&onOpComplete) noexcept;
+
+ /**
+ * Get the statistics about the current state of the connection's queue of operations
+ *
+ * @return MqttConnectionOperationStatistics
+ */
+ const MqttConnectionOperationStatistics &GetOperationStatistics() noexcept;
+
+ OnConnectionInterruptedHandler OnConnectionInterrupted;
+ OnConnectionResumedHandler OnConnectionResumed;
+ OnConnectionCompletedHandler OnConnectionCompleted;
+ OnDisconnectHandler OnDisconnect;
+ OnWebSocketHandshakeIntercept WebsocketInterceptor;
+
+ private:
+ aws_mqtt_client *m_owningClient;
+ aws_mqtt_client_connection *m_underlyingConnection;
+ String m_hostName;
+ uint16_t m_port;
+ Crt::Io::TlsContext m_tlsContext;
+ Io::TlsConnectionOptions m_tlsOptions;
+ Io::SocketOptions m_socketOptions;
+ Crt::Optional<Http::HttpClientConnectionProxyOptions> m_proxyOptions;
+ void *m_onAnyCbData;
+ bool m_useTls;
+ bool m_useWebsocket;
+ MqttConnectionOperationStatistics m_operationStatistics;
+
+ MqttConnection(
+ aws_mqtt_client *client,
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ const Crt::Io::TlsContext &tlsContext,
+ bool useWebsocket) noexcept;
+
+ MqttConnection(
+ aws_mqtt_client *client,
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ bool useWebsocket) noexcept;
+
+ static void s_onConnectionInterrupted(aws_mqtt_client_connection *, int errorCode, void *userData);
+ static void s_onConnectionCompleted(
+ aws_mqtt_client_connection *,
+ int errorCode,
+ enum aws_mqtt_connect_return_code returnCode,
+ bool sessionPresent,
+ void *userData);
+ static void s_onConnectionResumed(
+ aws_mqtt_client_connection *,
+ ReturnCode returnCode,
+ bool sessionPresent,
+ void *userData);
+
+ static void s_onDisconnect(aws_mqtt_client_connection *connection, void *userData);
+ static void s_onPublish(
+ aws_mqtt_client_connection *connection,
+ const aws_byte_cursor *topic,
+ const aws_byte_cursor *payload,
+ bool dup,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ void *user_data);
+
+ static void s_onSubAck(
+ aws_mqtt_client_connection *connection,
+ uint16_t packetId,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ int error_code,
+ void *userdata);
+ static void s_onMultiSubAck(
+ aws_mqtt_client_connection *connection,
+ uint16_t packetId,
+ const struct aws_array_list *topic_subacks,
+ int error_code,
+ void *userdata);
+ static void s_onOpComplete(
+ aws_mqtt_client_connection *connection,
+ uint16_t packetId,
+ int errorCode,
+ void *userdata);
+
+ static void s_onWebsocketHandshake(
+ struct aws_http_message *request,
+ void *user_data,
+ aws_mqtt_transform_websocket_handshake_complete_fn *complete_fn,
+ void *complete_ctx);
+
+ static void s_connectionInit(
+ MqttConnection *self,
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions);
+ };
+
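+ /*
+ * Usage sketch (illustrative only, not part of this header): driving a connection obtained
+ * from MqttClient::NewConnection. Client id, topic, and keep-alive value are placeholders;
+ * ReturnCode is the connect return code alias used by the handlers above, and the QoS
+ * enumerator comes from aws-c-mqtt.
+ *
+ *   void runConnection(const std::shared_ptr<Aws::Crt::Mqtt::MqttConnection> &connection)
+ *   {
+ *       connection->OnConnectionCompleted =
+ *           [](Aws::Crt::Mqtt::MqttConnection &, int errorCode, Aws::Crt::Mqtt::ReturnCode, bool)
+ *           {
+ *               // errorCode == 0 means the CONNACK was received successfully
+ *           };
+ *       if (!connection->Connect("my-client-id", true, 30))
+ *       {
+ *           // synchronous failure; inspect connection->LastError()
+ *           return;
+ *       }
+ *       connection->Publish(
+ *           "sensors/test", AWS_MQTT_QOS_AT_LEAST_ONCE, false,
+ *           Aws::Crt::ByteBufFromCString("hello"),
+ *           [](Aws::Crt::Mqtt::MqttConnection &, uint16_t, int) {});
+ *   }
+ */
+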
+ /**
+ * An MQTT client. This is a move-only type. Unless otherwise specified,
+ * all function arguments need only to live through the duration of the
+ * function call.
+ */
+ class AWS_CRT_CPP_API MqttClient final
+ {
+ public:
+ /**
+ * Initialize an MqttClient using bootstrap and allocator
+ */
+ MqttClient(Io::ClientBootstrap &bootstrap, Allocator *allocator = ApiAllocator()) noexcept;
+
+ /**
+ * Initialize an MqttClient using an allocator and the default ClientBootstrap
+ *
+ * For more information on the default ClientBootstrap see
+ * Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap
+ */
+ MqttClient(Allocator *allocator = ApiAllocator()) noexcept;
+
+ ~MqttClient();
+ MqttClient(const MqttClient &) = delete;
+ MqttClient(MqttClient &&) noexcept;
+ MqttClient &operator=(const MqttClient &) = delete;
+ MqttClient &operator=(MqttClient &&) noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ operator bool() const noexcept;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept;
+
+ /**
+ * Create a new connection object using TLS from the client. The client must outlive
+ * all of its connection instances.
+ *
+ * @param hostName endpoint to connect to
+ * @param port port to connect to
+ * @param socketOptions socket options to use when establishing the connection
+ * @param tlsContext tls context to use with the connection
+ * @param useWebsocket should the connection use websockets or should it use direct mqtt?
+ *
+ * @return a new connection object. Connect() will still need to be called after all further
+ * configuration is finished.
+ */
+ std::shared_ptr<MqttConnection> NewConnection(
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ const Crt::Io::TlsContext &tlsContext,
+ bool useWebsocket = false) noexcept;
+
+ /**
+ * Create a new connection object over plain text from the client. The client must outlive
+ * all of its connection instances.
+ * @param hostName endpoint to connect to
+ * @param port port to connect to
+ * @param socketOptions socket options to use when establishing the connection
+ * @param useWebsocket should the connection use websockets or should it use direct mqtt?
+ *
+ * @return a new connection object. Connect() will still need to be called after all further
+ * configuration is finished.
+ */
+ std::shared_ptr<MqttConnection> NewConnection(
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ bool useWebsocket = false) noexcept;
+
+ private:
+ aws_mqtt_client *m_client;
+ };
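+
+ /*
+ * Usage sketch (illustrative only, not part of this header): creating a client and a
+ * TLS-based connection. The tlsContext is assumed to have been built elsewhere from
+ * Io::TlsContextOptions; endpoint and port are placeholders.
+ *
+ *   Aws::Crt::Mqtt::MqttClient client; // uses the static default ClientBootstrap
+ *   Aws::Crt::Io::SocketOptions socketOptions;
+ *   auto connection =
+ *       client.NewConnection("broker.example.com", 8883, socketOptions, tlsContext, false);
+ *   if (!connection)
+ *   {
+ *       // inspect client.LastError()
+ *   }
+ */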
+ } // namespace Mqtt
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/Mqtt5Client.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/Mqtt5Client.h
new file mode 100644
index 0000000000..4c22917013
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/Mqtt5Client.h
@@ -0,0 +1,548 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Config.h>
+#include <aws/crt/Exports.h>
+#include <aws/crt/auth/Sigv4Signing.h>
+#include <aws/crt/mqtt/Mqtt5Client.h>
+#include <aws/iot/MqttCommon.h>
+
+#if !BYO_CRYPTO
+
+namespace Aws
+{
+ using namespace Crt::Mqtt5;
+
+ namespace Io
+ {
+ class ClientBootstrap;
+ class SocketOptions;
+ class TlsContextOptions;
+ class WebsocketConfig;
+ } // namespace Io
+
+ namespace Iot
+ {
+
+ /**
+ * Class encapsulating configuration for establishing an AWS IoT Mqtt5 connection with a custom authorizer
+ */
+ class AWS_CRT_CPP_API Mqtt5CustomAuthConfig
+ {
+ public:
+ /**
+ * Create a custom authorizer configuration
+ */
+ Mqtt5CustomAuthConfig(Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+ virtual ~Mqtt5CustomAuthConfig();
+
+ Mqtt5CustomAuthConfig(const Mqtt5CustomAuthConfig &rhs);
+ Mqtt5CustomAuthConfig(Mqtt5CustomAuthConfig &&rhs) = delete;
+
+ Mqtt5CustomAuthConfig &operator=(const Mqtt5CustomAuthConfig &rhs);
+ Mqtt5CustomAuthConfig &operator=(Mqtt5CustomAuthConfig &&rhs) = delete;
+
+ Mqtt5CustomAuthConfig &WithAuthorizerName(Crt::String authName);
+ Mqtt5CustomAuthConfig &WithUsername(Crt::String username);
+ Mqtt5CustomAuthConfig &WithPassword(Crt::ByteCursor password);
+ Mqtt5CustomAuthConfig &WithTokenKeyName(Crt::String tokenKeyName);
+ Mqtt5CustomAuthConfig &WithTokenValue(Crt::String tokenValue);
+ Mqtt5CustomAuthConfig &WithTokenSignature(Crt::String tokenSignature);
+
+ const Crt::Optional<Crt::String> &GetAuthorizerName();
+ const Crt::Optional<Crt::String> &GetUsername();
+ const Crt::Optional<Crt::ByteCursor> &GetPassword();
+ const Crt::Optional<Crt::String> &GetTokenKeyName();
+ const Crt::Optional<Crt::String> &GetTokenValue();
+ const Crt::Optional<Crt::String> &GetTokenSignature();
+
+ private:
+ /**
+ * Name of the custom authorizer to use.
+ *
+ * Required if the endpoint does not have a default custom authorizer associated with it. It is strongly
+ * suggested to URL-encode this value; the SDK will not do so for you.
+ */
+ Crt::Optional<Crt::String> m_authorizerName;
+
+ /**
+ * The username to use with the custom authorizer. Query-string elements of this property value will be
+ * unioned with the query-string elements implied by other properties in this object.
+ *
+ * For example, if you set this to:
+ *
+ * 'MyUsername?someKey=someValue'
+ *
+ * and use {@link authorizerName} to specify the authorizer, the final username would look like:
+ *
+ * `MyUsername?someKey=someValue&x-amz-customauthorizer-name=<your authorizer's name>&...`
+ */
+ Crt::Optional<Crt::String> m_username;
+
+ /**
+ * The password to use with the custom authorizer. Becomes the MQTT5 CONNECT packet's password property.
+ * AWS IoT Core will base64 encode this binary data before passing it to the authorizer's lambda function.
+ */
+ Crt::Optional<Crt::ByteCursor> m_password;
+
+ /**
+ * Key used to extract the custom authorizer token from MQTT username query-string properties.
+ *
+ * Required if the custom authorizer has signing enabled. It is strongly suggested to URL-encode this
+ * value; the SDK will not do so for you.
+ */
+ Crt::Optional<Crt::String> m_tokenKeyName;
+
+ /**
+ * An opaque token value. This value must be signed by the private key associated with the custom authorizer
+ * and the result placed in the {@link tokenSignature} property.
+ *
+ * Required if the custom authorizer has signing enabled.
+ */
+ Crt::Optional<Crt::String> m_tokenValue;
+
+ /**
+ * The digital signature of the token value in the {@link tokenValue} property. The signature must be based
+ * on the private key associated with the custom authorizer. The signature must be base64 encoded.
+ *
+ * Required if the custom authorizer has signing enabled. It is strongly suggested to URL-encode this
+ * value; the SDK will not do so for you.
+ */
+ Crt::Optional<Crt::String> m_tokenSignature;
+
+ Crt::ByteBuf m_passwordStorage;
+ Crt::Allocator *m_allocator;
+ };
+
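+ /*
+ * Usage sketch (illustrative only, not part of this header): configuring a signed custom
+ * authorizer. All values are placeholders; the token signature must be produced with the
+ * private key associated with the authorizer and then base64 encoded.
+ *
+ *   Aws::Iot::Mqtt5CustomAuthConfig authConfig;
+ *   authConfig.WithAuthorizerName("MyAuthorizerName")
+ *       .WithUsername("MyUsername")
+ *       .WithTokenKeyName("token")
+ *       .WithTokenValue("token-value")
+ *       .WithTokenSignature("base64-encoded-signature");
+ */
+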
+ /**
+ * Represents a unique configuration for an mqtt5 client and its connection. Used to build the mqtt5
+ * client from that configuration.
+ */
+ class AWS_CRT_CPP_API Mqtt5ClientBuilder final
+ {
+ public:
+ /**
+ * Set the builder up for MTLS using certPath and pkeyPath. These are files on disk and must be in the
+ * PEM format.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param certPath path to the X509 certificate (pem file) to use
+ * @param pkeyPath path to the private key (pem file) to use
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithMtlsFromPath(
+ const Crt::String hostName,
+ const char *certPath,
+ const char *pkeyPath,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for MTLS using cert and pkey. These are in-memory buffers and must be in the PEM
+ * format.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param certPath buffer containing the X509 certificate in a PEM format
+ * @param pkeyPath buffer containing the private key in a PEM format
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithMtlsFromMemory(
+ const Crt::String hostName,
+ const Crt::ByteCursor &certPath,
+ const Crt::ByteCursor &pkeyPath,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for MTLS, using a PKCS#11 library for private key operations.
+ *
+ * NOTE: This only works on Unix devices.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param pkcs11Options PKCS#11 options
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithMtlsPkcs11(
+ const Crt::String hostName,
+ const Crt::Io::TlsContextPkcs11Options &pkcs11Options,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for MTLS, using a certificate in a Windows certificate store.
+ *
+ * NOTE: This only works on Windows.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param windowsCertStorePath Path to certificate in a Windows certificate store.
+ * The path must use backslashes and end with the certificate's thumbprint.
+ * Example: `CurrentUser\MY\A11F8A9B5DF5B98BA3508FBCA575D09570E0D2C6`
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithWindowsCertStorePath(
+ const Crt::String hostName,
+ const char *windowsCertStorePath,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for Websocket connection.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param config websocket configuration information
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithWebsocket(
+ const Crt::String hostName,
+ const WebsocketConfig &config,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for connection using authorization configuration.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param customAuthConfig custom authorization configuration information
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithCustomAuthorizer(
+ const Crt::String hostName,
+ const Mqtt5CustomAuthConfig &customAuthConfig,
+ Crt::Allocator *allocator) noexcept;
+
+ /**
+ * Sets the builder up for connection using authorization configuration using Websockets.
+ *
+ * @param hostName - AWS IoT endpoint to connect to
+ * @param customAuthConfig custom authorization configuration information
+ * @param config websocket configuration information
+ * @param allocator memory allocator to use
+ *
+ * @return Mqtt5ClientBuilder
+ */
+ static Mqtt5ClientBuilder *NewMqtt5ClientBuilderWithCustomAuthorizerWebsocket(
+ const Crt::String hostName,
+ const Mqtt5CustomAuthConfig &customAuthConfig,
+ const WebsocketConfig &config,
+ Crt::Allocator *allocator) noexcept;
+
+ /**
+ * Sets the host to connect to.
+ *
+ * @param hostname endpoint to connect to
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withHostName(Crt::String hostname);
+
+ /**
+ * Set port to connect to
+ *
+ * @param port port to connect to
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withPort(uint16_t port) noexcept;
+
+ /**
+ * Sets the certificate authority for the endpoint you're connecting to. This is a path to a file on disk
+ * and must be in PEM format.
+ *
+ * @param caPath path to the CA file in PEM format
+ *
+ * @return this builder object
+ */
+ Mqtt5ClientBuilder &WithCertificateAuthority(const char *caPath) noexcept;
+
+ /**
+ * Sets the certificate authority for the endpoint you're connecting to. This is an in-memory buffer and
+ * must be in PEM format.
+ *
+ * @param cert buffer containing the CA certificate in a PEM format
+ *
+ * @return this builder object
+ */
+ Mqtt5ClientBuilder &WithCertificateAuthority(const Crt::ByteCursor &cert) noexcept;
+
+ /**
+ * Sets http proxy options.
+ *
+ * @param proxyOptions http proxy configuration for connection establishment
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withHttpProxyOptions(
+ const Crt::Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept;
+
+ /**
+ * Sets the custom authorizer settings. This function will modify the username, port, and TLS options.
+ *
+ * @return this builder object
+ */
+ Mqtt5ClientBuilder &WithCustomAuthorizer(const Iot::Mqtt5CustomAuthConfig &config) noexcept;
+
+ /**
+ * Sets mqtt5 connection options
+ *
+ * @param packetConnect the ConnectPacket carrying the mqtt5 connection options
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withConnectOptions(std::shared_ptr<ConnectPacket> packetConnect) noexcept;
+
+ /**
+ * Sets session behavior. Overrides how the MQTT5 client should behave with respect to MQTT sessions.
+ *
+ * @param sessionBehavior how the MQTT5 client should behave with respect to MQTT sessions.
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withSessionBehavior(ClientSessionBehaviorType sessionBehavior) noexcept;
+
+ /**
+ * Sets client extended validation and flow control, additional controls for client behavior with
+ * respect to operation validation and flow control; these checks go beyond the base MQTT5 spec to
+ * respect limits of specific MQTT brokers.
+ *
+ * @param clientExtendedValidationAndFlowControl
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withClientExtendedValidationAndFlowControl(
+ ClientExtendedValidationAndFlowControl clientExtendedValidationAndFlowControl) noexcept;
+
+ /**
+ * Sets OfflineQueueBehavior, controls how disconnects affect the queued and in-progress operations
+ * tracked by the client. Also controls how new operations are handled while the client is not
+ * connected. In particular, if the client is not connected, then any operation that would be failed
+ * on disconnect (according to these rules) will also be rejected.
+ *
+ * @param offlineQueueBehavior
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withOfflineQueueBehavior(
+ ClientOperationQueueBehaviorType offlineQueueBehavior) noexcept;
+
+ /**
+ * Sets ReconnectOptions. Reconnect options includes retryJitterMode, min reconnect delay time and
+ * max reconnect delay time
+ *
+ * @param reconnectOptions
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withReconnectOptions(ReconnectOptions reconnectOptions) noexcept;
+
+ /**
+ * Sets minConnectedTimeToResetReconnectDelayMs, amount of time that must elapse with an established
+ * connection before the reconnect delay is reset to the minimum. This helps alleviate bandwidth-waste
+ * in fast reconnect cycles due to permission failures on operations.
+ *
+ * @param minConnectedTimeToResetReconnectDelayMs
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withMinConnectedTimeToResetReconnectDelayMs(
+ uint64_t minConnectedTimeToResetReconnectDelayMs) noexcept;
+
+ /**
+ * Sets ping timeout (ms). Time interval to wait after sending a PINGREQ for a PINGRESP to arrive.
+ * If one does not arrive, the client will close the current connection.
+ *
+ * @param pingTimeoutMs
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withPingTimeoutMs(uint32_t pingTimeoutMs) noexcept;
+
+ /**
+ * Sets Connack Timeout (ms). Time interval to wait after sending a CONNECT request for a CONNACK
+ * to arrive. If one does not arrive, the connection will be shut down.
+ *
+ * @param connackTimeoutMs
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withConnackTimeoutMs(uint32_t connackTimeoutMs) noexcept;
+
+ /**
+ * Sets Operation Timeout(Seconds). Time interval to wait for an ack after sending a QoS 1+ PUBLISH,
+ * SUBSCRIBE, or UNSUBSCRIBE before failing the operation.
+ *
+ * @param ackTimeoutSeconds
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withAckTimeoutSeconds(uint32_t ackTimeoutSeconds) noexcept;
+
+ /**
+ * Overrides the default SDK Name to send as a metric in the MQTT CONNECT packet.
+ *
+ * @param sdkName string to use as the SDK name parameter in the connection string
+ *
+ * @return this builder object
+ */
+ Mqtt5ClientBuilder &WithSdkName(const Crt::String &sdkName);
+
+ /**
+ * Overrides the default SDK Version to send as a metric in the MQTT CONNECT packet.
+ *
+ * @param sdkVersion string to use as the SDK version parameter in the connection string
+ *
+ * @return this builder object
+ */
+ Mqtt5ClientBuilder &WithSdkVersion(const Crt::String &sdkVersion);
+
+ /**
+ * Builds a client configuration object from the set options.
+ *
+ * @return a new client connection config instance
+ */
+ std::shared_ptr<Mqtt5Client> Build() noexcept;
+
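+ /*
+ * Usage sketch (illustrative only, not part of this header): a minimal mutual-TLS client
+ * built from this builder. Endpoint and file paths are placeholders; error handling is
+ * reduced to a null check.
+ *
+ *   auto *builder = Aws::Iot::Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithMtlsFromPath(
+ *       "example-ats.iot.us-east-1.amazonaws.com", "/path/to/cert.pem", "/path/to/key.pem");
+ *   if (builder != nullptr)
+ *   {
+ *       builder->withPort(8883);
+ *       std::shared_ptr<Aws::Crt::Mqtt5::Mqtt5Client> client = builder->Build();
+ *       delete builder;
+ *   }
+ */
+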
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return m_lastError == 0; }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_lastError ? m_lastError : AWS_ERROR_UNKNOWN; }
+
+ virtual ~Mqtt5ClientBuilder()
+ {
+ if (m_options)
+ {
+ delete m_options;
+ }
+ };
+ Mqtt5ClientBuilder(const Mqtt5ClientBuilder &) = delete;
+ Mqtt5ClientBuilder(Mqtt5ClientBuilder &&) = delete;
+ Mqtt5ClientBuilder &operator=(const Mqtt5ClientBuilder &) = delete;
+ Mqtt5ClientBuilder &operator=(Mqtt5ClientBuilder &&) = delete;
+
+ /**
+ * Sets up the callback triggered when the client successfully establishes an MQTT connection
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withClientConnectionSuccessCallback(OnConnectionSuccessHandler callback) noexcept;
+
+ /**
+ * Sets up the callback triggered when the client fails to establish an MQTT connection
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withClientConnectionFailureCallback(OnConnectionFailureHandler callback) noexcept;
+
+ /**
+ * Sets up the callback handler triggered when the client's current MQTT connection is closed
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withClientDisconnectionCallback(OnDisconnectionHandler callback) noexcept;
+
+ /**
+ * Sets up the callback handler triggered when the client reaches the "Stopped" state
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withClientStoppedCallback(OnStoppedHandler callback) noexcept;
+
+ /**
+ * Sets up the callback handler triggered when the client begins an attempt to connect to the remote endpoint.
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withClientAttemptingConnectCallback(OnAttemptingConnectHandler callback) noexcept;
+
+ /**
+ * Sets up the callback handler triggered when an MQTT PUBLISH packet is received by the client
+ *
+ * @param callback
+ *
+ * @return this option object
+ */
+ Mqtt5ClientBuilder &withPublishReceivedCallback(OnPublishReceivedHandler callback) noexcept;
+
+ private:
+ // Common setup shared by all valid constructors
+ Mqtt5ClientBuilder(Crt::Allocator *allocator) noexcept;
+ // Common setup shared by all valid constructors
+ Mqtt5ClientBuilder(int error, Crt::Allocator *allocator) noexcept;
+
+ Crt::Allocator *m_allocator;
+
+ /**
+ * Network port of the MQTT server to connect to.
+ */
+ uint16_t m_port;
+
+ /**
+ * Client bootstrap to use. In almost all cases, this can be left undefined.
+ */
+ Io::ClientBootstrap *m_bootstrap;
+
+ /**
+ * TLS context for secure socket connections.
+ * If undefined, then a plaintext connection will be used.
+ */
+ Crt::Optional<Crt::Io::TlsContextOptions> m_tlsConnectionOptions;
+
+ /**
+ * Configures (tunneling) HTTP proxy usage when establishing MQTT connections
+ */
+ Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> m_proxyOptions;
+
+ /**
+ * Websocket-related options. The client will use a websocket connection when set.
+ */
+ Crt::Optional<WebsocketConfig> m_websocketConfig;
+
+ /**
+ * Custom Authorizer Configuration
+ */
+ Crt::Optional<Mqtt5CustomAuthConfig> m_customAuthConfig;
+
+ /**
+ * All configurable options with respect to the CONNECT packet sent by the client, including the will.
+ * These connect properties will be used for every connection attempt made by the client.
+ */
+ std::shared_ptr<ConnectPacket> m_connectOptions;
+
+ Crt::Mqtt5::Mqtt5ClientOptions *m_options;
+
+ /* Error */
+ int m_lastError;
+
+ bool m_enableMetricsCollection;
+
+ Crt::String m_sdkName = "CPPv2";
+ Crt::String m_sdkVersion = AWS_CRT_CPP_VERSION;
+ };
+
+ } // namespace Iot
+} // namespace Aws
+
+#endif // !BYO_CRYPTO
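
For orientation, a minimal usage sketch of the builder above. The factory that produces the builder is declared earlier in this header and is not part of this hunk, so the factory name/signature, the include path, the endpoint, and the PEM paths below are all assumptions or placeholders, not a definitive implementation.

    // Sketch only: factory name, include path, endpoint, and key/cert paths are assumed.
    #include <aws/crt/Api.h>
    #include <aws/iot/Mqtt5Client.h>

    #include <memory>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle; // initialize the CRT before any other call

        // Hypothetical factory call; the real MTLS factory is declared above this hunk.
        Aws::Iot::Mqtt5ClientBuilder *builder =
            Aws::Iot::Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithMtlsFromPath(
                "example-ats.iot.us-east-1.amazonaws.com", "cert.pem", "key.pem");
        if (builder == nullptr)
        {
            return -1;
        }

        // Setters shown in this hunk; all return the builder for chaining.
        builder->withPingTimeoutMs(30000).withConnackTimeoutMs(20000).withAckTimeoutSeconds(60);

        auto client = builder->Build();                     // nullptr on failure
        int result = client ? 0 : builder->LastError();     // LastError() explains a failed Build()
        delete builder;                                     // public virtual destructor, see above
        return result;
    }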
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttClient.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttClient.h
new file mode 100644
index 0000000000..7fc2d60e8a
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttClient.h
@@ -0,0 +1,450 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Config.h>
+#include <aws/crt/Exports.h>
+#include <aws/crt/auth/Sigv4Signing.h>
+#include <aws/crt/mqtt/MqttClient.h>
+#include <aws/iot/MqttCommon.h>
+
+#if !BYO_CRYPTO
+
+namespace Aws
+{
+ namespace Iot
+ {
+ class MqttClient;
+
+ /**
+ * Represents a unique configuration for connecting to a single AWS IoT endpoint. You can use a single instance
+ * of this class PER endpoint you want to connect to. This object must live through the lifetime of your
+ * connection.
+ */
+ class AWS_CRT_CPP_API MqttClientConnectionConfig final
+ {
+ public:
+ static MqttClientConnectionConfig CreateInvalid(int lastError) noexcept;
+
+ /**
+ * Creates a client configuration for use with making new AWS Iot specific MQTT Connections with MTLS.
+ *
+ * @param endpoint endpoint to connect to
+ * @param port port to connect to
+ * @param socketOptions socket options to use when establishing the connection
+ * @param tlsContext tls context that should be used for all connections sourced from this config
+ */
+ MqttClientConnectionConfig(
+ const Crt::String &endpoint,
+ uint16_t port,
+ const Crt::Io::SocketOptions &socketOptions,
+ Crt::Io::TlsContext &&tlsContext);
+
+ /**
+ * Creates a client configuration for making new AWS IoT specific MQTT connections over websockets.
+ * The interceptor is a callback invoked during the websocket handshake, giving you the opportunity to
+ * mutate the request for authorization/signing purposes. If not specified, it's assumed you don't need
+ * to sign the request. proxyOptions is optional; if you want to use a proxy with websockets, specify the
+ * configuration options here.
+ *
+ * If proxy options are used, the tlsContext is applied to the connection to the remote endpoint, NOT the
+ * proxy. To make a tls connection to the proxy itself, you'll want to specify tls options in proxyOptions.
+ *
+ * @param endpoint endpoint to connect to
+ * @param port port to connect to
+ * @param socketOptions socket options to use when establishing the connection
+ * @param tlsContext tls context that should be used for all connections sourced from this config
+ * @param interceptor websocket upgrade handshake transformation function
+ * @param proxyOptions proxy configuration options
+ */
+ MqttClientConnectionConfig(
+ const Crt::String &endpoint,
+ uint16_t port,
+ const Crt::Io::SocketOptions &socketOptions,
+ Crt::Io::TlsContext &&tlsContext,
+ Crt::Mqtt::OnWebSocketHandshakeIntercept &&interceptor,
+ const Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> &proxyOptions);
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return m_context ? true : false; }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_lastError; }
+
+ private:
+ MqttClientConnectionConfig(int lastError) noexcept;
+
+ MqttClientConnectionConfig(
+ const Crt::String &endpoint,
+ uint16_t port,
+ const Crt::Io::SocketOptions &socketOptions,
+ Crt::Io::TlsContext &&tlsContext,
+ const Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> &proxyOptions);
+
+ Crt::String m_endpoint;
+ uint16_t m_port;
+ Crt::Io::TlsContext m_context;
+ Crt::Io::SocketOptions m_socketOptions;
+ Crt::Mqtt::OnWebSocketHandshakeIntercept m_webSocketInterceptor;
+ Crt::String m_username;
+ Crt::String m_password;
+ Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> m_proxyOptions;
+ int m_lastError;
+
+ friend class MqttClient;
+ friend class MqttClientConnectionConfigBuilder;
+ };
+
+ /**
+ * Represents configuration parameters for building a MqttClientConnectionConfig object. You can use a single
+ * instance of this class PER MqttClientConnectionConfig you want to generate. If you want to generate a config
+ * for a different endpoint, port, etc., you need a new instance of this class.
+ */
+ class AWS_CRT_CPP_API MqttClientConnectionConfigBuilder final
+ {
+ public:
+ MqttClientConnectionConfigBuilder();
+
+ /**
+ * Sets the builder up for MTLS using certPath and pkeyPath. These are files on disk and must be in the PEM
+ * format.
+ *
+ * @param certPath path to the X509 certificate (pem file) to use
+ * @param pkeyPath path to the private key (pem file) to use
+ * @param allocator memory allocator to use
+ */
+ MqttClientConnectionConfigBuilder(
+ const char *certPath,
+ const char *pkeyPath,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for MTLS using cert and pkey. These are in-memory buffers and must be in the PEM
+ * format.
+ *
+ * @param cert buffer containing the X509 certificate in a PEM format
+ * @param pkey buffer containing the private key in a PEM format
+ * @param allocator memory allocator to use
+ */
+ MqttClientConnectionConfigBuilder(
+ const Crt::ByteCursor &cert,
+ const Crt::ByteCursor &pkey,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for MTLS, using a PKCS#11 library for private key operations.
+ *
+ * NOTE: This only works on Unix devices.
+ *
+ * @param pkcs11Options PKCS#11 options
+ * @param allocator memory allocator to use
+ */
+ MqttClientConnectionConfigBuilder(
+ const Crt::Io::TlsContextPkcs11Options &pkcs11Options,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for MTLS, using a certificate in a Windows certificate store.
+ *
+ * NOTE: This only works on Windows.
+ *
+ * @param windowsCertStorePath Path to certificate in a Windows certificate store.
+ * The path must use backslashes and end with the certificate's thumbprint.
+ * Example: `CurrentUser\MY\A11F8A9B5DF5B98BA3508FBCA575D09570E0D2C6`
+ * @param allocator memory allocator to use
+ */
+ MqttClientConnectionConfigBuilder(
+ const char *windowsCertStorePath,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Sets the builder up for Websocket connection.
+ *
+ * @param config websocket configuration information
+ * @param allocator memory allocator to use
+ */
+ MqttClientConnectionConfigBuilder(
+ const WebsocketConfig &config,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Creates a new builder with default Tls options. This requires setting the connection details manually.
+ *
+ * @return a new builder with default Tls options
+ */
+ static MqttClientConnectionConfigBuilder NewDefaultBuilder() noexcept;
+
+ /**
+ * Sets endpoint to connect to.
+ *
+ * @param endpoint endpoint to connect to
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithEndpoint(const Crt::String &endpoint);
+
+ /**
+ * Sets endpoint to connect to.
+ *
+ * @param endpoint endpoint to connect to
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithEndpoint(Crt::String &&endpoint);
+
+ /**
+ * Overrides the default port. By default, if ALPN is supported, 443 will be used. Otherwise 8883 will be
+ * used. If you specify 443 and ALPN is not supported, we will still attempt to connect over 443 without
+ * ALPN.
+ *
+ * @param port port to connect to
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithPortOverride(uint16_t port) noexcept;
+
+ /**
+ * Sets the certificate authority for the endpoint you're connecting to. This is a path to a file on disk
+ * and must be in PEM format.
+ *
+ * @param caPath path to the CA file in PEM format
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithCertificateAuthority(const char *caPath) noexcept;
+
+ /**
+ * Sets the certificate authority for the endpoint you're connecting to. This is an in-memory buffer and
+ * must be in PEM format.
+ *
+ * @param cert buffer containing the CA certificate in a PEM format
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithCertificateAuthority(const Crt::ByteCursor &cert) noexcept;
+
+ /**
+ * TCP option: Enables TCP keep alive. Defaults to off.
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithTcpKeepAlive() noexcept;
+
+ /**
+ * TCP option: Sets the connect timeout. Defaults to 3 seconds.
+ *
+ * @param connectTimeoutMs socket connection timeout
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithTcpConnectTimeout(uint32_t connectTimeoutMs) noexcept;
+
+ /**
+ * TCP option: Sets the time before keep alive probes are sent. Defaults to kernel defaults.
+ *
+ * @param keepAliveTimeoutSecs time interval of no activity, in seconds, before keep alive probes
+ * get sent
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithTcpKeepAliveTimeout(uint16_t keepAliveTimeoutSecs) noexcept;
+
+ /**
+ * TCP option: Sets the frequency of sending keep alive probes in seconds once the keep alive timeout
+ * expires. Defaults to kernel defaults.
+ *
+ * @param keepAliveIntervalSecs the frequency of sending keep alive probes in seconds once the keep alive
+ * timeout expires
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithTcpKeepAliveInterval(uint16_t keepAliveIntervalSecs) noexcept;
+
+ /**
+ * TCP option: Sets the number of keep alive probes allowed to fail before the connection is terminated.
+ * Defaults to kernel defaults.
+ *
+ * @param maxProbes the number of keep alive probes allowed to fail before the connection is terminated
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithTcpKeepAliveMaxProbes(uint16_t maxProbes) noexcept;
+
+ /**
+ * Sets the minimum tls version that is acceptable for connection establishment
+ *
+ * @param minimumTlsVersion minimum tls version allowed in client connections
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithMinimumTlsVersion(aws_tls_versions minimumTlsVersion) noexcept;
+
+ /**
+ * Sets http proxy options.
+ *
+ * @param proxyOptions proxy configuration options for connection establishment
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithHttpProxyOptions(
+ const Crt::Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept;
+
+ /**
+ * Whether to send the SDK name and version number in the MQTT CONNECT packet.
+ * Default is True.
+ *
+ * @param enabled true to send SDK version/name in the connect for metrics gathering purposes
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithMetricsCollection(bool enabled);
+
+ /**
+ * Overrides the default SDK Name to send as a metric in the MQTT CONNECT packet.
+ *
+ * @param sdkName string to use as the SDK name parameter in the connection string
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithSdkName(const Crt::String &sdkName);
+
+ /**
+ * Overrides the default SDK Version to send as a metric in the MQTT CONNECT packet.
+ *
+ * @param sdkVersion string to use as the SDK version parameter in the connection string
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithSdkVersion(const Crt::String &sdkVersion);
+
+ /**
+ * Sets the custom authorizer settings. This function will modify the username, port, and TLS options.
+ *
+ * @param username The username to use with the custom authorizer. If an empty string is passed, it will
+ * check to see if a username has already been set (via WithUsername function). If no
+ * username is set then no username will be passed with the MQTT connection.
+ * @param authorizerName The name of the custom authorizer. If an empty string is passed, then
+ * 'x-amz-customauthorizer-name' will not be added with the MQTT connection.
+ * @param authorizerSignature The signature of the custom authorizer. If an empty string is passed, then
+ * 'x-amz-customauthorizer-signature' will not be added with the MQTT connection.
+ * @param password The password to use with the custom authorizer. If null is passed, then no password will
+ * be set.
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithCustomAuthorizer(
+ const Crt::String &username,
+ const Crt::String &authorizerName,
+ const Crt::String &authorizerSignature,
+ const Crt::String &password) noexcept;
+
+ /**
+ * Sets username for the connection
+ *
+ * @param username the username that will be passed with the MQTT connection
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithUsername(const Crt::String &username) noexcept;
+
+ /**
+ * Sets password for the connection
+ *
+ * @param password the password that will be passed with the MQTT connection
+ *
+ * @return this builder object
+ */
+ MqttClientConnectionConfigBuilder &WithPassword(const Crt::String &password) noexcept;
+
+ /**
+ * Builds a client configuration object from the set options.
+ *
+ * @return a new client connection config instance
+ */
+ MqttClientConnectionConfig Build() noexcept;
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return m_lastError == 0; }
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_lastError ? m_lastError : AWS_ERROR_UNKNOWN; }
+
+ private:
+ // Common setup shared by all valid constructors
+ MqttClientConnectionConfigBuilder(Crt::Allocator *allocator) noexcept;
+
+ // Helper function to add parameters to the username in the WithCustomAuthorizer function
+ Crt::String AddToUsernameParameter(
+ Crt::String currentUsername,
+ Crt::String parameterValue,
+ Crt::String parameterPreText);
+
+ Crt::Allocator *m_allocator;
+ Crt::String m_endpoint;
+ uint16_t m_portOverride;
+ Crt::Io::SocketOptions m_socketOptions;
+ Crt::Io::TlsContextOptions m_contextOptions;
+ Crt::Optional<WebsocketConfig> m_websocketConfig;
+ Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> m_proxyOptions;
+ bool m_enableMetricsCollection = true;
+ Crt::String m_sdkName = "CPPv2";
+ Crt::String m_sdkVersion;
+ Crt::String m_username = "";
+ Crt::String m_password = "";
+ bool m_isUsingCustomAuthorizer = false;
+
+ int m_lastError;
+ };
+
+ /**
+ * AWS IoT specific MQTT client. Sets defaults for using the AWS IoT service. You'll need an instance of
+ * MqttClientConnectionConfig to use it. Once NewConnection returns, you use its return value identically
+ * to how you would use Aws::Crt::Mqtt::MqttConnection.
+ */
+ class AWS_CRT_CPP_API MqttClient final
+ {
+ public:
+ MqttClient(Crt::Io::ClientBootstrap &bootstrap, Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Constructs a new Mqtt Client object using the static default ClientBootstrap.
+ *
+ * For more information on the default ClientBootstrap see
+ * Aws::Crt::ApiHandle::GetOrCreateDefaultClientBootstrap
+ */
+ MqttClient(Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Creates a new mqtt connection from a connection configuration object
+ * @param config mqtt connection configuration
+ * @return a new mqtt connection
+ */
+ std::shared_ptr<Crt::Mqtt::MqttConnection> NewConnection(const MqttClientConnectionConfig &config) noexcept;
+
+ /**
+ * @return the value of the last aws error encountered by operations on this instance.
+ */
+ int LastError() const noexcept { return m_client.LastError(); }
+
+ /**
+ * @return true if the instance is in a valid state, false otherwise.
+ */
+ explicit operator bool() const noexcept { return m_client ? true : false; }
+
+ private:
+ Crt::Mqtt::MqttClient m_client;
+ int m_lastError;
+ };
+ } // namespace Iot
+} // namespace Aws
+
+#endif // !BYO_CRYPTO
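
A brief sketch of the MQTT 3.1.1 path declared above. The endpoint, client id, and PEM paths are placeholders, and OnConnectionCompleted/Connect belong to Aws::Crt::Mqtt::MqttConnection (aws/crt/mqtt/MqttClient.h), which is not part of this hunk.

    // Sketch only: placeholder endpoint/credentials; error handling kept minimal.
    #include <aws/crt/Api.h>
    #include <aws/iot/MqttClient.h>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        Aws::Iot::MqttClientConnectionConfigBuilder builder("cert.pem", "key.pem");
        builder.WithEndpoint("example-ats.iot.us-east-1.amazonaws.com");

        Aws::Iot::MqttClientConnectionConfig config = builder.Build();
        if (!config)
        {
            return config.LastError();
        }

        Aws::Iot::MqttClient client; // uses the static default ClientBootstrap
        auto connection = client.NewConnection(config);
        if (!connection)
        {
            return client.LastError();
        }

        // Handler type comes from the Crt MqttConnection header, outside this hunk.
        connection->OnConnectionCompleted =
            [](Aws::Crt::Mqtt::MqttConnection &, int errorCode, Aws::Crt::Mqtt::ReturnCode, bool)
        { (void)errorCode; /* connected, or inspect errorCode */ };
        connection->Connect("sample-client-id", true /*cleanSession*/, 1200 /*keepAliveSecs*/);
        return 0;
    }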
diff --git a/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttCommon.h b/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttCommon.h
new file mode 100644
index 0000000000..80c4f14084
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/include/aws/iot/MqttCommon.h
@@ -0,0 +1,103 @@
+#pragma once
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Config.h>
+#include <aws/crt/Exports.h>
+#include <aws/crt/auth/Sigv4Signing.h>
+#include <aws/crt/mqtt/MqttClient.h>
+
+#if !BYO_CRYPTO
+
+namespace Aws
+{
+ namespace Iot
+ {
+
+ using CreateSigningConfig = std::function<std::shared_ptr<Crt::Auth::ISigningConfig>(void)>;
+
+ /**
+ * Class encapsulating configuration for establishing an AWS IoT MQTT connection via websockets.
+ */
+ struct AWS_CRT_CPP_API WebsocketConfig
+ {
+ /**
+ * Create a websocket configuration for use with the default credentials provider chain. Signing region
+ * will be used for Sigv4 signature calculations.
+ *
+ * @param signingRegion Aws region that is being connected to. Required in order to properly sign the
+ * handshake upgrade request
+ * @param bootstrap client bootstrap to establish any connections needed by the default credentials
+ * provider chain which will get built for the user
+ * @param allocator memory allocator to use
+ */
+ WebsocketConfig(
+ const Crt::String &signingRegion,
+ Crt::Io::ClientBootstrap *bootstrap,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Create a websocket configuration for use with the default credentials provider chain and default
+ * ClientBootstrap. Signing region will be used for Sigv4 signature calculations.
+ *
+ * For more information on the default ClientBootstrap see
+ * Aws::Crt::ApiHandle::GetOrCreateDefaultClientBootstrap
+ *
+ * @param signingRegion Aws region that is being connected to. Required in order to properly sign the
+ * handshake upgrade request
+ * @param allocator memory allocator to use
+ */
+ WebsocketConfig(const Crt::String &signingRegion, Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Create a websocket configuration for use with a custom credentials provider. Signing region will be used
+ * for Sigv4 signature calculations.
+ *
+ * @param signingRegion Aws region that is being connected to. Required in order to properly sign the
+ * handshake upgrade request
+ * @param credentialsProvider credentials provider to source AWS credentials from
+ * @param allocator memory allocator to use
+ */
+ WebsocketConfig(
+ const Crt::String &signingRegion,
+ const std::shared_ptr<Crt::Auth::ICredentialsProvider> &credentialsProvider,
+ Crt::Allocator *allocator = Crt::ApiAllocator()) noexcept;
+
+ /**
+ * Create a websocket configuration for use with a custom credentials provider, and a custom signer.
+ *
+ * You'll need to provide a function for use with creating a signing Config and pass it to
+ * createSigningConfig.
+ *
+ * This is useful for use cases such as:
+ * https://docs.aws.amazon.com/iot/latest/developerguide/custom-auth.html
+ *
+ * @param credentialsProvider credentials provider
+ * @param signer HTTP request signer
+ * @param createSigningConfig function that creates a signing config
+ */
+ WebsocketConfig(
+ const std::shared_ptr<Crt::Auth::ICredentialsProvider> &credentialsProvider,
+ const std::shared_ptr<Crt::Auth::IHttpRequestSigner> &signer,
+ CreateSigningConfig createSigningConfig) noexcept;
+
+ std::shared_ptr<Crt::Auth::ICredentialsProvider> CredentialsProvider;
+ std::shared_ptr<Crt::Auth::IHttpRequestSigner> Signer;
+ CreateSigningConfig CreateSigningConfigCb;
+
+ /**
+ * @deprecated Specify ProxyOptions to use a proxy with your websocket connection.
+ *
+ * If MqttClientConnectionConfigBuilder::m_proxyOptions is valid, then that will be used over
+ * this value.
+ */
+ Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> ProxyOptions;
+ Crt::String SigningRegion;
+ Crt::String ServiceName;
+ };
+
+ } // namespace Iot
+} // namespace Aws
+
+#endif // !BYO_CRYPTO
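
A short sketch of the websocket configuration above, combined with the builder from MqttClient.h: a Sigv4-signed handshake using the default credentials chain and the default ClientBootstrap. The region and endpoint are placeholders, and the sketch assumes credentials are resolvable in the environment.

    // Sketch only: region/endpoint are placeholders.
    #include <aws/crt/Api.h>
    #include <aws/iot/MqttClient.h>
    #include <aws/iot/MqttCommon.h>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        // Signing region only: default credentials provider chain + default bootstrap.
        Aws::Iot::WebsocketConfig wsConfig("us-east-1");

        Aws::Iot::MqttClientConnectionConfigBuilder builder(wsConfig);
        builder.WithEndpoint("example-ats.iot.us-east-1.amazonaws.com");

        Aws::Iot::MqttClientConnectionConfig config = builder.Build();
        return config ? 0 : config.LastError();
    }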
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp b/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
new file mode 100644
index 0000000000..a27071ef5e
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/Allocator.cpp
@@ -0,0 +1,21 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Allocator.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+
+ Allocator *DefaultAllocatorImplementation() noexcept { return aws_default_allocator(); }
+
+ Allocator *DefaultAllocator() noexcept { return DefaultAllocatorImplementation(); }
+
+ Allocator *g_allocator = Aws::Crt::DefaultAllocatorImplementation();
+
+ Allocator *ApiAllocator() noexcept { return g_allocator; }
+
+ } // namespace Crt
+} // namespace Aws
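
A tiny sketch of how the allocator hooks above are typically consumed: ApiAllocator() returns the same allocator that the defaulted `Crt::Allocator *allocator = Crt::ApiAllocator()` parameters in the headers above supply implicitly. The MqttClient use here is just an illustration.

    // Sketch only: passing ApiAllocator() explicitly is equivalent to relying on the default argument.
    #include <aws/crt/Api.h>
    #include <aws/iot/MqttClient.h>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;                         // points g_allocator at the default allocator
        Aws::Crt::Allocator *alloc = Aws::Crt::ApiAllocator(); // the process-wide CRT allocator
        Aws::Iot::MqttClient client(alloc);                    // equivalent to: Aws::Iot::MqttClient client;
        return client ? 0 : client.LastError();
    }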
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp b/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
new file mode 100644
index 0000000000..d7a7a03cf5
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/Api.cpp
@@ -0,0 +1,405 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Api.h>
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/external/cJSON.h>
+#include <aws/crt/io/TlsOptions.h>
+
+#include <aws/auth/auth.h>
+#include <aws/common/ref_count.h>
+#include <aws/event-stream/event_stream.h>
+#include <aws/http/http.h>
+#include <aws/mqtt/mqtt.h>
+#include <aws/s3/s3.h>
+
+#include <thread>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ static Crypto::CreateHashCallback s_BYOCryptoNewMD5Callback;
+ static Crypto::CreateHashCallback s_BYOCryptoNewSHA256Callback;
+ static Crypto::CreateHMACCallback s_BYOCryptoNewSHA256HMACCallback;
+ static Io::NewClientTlsHandlerCallback s_BYOCryptoNewClientTlsHandlerCallback;
+ static Io::NewTlsContextImplCallback s_BYOCryptoNewTlsContextImplCallback;
+ static Io::DeleteTlsContextImplCallback s_BYOCryptoDeleteTlsContextImplCallback;
+ static Io::IsTlsAlpnSupportedCallback s_BYOCryptoIsTlsAlpnSupportedCallback;
+
+ Io::ClientBootstrap *ApiHandle::s_static_bootstrap = nullptr;
+ Io::EventLoopGroup *ApiHandle::s_static_event_loop_group = nullptr;
+ int ApiHandle::s_host_resolver_default_max_hosts = 8;
+ Io::HostResolver *ApiHandle::s_static_default_host_resolver = nullptr;
+ std::mutex ApiHandle::s_lock_client_bootstrap;
+ std::mutex ApiHandle::s_lock_event_loop_group;
+ std::mutex ApiHandle::s_lock_default_host_resolver;
+
+ static void *s_cJSONAlloc(size_t sz) { return aws_mem_acquire(ApiAllocator(), sz); }
+
+ static void s_cJSONFree(void *ptr) { return aws_mem_release(ApiAllocator(), ptr); }
+
+ static void s_initApi(Allocator *allocator)
+ {
+ // sets up the StlAllocator for use.
+ g_allocator = allocator;
+ aws_mqtt_library_init(allocator);
+ aws_s3_library_init(allocator);
+ aws_event_stream_library_init(allocator);
+ aws_sdkutils_library_init(allocator);
+
+ cJSON_Hooks hooks;
+ hooks.malloc_fn = s_cJSONAlloc;
+ hooks.free_fn = s_cJSONFree;
+ cJSON_InitHooks(&hooks);
+ }
+
+ ApiHandle::ApiHandle(Allocator *allocator) noexcept
+ : m_logger(), m_shutdownBehavior(ApiHandleShutdownBehavior::Blocking)
+ {
+ s_initApi(allocator);
+ }
+
+ ApiHandle::ApiHandle() noexcept : m_logger(), m_shutdownBehavior(ApiHandleShutdownBehavior::Blocking)
+ {
+ s_initApi(DefaultAllocator());
+ }
+
+ ApiHandle::~ApiHandle()
+ {
+ ReleaseStaticDefaultClientBootstrap();
+ ReleaseStaticDefaultEventLoopGroup();
+ ReleaseStaticDefaultHostResolver();
+
+ if (m_shutdownBehavior == ApiHandleShutdownBehavior::Blocking)
+ {
+ aws_thread_join_all_managed();
+ }
+
+ if (aws_logger_get() == &m_logger)
+ {
+ aws_logger_set(NULL);
+ aws_logger_clean_up(&m_logger);
+ }
+
+ g_allocator = nullptr;
+ aws_s3_library_clean_up();
+ aws_mqtt_library_clean_up();
+ aws_event_stream_library_clean_up();
+ aws_sdkutils_library_clean_up();
+
+ s_BYOCryptoNewMD5Callback = nullptr;
+ s_BYOCryptoNewSHA256Callback = nullptr;
+ s_BYOCryptoNewSHA256HMACCallback = nullptr;
+ s_BYOCryptoNewClientTlsHandlerCallback = nullptr;
+ s_BYOCryptoNewTlsContextImplCallback = nullptr;
+ s_BYOCryptoDeleteTlsContextImplCallback = nullptr;
+ s_BYOCryptoIsTlsAlpnSupportedCallback = nullptr;
+ }
+
+ void ApiHandle::InitializeLogging(Aws::Crt::LogLevel level, const char *filename)
+ {
+ struct aws_logger_standard_options options;
+ AWS_ZERO_STRUCT(options);
+
+ options.level = (enum aws_log_level)level;
+ options.filename = filename;
+
+ InitializeLoggingCommon(options);
+ }
+
+ void ApiHandle::InitializeLogging(Aws::Crt::LogLevel level, FILE *fp)
+ {
+ struct aws_logger_standard_options options;
+ AWS_ZERO_STRUCT(options);
+
+ options.level = (enum aws_log_level)level;
+ options.file = fp;
+
+ InitializeLoggingCommon(options);
+ }
+
+ void ApiHandle::InitializeLoggingCommon(struct aws_logger_standard_options &options)
+ {
+ if (aws_logger_get() == &m_logger)
+ {
+ aws_logger_set(NULL);
+ aws_logger_clean_up(&m_logger);
+ if (options.level == AWS_LL_NONE)
+ {
+ AWS_ZERO_STRUCT(m_logger);
+ return;
+ }
+ }
+
+ if (aws_logger_init_standard(&m_logger, ApiAllocator(), &options))
+ {
+ return;
+ }
+
+ aws_logger_set(&m_logger);
+ }
+
+ void ApiHandle::SetShutdownBehavior(ApiHandleShutdownBehavior behavior) { m_shutdownBehavior = behavior; }
+
+#if BYO_CRYPTO
+ static struct aws_hash *s_MD5New(struct aws_allocator *allocator)
+ {
+ if (!s_BYOCryptoNewMD5Callback)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS, "Must call ApiHandle::SetBYOCryptoNewMD5Callback() before MD5 hash can be created");
+ aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ return nullptr;
+ }
+
+ auto hash = s_BYOCryptoNewMD5Callback(AWS_MD5_LEN, allocator);
+ if (!hash)
+ {
+ return nullptr;
+ }
+ return hash->SeatForCInterop(hash);
+ }
+
+ void ApiHandle::SetBYOCryptoNewMD5Callback(Crypto::CreateHashCallback &&callback)
+ {
+ s_BYOCryptoNewMD5Callback = std::move(callback);
+ aws_set_md5_new_fn(s_MD5New);
+ }
+
+ static struct aws_hash *s_Sha256New(struct aws_allocator *allocator)
+ {
+ if (!s_BYOCryptoNewSHA256Callback)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "Must call ApiHandle::SetBYOCryptoNewSHA256Callback() before SHA256 hash can be created");
+ aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ return nullptr;
+ }
+
+ auto hash = s_BYOCryptoNewSHA256Callback(AWS_SHA256_LEN, allocator);
+ if (!hash)
+ {
+ return nullptr;
+ }
+ return hash->SeatForCInterop(hash);
+ }
+
+ void ApiHandle::SetBYOCryptoNewSHA256Callback(Crypto::CreateHashCallback &&callback)
+ {
+ s_BYOCryptoNewSHA256Callback = std::move(callback);
+ aws_set_sha256_new_fn(s_Sha256New);
+ }
+
+ static struct aws_hmac *s_sha256HMACNew(struct aws_allocator *allocator, const struct aws_byte_cursor *secret)
+ {
+ if (!s_BYOCryptoNewSHA256HMACCallback)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "Must call ApiHandle::SetBYOCryptoNewSHA256HMACCallback() before SHA256 HMAC can be created");
+ aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ return nullptr;
+ }
+
+ auto hmac = s_BYOCryptoNewSHA256HMACCallback(AWS_SHA256_HMAC_LEN, *secret, allocator);
+ if (!hmac)
+ {
+ return nullptr;
+ }
+ return hmac->SeatForCInterop(hmac);
+ }
+
+ void ApiHandle::SetBYOCryptoNewSHA256HMACCallback(Crypto::CreateHMACCallback &&callback)
+ {
+ s_BYOCryptoNewSHA256HMACCallback = std::move(callback);
+ aws_set_sha256_hmac_new_fn(s_sha256HMACNew);
+ }
+
+ static struct aws_channel_handler *s_NewClientTlsHandler(
+ struct aws_allocator *allocator,
+ struct aws_tls_connection_options *options,
+ struct aws_channel_slot *slot,
+ void *)
+ {
+ if (!s_BYOCryptoNewClientTlsHandlerCallback)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "Must call ApiHandle::SetBYOCryptoClientTlsCallback() before client TLS handler can be created");
+ aws_raise_error(AWS_ERROR_UNIMPLEMENTED);
+ return nullptr;
+ }
+
+ auto clientHandlerSelfReferencing = s_BYOCryptoNewClientTlsHandlerCallback(slot, *options, allocator);
+ if (!clientHandlerSelfReferencing)
+ {
+ return nullptr;
+ }
+ return clientHandlerSelfReferencing->SeatForCInterop(clientHandlerSelfReferencing);
+ }
+
+ static int s_ClientTlsHandlerStartNegotiation(struct aws_channel_handler *handler, void *)
+ {
+ auto *clientHandler = reinterpret_cast<Io::ClientTlsChannelHandler *>(handler->impl);
+ if (clientHandler->ChannelsThreadIsCallersThread())
+ {
+ clientHandler->StartNegotiation();
+ }
+ else
+ {
+ clientHandler->ScheduleTask([clientHandler](Io::TaskStatus) { clientHandler->StartNegotiation(); });
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ void ApiHandle::SetBYOCryptoClientTlsCallback(Io::NewClientTlsHandlerCallback &&callback)
+ {
+ s_BYOCryptoNewClientTlsHandlerCallback = std::move(callback);
+ struct aws_tls_byo_crypto_setup_options setupOptions;
+ setupOptions.new_handler_fn = s_NewClientTlsHandler;
+ setupOptions.start_negotiation_fn = s_ClientTlsHandlerStartNegotiation;
+ setupOptions.user_data = nullptr;
+ aws_tls_byo_crypto_set_client_setup_options(&setupOptions);
+ }
+
+ void ApiHandle::SetBYOCryptoTlsContextCallbacks(
+ Io::NewTlsContextImplCallback &&newCallback,
+ Io::DeleteTlsContextImplCallback &&deleteCallback,
+ Io::IsTlsAlpnSupportedCallback &&alpnCallback)
+ {
+ s_BYOCryptoNewTlsContextImplCallback = newCallback;
+ s_BYOCryptoDeleteTlsContextImplCallback = deleteCallback;
+ s_BYOCryptoIsTlsAlpnSupportedCallback = alpnCallback;
+ }
+
+#else // BYO_CRYPTO
+ void ApiHandle::SetBYOCryptoNewMD5Callback(Crypto::CreateHashCallback &&)
+ {
+ AWS_LOGF_WARN(AWS_LS_IO_TLS, "SetBYOCryptoNewMD5Callback() has no effect unless compiled with BYO_CRYPTO");
+ }
+
+ void ApiHandle::SetBYOCryptoNewSHA256Callback(Crypto::CreateHashCallback &&)
+ {
+ AWS_LOGF_WARN(
+ AWS_LS_IO_TLS, "SetBYOCryptoNewSHA256Callback() has no effect unless compiled with BYO_CRYPTO");
+ }
+
+ void ApiHandle::SetBYOCryptoNewSHA256HMACCallback(Crypto::CreateHMACCallback &&)
+ {
+ AWS_LOGF_WARN(
+ AWS_LS_IO_TLS, "SetBYOCryptoNewSHA256HMACCallback() has no effect unless compiled with BYO_CRYPTO");
+ }
+
+ void ApiHandle::SetBYOCryptoClientTlsCallback(Io::NewClientTlsHandlerCallback &&)
+ {
+ AWS_LOGF_WARN(
+ AWS_LS_IO_TLS, "SetBYOCryptoClientTlsCallback() has no effect unless compiled with BYO_CRYPTO");
+ }
+
+ void ApiHandle::SetBYOCryptoTlsContextCallbacks(
+ Io::NewTlsContextImplCallback &&,
+ Io::DeleteTlsContextImplCallback &&,
+ Io::IsTlsAlpnSupportedCallback &&)
+ {
+ AWS_LOGF_WARN(
+ AWS_LS_IO_TLS, "SetBYOCryptoTlsContextCallbacks() has no effect unless compiled with BYO_CRYPTO");
+ }
+#endif // BYO_CRYPTO
+
+ Io::ClientBootstrap *ApiHandle::GetOrCreateStaticDefaultClientBootstrap()
+ {
+ std::lock_guard<std::mutex> lock(s_lock_client_bootstrap);
+ if (s_static_bootstrap == nullptr)
+ {
+ s_static_bootstrap = Aws::Crt::New<Io::ClientBootstrap>(
+ ApiAllocator(), *GetOrCreateStaticDefaultEventLoopGroup(), *GetOrCreateStaticDefaultHostResolver());
+ }
+ return s_static_bootstrap;
+ }
+
+ Io::EventLoopGroup *ApiHandle::GetOrCreateStaticDefaultEventLoopGroup()
+ {
+ std::lock_guard<std::mutex> lock(s_lock_event_loop_group);
+ if (s_static_event_loop_group == nullptr)
+ {
+ s_static_event_loop_group = Aws::Crt::New<Io::EventLoopGroup>(ApiAllocator(), (uint16_t)0);
+ }
+ return s_static_event_loop_group;
+ }
+
+ Io::HostResolver *ApiHandle::GetOrCreateStaticDefaultHostResolver()
+ {
+ std::lock_guard<std::mutex> lock(s_lock_default_host_resolver);
+ if (s_static_default_host_resolver == nullptr)
+ {
+ s_static_default_host_resolver = Aws::Crt::New<Io::DefaultHostResolver>(
+ ApiAllocator(), *GetOrCreateStaticDefaultEventLoopGroup(), 1, s_host_resolver_default_max_hosts);
+ }
+ return s_static_default_host_resolver;
+ }
+
+ void ApiHandle::ReleaseStaticDefaultClientBootstrap()
+ {
+ std::lock_guard<std::mutex> lock(s_lock_client_bootstrap);
+ if (s_static_bootstrap != nullptr)
+ {
+ Aws::Crt::Delete(s_static_bootstrap, ApiAllocator());
+ s_static_bootstrap = nullptr;
+ }
+ }
+
+ void ApiHandle::ReleaseStaticDefaultEventLoopGroup()
+ {
+ std::lock_guard<std::mutex> lock(s_lock_event_loop_group);
+ if (s_static_event_loop_group != nullptr)
+ {
+ Aws::Crt::Delete(s_static_event_loop_group, ApiAllocator());
+ s_static_event_loop_group = nullptr;
+ }
+ }
+
+ void ApiHandle::ReleaseStaticDefaultHostResolver()
+ {
+ std::lock_guard<std::mutex> lock(s_lock_default_host_resolver);
+ if (s_static_default_host_resolver != nullptr)
+ {
+ Aws::Crt::Delete(s_static_default_host_resolver, ApiAllocator());
+ s_static_default_host_resolver = nullptr;
+ }
+ }
+
+ const Io::NewTlsContextImplCallback &ApiHandle::GetBYOCryptoNewTlsContextImplCallback()
+ {
+ return s_BYOCryptoNewTlsContextImplCallback;
+ }
+
+ const Io::DeleteTlsContextImplCallback &ApiHandle::GetBYOCryptoDeleteTlsContextImplCallback()
+ {
+ return s_BYOCryptoDeleteTlsContextImplCallback;
+ }
+
+ const Io::IsTlsAlpnSupportedCallback &ApiHandle::GetBYOCryptoIsTlsAlpnSupportedCallback()
+ {
+ return s_BYOCryptoIsTlsAlpnSupportedCallback;
+ }
+
+ const char *ErrorDebugString(int error) noexcept { return aws_error_debug_str(error); }
+
+ int LastError() noexcept { return aws_last_error(); }
+
+ int LastErrorOrUnknown() noexcept
+ {
+ int last_error = aws_last_error();
+ if (last_error == AWS_ERROR_SUCCESS)
+ {
+ last_error = AWS_ERROR_UNKNOWN;
+ }
+
+ return last_error;
+ }
+
+ } // namespace Crt
+} // namespace Aws
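
A minimal sketch of the ApiHandle lifecycle implemented above: standard-logger setup, shutdown behavior, and the lazily created static defaults. LogLevel comes from the Crt types header, which is not part of this hunk.

    // Sketch only: logging to stderr at Info level; defaults are released in ~ApiHandle().
    #include <aws/crt/Api.h>
    #include <cstdio>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;
        apiHandle.InitializeLogging(Aws::Crt::LogLevel::Info, stderr);
        apiHandle.SetShutdownBehavior(Aws::Crt::ApiHandleShutdownBehavior::Blocking);

        // Created on first use, shared process-wide, released when the ApiHandle is destroyed.
        Aws::Crt::Io::ClientBootstrap *bootstrap =
            Aws::Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap();
        return bootstrap != nullptr ? 0 : Aws::Crt::LastErrorOrUnknown();
    }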
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp b/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
new file mode 100644
index 0000000000..8e550cc91f
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/DateTime.cpp
@@ -0,0 +1,200 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/DateTime.h>
+
+#include <chrono>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ DateTime::DateTime() noexcept : m_good(true)
+ {
+ std::chrono::system_clock::time_point time;
+ aws_date_time_init_epoch_millis(
+ &m_date_time,
+ static_cast<uint64_t>(
+ std::chrono::duration_cast<std::chrono::milliseconds>(time.time_since_epoch()).count()));
+ }
+
+ DateTime::DateTime(const std::chrono::system_clock::time_point &timepointToAssign) noexcept : m_good(true)
+ {
+ aws_date_time_init_epoch_millis(
+ &m_date_time,
+ static_cast<uint64_t>(
+ std::chrono::duration_cast<std::chrono::milliseconds>(timepointToAssign.time_since_epoch())
+ .count()));
+ }
+
+ DateTime::DateTime(uint64_t millisSinceEpoch) noexcept : m_good(true)
+ {
+ aws_date_time_init_epoch_millis(&m_date_time, millisSinceEpoch);
+ }
+
+ DateTime::DateTime(double epoch_millis) noexcept : m_good(true)
+ {
+ aws_date_time_init_epoch_secs(&m_date_time, epoch_millis);
+ }
+
+ DateTime::DateTime(const char *timestamp, DateFormat format) noexcept
+ {
+ ByteBuf timeStampBuf = ByteBufFromCString(timestamp);
+
+ m_good =
+ (aws_date_time_init_from_str(&m_date_time, &timeStampBuf, static_cast<aws_date_format>(format)) ==
+ AWS_ERROR_SUCCESS);
+ }
+
+ bool DateTime::operator==(const DateTime &other) const noexcept
+ {
+ return aws_date_time_diff(&m_date_time, &other.m_date_time) == 0;
+ }
+
+ bool DateTime::operator<(const DateTime &other) const noexcept
+ {
+ return aws_date_time_diff(&m_date_time, &other.m_date_time) < 0;
+ }
+
+ bool DateTime::operator>(const DateTime &other) const noexcept
+ {
+ return aws_date_time_diff(&m_date_time, &other.m_date_time) > 0;
+ }
+
+ bool DateTime::operator!=(const DateTime &other) const noexcept { return !(*this == other); }
+
+ bool DateTime::operator<=(const DateTime &other) const noexcept
+ {
+ return aws_date_time_diff(&m_date_time, &other.m_date_time) <= 0;
+ }
+
+ bool DateTime::operator>=(const DateTime &other) const noexcept
+ {
+ return aws_date_time_diff(&m_date_time, &other.m_date_time) >= 0;
+ }
+
+ DateTime DateTime::operator+(const std::chrono::milliseconds &a) const noexcept
+ {
+ auto currentTime = aws_date_time_as_millis(&m_date_time);
+ currentTime += a.count();
+ return {currentTime};
+ }
+
+ DateTime DateTime::operator-(const std::chrono::milliseconds &a) const noexcept
+ {
+ auto currentTime = aws_date_time_as_millis(&m_date_time);
+ currentTime -= a.count();
+ return {currentTime};
+ }
+
+ DateTime &DateTime::operator=(double secondsSinceEpoch) noexcept
+ {
+ aws_date_time_init_epoch_secs(&m_date_time, secondsSinceEpoch);
+ m_good = true;
+ return *this;
+ }
+
+ DateTime &DateTime::operator=(uint64_t millisSinceEpoch) noexcept
+ {
+ aws_date_time_init_epoch_millis(&m_date_time, millisSinceEpoch);
+ m_good = true;
+ return *this;
+ }
+
+ DateTime &DateTime::operator=(const std::chrono::system_clock::time_point &timepointToAssign) noexcept
+ {
+ aws_date_time_init_epoch_millis(
+ &m_date_time,
+ static_cast<uint64_t>(
+ std::chrono::duration_cast<std::chrono::milliseconds>(timepointToAssign.time_since_epoch())
+ .count()));
+ m_good = true;
+ return *this;
+ }
+
+ DateTime &DateTime::operator=(const char *timestamp) noexcept
+ {
+ ByteBuf timeStampBuf = aws_byte_buf_from_c_str(timestamp);
+
+ m_good = aws_date_time_init_from_str(
+ &m_date_time, &timeStampBuf, static_cast<aws_date_format>(DateFormat::AutoDetect)) ==
+ AWS_ERROR_SUCCESS;
+ return *this;
+ }
+
+ DateTime::operator bool() const noexcept { return m_good; }
+
+ int DateTime::GetLastError() const noexcept { return aws_last_error(); }
+
+ bool DateTime::ToLocalTimeString(DateFormat format, ByteBuf &outputBuf) const noexcept
+ {
+ return (
+ aws_date_time_to_local_time_str(&m_date_time, static_cast<aws_date_format>(format), &outputBuf) ==
+ AWS_ERROR_SUCCESS);
+ }
+
+ bool DateTime::ToGmtString(DateFormat format, ByteBuf &outputBuf) const noexcept
+ {
+ return (
+ aws_date_time_to_utc_time_str(&m_date_time, static_cast<aws_date_format>(format), &outputBuf) ==
+ AWS_ERROR_SUCCESS);
+ }
+
+ double DateTime::SecondsWithMSPrecision() const noexcept { return aws_date_time_as_epoch_secs(&m_date_time); }
+
+ uint64_t DateTime::Millis() const noexcept { return aws_date_time_as_millis(&m_date_time); }
+
+ std::chrono::system_clock::time_point DateTime::UnderlyingTimestamp() const noexcept
+ {
+ return std::chrono::system_clock::from_time_t(m_date_time.timestamp);
+ }
+
+ uint16_t DateTime::GetYear(bool localTime) const noexcept
+ {
+ return aws_date_time_year(&m_date_time, localTime);
+ }
+
+ Month DateTime::GetMonth(bool localTime) const noexcept
+ {
+ return static_cast<Month>(aws_date_time_month(&m_date_time, localTime));
+ }
+
+ uint8_t DateTime::GetDay(bool localTime) const noexcept
+ {
+ return aws_date_time_month_day(&m_date_time, localTime);
+ }
+
+ DayOfWeek DateTime::GetDayOfWeek(bool localTime) const noexcept
+ {
+ return static_cast<DayOfWeek>(aws_date_time_day_of_week(&m_date_time, localTime));
+ }
+
+ uint8_t DateTime::GetHour(bool localTime) const noexcept { return aws_date_time_hour(&m_date_time, localTime); }
+
+ uint8_t DateTime::GetMinute(bool localTime) const noexcept
+ {
+ return aws_date_time_minute(&m_date_time, localTime);
+ }
+
+ uint8_t DateTime::GetSecond(bool localTime) const noexcept
+ {
+ return aws_date_time_second(&m_date_time, localTime);
+ }
+
+ bool DateTime::IsDST(bool localTime) const noexcept { return aws_date_time_dst(&m_date_time, localTime); }
+
+ DateTime DateTime::Now() noexcept
+ {
+ DateTime dateTime;
+ aws_date_time_init_now(&dateTime.m_date_time);
+ return dateTime;
+ }
+
+ std::chrono::milliseconds DateTime::operator-(const DateTime &other) const noexcept
+ {
+ auto diff = aws_date_time_diff(&m_date_time, &other.m_date_time);
+ return std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::seconds(diff));
+ }
+ } // namespace Crt
+} // namespace Aws
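
A short sketch of the DateTime wrappers above: arithmetic via operator+/operator- and GMT formatting. The raw aws_byte_buf_init/aws_byte_buf_clean_up calls come from aws-c-common, DateFormat::ISO_8601 from DateTime.h (neither is part of this hunk), and the 128-byte capacity is an arbitrary choice large enough for an ISO-8601 timestamp.

    // Sketch only: note that DateTime difference is computed from whole-second diffs (see operator- above).
    #include <aws/crt/Api.h>
    #include <aws/crt/DateTime.h>

    #include <chrono>
    #include <cstdio>

    int main()
    {
        Aws::Crt::DateTime now = Aws::Crt::DateTime::Now();
        Aws::Crt::DateTime later = now + std::chrono::milliseconds(1500);
        std::chrono::milliseconds delta = later - now; // whole-second precision

        Aws::Crt::ByteBuf buf;
        aws_byte_buf_init(&buf, Aws::Crt::ApiAllocator(), 128);
        if (now.ToGmtString(Aws::Crt::DateFormat::ISO_8601, buf))
        {
            printf("%.*s (+%lld ms)\n", (int)buf.len, (const char *)buf.buffer, (long long)delta.count());
        }
        aws_byte_buf_clean_up(&buf);
        return 0;
    }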
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp b/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
new file mode 100644
index 0000000000..cb9b1f9431
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/ImdsClient.cpp
@@ -0,0 +1,457 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/auth/aws_imds_client.h>
+#include <aws/auth/credentials.h>
+#include <aws/crt/Api.h>
+#include <aws/crt/ImdsClient.h>
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/crt/io/Bootstrap.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Imds
+ {
+ IamProfile::IamProfile(const IamProfileView &other)
+ : lastUpdated(other.lastUpdated),
+ instanceProfileArn(other.instanceProfileArn.data(), other.instanceProfileArn.size()),
+ instanceProfileId(other.instanceProfileId.data(), other.instanceProfileId.size())
+ {
+ }
+
+ IamProfile &IamProfile::operator=(const IamProfileView &other)
+ {
+ lastUpdated = other.lastUpdated;
+ instanceProfileArn = String(other.instanceProfileArn.data(), other.instanceProfileArn.size());
+ instanceProfileId = String(other.instanceProfileId.data(), other.instanceProfileId.size());
+ return *this;
+ }
+
+ InstanceInfo::InstanceInfo(const InstanceInfoView &other)
+ : availabilityZone(other.availabilityZone.data(), other.availabilityZone.size()),
+ privateIp(other.privateIp.data(), other.privateIp.size()),
+ version(other.version.data(), other.version.size()),
+ instanceId(other.instanceId.data(), other.instanceId.size()),
+ instanceType(other.instanceType.data(), other.instanceType.size()),
+ accountId(other.accountId.data(), other.accountId.size()),
+ imageId(other.imageId.data(), other.imageId.size()), pendingTime(other.pendingTime),
+ architecture(other.architecture.data(), other.architecture.size()),
+ kernelId(other.kernelId.data(), other.kernelId.size()),
+ ramdiskId(other.ramdiskId.data(), other.ramdiskId.size()),
+ region(other.region.data(), other.region.size())
+ {
+ for (const auto &m : other.marketplaceProductCodes)
+ {
+ marketplaceProductCodes.emplace_back(m.data(), m.size());
+ }
+
+ for (const auto &m : other.billingProducts)
+ {
+ billingProducts.emplace_back(m.data(), m.size());
+ }
+ }
+
+ InstanceInfo &InstanceInfo::operator=(const InstanceInfoView &other)
+ {
+ availabilityZone = {other.availabilityZone.data(), other.availabilityZone.size()};
+ privateIp = {other.privateIp.data(), other.privateIp.size()};
+ version = {other.version.data(), other.version.size()};
+ instanceId = {other.instanceId.data(), other.instanceId.size()};
+ instanceType = {other.instanceType.data(), other.instanceType.size()};
+ accountId = {other.accountId.data(), other.accountId.size()};
+ imageId = {other.imageId.data(), other.imageId.size()};
+ pendingTime = other.pendingTime;
+ architecture = {other.architecture.data(), other.architecture.size()};
+ kernelId = {other.kernelId.data(), other.kernelId.size()};
+ ramdiskId = {other.ramdiskId.data(), other.ramdiskId.size()};
+ region = {other.region.data(), other.region.size()};
+
+ for (const auto &m : other.marketplaceProductCodes)
+ {
+ marketplaceProductCodes.emplace_back(m.data(), m.size());
+ }
+
+ for (const auto &m : other.billingProducts)
+ {
+ billingProducts.emplace_back(m.data(), m.size());
+ }
+ return *this;
+ }
+
+ ImdsClient::ImdsClient(const ImdsClientConfig &config, Allocator *allocator) noexcept
+ {
+ struct aws_imds_client_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+ if (config.Bootstrap != nullptr)
+ {
+ raw_config.bootstrap = config.Bootstrap->GetUnderlyingHandle();
+ }
+ else
+ {
+ raw_config.bootstrap = ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ }
+
+ m_client = aws_imds_client_new(allocator, &raw_config);
+ m_allocator = allocator;
+ }
+
+ ImdsClient::~ImdsClient()
+ {
+ if (m_client)
+ {
+ aws_imds_client_release(m_client);
+ m_client = nullptr;
+ }
+ }
+
+ template <typename T> struct WrappedCallbackArgs
+ {
+ WrappedCallbackArgs(Allocator *allocator, T callback, void *userData)
+ : allocator(allocator), callback(callback), userData(userData)
+ {
+ }
+ Allocator *allocator;
+ T callback;
+ void *userData;
+ };
+
+ void ImdsClient::s_onResourceAcquired(const aws_byte_buf *resource, int errorCode, void *userData)
+ {
+ WrappedCallbackArgs<OnResourceAcquired> *callbackArgs =
+ static_cast<WrappedCallbackArgs<OnResourceAcquired> *>(userData);
+ callbackArgs->callback(
+ ByteCursorToStringView(aws_byte_cursor_from_buf(resource)), errorCode, callbackArgs->userData);
+ Aws::Crt::Delete(callbackArgs, callbackArgs->allocator);
+ }
+
+ void ImdsClient::s_onVectorResourceAcquired(const aws_array_list *array, int errorCode, void *userData)
+ {
+ WrappedCallbackArgs<OnVectorResourceAcquired> *callbackArgs =
+ static_cast<WrappedCallbackArgs<OnVectorResourceAcquired> *>(userData);
+ callbackArgs->callback(
+ ArrayListToVector<ByteCursor, StringView>(array, ByteCursorToStringView),
+ errorCode,
+ callbackArgs->userData);
+ Aws::Crt::Delete(callbackArgs, callbackArgs->allocator);
+ }
+
+ void ImdsClient::s_onCredentialsAcquired(const aws_credentials *credentials, int errorCode, void *userData)
+ {
+ WrappedCallbackArgs<OnCredentialsAcquired> *callbackArgs =
+ static_cast<WrappedCallbackArgs<OnCredentialsAcquired> *>(userData);
+ auto credentialsPtr = Aws::Crt::MakeShared<Auth::Credentials>(callbackArgs->allocator, credentials);
+ callbackArgs->callback(credentials, errorCode, callbackArgs->userData);
+ Aws::Crt::Delete(callbackArgs, callbackArgs->allocator);
+ }
+
+ void ImdsClient::s_onIamProfileAcquired(
+ const aws_imds_iam_profile *iamProfileInfo,
+ int errorCode,
+ void *userData)
+ {
+ WrappedCallbackArgs<OnIamProfileAcquired> *callbackArgs =
+ static_cast<WrappedCallbackArgs<OnIamProfileAcquired> *>(userData);
+ IamProfileView iamProfile;
+ iamProfile.lastUpdated = aws_date_time_as_epoch_secs(&(iamProfileInfo->last_updated));
+ iamProfile.instanceProfileArn = ByteCursorToStringView(iamProfileInfo->instance_profile_arn);
+ iamProfile.instanceProfileId = ByteCursorToStringView(iamProfileInfo->instance_profile_id);
+ callbackArgs->callback(iamProfile, errorCode, callbackArgs->userData);
+ Aws::Crt::Delete(callbackArgs, callbackArgs->allocator);
+ }
+
+ void ImdsClient::s_onInstanceInfoAcquired(
+ const aws_imds_instance_info *instanceInfo,
+ int errorCode,
+ void *userData)
+ {
+ WrappedCallbackArgs<OnInstanceInfoAcquired> *callbackArgs =
+ static_cast<WrappedCallbackArgs<OnInstanceInfoAcquired> *>(userData);
+ InstanceInfoView info;
+ info.marketplaceProductCodes = ArrayListToVector<ByteCursor, StringView>(
+ &(instanceInfo->marketplace_product_codes), ByteCursorToStringView);
+ info.availabilityZone = ByteCursorToStringView(instanceInfo->availability_zone);
+ info.privateIp = ByteCursorToStringView(instanceInfo->private_ip);
+ info.version = ByteCursorToStringView(instanceInfo->version);
+ info.instanceId = ByteCursorToStringView(instanceInfo->instance_id);
+ info.billingProducts = ArrayListToVector<ByteCursor, StringView>(
+ &(instanceInfo->billing_products), ByteCursorToStringView);
+ info.instanceType = ByteCursorToStringView(instanceInfo->instance_type);
+ info.accountId = ByteCursorToStringView(instanceInfo->account_id);
+ info.imageId = ByteCursorToStringView(instanceInfo->image_id);
+ info.pendingTime = aws_date_time_as_epoch_secs(&(instanceInfo->pending_time));
+ info.architecture = ByteCursorToStringView(instanceInfo->architecture);
+ info.kernelId = ByteCursorToStringView(instanceInfo->kernel_id);
+ info.ramdiskId = ByteCursorToStringView(instanceInfo->ramdisk_id);
+ info.region = ByteCursorToStringView(instanceInfo->region);
+ callbackArgs->callback(info, errorCode, callbackArgs->userData);
+ Aws::Crt::Delete(callbackArgs, callbackArgs->allocator);
+ }
+
+ int ImdsClient::GetResource(const StringView &resourcePath, OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+
+ return aws_imds_client_get_resource_async(
+ m_client, StringViewToByteCursor(resourcePath), s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetAmiId(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_ami_id(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetAmiLaunchIndex(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_ami_launch_index(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetAmiManifestPath(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_ami_manifest_path(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetAncestorAmiIds(OnVectorResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnVectorResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_ancestor_ami_ids(m_client, s_onVectorResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetInstanceAction(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_instance_action(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetInstanceId(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_instance_id(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetInstanceType(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_instance_type(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetMacAddress(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_mac_address(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetPrivateIpAddress(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_private_ip_address(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetAvailabilityZone(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_availability_zone(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetProductCodes(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_product_codes(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetPublicKey(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_public_key(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetRamDiskId(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_ramdisk_id(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetReservationId(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_reservation_id(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetSecurityGroups(OnVectorResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnVectorResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_security_groups(m_client, s_onVectorResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetBlockDeviceMapping(OnVectorResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnVectorResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_block_device_mapping(
+ m_client, s_onVectorResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetAttachedIamRole(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_attached_iam_role(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetCredentials(
+ const StringView &iamRoleName,
+ OnCredentialsAcquired callback,
+ void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnCredentialsAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_credentials(
+ m_client, StringViewToByteCursor(iamRoleName), s_onCredentialsAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetIamProfile(OnIamProfileAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnIamProfileAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_iam_profile(m_client, s_onIamProfileAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetUserData(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_user_data(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetInstanceSignature(OnResourceAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnResourceAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_instance_signature(m_client, s_onResourceAcquired, wrappedCallbackArgs);
+ }
+
+ int ImdsClient::GetInstanceInfo(OnInstanceInfoAcquired callback, void *userData)
+ {
+ auto wrappedCallbackArgs = Aws::Crt::New<WrappedCallbackArgs<OnInstanceInfoAcquired>>(
+ m_allocator, m_allocator, callback, userData);
+ if (wrappedCallbackArgs == nullptr)
+ {
+ return AWS_OP_ERR;
+ }
+ return aws_imds_client_get_instance_info(m_client, s_onInstanceInfoAcquired, wrappedCallbackArgs);
+ }
+ } // namespace Imds
+ } // namespace Crt
+
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp b/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
new file mode 100644
index 0000000000..86a3ae73fc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/JsonObject.cpp
@@ -0,0 +1,596 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/JsonObject.h>
+
+#include <aws/crt/external/cJSON.h>
+
+#include <algorithm>
+#include <iterator>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ JsonObject::JsonObject() : m_wasParseSuccessful(true) { m_value = nullptr; }
+
+ JsonObject::JsonObject(cJSON *value)
+ : m_value(cJSON_Duplicate(value, 1 /* recurse */)), m_wasParseSuccessful(true)
+ {
+ }
+
+ JsonObject::JsonObject(const String &value) : m_wasParseSuccessful(true)
+ {
+ const char *return_parse_end;
+ m_value = cJSON_ParseWithLengthOpts(value.c_str(), value.length(), &return_parse_end, 0);
+
+ if (m_value == nullptr || cJSON_IsInvalid(m_value) == 1)
+ {
+ m_wasParseSuccessful = false;
+ m_errorMessage = "Failed to parse JSON at: ";
+ m_errorMessage += return_parse_end;
+ }
+ }
+
+ JsonObject::JsonObject(const JsonObject &value)
+ : m_value(cJSON_Duplicate(value.m_value, 1 /*recurse*/)), m_wasParseSuccessful(value.m_wasParseSuccessful),
+ m_errorMessage(value.m_errorMessage)
+ {
+ }
+
+ JsonObject::JsonObject(JsonObject &&value) noexcept
+ : m_value(value.m_value), m_wasParseSuccessful(value.m_wasParseSuccessful),
+ m_errorMessage(std::move(value.m_errorMessage))
+ {
+ value.m_value = nullptr;
+ }
+
+ void JsonObject::Destroy() { cJSON_Delete(m_value); }
+
+ JsonObject::~JsonObject() { Destroy(); }
+
+ JsonObject &JsonObject::operator=(const JsonObject &other)
+ {
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ Destroy();
+ m_value = cJSON_Duplicate(other.m_value, 1 /*recurse*/);
+ m_wasParseSuccessful = other.m_wasParseSuccessful;
+ m_errorMessage = other.m_errorMessage;
+ return *this;
+ }
+
+ JsonObject &JsonObject::operator=(JsonObject &&other) noexcept
+ {
+ if (this == &other)
+ {
+ return *this;
+ }
+
+ using std::swap;
+ swap(m_value, other.m_value);
+ swap(m_errorMessage, other.m_errorMessage);
+ m_wasParseSuccessful = other.m_wasParseSuccessful;
+ return *this;
+ }
+
+ static void AddOrReplace(cJSON *root, const char *key, cJSON *value)
+ {
+ const auto existing = cJSON_GetObjectItemCaseSensitive(root, key);
+ if (existing != nullptr)
+ {
+ cJSON_ReplaceItemInObjectCaseSensitive(root, key, value);
+ }
+ else
+ {
+ cJSON_AddItemToObject(root, key, value);
+ }
+ }
+
+ JsonObject &JsonObject::WithString(const char *key, const String &value)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ const auto val = cJSON_CreateString(value.c_str());
+ AddOrReplace(m_value, key, val);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithString(const String &key, const String &value)
+ {
+ return WithString(key.c_str(), value);
+ }
+
+ JsonObject &JsonObject::AsString(const String &value)
+ {
+ Destroy();
+ m_value = cJSON_CreateString(value.c_str());
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithBool(const char *key, bool value)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ const auto val = cJSON_CreateBool((cJSON_bool)value);
+ AddOrReplace(m_value, key, val);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithBool(const String &key, bool value) { return WithBool(key.c_str(), value); }
+
+ JsonObject &JsonObject::AsBool(bool value)
+ {
+ Destroy();
+ m_value = cJSON_CreateBool((cJSON_bool)value);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithInteger(const char *key, int value)
+ {
+ return WithDouble(key, static_cast<double>(value));
+ }
+
+ JsonObject &JsonObject::WithInteger(const String &key, int value)
+ {
+ return WithDouble(key.c_str(), static_cast<double>(value));
+ }
+
+ JsonObject &JsonObject::AsInteger(int value)
+ {
+ Destroy();
+ m_value = cJSON_CreateNumber(static_cast<double>(value));
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithInt64(const char *key, int64_t value)
+ {
+ return WithDouble(key, static_cast<double>(value));
+ }
+
+ JsonObject &JsonObject::WithInt64(const String &key, int64_t value)
+ {
+ return WithDouble(key.c_str(), static_cast<double>(value));
+ }
+
+ JsonObject &JsonObject::AsInt64(int64_t value) { return AsDouble(static_cast<double>(value)); }
+
+ JsonObject &JsonObject::WithDouble(const char *key, double value)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ const auto val = cJSON_CreateNumber(value);
+ AddOrReplace(m_value, key, val);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithDouble(const String &key, double value) { return WithDouble(key.c_str(), value); }
+
+ JsonObject &JsonObject::AsDouble(double value)
+ {
+ Destroy();
+ m_value = cJSON_CreateNumber(value);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithArray(const char *key, const Vector<String> &array)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ auto arrayValue = cJSON_CreateArray();
+ for (const auto &i : array)
+ {
+ cJSON_AddItemToArray(arrayValue, cJSON_CreateString(i.c_str()));
+ }
+
+ AddOrReplace(m_value, key, arrayValue);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithArray(const String &key, const Vector<String> &array)
+ {
+ return WithArray(key.c_str(), array);
+ }
+
+ JsonObject &JsonObject::WithArray(const String &key, const Vector<JsonObject> &array)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ auto arrayValue = cJSON_CreateArray();
+ for (const auto &i : array)
+ {
+ cJSON_AddItemToArray(arrayValue, cJSON_Duplicate(i.m_value, 1 /*recurse*/));
+ }
+
+ AddOrReplace(m_value, key.c_str(), arrayValue);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithArray(const String &key, Vector<JsonObject> &&array)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ auto arrayValue = cJSON_CreateArray();
+ for (auto &i : array)
+ {
+ cJSON_AddItemToArray(arrayValue, i.m_value);
+ i.m_value = nullptr;
+ }
+
+ AddOrReplace(m_value, key.c_str(), arrayValue);
+ return *this;
+ }
+
+ JsonObject &JsonObject::AsArray(const Vector<JsonObject> &array)
+ {
+ auto arrayValue = cJSON_CreateArray();
+ for (const auto &i : array)
+ {
+ cJSON_AddItemToArray(arrayValue, cJSON_Duplicate(i.m_value, 1 /*recurse*/));
+ }
+
+ Destroy();
+ m_value = arrayValue;
+ return *this;
+ }
+
+ JsonObject &JsonObject::AsArray(Vector<JsonObject> &&array)
+ {
+ auto arrayValue = cJSON_CreateArray();
+ for (auto &i : array)
+ {
+ cJSON_AddItemToArray(arrayValue, i.m_value);
+ i.m_value = nullptr;
+ }
+
+ Destroy();
+ m_value = arrayValue;
+ return *this;
+ }
+
+ JsonObject &JsonObject::AsNull()
+ {
+ m_value = cJSON_CreateNull();
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithObject(const char *key, const JsonObject &value)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ const auto copy =
+ value.m_value == nullptr ? cJSON_CreateObject() : cJSON_Duplicate(value.m_value, 1 /*recurse*/);
+ AddOrReplace(m_value, key, copy);
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithObject(const String &key, const JsonObject &value)
+ {
+ return WithObject(key.c_str(), value);
+ }
+
+ JsonObject &JsonObject::WithObject(const char *key, JsonObject &&value)
+ {
+ if (m_value == nullptr)
+ {
+ m_value = cJSON_CreateObject();
+ }
+
+ AddOrReplace(m_value, key, value.m_value == nullptr ? cJSON_CreateObject() : value.m_value);
+ value.m_value = nullptr;
+ return *this;
+ }
+
+ JsonObject &JsonObject::WithObject(const String &key, JsonObject &&value)
+ {
+ return WithObject(key.c_str(), std::move(value));
+ }
+
+ JsonObject &JsonObject::AsObject(const JsonObject &value)
+ {
+ *this = value;
+ return *this;
+ }
+
+ JsonObject &JsonObject::AsObject(JsonObject &&value)
+ {
+ *this = std::move(value);
+ return *this;
+ }
+
+ bool JsonObject::operator==(const JsonObject &other) const
+ {
+ return cJSON_Compare(m_value, other.m_value, 1 /*case-sensitive*/) != 0;
+ }
+
+ bool JsonObject::operator!=(const JsonObject &other) const { return !(*this == other); }
+
+ JsonView JsonObject::View() const { return *this; }
+
+ JsonView::JsonView() : m_value(nullptr) {}
+
+ JsonView::JsonView(const JsonObject &val) : m_value(val.m_value) {}
+
+ JsonView::JsonView(cJSON *val) : m_value(val) {}
+
+ JsonView &JsonView::operator=(const JsonObject &v)
+ {
+ m_value = v.m_value;
+ return *this;
+ }
+
+ JsonView &JsonView::operator=(cJSON *val)
+ {
+ m_value = val;
+ return *this;
+ }
+
+ String JsonView::GetString(const String &key) const { return GetString(key.c_str()); }
+
+ String JsonView::GetString(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ auto item = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ auto str = cJSON_GetStringValue(item);
+ return str != nullptr ? str : "";
+ }
+
+ String JsonView::AsString() const
+ {
+ const char *str = cJSON_GetStringValue(m_value);
+ if (str == nullptr)
+ {
+ return {};
+ }
+ return str;
+ }
+
+ bool JsonView::GetBool(const String &key) const { return GetBool(key.c_str()); }
+
+ bool JsonView::GetBool(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ auto item = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ AWS_ASSERT(item);
+ return cJSON_IsTrue(item) != 0;
+ }
+
+ bool JsonView::AsBool() const
+ {
+ AWS_ASSERT(cJSON_IsBool(m_value));
+ return cJSON_IsTrue(m_value) != 0;
+ }
+
+ int JsonView::GetInteger(const String &key) const { return GetInteger(key.c_str()); }
+
+ int JsonView::GetInteger(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ auto item = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ AWS_ASSERT(item);
+ return item->valueint;
+ }
+
+ int JsonView::AsInteger() const
+ {
+ AWS_ASSERT(cJSON_IsNumber(m_value)); // can be double or value larger than int_max, but at least not UB
+ return m_value->valueint;
+ }
+
+ int64_t JsonView::GetInt64(const String &key) const { return static_cast<int64_t>(GetDouble(key)); }
+
+ int64_t JsonView::GetInt64(const char *key) const { return static_cast<int64_t>(GetDouble(key)); }
+
+ int64_t JsonView::AsInt64() const
+ {
+ AWS_ASSERT(cJSON_IsNumber(m_value));
+ return static_cast<int64_t>(m_value->valuedouble);
+ }
+
+ double JsonView::GetDouble(const String &key) const { return GetDouble(key.c_str()); }
+
+ double JsonView::GetDouble(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ auto item = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ AWS_ASSERT(item);
+ return item->valuedouble;
+ }
+
+ double JsonView::AsDouble() const
+ {
+ AWS_ASSERT(cJSON_IsNumber(m_value));
+ return m_value->valuedouble;
+ }
+
+ JsonView JsonView::GetJsonObject(const String &key) const { return GetJsonObject(key.c_str()); }
+
+ JsonView JsonView::GetJsonObject(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ auto item = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ return item;
+ }
+
+ JsonObject JsonView::GetJsonObjectCopy(const String &key) const { return GetJsonObjectCopy(key.c_str()); }
+
+ JsonObject JsonView::GetJsonObjectCopy(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ /* force a deep copy */
+ return JsonObject(cJSON_GetObjectItemCaseSensitive(m_value, key));
+ }
+
+ JsonView JsonView::AsObject() const
+ {
+ AWS_ASSERT(cJSON_IsObject(m_value));
+ return m_value;
+ }
+
+ Vector<JsonView> JsonView::GetArray(const String &key) const { return GetArray(key.c_str()); }
+
+ Vector<JsonView> JsonView::GetArray(const char *key) const
+ {
+ AWS_ASSERT(m_value);
+ auto array = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ AWS_ASSERT(cJSON_IsArray(array));
+ Vector<JsonView> returnArray(static_cast<size_t>(cJSON_GetArraySize(array)));
+
+ auto element = array->child;
+ for (size_t i = 0; element != nullptr && i < returnArray.size(); ++i, element = element->next)
+ {
+ returnArray[i] = element;
+ }
+
+ return returnArray;
+ }
+
+ Vector<JsonView> JsonView::AsArray() const
+ {
+ AWS_ASSERT(cJSON_IsArray(m_value));
+ Vector<JsonView> returnArray(static_cast<size_t>(cJSON_GetArraySize(m_value)));
+
+ auto element = m_value->child;
+
+ for (size_t i = 0; element != nullptr && i < returnArray.size(); ++i, element = element->next)
+ {
+ returnArray[i] = element;
+ }
+
+ return returnArray;
+ }
+
+ Map<String, JsonView> JsonView::GetAllObjects() const
+ {
+ Map<String, JsonView> valueMap;
+ if (m_value == nullptr)
+ {
+ return valueMap;
+ }
+
+ for (auto iter = m_value->child; iter != nullptr; iter = iter->next)
+ {
+ valueMap.emplace(std::make_pair(String(iter->string), JsonView(iter)));
+ }
+
+ return valueMap;
+ }
+
+ bool JsonView::ValueExists(const String &key) const { return ValueExists(key.c_str()); }
+
+ bool JsonView::ValueExists(const char *key) const
+ {
+ if (cJSON_IsObject(m_value) == 0)
+ {
+ return false;
+ }
+
+ auto item = cJSON_GetObjectItemCaseSensitive(m_value, key);
+ return !(item == nullptr || cJSON_IsNull(item) != 0);
+ }
+
+ bool JsonView::KeyExists(const String &key) const { return KeyExists(key.c_str()); }
+
+ bool JsonView::KeyExists(const char *key) const
+ {
+ if (cJSON_IsObject(m_value) == 0)
+ {
+ return false;
+ }
+
+ return cJSON_GetObjectItemCaseSensitive(m_value, key) != nullptr;
+ }
+
+ bool JsonView::IsObject() const { return cJSON_IsObject(m_value) != 0; }
+
+ bool JsonView::IsBool() const { return cJSON_IsBool(m_value) != 0; }
+
+ bool JsonView::IsString() const { return cJSON_IsString(m_value) != 0; }
+
+ bool JsonView::IsIntegerType() const
+ {
+ if (cJSON_IsNumber(m_value) == 0)
+ {
+ return false;
+ }
+
+ return m_value->valuedouble == static_cast<int64_t>(m_value->valuedouble);
+ }
+
+ bool JsonView::IsFloatingPointType() const
+ {
+ if (cJSON_IsNumber(m_value) == 0)
+ {
+ return false;
+ }
+
+ return m_value->valuedouble != static_cast<int64_t>(m_value->valuedouble);
+ }
+
+ bool JsonView::IsListType() const { return cJSON_IsArray(m_value) != 0; }
+
+ bool JsonView::IsNull() const { return cJSON_IsNull(m_value) != 0; }
+
+ String JsonView::WriteCompact(bool treatAsObject) const
+ {
+ if (m_value == nullptr)
+ {
+ if (treatAsObject)
+ {
+ return "{}";
+ }
+ return "";
+ }
+
+ auto temp = cJSON_PrintUnformatted(m_value);
+ String out(temp);
+ cJSON_free(temp);
+ return out;
+ }
+
+ String JsonView::WriteReadable(bool treatAsObject) const
+ {
+ if (m_value == nullptr)
+ {
+ if (treatAsObject)
+ {
+ return "{\n}\n";
+ }
+ return "";
+ }
+
+ auto temp = cJSON_Print(m_value);
+ String out(temp);
+ cJSON_free(temp);
+ return out;
+ }
+
+ JsonObject JsonView::Materialize() const { return m_value; }
+ } // namespace Crt
+} // namespace Aws
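
A minimal usage sketch of the JsonObject/JsonView wrapper added above (illustrative only, not part of the patch; it assumes an ApiHandle is created first to initialize the CRT):

    #include <aws/crt/Api.h>
    #include <aws/crt/JsonObject.h>

    #include <iostream>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle; // initializes the CRT

        // Build a document with the fluent With* setters.
        Aws::Crt::JsonObject doc;
        doc.WithString("service", "ydb").WithInteger("port", 2135).WithBool("secure", true);

        // Serialize, re-parse, and read values back through a JsonView.
        Aws::Crt::String text = doc.View().WriteCompact(true);
        Aws::Crt::JsonObject parsed(text);
        Aws::Crt::JsonView view = parsed.View();

        if (view.KeyExists("port"))
        {
            std::cout << view.GetString("service") << ":" << view.GetInteger("port") << std::endl;
        }
        return 0;
    }
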
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp b/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
new file mode 100644
index 0000000000..5c6984f628
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/StringUtils.cpp
@@ -0,0 +1,15 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/StringUtils.h>
+
+#include <aws/common/hash_table.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ size_t HashString(const char *str) noexcept { return (size_t)aws_hash_c_string(str); }
+ } // namespace Crt
+} // namespace Aws
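
The lone helper in StringUtils.cpp wraps aws_hash_c_string; a trivial sketch of its use:

    #include <aws/crt/StringUtils.h>

    #include <cstdio>

    int main()
    {
        // Hash a C string with the same function used by the CRT hash tables.
        size_t h = Aws::Crt::HashString("endpoint-cache-key");
        std::printf("%zu\n", h);
        return 0;
    }
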
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp b/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
new file mode 100644
index 0000000000..89f0626242
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/Types.cpp
@@ -0,0 +1,103 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Types.h>
+
+#include <aws/common/encoding.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ ByteBuf ByteBufFromCString(const char *str) noexcept { return aws_byte_buf_from_c_str(str); }
+
+ ByteBuf ByteBufFromEmptyArray(const uint8_t *array, size_t len) noexcept
+ {
+ return aws_byte_buf_from_empty_array(array, len);
+ }
+
+ ByteBuf ByteBufFromArray(const uint8_t *array, size_t capacity) noexcept
+ {
+ return aws_byte_buf_from_array(array, capacity);
+ }
+
+ ByteBuf ByteBufNewCopy(Allocator *alloc, const uint8_t *array, size_t len)
+ {
+ ByteBuf retVal;
+ ByteBuf src = aws_byte_buf_from_array(array, len);
+ aws_byte_buf_init_copy(&retVal, alloc, &src);
+ return retVal;
+ }
+
+ void ByteBufDelete(ByteBuf &buf) { aws_byte_buf_clean_up(&buf); }
+
+ ByteCursor ByteCursorFromCString(const char *str) noexcept { return aws_byte_cursor_from_c_str(str); }
+
+ ByteCursor ByteCursorFromString(const Crt::String &str) noexcept
+ {
+ return aws_byte_cursor_from_array((const void *)str.data(), str.length());
+ }
+
+ ByteCursor ByteCursorFromStringView(const Crt::StringView &str) noexcept
+ {
+ return aws_byte_cursor_from_array((const void *)str.data(), str.length());
+ }
+
+ ByteCursor ByteCursorFromByteBuf(const ByteBuf &buf) noexcept { return aws_byte_cursor_from_buf(&buf); }
+
+ ByteCursor ByteCursorFromArray(const uint8_t *array, size_t len) noexcept
+ {
+ return aws_byte_cursor_from_array(array, len);
+ }
+
+ Vector<uint8_t> Base64Decode(const String &decode)
+ {
+ ByteCursor toDecode = ByteCursorFromString(decode);
+
+ size_t allocation_size = 0;
+
+ if (aws_base64_compute_decoded_len(&toDecode, &allocation_size) == AWS_OP_SUCCESS)
+ {
+ Vector<uint8_t> output(allocation_size, 0x00);
+ ByteBuf tempBuf = aws_byte_buf_from_array(output.data(), output.size());
+ tempBuf.len = 0;
+
+ if (aws_base64_decode(&toDecode, &tempBuf) == AWS_OP_SUCCESS)
+ {
+ return output;
+ }
+ }
+
+ return {};
+ }
+
+ String Base64Encode(const Vector<uint8_t> &encode)
+ {
+ ByteCursor toEncode = aws_byte_cursor_from_array((const void *)encode.data(), encode.size());
+
+ size_t allocation_size = 0;
+
+ if (aws_base64_compute_encoded_len(encode.size(), &allocation_size) == AWS_OP_SUCCESS)
+ {
+ String output(allocation_size, 0x00);
+ ByteBuf tempBuf = aws_byte_buf_from_array(output.data(), output.size());
+ tempBuf.len = 0;
+
+ if (aws_base64_encode(&toEncode, &tempBuf) == AWS_OP_SUCCESS)
+ {
+ // encoding appends a null terminator, and accounts for it in the encoded length,
+ // which makes the string 1 character too long
+ if (output.back() == 0)
+ {
+ output.pop_back();
+ }
+ return output;
+ }
+ }
+
+ return {};
+ }
+
+ } // namespace Crt
+} // namespace Aws
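
The byte-cursor and Base64 helpers in Types.cpp compose as follows (illustrative sketch; "eWRi" is simply the Base64 form of the three bytes used here):

    #include <aws/crt/Api.h>
    #include <aws/crt/Types.h>

    #include <cstdint>
    #include <iostream>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        Aws::Crt::Vector<uint8_t> raw = {'y', 'd', 'b'};
        Aws::Crt::String encoded = Aws::Crt::Base64Encode(raw);              // "eWRi"
        Aws::Crt::Vector<uint8_t> decoded = Aws::Crt::Base64Decode(encoded); // back to the original bytes

        // ByteCursor is a non-owning view, so 'encoded' must outlive the cursor.
        Aws::Crt::ByteCursor cursor = Aws::Crt::ByteCursorFromString(encoded);

        std::cout << encoded << " decodes to " << decoded.size() << " bytes; cursor.len = "
                  << cursor.len << std::endl;
        return 0;
    }
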
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp b/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
new file mode 100644
index 0000000000..c985ea2778
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/UUID.cpp
@@ -0,0 +1,54 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/UUID.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ UUID::UUID() noexcept : m_good(false)
+ {
+ if (aws_uuid_init(&m_uuid) == AWS_OP_SUCCESS)
+ {
+ m_good = true;
+ }
+ }
+
+ UUID::UUID(const String &str) noexcept : m_good(false)
+ {
+ auto strCur = aws_byte_cursor_from_c_str(str.c_str());
+ if (aws_uuid_init_from_str(&m_uuid, &strCur) == AWS_OP_SUCCESS)
+ {
+ m_good = true;
+ }
+ }
+
+ UUID &UUID::operator=(const String &str) noexcept
+ {
+ *this = UUID(str);
+ return *this;
+ }
+
+ bool UUID::operator==(const UUID &other) noexcept { return aws_uuid_equals(&m_uuid, &other.m_uuid); }
+
+ bool UUID::operator!=(const UUID &other) noexcept { return !aws_uuid_equals(&m_uuid, &other.m_uuid); }
+
+ String UUID::ToString() const
+ {
+ String uuidStr;
+ uuidStr.resize(AWS_UUID_STR_LEN);
+ auto outBuf = ByteBufFromEmptyArray(reinterpret_cast<const uint8_t *>(uuidStr.data()), uuidStr.capacity());
+ aws_uuid_to_str(&m_uuid, &outBuf);
+ uuidStr.resize(outBuf.len);
+ return uuidStr;
+ }
+
+ UUID::operator String() const { return ToString(); }
+
+ UUID::operator ByteBuf() const noexcept { return ByteBufFromArray(m_uuid.uuid_data, sizeof(m_uuid.uuid_data)); }
+
+ int UUID::GetLastError() const noexcept { return aws_last_error(); }
+ } // namespace Crt
+} // namespace Aws
\ No newline at end of file
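
A short round-trip sketch for the UUID wrapper (illustrative only):

    #include <aws/crt/Api.h>
    #include <aws/crt/UUID.h>

    #include <iostream>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        Aws::Crt::UUID id;                     // freshly generated
        Aws::Crt::String text = id.ToString(); // canonical 36-character form

        Aws::Crt::UUID reparsed(text);         // parse it back from the string
        std::cout << text << " round-trips: " << (id == reparsed ? "yes" : "no") << std::endl;
        return 0;
    }
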
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp b/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
new file mode 100644
index 0000000000..77e40c61a9
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/auth/Credentials.cpp
@@ -0,0 +1,478 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/auth/Credentials.h>
+
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/crt/http/HttpProxyStrategy.h>
+
+#include <aws/auth/credentials.h>
+#include <aws/common/string.h>
+
+#include <algorithm>
+#include <aws/http/connection.h>
+
+#include <aws/crt/Api.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Auth
+ {
+ Credentials::Credentials(const aws_credentials *credentials) noexcept : m_credentials(credentials)
+ {
+ if (credentials != nullptr)
+ {
+ aws_credentials_acquire(credentials);
+ }
+ }
+
+ Credentials::Credentials(
+ ByteCursor access_key_id,
+ ByteCursor secret_access_key,
+ ByteCursor session_token,
+ uint64_t expiration_timepoint_in_seconds,
+ Allocator *allocator) noexcept
+ : m_credentials(aws_credentials_new(
+ allocator,
+ access_key_id,
+ secret_access_key,
+ session_token,
+ expiration_timepoint_in_seconds))
+ {
+ }
+
+ Credentials::Credentials(Allocator *allocator) noexcept
+ : m_credentials(aws_credentials_new_anonymous(allocator))
+ {
+ }
+
+ Credentials::~Credentials()
+ {
+ aws_credentials_release(m_credentials);
+ m_credentials = nullptr;
+ }
+
+ ByteCursor Credentials::GetAccessKeyId() const noexcept
+ {
+ if (m_credentials)
+ {
+ return aws_credentials_get_access_key_id(m_credentials);
+ }
+ else
+ {
+ return ByteCursor{0, nullptr};
+ }
+ }
+
+ ByteCursor Credentials::GetSecretAccessKey() const noexcept
+ {
+ if (m_credentials)
+ {
+ return aws_credentials_get_secret_access_key(m_credentials);
+ }
+ else
+ {
+ return ByteCursor{0, nullptr};
+ }
+ }
+
+ ByteCursor Credentials::GetSessionToken() const noexcept
+ {
+ if (m_credentials)
+ {
+ return aws_credentials_get_session_token(m_credentials);
+ }
+ else
+ {
+ return ByteCursor{0, nullptr};
+ }
+ }
+
+ uint64_t Credentials::GetExpirationTimepointInSeconds() const noexcept
+ {
+ if (m_credentials)
+ {
+ return aws_credentials_get_expiration_timepoint_seconds(m_credentials);
+ }
+ else
+ {
+ return 0;
+ }
+ }
+
+ Credentials::operator bool() const noexcept { return m_credentials != nullptr; }
+
+ CredentialsProvider::CredentialsProvider(aws_credentials_provider *provider, Allocator *allocator) noexcept
+ : m_allocator(allocator), m_provider(provider)
+ {
+ }
+
+ CredentialsProvider::~CredentialsProvider()
+ {
+ if (m_provider)
+ {
+ aws_credentials_provider_release(m_provider);
+ m_provider = nullptr;
+ }
+ }
+
+ struct CredentialsProviderCallbackArgs
+ {
+ CredentialsProviderCallbackArgs() = default;
+
+ OnCredentialsResolved m_onCredentialsResolved;
+ std::shared_ptr<const CredentialsProvider> m_provider;
+ };
+
+ void CredentialsProvider::s_onCredentialsResolved(
+ aws_credentials *credentials,
+ int error_code,
+ void *user_data)
+ {
+ CredentialsProviderCallbackArgs *callbackArgs =
+ static_cast<CredentialsProviderCallbackArgs *>(user_data);
+
+ auto credentialsPtr =
+ Aws::Crt::MakeShared<Credentials>(callbackArgs->m_provider->m_allocator, credentials);
+
+ callbackArgs->m_onCredentialsResolved(credentialsPtr, error_code);
+
+ Aws::Crt::Delete(callbackArgs, callbackArgs->m_provider->m_allocator);
+ }
+
+ bool CredentialsProvider::GetCredentials(const OnCredentialsResolved &onCredentialsResolved) const
+ {
+ if (m_provider == nullptr)
+ {
+ return false;
+ }
+
+ auto callbackArgs = Aws::Crt::New<CredentialsProviderCallbackArgs>(m_allocator);
+ if (callbackArgs == nullptr)
+ {
+ return false;
+ }
+
+ callbackArgs->m_provider = std::static_pointer_cast<const CredentialsProvider>(shared_from_this());
+ callbackArgs->m_onCredentialsResolved = onCredentialsResolved;
+
+ aws_credentials_provider_get_credentials(m_provider, s_onCredentialsResolved, callbackArgs);
+
+ return true;
+ }
+
+ static std::shared_ptr<ICredentialsProvider> s_CreateWrappedProvider(
+ struct aws_credentials_provider *raw_provider,
+ Allocator *allocator)
+ {
+ if (raw_provider == nullptr)
+ {
+ return nullptr;
+ }
+
+ /* Switch to some kind of make_shared/allocate_shared when allocator support improves */
+ auto provider = Aws::Crt::MakeShared<CredentialsProvider>(allocator, raw_provider, allocator);
+ return std::static_pointer_cast<ICredentialsProvider>(provider);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderStatic(
+ const CredentialsProviderStaticConfig &config,
+ Allocator *allocator)
+ {
+ aws_credentials_provider_static_options staticOptions;
+ AWS_ZERO_STRUCT(staticOptions);
+ staticOptions.access_key_id = config.AccessKeyId;
+ staticOptions.secret_access_key = config.SecretAccessKey;
+ staticOptions.session_token = config.SessionToken;
+ return s_CreateWrappedProvider(
+ aws_credentials_provider_new_static(allocator, &staticOptions), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderAnonymous(
+ Allocator *allocator)
+ {
+ aws_credentials_provider_shutdown_options shutdown_options;
+ AWS_ZERO_STRUCT(shutdown_options);
+
+ return s_CreateWrappedProvider(
+ aws_credentials_provider_new_anonymous(allocator, &shutdown_options), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderEnvironment(
+ Allocator *allocator)
+ {
+ aws_credentials_provider_environment_options environmentOptions;
+ AWS_ZERO_STRUCT(environmentOptions);
+ return s_CreateWrappedProvider(
+ aws_credentials_provider_new_environment(allocator, &environmentOptions), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderProfile(
+ const CredentialsProviderProfileConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_profile_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.config_file_name_override = config.ConfigFileNameOverride;
+ raw_config.credentials_file_name_override = config.CredentialsFileNameOverride;
+ raw_config.profile_name_override = config.ProfileNameOverride;
+ raw_config.bootstrap = config.Bootstrap ? config.Bootstrap->GetUnderlyingHandle() : nullptr;
+ raw_config.tls_ctx = config.TlsContext ? config.TlsContext->GetUnderlyingHandle() : nullptr;
+
+ return s_CreateWrappedProvider(aws_credentials_provider_new_profile(allocator, &raw_config), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderImds(
+ const CredentialsProviderImdsConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_imds_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ if (config.Bootstrap != nullptr)
+ {
+ raw_config.bootstrap = config.Bootstrap->GetUnderlyingHandle();
+ }
+ else
+ {
+ raw_config.bootstrap = ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ }
+
+ return s_CreateWrappedProvider(aws_credentials_provider_new_imds(allocator, &raw_config), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderChain(
+ const CredentialsProviderChainConfig &config,
+ Allocator *allocator)
+ {
+ Vector<aws_credentials_provider *> providers;
+ providers.reserve(config.Providers.size());
+
+ std::for_each(
+ config.Providers.begin(),
+ config.Providers.end(),
+ [&](const std::shared_ptr<ICredentialsProvider> &provider) {
+ providers.push_back(provider->GetUnderlyingHandle());
+ });
+
+ struct aws_credentials_provider_chain_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.providers = providers.data();
+ raw_config.provider_count = config.Providers.size();
+
+ return s_CreateWrappedProvider(aws_credentials_provider_new_chain(allocator, &raw_config), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderCached(
+ const CredentialsProviderCachedConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_cached_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.source = config.Provider->GetUnderlyingHandle();
+ raw_config.refresh_time_in_milliseconds = config.CachedCredentialTTL.count();
+
+ return s_CreateWrappedProvider(aws_credentials_provider_new_cached(allocator, &raw_config), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderChainDefault(
+ const CredentialsProviderChainDefaultConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_chain_default_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.bootstrap =
+ config.Bootstrap ? config.Bootstrap->GetUnderlyingHandle()
+ : ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ raw_config.tls_ctx = config.TlsContext ? config.TlsContext->GetUnderlyingHandle() : nullptr;
+
+ return s_CreateWrappedProvider(
+ aws_credentials_provider_new_chain_default(allocator, &raw_config), allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderX509(
+ const CredentialsProviderX509Config &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_x509_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.bootstrap =
+ config.Bootstrap ? config.Bootstrap->GetUnderlyingHandle()
+ : ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ raw_config.tls_connection_options = config.TlsOptions.GetUnderlyingHandle();
+ raw_config.thing_name = aws_byte_cursor_from_c_str(config.ThingName.c_str());
+ raw_config.role_alias = aws_byte_cursor_from_c_str(config.RoleAlias.c_str());
+ raw_config.endpoint = aws_byte_cursor_from_c_str(config.Endpoint.c_str());
+
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+ if (config.ProxyOptions.has_value())
+ {
+ const Http::HttpClientConnectionProxyOptions &proxy_config = config.ProxyOptions.value();
+ proxy_config.InitializeRawProxyOptions(proxy_options);
+
+ raw_config.proxy_options = &proxy_options;
+ }
+
+ return s_CreateWrappedProvider(aws_credentials_provider_new_x509(allocator, &raw_config), allocator);
+ }
+
+ struct DelegateCredentialsProviderCallbackArgs
+ {
+ DelegateCredentialsProviderCallbackArgs() = default;
+
+ Allocator *allocator;
+ GetCredentialsHandler m_Handler;
+ };
+
+ static int s_onDelegateGetCredentials(
+ void *delegate_user_data,
+ aws_on_get_credentials_callback_fn callback,
+ void *callback_user_data)
+ {
+ auto args = static_cast<DelegateCredentialsProviderCallbackArgs *>(delegate_user_data);
+ auto creds = args->m_Handler();
+ struct aws_credentials *m_credentials = (struct aws_credentials *)(void *)creds->GetUnderlyingHandle();
+ callback(m_credentials, AWS_ERROR_SUCCESS, callback_user_data);
+ return AWS_OP_SUCCESS;
+ }
+
+ static void s_onDelegateShutdownComplete(void *user_data)
+ {
+ auto args = static_cast<DelegateCredentialsProviderCallbackArgs *>(user_data);
+ Aws::Crt::Delete(args, args->allocator);
+ }
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderDelegate(
+ const CredentialsProviderDelegateConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_delegate_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ auto delegateCallbackArgs = Aws::Crt::New<DelegateCredentialsProviderCallbackArgs>(allocator);
+ delegateCallbackArgs->allocator = allocator;
+ delegateCallbackArgs->m_Handler = config.Handler;
+ raw_config.delegate_user_data = delegateCallbackArgs;
+ raw_config.get_credentials = s_onDelegateGetCredentials;
+ aws_credentials_provider_shutdown_options options;
+ options.shutdown_callback = s_onDelegateShutdownComplete;
+ options.shutdown_user_data = delegateCallbackArgs;
+ raw_config.shutdown_options = options;
+ return s_CreateWrappedProvider(
+ aws_credentials_provider_new_delegate(allocator, &raw_config), allocator);
+ }
+
+ CredentialsProviderCognitoConfig::CredentialsProviderCognitoConfig() : Bootstrap(nullptr) {}
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderCognito(
+ const CredentialsProviderCognitoConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_credentials_provider_cognito_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.endpoint = aws_byte_cursor_from_c_str(config.Endpoint.c_str());
+ raw_config.identity = aws_byte_cursor_from_c_str(config.Identity.c_str());
+
+ struct aws_byte_cursor custom_role_arn_cursor;
+ AWS_ZERO_STRUCT(custom_role_arn_cursor);
+ if (config.CustomRoleArn.has_value())
+ {
+ custom_role_arn_cursor = aws_byte_cursor_from_c_str(config.CustomRoleArn.value().c_str());
+ raw_config.custom_role_arn = &custom_role_arn_cursor;
+ }
+
+ Vector<struct aws_cognito_identity_provider_token_pair> logins;
+ if (config.Logins.has_value())
+ {
+ for (const auto &login_pair : config.Logins.value())
+ {
+ struct aws_cognito_identity_provider_token_pair cursor_login_pair;
+ AWS_ZERO_STRUCT(cursor_login_pair);
+
+ cursor_login_pair.identity_provider_name =
+ aws_byte_cursor_from_c_str(login_pair.IdentityProviderName.c_str());
+ cursor_login_pair.identity_provider_token =
+ aws_byte_cursor_from_c_str(login_pair.IdentityProviderToken.c_str());
+
+ logins.push_back(cursor_login_pair);
+ }
+
+ raw_config.login_count = logins.size();
+ raw_config.logins = logins.data();
+ }
+
+ raw_config.bootstrap =
+ config.Bootstrap ? config.Bootstrap->GetUnderlyingHandle()
+ : ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+
+ raw_config.tls_ctx = config.TlsCtx.GetUnderlyingHandle();
+
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+ if (config.ProxyOptions.has_value())
+ {
+ const Http::HttpClientConnectionProxyOptions &proxy_config = config.ProxyOptions.value();
+ proxy_config.InitializeRawProxyOptions(proxy_options);
+
+ raw_config.http_proxy_options = &proxy_options;
+ }
+
+ return s_CreateWrappedProvider(
+ aws_credentials_provider_new_cognito_caching(allocator, &raw_config), allocator);
+ }
+
+ CredentialsProviderSTSConfig::CredentialsProviderSTSConfig() : Bootstrap(nullptr) {}
+
+ std::shared_ptr<ICredentialsProvider> CredentialsProvider::CreateCredentialsProviderSTS(
+ const CredentialsProviderSTSConfig &config,
+ Allocator *allocator)
+ {
+ if (config.Provider == nullptr)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_AUTH_CREDENTIALS_PROVIDER,
+ "Failed to build STS credentials provider - missing required 'Provider' configuration "
+ "parameter");
+ return nullptr;
+ }
+
+ struct aws_credentials_provider_sts_options raw_config;
+ AWS_ZERO_STRUCT(raw_config);
+
+ raw_config.creds_provider = config.Provider->GetUnderlyingHandle();
+ raw_config.role_arn = aws_byte_cursor_from_c_str(config.RoleArn.c_str());
+ raw_config.session_name = aws_byte_cursor_from_c_str(config.SessionName.c_str());
+ raw_config.duration_seconds = config.DurationSeconds;
+
+ raw_config.bootstrap =
+ config.Bootstrap ? config.Bootstrap->GetUnderlyingHandle()
+ : ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+
+ raw_config.tls_ctx = config.TlsCtx.GetUnderlyingHandle();
+
+ struct aws_http_proxy_options proxy_options;
+ AWS_ZERO_STRUCT(proxy_options);
+ if (config.ProxyOptions.has_value())
+ {
+ const Http::HttpClientConnectionProxyOptions &proxy_config = config.ProxyOptions.value();
+ proxy_config.InitializeRawProxyOptions(proxy_options);
+
+ raw_config.http_proxy_options = &proxy_options;
+ }
+
+ return s_CreateWrappedProvider(aws_credentials_provider_new_sts(allocator, &raw_config), allocator);
+ }
+ } // namespace Auth
+ } // namespace Crt
+} // namespace Aws
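
As a sketch of the provider API implemented above, a static provider can be created and queried like this. The key material is a placeholder, and the callback parameter types are inferred from how s_onCredentialsResolved invokes the stored callback:

    #include <aws/crt/Api.h>
    #include <aws/crt/auth/Credentials.h>

    #include <iostream>
    #include <memory>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        Aws::Crt::Auth::CredentialsProviderStaticConfig config;
        config.AccessKeyId = Aws::Crt::ByteCursorFromCString("AKIDEXAMPLE");        // placeholder
        config.SecretAccessKey = Aws::Crt::ByteCursorFromCString("example-secret"); // placeholder
        config.SessionToken = Aws::Crt::ByteCursor{0, nullptr};

        auto provider = Aws::Crt::Auth::CredentialsProvider::CreateCredentialsProviderStatic(
            config, Aws::Crt::ApiAllocator());

        if (provider)
        {
            // Resolution is reported through the callback; the static provider needs no network call.
            provider->GetCredentials(
                [](std::shared_ptr<Aws::Crt::Auth::Credentials> credentials, int errorCode) {
                    if (errorCode == 0 && credentials && *credentials)
                    {
                        std::cout << "resolved key id of length "
                                  << credentials->GetAccessKeyId().len << std::endl;
                    }
                });
        }
        return 0;
    }
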
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp b/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
new file mode 100644
index 0000000000..9586b85fab
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/auth/Sigv4Signing.cpp
@@ -0,0 +1,274 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/auth/Sigv4Signing.h>
+
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+
+#include <aws/auth/signable.h>
+#include <aws/auth/signing.h>
+#include <aws/auth/signing_result.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Auth
+ {
+ namespace SignedBodyValue
+ {
+ const char *EmptySha256 = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855";
+ const char *EmptySha256Str() { return EmptySha256; }
+
+ const char *UnsignedPayload = "UNSIGNED-PAYLOAD";
+ const char *UnsignedPayloadStr() { return UnsignedPayload; }
+
+ const char *StreamingAws4HmacSha256Payload = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
+ const char *StreamingAws4HmacSha256PayloadStr() { return StreamingAws4HmacSha256Payload; }
+
+ const char *StreamingAws4HmacSha256Events = "STREAMING-AWS4-HMAC-SHA256-EVENTS";
+ const char *StreamingAws4HmacSha256EventsStr() { return StreamingAws4HmacSha256Events; }
+ } // namespace SignedBodyValue
+
+ AwsSigningConfig::AwsSigningConfig(Allocator *allocator)
+ : ISigningConfig(), m_allocator(allocator), m_credentialsProvider(nullptr), m_credentials(nullptr)
+ {
+ AWS_ZERO_STRUCT(m_config);
+
+ SetSigningAlgorithm(SigningAlgorithm::SigV4);
+ SetSignatureType(SignatureType::HttpRequestViaHeaders);
+ SetShouldNormalizeUriPath(true);
+ SetUseDoubleUriEncode(true);
+ SetOmitSessionToken(false);
+ SetSignedBodyHeader(SignedBodyHeaderType::None);
+ SetSigningTimepoint(DateTime::Now());
+ SetExpirationInSeconds(0);
+ m_config.config_type = AWS_SIGNING_CONFIG_AWS;
+ }
+
+ AwsSigningConfig::~AwsSigningConfig() { m_allocator = nullptr; }
+
+ SigningAlgorithm AwsSigningConfig::GetSigningAlgorithm() const noexcept
+ {
+ return static_cast<SigningAlgorithm>(m_config.algorithm);
+ }
+
+ void AwsSigningConfig::SetSigningAlgorithm(SigningAlgorithm algorithm) noexcept
+ {
+ m_config.algorithm = static_cast<aws_signing_algorithm>(algorithm);
+ }
+
+ SignatureType AwsSigningConfig::GetSignatureType() const noexcept
+ {
+ return static_cast<SignatureType>(m_config.signature_type);
+ }
+
+ void AwsSigningConfig::SetSignatureType(SignatureType signatureType) noexcept
+ {
+ m_config.signature_type = static_cast<aws_signature_type>(signatureType);
+ }
+
+ const Crt::String &AwsSigningConfig::GetRegion() const noexcept { return m_signingRegion; }
+
+ void AwsSigningConfig::SetRegion(const Crt::String &region) noexcept
+ {
+ m_signingRegion = region;
+ m_config.region = ByteCursorFromCString(m_signingRegion.c_str());
+ }
+
+ const Crt::String &AwsSigningConfig::GetService() const noexcept { return m_serviceName; }
+
+ void AwsSigningConfig::SetService(const Crt::String &service) noexcept
+ {
+ m_serviceName = service;
+ m_config.service = ByteCursorFromCString(m_serviceName.c_str());
+ }
+
+ DateTime AwsSigningConfig::GetSigningTimepoint() const noexcept
+ {
+ return {aws_date_time_as_millis(&m_config.date)};
+ }
+
+ void AwsSigningConfig::SetSigningTimepoint(const DateTime &date) noexcept
+ {
+ aws_date_time_init_epoch_millis(&m_config.date, date.Millis());
+ }
+
+ bool AwsSigningConfig::GetUseDoubleUriEncode() const noexcept
+ {
+ return m_config.flags.use_double_uri_encode;
+ }
+
+ void AwsSigningConfig::SetUseDoubleUriEncode(bool useDoubleUriEncode) noexcept
+ {
+ m_config.flags.use_double_uri_encode = useDoubleUriEncode;
+ }
+
+ bool AwsSigningConfig::GetShouldNormalizeUriPath() const noexcept
+ {
+ return m_config.flags.should_normalize_uri_path;
+ }
+
+ void AwsSigningConfig::SetShouldNormalizeUriPath(bool shouldNormalizeUriPath) noexcept
+ {
+ m_config.flags.should_normalize_uri_path = shouldNormalizeUriPath;
+ }
+
+ bool AwsSigningConfig::GetOmitSessionToken() const noexcept { return m_config.flags.omit_session_token; }
+
+ void AwsSigningConfig::SetOmitSessionToken(bool omitSessionToken) noexcept
+ {
+ m_config.flags.omit_session_token = omitSessionToken;
+ }
+
+ ShouldSignHeaderCb AwsSigningConfig::GetShouldSignHeaderCallback() const noexcept
+ {
+ return m_config.should_sign_header;
+ }
+
+ void AwsSigningConfig::SetShouldSignHeaderCallback(ShouldSignHeaderCb shouldSignHeaderCb) noexcept
+ {
+ m_config.should_sign_header = shouldSignHeaderCb;
+ }
+
+ void *AwsSigningConfig::GetShouldSignHeaderUserData() const noexcept
+ {
+ return m_config.should_sign_header_ud;
+ }
+
+ void AwsSigningConfig::SetShouldSignHeaderUserData(void *userData) noexcept
+ {
+ m_config.should_sign_header_ud = userData;
+ }
+
+ const Crt::String &AwsSigningConfig::GetSignedBodyValue() const noexcept { return m_signedBodyValue; }
+
+ void AwsSigningConfig::SetSignedBodyValue(const Crt::String &signedBodyValue) noexcept
+ {
+ m_signedBodyValue = signedBodyValue;
+ m_config.signed_body_value = ByteCursorFromString(m_signedBodyValue);
+ }
+
+ SignedBodyHeaderType AwsSigningConfig::GetSignedBodyHeader() const noexcept
+ {
+ return static_cast<SignedBodyHeaderType>(m_config.signed_body_header);
+ }
+
+ void AwsSigningConfig::SetSignedBodyHeader(SignedBodyHeaderType signedBodyHeader) noexcept
+ {
+ m_config.signed_body_header = static_cast<enum aws_signed_body_header_type>(signedBodyHeader);
+ }
+
+ uint64_t AwsSigningConfig::GetExpirationInSeconds() const noexcept
+ {
+ return m_config.expiration_in_seconds;
+ }
+
+ void AwsSigningConfig::SetExpirationInSeconds(uint64_t expirationInSeconds) noexcept
+ {
+ m_config.expiration_in_seconds = expirationInSeconds;
+ }
+
+ const std::shared_ptr<ICredentialsProvider> &AwsSigningConfig::GetCredentialsProvider() const noexcept
+ {
+ return m_credentialsProvider;
+ }
+
+ void AwsSigningConfig::SetCredentialsProvider(
+ const std::shared_ptr<ICredentialsProvider> &credsProvider) noexcept
+ {
+ m_credentialsProvider = credsProvider;
+ m_config.credentials_provider = m_credentialsProvider->GetUnderlyingHandle();
+ }
+
+ const std::shared_ptr<Credentials> &AwsSigningConfig::GetCredentials() const noexcept
+ {
+ return m_credentials;
+ }
+
+ void AwsSigningConfig::SetCredentials(const std::shared_ptr<Credentials> &credentials) noexcept
+ {
+ m_credentials = credentials;
+ m_config.credentials = m_credentials->GetUnderlyingHandle();
+ }
+
+ const struct aws_signing_config_aws *AwsSigningConfig::GetUnderlyingHandle() const noexcept
+ {
+ return &m_config;
+ }
+
+ /////////////////////////////////////////////////////////////////////////////////////////////
+
+ Sigv4HttpRequestSigner::Sigv4HttpRequestSigner(Aws::Crt::Allocator *allocator)
+ : IHttpRequestSigner(), m_allocator(allocator)
+ {
+ }
+
+ struct HttpSignerCallbackData
+ {
+ HttpSignerCallbackData() : Alloc(nullptr) {}
+ Allocator *Alloc;
+ ScopedResource<struct aws_signable> Signable;
+ OnHttpRequestSigningComplete OnRequestSigningComplete;
+ std::shared_ptr<Http::HttpRequest> Request;
+ };
+
+ static void s_http_signing_complete_fn(struct aws_signing_result *result, int errorCode, void *userdata)
+ {
+ auto cbData = reinterpret_cast<HttpSignerCallbackData *>(userdata);
+
+ if (errorCode == AWS_OP_SUCCESS)
+ {
+ aws_apply_signing_result_to_http_request(
+ cbData->Request->GetUnderlyingMessage(), cbData->Alloc, result);
+ }
+
+ cbData->OnRequestSigningComplete(cbData->Request, errorCode);
+ Crt::Delete(cbData, cbData->Alloc);
+ }
+
+ bool Sigv4HttpRequestSigner::SignRequest(
+ const std::shared_ptr<Aws::Crt::Http::HttpRequest> &request,
+ const ISigningConfig &config,
+ const OnHttpRequestSigningComplete &completionCallback)
+ {
+ if (config.GetType() != SigningConfigType::Aws)
+ {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return false;
+ }
+
+ auto awsSigningConfig = static_cast<const AwsSigningConfig *>(&config);
+
+ if (!awsSigningConfig->GetCredentialsProvider() && !awsSigningConfig->GetCredentials())
+ {
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return false;
+ }
+
+ auto signerCallbackData = Crt::New<HttpSignerCallbackData>(m_allocator);
+
+ if (!signerCallbackData)
+ {
+ return false;
+ }
+
+ signerCallbackData->Alloc = m_allocator;
+ signerCallbackData->OnRequestSigningComplete = completionCallback;
+ signerCallbackData->Request = request;
+ signerCallbackData->Signable = ScopedResource<struct aws_signable>(
+ aws_signable_new_http_request(m_allocator, request->GetUnderlyingMessage()), aws_signable_destroy);
+
+ return aws_sign_request_aws(
+ m_allocator,
+ signerCallbackData->Signable.get(),
+ (aws_signing_config_base *)awsSigningConfig->GetUnderlyingHandle(),
+ s_http_signing_complete_fn,
+ signerCallbackData) == AWS_OP_SUCCESS;
+ }
+ } // namespace Auth
+ } // namespace Crt
+} // namespace Aws
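
A configuration sketch for the SigV4 signer added above. The actual HttpRequest and completion callback are omitted, and the anonymous credentials stand in for real ones:

    #include <aws/crt/Api.h>
    #include <aws/crt/auth/Credentials.h>
    #include <aws/crt/auth/Sigv4Signing.h>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        Aws::Crt::Auth::AwsSigningConfig signingConfig(Aws::Crt::ApiAllocator());
        signingConfig.SetSigningAlgorithm(Aws::Crt::Auth::SigningAlgorithm::SigV4);
        signingConfig.SetSignatureType(Aws::Crt::Auth::SignatureType::HttpRequestViaHeaders);
        signingConfig.SetRegion("us-east-1");
        signingConfig.SetService("s3");
        signingConfig.SetSigningTimepoint(Aws::Crt::DateTime::Now());
        signingConfig.SetSignedBodyValue(Aws::Crt::Auth::SignedBodyValue::UnsignedPayloadStr());

        // SignRequest() rejects a config that carries neither credentials nor a provider.
        auto credentials = Aws::Crt::MakeShared<Aws::Crt::Auth::Credentials>(
            Aws::Crt::ApiAllocator(), Aws::Crt::ApiAllocator());
        signingConfig.SetCredentials(credentials);

        Aws::Crt::Auth::Sigv4HttpRequestSigner signer(Aws::Crt::ApiAllocator());
        // signer.SignRequest(request, signingConfig, onComplete) would then sign an
        // Http::HttpRequest asynchronously and report the result through the callback.
        (void)signer;
        return 0;
    }
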
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp b/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
new file mode 100644
index 0000000000..403aff7d3f
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/crypto/HMAC.cpp
@@ -0,0 +1,173 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/crypto/HMAC.h>
+
+#include <aws/cal/hmac.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Crypto
+ {
+ bool ComputeSHA256HMAC(
+ Allocator *allocator,
+ const ByteCursor &secret,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo) noexcept
+ {
+ return aws_sha256_hmac_compute(allocator, &secret, &input, &output, truncateTo) == AWS_OP_SUCCESS;
+ }
+
+ bool ComputeSHA256HMAC(
+ const ByteCursor &secret,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo) noexcept
+ {
+ return aws_sha256_hmac_compute(ApiAllocator(), &secret, &input, &output, truncateTo) == AWS_OP_SUCCESS;
+ }
+
+ HMAC::HMAC(aws_hmac *hmac) noexcept : m_hmac(hmac), m_good(false), m_lastError(0)
+ {
+ if (hmac)
+ {
+ m_good = true;
+ }
+ else
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ HMAC::~HMAC()
+ {
+ if (m_hmac)
+ {
+ aws_hmac_destroy(m_hmac);
+ m_hmac = nullptr;
+ }
+ }
+
+ HMAC::HMAC(HMAC &&toMove) : m_hmac(toMove.m_hmac), m_good(toMove.m_good), m_lastError(toMove.m_lastError)
+ {
+ toMove.m_hmac = nullptr;
+ toMove.m_good = false;
+ }
+
+ HMAC &HMAC::operator=(HMAC &&toMove)
+ {
+ if (&toMove != this)
+ {
+ *this = HMAC(std::move(toMove));
+ }
+
+ return *this;
+ }
+
+ HMAC HMAC::CreateSHA256HMAC(Allocator *allocator, const ByteCursor &secret) noexcept
+ {
+ return HMAC(aws_sha256_hmac_new(allocator, &secret));
+ }
+
+ HMAC HMAC::CreateSHA256HMAC(const ByteCursor &secret) noexcept
+ {
+ return HMAC(aws_sha256_hmac_new(ApiAllocator(), &secret));
+ }
+
+ bool HMAC::Update(const ByteCursor &toHMAC) noexcept
+ {
+ if (*this)
+ {
+ if (aws_hmac_update(m_hmac, &toHMAC))
+ {
+ m_lastError = aws_last_error();
+ m_good = false;
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ bool HMAC::Digest(ByteBuf &output, size_t truncateTo) noexcept
+ {
+ if (*this)
+ {
+ m_good = false;
+ if (aws_hmac_finalize(m_hmac, &output, truncateTo))
+ {
+ m_lastError = aws_last_error();
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ aws_hmac_vtable ByoHMAC::s_Vtable = {
+ "aws-crt-cpp-byo-crypto-hmac",
+ "aws-crt-cpp-byo-crypto",
+ ByoHMAC::s_Destroy,
+ ByoHMAC::s_Update,
+ ByoHMAC::s_Finalize,
+ };
+
+ ByoHMAC::ByoHMAC(size_t digestSize, const ByteCursor &, Allocator *allocator)
+ {
+ AWS_ZERO_STRUCT(m_hmacValue);
+ m_hmacValue.impl = reinterpret_cast<void *>(this);
+ m_hmacValue.digest_size = digestSize;
+ m_hmacValue.allocator = allocator;
+ m_hmacValue.good = true;
+ m_hmacValue.vtable = &s_Vtable;
+ }
+
+ aws_hmac *ByoHMAC::SeatForCInterop(const std::shared_ptr<ByoHMAC> &selfRef)
+ {
+ AWS_FATAL_ASSERT(this == selfRef.get());
+ m_selfReference = selfRef;
+ return &m_hmacValue;
+ }
+
+ void ByoHMAC::s_Destroy(struct aws_hmac *hmac)
+ {
+ auto *byoHash = reinterpret_cast<ByoHMAC *>(hmac->impl);
+ byoHash->m_selfReference = nullptr;
+ }
+
+ int ByoHMAC::s_Update(struct aws_hmac *hmac, const struct aws_byte_cursor *buf)
+ {
+ auto *byoHmac = reinterpret_cast<ByoHMAC *>(hmac->impl);
+ if (!byoHmac->m_hmacValue.good)
+ {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ if (!byoHmac->UpdateInternal(*buf))
+ {
+ byoHmac->m_hmacValue.good = false;
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ int ByoHMAC::s_Finalize(struct aws_hmac *hmac, struct aws_byte_buf *out)
+ {
+ auto *byoHmac = reinterpret_cast<ByoHMAC *>(hmac->impl);
+ if (!byoHmac->m_hmacValue.good)
+ {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ bool success = byoHmac->DigestInternal(*out);
+ byoHmac->m_hmacValue.good = false;
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+ }
+ } // namespace Crypto
+ } // namespace Crt
+} // namespace Aws
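
Both the one-shot and the streaming HMAC paths above can be exercised as follows (illustrative sketch; SHA-256 needs 32 bytes of output capacity):

    #include <aws/crt/Api.h>
    #include <aws/crt/Types.h>
    #include <aws/crt/crypto/HMAC.h>

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        Aws::Crt::ByteCursor secret = Aws::Crt::ByteCursorFromCString("signing-key");
        Aws::Crt::ByteCursor data = Aws::Crt::ByteCursorFromCString("payload");

        // One-shot form.
        uint8_t digest[32] = {0};
        Aws::Crt::ByteBuf output = Aws::Crt::ByteBufFromEmptyArray(digest, sizeof(digest));
        bool ok = Aws::Crt::Crypto::ComputeSHA256HMAC(secret, data, output, 0 /* no truncation */);
        std::printf("one-shot: %s, %zu bytes\n", ok ? "ok" : "failed", output.len);

        // Streaming form over the same input.
        uint8_t digest2[32] = {0};
        Aws::Crt::ByteBuf output2 = Aws::Crt::ByteBufFromEmptyArray(digest2, sizeof(digest2));
        Aws::Crt::Crypto::HMAC hmac = Aws::Crt::Crypto::HMAC::CreateSHA256HMAC(secret);
        ok = hmac.Update(data) && hmac.Digest(output2, 0);
        std::printf("streaming: %s\n", ok ? "ok" : "failed");
        return 0;
    }
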
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp b/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
new file mode 100644
index 0000000000..273d94da5e
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/crypto/Hash.cpp
@@ -0,0 +1,174 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/crypto/Hash.h>
+
+#include <aws/cal/hash.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Crypto
+ {
+ bool ComputeSHA256(
+ Allocator *allocator,
+ const ByteCursor &input,
+ ByteBuf &output,
+ size_t truncateTo) noexcept
+ {
+ return aws_sha256_compute(allocator, &input, &output, truncateTo) == AWS_OP_SUCCESS;
+ }
+
+ bool ComputeSHA256(const ByteCursor &input, ByteBuf &output, size_t truncateTo) noexcept
+ {
+ return aws_sha256_compute(ApiAllocator(), &input, &output, truncateTo) == AWS_OP_SUCCESS;
+ }
+
+ bool ComputeMD5(Allocator *allocator, const ByteCursor &input, ByteBuf &output, size_t truncateTo) noexcept
+ {
+ return aws_md5_compute(allocator, &input, &output, truncateTo) == AWS_OP_SUCCESS;
+ }
+
+ bool ComputeMD5(const ByteCursor &input, ByteBuf &output, size_t truncateTo) noexcept
+ {
+ return aws_md5_compute(ApiAllocator(), &input, &output, truncateTo) == AWS_OP_SUCCESS;
+ }
+
+ Hash::Hash(aws_hash *hash) noexcept : m_hash(hash), m_good(false), m_lastError(0)
+ {
+ if (hash)
+ {
+ m_good = true;
+ }
+ else
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ Hash::~Hash()
+ {
+ if (m_hash)
+ {
+ aws_hash_destroy(m_hash);
+ m_hash = nullptr;
+ }
+ }
+
+ Hash::Hash(Hash &&toMove) : m_hash(toMove.m_hash), m_good(toMove.m_good), m_lastError(toMove.m_lastError)
+ {
+ toMove.m_hash = nullptr;
+ toMove.m_good = false;
+ }
+
+ Hash &Hash::operator=(Hash &&toMove)
+ {
+ if (&toMove != this)
+ {
+ *this = Hash(std::move(toMove));
+ }
+
+ return *this;
+ }
+
+ Hash Hash::CreateSHA256(Allocator *allocator) noexcept { return Hash(aws_sha256_new(allocator)); }
+
+ Hash Hash::CreateMD5(Allocator *allocator) noexcept { return Hash(aws_md5_new(allocator)); }
+
+ bool Hash::Update(const ByteCursor &toHash) noexcept
+ {
+ if (*this)
+ {
+ if (aws_hash_update(m_hash, &toHash))
+ {
+ m_lastError = aws_last_error();
+ m_good = false;
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ bool Hash::Digest(ByteBuf &output, size_t truncateTo) noexcept
+ {
+ if (*this)
+ {
+ m_good = false;
+ if (aws_hash_finalize(m_hash, &output, truncateTo))
+ {
+ m_lastError = aws_last_error();
+ return false;
+ }
+ return true;
+ }
+
+ return false;
+ }
+
+ aws_hash_vtable ByoHash::s_Vtable = {
+ "aws-crt-cpp-byo-crypto-hash",
+ "aws-crt-cpp-byo-crypto",
+ ByoHash::s_Destroy,
+ ByoHash::s_Update,
+ ByoHash::s_Finalize,
+ };
+
+ ByoHash::ByoHash(size_t digestSize, Allocator *allocator)
+ {
+ AWS_ZERO_STRUCT(m_hashValue);
+ m_hashValue.vtable = &s_Vtable;
+ m_hashValue.allocator = allocator;
+ m_hashValue.impl = reinterpret_cast<void *>(this);
+ m_hashValue.digest_size = digestSize;
+ m_hashValue.good = true;
+ }
+
+ ByoHash::~ByoHash() {}
+
+ aws_hash *ByoHash::SeatForCInterop(const std::shared_ptr<ByoHash> &selfRef)
+ {
+ AWS_FATAL_ASSERT(this == selfRef.get());
+ m_selfReference = selfRef;
+ return &m_hashValue;
+ }
+
+ void ByoHash::s_Destroy(struct aws_hash *hash)
+ {
+ auto *byoHash = reinterpret_cast<ByoHash *>(hash->impl);
+ byoHash->m_selfReference = nullptr;
+ }
+
+ int ByoHash::s_Update(struct aws_hash *hash, const struct aws_byte_cursor *buf)
+ {
+ auto *byoHash = reinterpret_cast<ByoHash *>(hash->impl);
+ if (!byoHash->m_hashValue.good)
+ {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+ if (!byoHash->UpdateInternal(*buf))
+ {
+ byoHash->m_hashValue.good = false;
+ return AWS_OP_ERR;
+ }
+ return AWS_OP_SUCCESS;
+ }
+
+ int ByoHash::s_Finalize(struct aws_hash *hash, struct aws_byte_buf *out)
+ {
+ auto *byoHash = reinterpret_cast<ByoHash *>(hash->impl);
+ if (!byoHash->m_hashValue.good)
+ {
+ return aws_raise_error(AWS_ERROR_INVALID_STATE);
+ }
+
+ bool success = byoHash->DigestInternal(*out);
+ byoHash->m_hashValue.good = false;
+ return success ? AWS_OP_SUCCESS : AWS_OP_ERR;
+ }
+ } // namespace Crypto
+ } // namespace Crt
+} // namespace Aws
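
Hash.cpp mirrors the HMAC wrapper; a streaming SHA-256 sketch:

    #include <aws/crt/Api.h>
    #include <aws/crt/Types.h>
    #include <aws/crt/crypto/Hash.h>

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        Aws::Crt::ApiHandle apiHandle;

        uint8_t digest[32] = {0};
        Aws::Crt::ByteBuf output = Aws::Crt::ByteBufFromEmptyArray(digest, sizeof(digest));

        // Feed the input in two chunks, then finalize into the 32-byte buffer.
        Aws::Crt::Crypto::Hash sha = Aws::Crt::Crypto::Hash::CreateSHA256(Aws::Crt::ApiAllocator());
        bool ok = sha.Update(Aws::Crt::ByteCursorFromCString("hello ")) &&
                  sha.Update(Aws::Crt::ByteCursorFromCString("world")) &&
                  sha.Digest(output, 0);

        std::printf("sha256: %s, %zu bytes\n", ok ? "ok" : "failed", output.len);
        return 0;
    }
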
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp b/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
new file mode 100644
index 0000000000..b319508c93
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/endpoints/RuleEngine.cpp
@@ -0,0 +1,169 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/common/string.h>
+#include <aws/crt/Api.h>
+#include <aws/crt/endpoints/RuleEngine.h>
+#include <aws/sdkutils/endpoints_rule_engine.h>
+#include <aws/sdkutils/partitions.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Endpoints
+ {
+
+ RequestContext::RequestContext(Allocator *allocator) noexcept : m_allocator(allocator)
+ {
+ m_requestContext = aws_endpoints_request_context_new(allocator);
+ }
+
+ RequestContext::~RequestContext()
+ {
+ m_requestContext = aws_endpoints_request_context_release(m_requestContext);
+ }
+
+ bool RequestContext::AddString(const ByteCursor &name, const ByteCursor &value)
+ {
+                return AWS_OP_SUCCESS ==
+ aws_endpoints_request_context_add_string(m_allocator, m_requestContext, name, value);
+ }
+
+ bool RequestContext::AddBoolean(const ByteCursor &name, bool value)
+ {
+                return AWS_OP_SUCCESS ==
+ aws_endpoints_request_context_add_boolean(m_allocator, m_requestContext, name, value);
+ }
+
+ ResolutionOutcome::ResolutionOutcome(aws_endpoints_resolved_endpoint *impl) : m_resolvedEndpoint(impl) {}
+
+ ResolutionOutcome::ResolutionOutcome(ResolutionOutcome &&toMove) noexcept
+ : m_resolvedEndpoint(toMove.m_resolvedEndpoint)
+ {
+ toMove.m_resolvedEndpoint = nullptr;
+ }
+
+ ResolutionOutcome &ResolutionOutcome::operator=(ResolutionOutcome &&toMove)
+ {
+ if (&toMove != this)
+ {
+ *this = ResolutionOutcome(std::move(toMove));
+ }
+
+ return *this;
+ }
+
+ ResolutionOutcome::~ResolutionOutcome() { aws_endpoints_resolved_endpoint_release(m_resolvedEndpoint); }
+
+ bool ResolutionOutcome::IsEndpoint() const noexcept
+ {
+ return AWS_ENDPOINTS_RESOLVED_ENDPOINT == aws_endpoints_resolved_endpoint_get_type(m_resolvedEndpoint);
+ }
+
+ bool ResolutionOutcome::IsError() const noexcept
+ {
+ return AWS_ENDPOINTS_RESOLVED_ERROR == aws_endpoints_resolved_endpoint_get_type(m_resolvedEndpoint);
+ }
+
+ Optional<StringView> ResolutionOutcome::GetUrl() const
+ {
+ ByteCursor url;
+ if (aws_endpoints_resolved_endpoint_get_url(m_resolvedEndpoint, &url))
+ {
+ return Optional<StringView>();
+ }
+
+ return Optional<StringView>(ByteCursorToStringView(url));
+ }
+
+ inline StringView CrtStringToStringView(const aws_string *s)
+ {
+ ByteCursor key = aws_byte_cursor_from_string(s);
+ return ByteCursorToStringView(key);
+ }
+
+ Optional<UnorderedMap<StringView, Vector<StringView>>> ResolutionOutcome::GetHeaders() const
+ {
+ const aws_hash_table *resolved_headers = nullptr;
+
+ if (aws_endpoints_resolved_endpoint_get_headers(m_resolvedEndpoint, &resolved_headers))
+ {
+ return Optional<UnorderedMap<StringView, Vector<StringView>>>();
+ }
+
+ UnorderedMap<StringView, Vector<StringView>> headers;
+ for (struct aws_hash_iter iter = aws_hash_iter_begin(resolved_headers); !aws_hash_iter_done(&iter);
+ aws_hash_iter_next(&iter))
+ {
+ ByteCursor key = aws_byte_cursor_from_string((const aws_string *)iter.element.key);
+ const aws_array_list *array = (const aws_array_list *)iter.element.value;
+ headers.emplace(std::make_pair(
+ ByteCursorToStringView(key),
+ ArrayListToVector<aws_string *, StringView>(array, CrtStringToStringView)));
+ }
+
+ return Optional<UnorderedMap<StringView, Vector<StringView>>>(headers);
+ }
+
+ Optional<StringView> ResolutionOutcome::GetProperties() const
+ {
+ ByteCursor properties;
+ if (aws_endpoints_resolved_endpoint_get_properties(m_resolvedEndpoint, &properties))
+ {
+ return Optional<StringView>();
+ }
+
+ return Optional<StringView>(ByteCursorToStringView(properties));
+ }
+
+ Optional<StringView> ResolutionOutcome::GetError() const
+ {
+ ByteCursor error;
+ if (aws_endpoints_resolved_endpoint_get_error(m_resolvedEndpoint, &error))
+ {
+ return Optional<StringView>();
+ }
+
+ return Optional<StringView>(ByteCursorToStringView(error));
+ }
+
+ RuleEngine::RuleEngine(
+ const ByteCursor &rulesetCursor,
+ const ByteCursor &partitionsCursor,
+ Allocator *allocator) noexcept
+ : m_ruleEngine(nullptr)
+ {
+ auto ruleset = aws_endpoints_ruleset_new_from_string(allocator, rulesetCursor);
+ auto partitions = aws_partitions_config_new_from_string(allocator, partitionsCursor);
+ if (ruleset != NULL && partitions != NULL)
+ {
+ m_ruleEngine = aws_endpoints_rule_engine_new(allocator, ruleset, partitions);
+ }
+
+ if (ruleset != NULL)
+ {
+ aws_endpoints_ruleset_release(ruleset);
+ }
+
+ if (partitions != NULL)
+ {
+ aws_partitions_config_release(partitions);
+ }
+ }
+
+ RuleEngine::~RuleEngine() { m_ruleEngine = aws_endpoints_rule_engine_release(m_ruleEngine); }
+
+ Optional<ResolutionOutcome> RuleEngine::Resolve(const RequestContext &context) const
+ {
+ aws_endpoints_resolved_endpoint *resolved = NULL;
+ if (aws_endpoints_rule_engine_resolve(m_ruleEngine, context.GetNativeHandle(), &resolved))
+ {
+ return Optional<ResolutionOutcome>();
+ }
+ return Optional<ResolutionOutcome>(ResolutionOutcome(resolved));
+ }
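+
+            /*
+             * Editor's illustrative sketch (not part of the upstream sources): the end-to-end
+             * resolution flow with the wrappers above. The parameter name "Region" and the
+             * ruleset/partitions JSON cursors are placeholders; real rulesets ship per service.
+             *
+             *     RuleEngine engine(rulesetJson, partitionsJson, ApiAllocator());
+             *     RequestContext ctx(ApiAllocator());
+             *     ctx.AddString(aws_byte_cursor_from_c_str("Region"), aws_byte_cursor_from_c_str("us-east-1"));
+             *     auto outcome = engine.Resolve(ctx);
+             *     if (outcome.has_value() && outcome->IsEndpoint())
+             *     {
+             *         auto url = outcome->GetUrl(); // e.g. "https://service.us-east-1.amazonaws.com"
+             *     }
+             */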
+ } // namespace Endpoints
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp b/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
new file mode 100644
index 0000000000..f3db28d400
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/external/cJSON.cpp
@@ -0,0 +1,3120 @@
+/*
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+/** MODIFICATIONS:
+ * valueInt was moved up to improve alignment.
+ * Wrap all symbols in the Aws namespace as a short-term collision resolution
+ * Replace strcpy() with strncpy()
+ *
+ * Modifications licensed under:
+ *
+ * Copyright 2010-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License").
+ * You may not use this file except in compliance with the License.
+ * A copy of the License is located at
+ *
+ * http://aws.amazon.com/apache2.0
+ *
+ * or in the "license" file accompanying this file. This file is distributed
+ * on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
+ * express or implied. See the License for the specific language governing
+ * permissions and limitations under the License.
+ */
+
+/* cJSON */
+/* JSON parser in C. */
+
+/* disable warnings about old C89 functions in MSVC */
+#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#if defined(_MSC_VER)
+#pragma warning (push)
+/* disable warning about single line comments in system headers */
+#pragma warning (disable : 4001)
+#endif
+
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <ctype.h>
+#include <float.h>
+
+#ifdef ENABLE_LOCALES
+#include <locale.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning (pop)
+#endif
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#include <aws/crt/external/cJSON.h>
+
+/* define our own boolean type */
+// #ifdef true
+// #undef true
+// #endif
+// #define true ((cJSON_bool)1)
+
+// #ifdef false
+// #undef false
+// #endif
+// #define false ((cJSON_bool)0)
+
+/* define isnan and isinf for ANSI C; in C99 or above, isnan and isinf have already been defined in math.h */
+#ifndef isinf
+#define isinf(d) (isnan((d - d)) && !isnan(d))
+#endif
+#ifndef isnan
+#define isnan(d) (d != d)
+#endif
+
+#ifndef NAN
+#define NAN 0.0/0.0
+#endif
+
+typedef struct {
+ const unsigned char *json;
+ size_t position;
+} error;
+static error global_error = { NULL, 0 };
+
+namespace Aws {
+
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void)
+{
+ return (const char*) (global_error.json + global_error.position);
+}
+
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON * const item)
+{
+ if (!cJSON_IsString(item))
+ {
+ return NULL;
+ }
+
+ return item->valuestring;
+}
+
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON * const item)
+{
+ if (!cJSON_IsNumber(item))
+ {
+ return (double) NAN;
+ }
+
+ return item->valuedouble;
+}
+
+/* This is a safeguard to prevent copy-pasters from using incompatible C and header files */
+#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || (CJSON_VERSION_PATCH != 14)
+ #error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif
+
+CJSON_PUBLIC(const char*) cJSON_Version(void)
+{
+ static char version[15];
+ snprintf(version, sizeof(version), "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR, CJSON_VERSION_PATCH);
+
+ return version;
+}
+
+/* Case insensitive string comparison, doesn't consider two NULL pointers equal though */
+static int case_insensitive_strcmp(const unsigned char *string1, const unsigned char *string2)
+{
+ if ((string1 == NULL) || (string2 == NULL))
+ {
+ return 1;
+ }
+
+ if (string1 == string2)
+ {
+ return 0;
+ }
+
+ for(; tolower(*string1) == tolower(*string2); (void)string1++, string2++)
+ {
+ if (*string1 == '\0')
+ {
+ return 0;
+ }
+ }
+
+ return tolower(*string1) - tolower(*string2);
+}
+
+typedef struct internal_hooks
+{
+ void *(CJSON_CDECL *allocate)(size_t size);
+ void (CJSON_CDECL *deallocate)(void *pointer);
+ void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
+} internal_hooks;
+
+#if defined(_MSC_VER)
+/* work around MSVC error C2322: '...' address of dllimport '...' is not static */
+static void * CJSON_CDECL internal_malloc(size_t size)
+{
+ return malloc(size);
+}
+static void CJSON_CDECL internal_free(void *pointer)
+{
+ free(pointer);
+}
+static void * CJSON_CDECL internal_realloc(void *pointer, size_t size)
+{
+ return realloc(pointer, size);
+}
+#else
+#define internal_malloc malloc
+#define internal_free free
+#define internal_realloc realloc
+#endif
+
+/* strlen of character literals resolved at compile time */
+#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
+
+static internal_hooks global_hooks = { internal_malloc, internal_free, internal_realloc };
+
+static unsigned char* cJSON_strdup(const unsigned char* string, const internal_hooks * const hooks)
+{
+ size_t length = 0;
+ unsigned char *copy = NULL;
+
+ if (string == NULL)
+ {
+ return NULL;
+ }
+
+ length = strlen((const char*)string) + sizeof("");
+ copy = (unsigned char*)hooks->allocate(length);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ memcpy(copy, string, length);
+
+ return copy;
+}
+
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks* hooks)
+{
+ if (hooks == NULL)
+ {
+ /* Reset hooks */
+ global_hooks.allocate = malloc;
+ global_hooks.deallocate = free;
+ global_hooks.reallocate = realloc;
+ return;
+ }
+
+ global_hooks.allocate = malloc;
+ if (hooks->malloc_fn != NULL)
+ {
+ global_hooks.allocate = hooks->malloc_fn;
+ }
+
+ global_hooks.deallocate = free;
+ if (hooks->free_fn != NULL)
+ {
+ global_hooks.deallocate = hooks->free_fn;
+ }
+
+ /* use realloc only if both free and malloc are used */
+ global_hooks.reallocate = NULL;
+ if ((global_hooks.allocate == malloc) && (global_hooks.deallocate == free))
+ {
+ global_hooks.reallocate = realloc;
+ }
+}
+
+/* Internal constructor. */
+static cJSON *cJSON_New_Item(const internal_hooks * const hooks)
+{
+ cJSON* node = (cJSON*)hooks->allocate(sizeof(cJSON));
+ if (node)
+ {
+ memset(node, '\0', sizeof(cJSON));
+ }
+
+ return node;
+}
+
+/* Delete a cJSON structure. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item)
+{
+ cJSON *next = NULL;
+ while (item != NULL)
+ {
+ next = item->next;
+ if (!(item->type & cJSON_IsReference) && (item->child != NULL))
+ {
+ cJSON_Delete(item->child);
+ }
+ if (!(item->type & cJSON_IsReference) && (item->valuestring != NULL))
+ {
+ global_hooks.deallocate(item->valuestring);
+ }
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+ {
+ global_hooks.deallocate(item->string);
+ }
+ global_hooks.deallocate(item);
+ item = next;
+ }
+}
+
+/* get the decimal point character of the current locale */
+static unsigned char get_decimal_point(void)
+{
+#ifdef ENABLE_LOCALES
+ struct lconv *lconv = localeconv();
+ return (unsigned char) lconv->decimal_point[0];
+#else
+ return '.';
+#endif
+}
+
+typedef struct
+{
+ const unsigned char *content;
+ size_t length;
+ size_t offset;
+ size_t depth; /* How deeply nested (in arrays/objects) is the input at the current offset. */
+ internal_hooks hooks;
+} parse_buffer;
+
+/* check if the given size is left to read in a given parse buffer (starting with 1) */
+#define can_read(buffer, size) ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length))
+/* check if the buffer can be accessed at the given index (starting with 0) */
+#define can_access_at_index(buffer, index) ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length))
+#define cannot_access_at_index(buffer, index) (!can_access_at_index(buffer, index))
+/* get a pointer to the buffer at the position */
+#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset)
+
+/* Parse the input text to generate a number, and populate the result into item. */
+static cJSON_bool parse_number(cJSON * const item, parse_buffer * const input_buffer)
+{
+ double number = 0;
+ unsigned char *after_end = NULL;
+ unsigned char number_c_string[64];
+ unsigned char decimal_point = get_decimal_point();
+ size_t i = 0;
+
+ if ((input_buffer == NULL) || (input_buffer->content == NULL))
+ {
+ return false;
+ }
+
+ /* copy the number into a temporary buffer and replace '.' with the decimal point
+ * of the current locale (for strtod)
+ * This also takes care of '\0' not necessarily being available for marking the end of the input */
+ for (i = 0; (i < (sizeof(number_c_string) - 1)) && can_access_at_index(input_buffer, i); i++)
+ {
+ switch (buffer_at_offset(input_buffer)[i])
+ {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '+':
+ case '-':
+ case 'e':
+ case 'E':
+ number_c_string[i] = buffer_at_offset(input_buffer)[i];
+ break;
+
+ case '.':
+ number_c_string[i] = decimal_point;
+ break;
+
+ default:
+ goto loop_end;
+ }
+ }
+loop_end:
+ number_c_string[i] = '\0';
+
+ number = strtod((const char*)number_c_string, (char**)&after_end);
+ if (number_c_string == after_end)
+ {
+ return false; /* parse_error */
+ }
+
+ item->valuedouble = number;
+
+ /* use saturation in case of overflow */
+ if (number >= INT_MAX)
+ {
+ item->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ item->valueint = INT_MIN;
+ }
+ else
+ {
+ item->valueint = (int)number;
+ }
+
+ item->type = cJSON_Number;
+
+ input_buffer->offset += (size_t)(after_end - number_c_string);
+ return true;
+}
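+
+/* Editor's note (illustrative, not from upstream): parse_number() keeps the full value in
+ * valuedouble and saturates the integer view. Parsing "1e20" yields valuedouble == 1e20 but
+ * valueint == INT_MAX; parsing "-1e20" clamps valueint to INT_MIN. */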
+
+/* don't ask me, but the original cJSON_SetNumberValue returns an integer or double */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number)
+{
+ if (number >= INT_MAX)
+ {
+ object->valueint = INT_MAX;
+ }
+ else if (number <= (double)INT_MIN)
+ {
+ object->valueint = INT_MIN;
+ }
+ else
+ {
+ object->valueint = (int)number;
+ }
+
+ return object->valuedouble = number;
+}
+
+CJSON_PUBLIC(char*) cJSON_SetValuestring(cJSON *object, const char *valuestring)
+{
+ char *copy = NULL;
+ /* if object's type is not cJSON_String or is cJSON_IsReference, it should not set valuestring */
+ if (!(object->type & cJSON_String) || (object->type & cJSON_IsReference))
+ {
+ return NULL;
+ }
+ if (strlen(valuestring) <= strlen(object->valuestring))
+ {
+ memcpy(object->valuestring, valuestring, strlen(valuestring) + 1);
+ return object->valuestring;
+ }
+ copy = (char*) cJSON_strdup((const unsigned char*)valuestring, &global_hooks);
+ if (copy == NULL)
+ {
+ return NULL;
+ }
+ if (object->valuestring != NULL)
+ {
+ cJSON_free(object->valuestring);
+ }
+ object->valuestring = copy;
+
+ return copy;
+}
+
+typedef struct
+{
+ unsigned char *buffer;
+ size_t length;
+ size_t offset;
+ size_t depth; /* current nesting depth (for formatted printing) */
+ cJSON_bool noalloc;
+ cJSON_bool format; /* is this print a formatted print */
+ internal_hooks hooks;
+} printbuffer;
+
+/* realloc printbuffer if necessary to have at least "needed" bytes more */
+static unsigned char* ensure(printbuffer * const p, size_t needed)
+{
+ unsigned char *newbuffer = NULL;
+ size_t newsize = 0;
+
+ if ((p == NULL) || (p->buffer == NULL))
+ {
+ return NULL;
+ }
+
+ if ((p->length > 0) && (p->offset >= p->length))
+ {
+ /* make sure that offset is valid */
+ return NULL;
+ }
+
+ if (needed > INT_MAX)
+ {
+ /* sizes bigger than INT_MAX are currently not supported */
+ return NULL;
+ }
+
+ needed += p->offset + 1;
+ if (needed <= p->length)
+ {
+ return p->buffer + p->offset;
+ }
+
+ if (p->noalloc) {
+ return NULL;
+ }
+
+ /* calculate new buffer size */
+ if (needed > (INT_MAX / 2))
+ {
+ /* overflow of int, use INT_MAX if possible */
+ if (needed <= INT_MAX)
+ {
+ newsize = INT_MAX;
+ }
+ else
+ {
+ return NULL;
+ }
+ }
+ else
+ {
+ newsize = needed * 2;
+ }
+
+ if (p->hooks.reallocate != NULL)
+ {
+ /* reallocate with realloc if available */
+ newbuffer = (unsigned char*)p->hooks.reallocate(p->buffer, newsize);
+ if (newbuffer == NULL)
+ {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+ }
+ else
+ {
+ /* otherwise reallocate manually */
+ newbuffer = (unsigned char*)p->hooks.allocate(newsize);
+ if (!newbuffer)
+ {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+ if (newbuffer)
+ {
+ memcpy(newbuffer, p->buffer, p->offset + 1);
+ }
+ p->hooks.deallocate(p->buffer);
+ }
+ p->length = newsize;
+ p->buffer = newbuffer;
+
+ return newbuffer + p->offset;
+}
+
+/* calculate the new length of the string in a printbuffer and update the offset */
+static void update_offset(printbuffer * const buffer)
+{
+ const unsigned char *buffer_pointer = NULL;
+ if ((buffer == NULL) || (buffer->buffer == NULL))
+ {
+ return;
+ }
+ buffer_pointer = buffer->buffer + buffer->offset;
+
+ buffer->offset += strlen((const char*)buffer_pointer);
+}
+
+/* secure comparison of floating-point variables */
+static cJSON_bool compare_double(double a, double b)
+{
+ double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
+ return (fabs(a - b) <= maxVal * DBL_EPSILON);
+}
+
+/* Render the number nicely from the given item into a string. */
+static cJSON_bool print_number(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output_pointer = NULL;
+ double d = item->valuedouble;
+ int length = 0;
+ size_t i = 0;
+ unsigned char number_buffer[26] = {0}; /* temporary buffer to print the number into */
+ unsigned char decimal_point = get_decimal_point();
+ double test = 0.0;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* This checks for NaN and Infinity */
+ if (isnan(d) || isinf(d))
+ {
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "null");
+ }
+ else
+ {
+        /* Try 15 significant digits of precision to avoid nonsignificant nonzero digits */
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.15g", d);
+
+ /* Check whether the original double can be recovered */
+ if ((sscanf((char*)number_buffer, "%lg", &test) != 1) || !compare_double((double)test, d))
+ {
+            /* If not, print with 17 significant digits of precision */
+ length = snprintf((char*)number_buffer, sizeof(number_buffer), "%1.17g", d);
+ }
+ }
+
+ /* snprintf failed or buffer overrun occurred */
+ if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1)))
+ {
+ return false;
+ }
+
+ /* reserve appropriate space in the output */
+ output_pointer = ensure(output_buffer, (size_t)length + sizeof(""));
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ /* copy the printed number to the output and replace locale
+ * dependent decimal point with '.' */
+ for (i = 0; i < ((size_t)length); i++)
+ {
+ if (number_buffer[i] == decimal_point)
+ {
+ output_pointer[i] = '.';
+ continue;
+ }
+
+ output_pointer[i] = number_buffer[i];
+ }
+ output_pointer[i] = '\0';
+
+ output_buffer->offset += (size_t)length;
+
+ return true;
+}
+
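+/* Editor's note (illustrative, not from upstream): the 15-then-17 digit logic above is a
+ * round-trip check. For the double nearest to 2.0/3.0, "%1.15g" prints "0.666666666666667";
+ * scanning that back lands a few ULPs away, compare_double() rejects it, and the value is
+ * reprinted as "0.66666666666666663" with "%1.17g", which does recover the original bits. */
+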
+/* parse 4 digit hexadecimal number */
+static unsigned parse_hex4(const unsigned char * const input)
+{
+ unsigned int h = 0;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++)
+ {
+ /* parse digit */
+ if ((input[i] >= '0') && (input[i] <= '9'))
+ {
+ h += (unsigned int) input[i] - '0';
+ }
+ else if ((input[i] >= 'A') && (input[i] <= 'F'))
+ {
+ h += (unsigned int) 10 + input[i] - 'A';
+ }
+ else if ((input[i] >= 'a') && (input[i] <= 'f'))
+ {
+ h += (unsigned int) 10 + input[i] - 'a';
+ }
+ else /* invalid */
+ {
+ return 0;
+ }
+
+ if (i < 3)
+ {
+ /* shift left to make place for the next nibble */
+ h = h << 4;
+ }
+ }
+
+ return h;
+}
+
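+/* Editor's note (illustrative, not from upstream): parse_hex4() maps four hex digits straight
+ * to a value, e.g. parse_hex4((const unsigned char *)"00e9") == 0x00E9; any non-hex digit makes
+ * it return 0. */
+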
+/* converts a UTF-16 literal to UTF-8
+ * A literal can be one or two sequences of the form \uXXXX */
+static unsigned char utf16_literal_to_utf8(const unsigned char * const input_pointer, const unsigned char * const input_end, unsigned char **output_pointer)
+{
+ long unsigned int codepoint = 0;
+ unsigned int first_code = 0;
+ const unsigned char *first_sequence = input_pointer;
+ unsigned char utf8_length = 0;
+ unsigned char utf8_position = 0;
+ unsigned char sequence_length = 0;
+ unsigned char first_byte_mark = 0;
+
+ if ((input_end - first_sequence) < 6)
+ {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ /* get the first utf16 sequence */
+ first_code = parse_hex4(first_sequence + 2);
+
+ /* check that the code is valid */
+ if (((first_code >= 0xDC00) && (first_code <= 0xDFFF)))
+ {
+ goto fail;
+ }
+
+ /* UTF16 surrogate pair */
+ if ((first_code >= 0xD800) && (first_code <= 0xDBFF))
+ {
+ const unsigned char *second_sequence = first_sequence + 6;
+ unsigned int second_code = 0;
+ sequence_length = 12; /* \uXXXX\uXXXX */
+
+ if ((input_end - second_sequence) < 6)
+ {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ if ((second_sequence[0] != '\\') || (second_sequence[1] != 'u'))
+ {
+ /* missing second half of the surrogate pair */
+ goto fail;
+ }
+
+ /* get the second utf16 sequence */
+ second_code = parse_hex4(second_sequence + 2);
+ /* check that the code is valid */
+ if ((second_code < 0xDC00) || (second_code > 0xDFFF))
+ {
+ /* invalid second half of the surrogate pair */
+ goto fail;
+ }
+
+
+ /* calculate the unicode codepoint from the surrogate pair */
+ codepoint = 0x10000 + (((first_code & 0x3FF) << 10) | (second_code & 0x3FF));
+ }
+ else
+ {
+ sequence_length = 6; /* \uXXXX */
+ codepoint = first_code;
+ }
+
+ /* encode as UTF-8
+ * takes at maximum 4 bytes to encode:
+ * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ if (codepoint < 0x80)
+ {
+ /* normal ascii, encoding 0xxxxxxx */
+ utf8_length = 1;
+ }
+ else if (codepoint < 0x800)
+ {
+ /* two bytes, encoding 110xxxxx 10xxxxxx */
+ utf8_length = 2;
+ first_byte_mark = 0xC0; /* 11000000 */
+ }
+ else if (codepoint < 0x10000)
+ {
+ /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 3;
+ first_byte_mark = 0xE0; /* 11100000 */
+ }
+ else if (codepoint <= 0x10FFFF)
+ {
+        /* four bytes, encoding 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 4;
+ first_byte_mark = 0xF0; /* 11110000 */
+ }
+ else
+ {
+ /* invalid unicode codepoint */
+ goto fail;
+ }
+
+ /* encode as utf8 */
+ for (utf8_position = (unsigned char)(utf8_length - 1); utf8_position > 0; utf8_position--)
+ {
+ /* 10xxxxxx */
+ (*output_pointer)[utf8_position] = (unsigned char)((codepoint | 0x80) & 0xBF);
+ codepoint >>= 6;
+ }
+ /* encode first byte */
+ if (utf8_length > 1)
+ {
+ (*output_pointer)[0] = (unsigned char)((codepoint | first_byte_mark) & 0xFF);
+ }
+ else
+ {
+ (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F);
+ }
+
+ *output_pointer += utf8_length;
+
+ return sequence_length;
+
+fail:
+ return 0;
+}
+
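+/* Editor's worked example (illustrative, not from upstream) for utf16_literal_to_utf8():
+ * the escape "\uD83D\uDE00" parses as first_code 0xD83D and second_code 0xDE00, giving
+ * codepoint 0x10000 + ((0x3D << 10) | 0x200) = 0x1F600, which is emitted as the four UTF-8
+ * bytes F0 9F 98 80; the function returns 12, the number of input characters consumed. */
+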
+/* Parse the input text into an unescaped C string, and populate item. */
+static cJSON_bool parse_string(cJSON * const item, parse_buffer * const input_buffer)
+{
+ const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1;
+ const unsigned char *input_end = buffer_at_offset(input_buffer) + 1;
+ unsigned char *output_pointer = NULL;
+ unsigned char *output = NULL;
+
+ /* not a string */
+ if (buffer_at_offset(input_buffer)[0] != '\"')
+ {
+ goto fail;
+ }
+
+ {
+ /* calculate approximate size of the output (overestimate) */
+ size_t allocation_length = 0;
+ size_t skipped_bytes = 0;
+ while (((size_t)(input_end - input_buffer->content) < input_buffer->length) && (*input_end != '\"'))
+ {
+ /* is escape sequence */
+ if (input_end[0] == '\\')
+ {
+ if ((size_t)(input_end + 1 - input_buffer->content) >= input_buffer->length)
+ {
+ /* prevent buffer overflow when last input character is a backslash */
+ goto fail;
+ }
+ skipped_bytes++;
+ input_end++;
+ }
+ input_end++;
+ }
+ if (((size_t)(input_end - input_buffer->content) >= input_buffer->length) || (*input_end != '\"'))
+ {
+ goto fail; /* string ended unexpectedly */
+ }
+
+ /* This is at most how much we need for the output */
+ allocation_length = (size_t) (input_end - buffer_at_offset(input_buffer)) - skipped_bytes;
+ output = (unsigned char*)input_buffer->hooks.allocate(allocation_length + sizeof(""));
+ if (output == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+ }
+
+ output_pointer = output;
+ /* loop through the string literal */
+ while (input_pointer < input_end)
+ {
+ if (*input_pointer != '\\')
+ {
+ *output_pointer++ = *input_pointer++;
+ }
+ /* escape sequence */
+ else
+ {
+ unsigned char sequence_length = 2;
+ if ((input_end - input_pointer) < 1)
+ {
+ goto fail;
+ }
+
+ switch (input_pointer[1])
+ {
+ case 'b':
+ *output_pointer++ = '\b';
+ break;
+ case 'f':
+ *output_pointer++ = '\f';
+ break;
+ case 'n':
+ *output_pointer++ = '\n';
+ break;
+ case 'r':
+ *output_pointer++ = '\r';
+ break;
+ case 't':
+ *output_pointer++ = '\t';
+ break;
+ case '\"':
+ case '\\':
+ case '/':
+ *output_pointer++ = input_pointer[1];
+ break;
+
+ /* UTF-16 literal */
+ case 'u':
+ sequence_length = utf16_literal_to_utf8(input_pointer, input_end, &output_pointer);
+ if (sequence_length == 0)
+ {
+ /* failed to convert UTF16-literal to UTF-8 */
+ goto fail;
+ }
+ break;
+
+ default:
+ goto fail;
+ }
+ input_pointer += sequence_length;
+ }
+ }
+
+ /* zero terminate the output */
+ *output_pointer = '\0';
+
+ item->type = cJSON_String;
+ item->valuestring = (char*)output;
+
+ input_buffer->offset = (size_t) (input_end - input_buffer->content);
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (output != NULL)
+ {
+ input_buffer->hooks.deallocate(output);
+ }
+
+ if (input_pointer != NULL)
+ {
+ input_buffer->offset = (size_t)(input_pointer - input_buffer->content);
+ }
+
+ return false;
+}
+
+/* Render the cstring provided to an escaped version that can be printed. */
+static cJSON_bool print_string_ptr(const unsigned char * const input, printbuffer * const output_buffer)
+{
+ const unsigned char *input_pointer = NULL;
+ unsigned char *output = NULL;
+ unsigned char *output_pointer = NULL;
+ size_t output_length = 0;
+    /* number of additional characters needed for escaping */
+ size_t escape_characters = 0;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* empty string */
+ if (input == NULL)
+ {
+ output = ensure(output_buffer, sizeof("\"\""));
+ if (output == NULL)
+ {
+ return false;
+ }
+ strncpy((char*)output, "\"\"", strlen("\"\"") + 1);
+
+ return true;
+ }
+
+    /* count how many characters need to be escaped */
+ for (input_pointer = input; *input_pointer; input_pointer++)
+ {
+ switch (*input_pointer)
+ {
+ case '\"':
+ case '\\':
+ case '\b':
+ case '\f':
+ case '\n':
+ case '\r':
+ case '\t':
+ /* one character escape sequence */
+ escape_characters++;
+ break;
+ default:
+ if (*input_pointer < 32)
+ {
+ /* UTF-16 escape sequence uXXXX */
+ escape_characters += 5;
+ }
+ break;
+ }
+ }
+ output_length = (size_t)(input_pointer - input) + escape_characters;
+
+ output = ensure(output_buffer, output_length + sizeof("\"\""));
+ if (output == NULL)
+ {
+ return false;
+ }
+
+ /* no characters have to be escaped */
+ if (escape_characters == 0)
+ {
+ output[0] = '\"';
+ memcpy(output + 1, input, output_length);
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+ }
+
+ output[0] = '\"';
+ output_pointer = output + 1;
+ /* copy the string */
+ for (input_pointer = input; *input_pointer != '\0'; (void)input_pointer++, output_pointer++)
+ {
+ if ((*input_pointer > 31) && (*input_pointer != '\"') && (*input_pointer != '\\'))
+ {
+ /* normal character, copy */
+ *output_pointer = *input_pointer;
+ }
+ else
+ {
+ /* character needs to be escaped */
+ *output_pointer++ = '\\';
+ switch (*input_pointer)
+ {
+ case '\\':
+ *output_pointer = '\\';
+ break;
+ case '\"':
+ *output_pointer = '\"';
+ break;
+ case '\b':
+ *output_pointer = 'b';
+ break;
+ case '\f':
+ *output_pointer = 'f';
+ break;
+ case '\n':
+ *output_pointer = 'n';
+ break;
+ case '\r':
+ *output_pointer = 'r';
+ break;
+ case '\t':
+ *output_pointer = 't';
+ break;
+ default:
+ /* escape and print as unicode codepoint */
+ snprintf((char*)output_pointer, output_buffer->length - (output_pointer - output_buffer->buffer), "u%04x", *input_pointer);
+ output_pointer += 4;
+ break;
+ }
+ }
+ }
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+}
+
+/* Invoke print_string_ptr (which is useful) on an item. */
+static cJSON_bool print_string(const cJSON * const item, printbuffer * const p)
+{
+ return print_string_ptr((unsigned char*)item->valuestring, p);
+}
+
+/* Predeclare these prototypes. */
+static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer);
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer);
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer);
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer);
+
+/* Utility to jump whitespace and cr/lf */
+static parse_buffer *buffer_skip_whitespace(parse_buffer * const buffer)
+{
+ if ((buffer == NULL) || (buffer->content == NULL))
+ {
+ return NULL;
+ }
+
+ if (cannot_access_at_index(buffer, 0))
+ {
+ return buffer;
+ }
+
+ while (can_access_at_index(buffer, 0) && (buffer_at_offset(buffer)[0] <= 32))
+ {
+ buffer->offset++;
+ }
+
+ if (buffer->offset == buffer->length)
+ {
+ buffer->offset--;
+ }
+
+ return buffer;
+}
+
+/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */
+static parse_buffer *skip_utf8_bom(parse_buffer * const buffer)
+{
+ if ((buffer == NULL) || (buffer->content == NULL) || (buffer->offset != 0))
+ {
+ return NULL;
+ }
+
+ if (can_access_at_index(buffer, 4) && (strncmp((const char*)buffer_at_offset(buffer), "\xEF\xBB\xBF", 3) == 0))
+ {
+ buffer->offset += 3;
+ }
+
+ return buffer;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithOpts(const char *value, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ size_t buffer_length;
+
+ if (NULL == value)
+ {
+ return NULL;
+ }
+
+ /* Adding null character size due to require_null_terminated. */
+ buffer_length = strlen(value) + sizeof("");
+
+ return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end, require_null_terminated);
+}
+
+/* Parse an object - create a new root, and populate. */
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLengthOpts(const char *value, size_t buffer_length, const char **return_parse_end, cJSON_bool require_null_terminated)
+{
+ parse_buffer buffer = { 0, 0, 0, 0, { 0, 0, 0 } };
+ cJSON *item = NULL;
+
+ /* reset error position */
+ global_error.json = NULL;
+ global_error.position = 0;
+
+ if (value == NULL || 0 == buffer_length)
+ {
+ goto fail;
+ }
+
+ buffer.content = (const unsigned char*)value;
+ buffer.length = buffer_length;
+ buffer.offset = 0;
+ buffer.hooks = global_hooks;
+
+ item = cJSON_New_Item(&global_hooks);
+ if (item == NULL) /* memory fail */
+ {
+ goto fail;
+ }
+
+ if (!parse_value(item, buffer_skip_whitespace(skip_utf8_bom(&buffer))))
+ {
+ /* parse failure. ep is set. */
+ goto fail;
+ }
+
+ /* if we require null-terminated JSON without appended garbage, skip and then check for a null terminator */
+ if (require_null_terminated)
+ {
+ buffer_skip_whitespace(&buffer);
+ if ((buffer.offset >= buffer.length) || buffer_at_offset(&buffer)[0] != '\0')
+ {
+ goto fail;
+ }
+ }
+ if (return_parse_end)
+ {
+ *return_parse_end = (const char*)buffer_at_offset(&buffer);
+ }
+
+ return item;
+
+fail:
+ if (item != NULL)
+ {
+ cJSON_Delete(item);
+ }
+
+ if (value != NULL)
+ {
+ error local_error;
+ local_error.json = (const unsigned char*)value;
+ local_error.position = 0;
+
+ if (buffer.offset < buffer.length)
+ {
+ local_error.position = buffer.offset;
+ }
+ else if (buffer.length > 0)
+ {
+ local_error.position = buffer.length - 1;
+ }
+
+ if (return_parse_end != NULL)
+ {
+ *return_parse_end = (const char*)local_error.json + local_error.position;
+ }
+
+ global_error = local_error;
+ }
+
+ return NULL;
+}
+
+/* Default options for cJSON_Parse */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value)
+{
+ return cJSON_ParseWithOpts(value, 0, 0);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_ParseWithLength(const char *value, size_t buffer_length)
+{
+ return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0);
+}
+
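+/* Editor's illustrative sketch (not part of the upstream sources): a minimal parse/query
+ * round trip with this vendored copy (note the symbols live inside the Aws namespace here).
+ *
+ *     cJSON *root = cJSON_Parse("{\"name\":\"ydb\",\"port\":2135}");
+ *     if (root != NULL)
+ *     {
+ *         cJSON *port = cJSON_GetObjectItemCaseSensitive(root, "port");
+ *         if (cJSON_IsNumber(port))
+ *         {
+ *             int value = port->valueint; // 2135
+ *         }
+ *         cJSON_Delete(root);
+ *     }
+ */
+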
+#define cjson_min(a, b) (((a) < (b)) ? (a) : (b))
+
+static unsigned char *print(const cJSON * const item, cJSON_bool format, const internal_hooks * const hooks)
+{
+ static const size_t default_buffer_size = 256;
+ printbuffer buffer[1];
+ unsigned char *printed = NULL;
+
+ memset(buffer, 0, sizeof(buffer));
+
+ /* create buffer */
+ buffer->buffer = (unsigned char*) hooks->allocate(default_buffer_size);
+ buffer->length = default_buffer_size;
+ buffer->format = format;
+ buffer->hooks = *hooks;
+ if (buffer->buffer == NULL)
+ {
+ goto fail;
+ }
+
+ /* print the value */
+ if (!print_value(item, buffer))
+ {
+ goto fail;
+ }
+ update_offset(buffer);
+
+ /* check if reallocate is available */
+ if (hooks->reallocate != NULL)
+ {
+ printed = (unsigned char*) hooks->reallocate(buffer->buffer, buffer->offset + 1);
+ if (printed == NULL) {
+ goto fail;
+ }
+ buffer->buffer = NULL;
+ }
+ else /* otherwise copy the JSON over to a new buffer */
+ {
+ printed = (unsigned char*) hooks->allocate(buffer->offset + 1);
+ if (printed == NULL)
+ {
+ goto fail;
+ }
+ memcpy(printed, buffer->buffer, cjson_min(buffer->length, buffer->offset + 1));
+ printed[buffer->offset] = '\0'; /* just to be sure */
+
+ /* free the buffer */
+ hooks->deallocate(buffer->buffer);
+ }
+
+ return printed;
+
+fail:
+ if (buffer->buffer != NULL)
+ {
+ hooks->deallocate(buffer->buffer);
+ }
+
+ if (printed != NULL)
+ {
+ hooks->deallocate(printed);
+ }
+
+ return NULL;
+}
+
+/* Render a cJSON item/entity/structure to text. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item)
+{
+ return (char*)print(item, true, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item)
+{
+ return (char*)print(item, false, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if (prebuffer < 0)
+ {
+ return NULL;
+ }
+
+ p.buffer = (unsigned char*)global_hooks.allocate((size_t)prebuffer);
+ if (!p.buffer)
+ {
+ return NULL;
+ }
+
+ p.length = (size_t)prebuffer;
+ p.offset = 0;
+ p.noalloc = false;
+ p.format = fmt;
+ p.hooks = global_hooks;
+
+ if (!print_value(item, &p))
+ {
+ global_hooks.deallocate(p.buffer);
+ return NULL;
+ }
+
+ return (char*)p.buffer;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_PrintPreallocated(cJSON *item, char *buffer, const int length, const cJSON_bool format)
+{
+ printbuffer p = { 0, 0, 0, 0, 0, 0, { 0, 0, 0 } };
+
+ if ((length < 0) || (buffer == NULL))
+ {
+ return false;
+ }
+
+ p.buffer = (unsigned char*)buffer;
+ p.length = (size_t)length;
+ p.offset = 0;
+ p.noalloc = true;
+ p.format = format;
+ p.hooks = global_hooks;
+
+ return print_value(item, &p);
+}
+
+/* Parser core - when encountering text, process appropriately. */
+static cJSON_bool parse_value(cJSON * const item, parse_buffer * const input_buffer)
+{
+ if ((input_buffer == NULL) || (input_buffer->content == NULL))
+ {
+ return false; /* no input */
+ }
+
+ /* parse the different types of values */
+ /* null */
+ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "null", 4) == 0))
+ {
+ item->type = cJSON_NULL;
+ input_buffer->offset += 4;
+ return true;
+ }
+ /* false */
+ if (can_read(input_buffer, 5) && (strncmp((const char*)buffer_at_offset(input_buffer), "false", 5) == 0))
+ {
+ item->type = cJSON_False;
+ input_buffer->offset += 5;
+ return true;
+ }
+ /* true */
+ if (can_read(input_buffer, 4) && (strncmp((const char*)buffer_at_offset(input_buffer), "true", 4) == 0))
+ {
+ item->type = cJSON_True;
+ item->valueint = 1;
+ input_buffer->offset += 4;
+ return true;
+ }
+ /* string */
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '\"'))
+ {
+ return parse_string(item, input_buffer);
+ }
+ /* number */
+ if (can_access_at_index(input_buffer, 0) && ((buffer_at_offset(input_buffer)[0] == '-') || ((buffer_at_offset(input_buffer)[0] >= '0') && (buffer_at_offset(input_buffer)[0] <= '9'))))
+ {
+ return parse_number(item, input_buffer);
+ }
+ /* array */
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '['))
+ {
+ return parse_array(item, input_buffer);
+ }
+ /* object */
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '{'))
+ {
+ return parse_object(item, input_buffer);
+ }
+
+ return false;
+}
+
+/* Render a value to text. */
+static cJSON_bool print_value(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output = NULL;
+
+ if ((item == NULL) || (output_buffer == NULL))
+ {
+ return false;
+ }
+
+ switch ((item->type) & 0xFF)
+ {
+ case cJSON_NULL:
+ output = ensure(output_buffer, 5);
+ if (output == NULL)
+ {
+ return false;
+ }
+ strncpy((char*)output, "null", strlen("null") + 1);
+ return true;
+
+ case cJSON_False:
+ output = ensure(output_buffer, 6);
+ if (output == NULL)
+ {
+ return false;
+ }
+ strncpy((char*)output, "false", strlen("false") + 1);
+ return true;
+
+ case cJSON_True:
+ output = ensure(output_buffer, 5);
+ if (output == NULL)
+ {
+ return false;
+ }
+ strncpy((char*)output, "true", strlen("true") + 1);
+ return true;
+
+ case cJSON_Number:
+ return print_number(item, output_buffer);
+
+ case cJSON_Raw:
+ {
+ size_t raw_length = 0;
+ if (item->valuestring == NULL)
+ {
+ return false;
+ }
+
+ raw_length = strlen(item->valuestring) + sizeof("");
+ output = ensure(output_buffer, raw_length);
+ if (output == NULL)
+ {
+ return false;
+ }
+ memcpy(output, item->valuestring, raw_length);
+ return true;
+ }
+
+ case cJSON_String:
+ return print_string(item, output_buffer);
+
+ case cJSON_Array:
+ return print_array(item, output_buffer);
+
+ case cJSON_Object:
+ return print_object(item, output_buffer);
+
+ default:
+ return false;
+ }
+}
+
+/* Build an array from input text. */
+static cJSON_bool parse_array(cJSON * const item, parse_buffer * const input_buffer)
+{
+ cJSON *head = NULL; /* head of the linked list */
+ cJSON *current_item = NULL;
+
+ if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+ {
+        return false; /* too deeply nested */
+ }
+ input_buffer->depth++;
+
+ if (buffer_at_offset(input_buffer)[0] != '[')
+ {
+ /* not an array */
+ goto fail;
+ }
+
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ']'))
+ {
+ /* empty array */
+ goto success;
+ }
+
+ /* check if we skipped to the end of the buffer */
+ if (cannot_access_at_index(input_buffer, 0))
+ {
+ input_buffer->offset--;
+ goto fail;
+ }
+
+ /* step back to character in front of the first element */
+ input_buffer->offset--;
+ /* loop through the comma separated array elements */
+ do
+ {
+ /* allocate next item */
+ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+ if (new_item == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+
+ /* attach next item to list */
+ if (head == NULL)
+ {
+ /* start the linked list */
+ current_item = head = new_item;
+ }
+ else
+ {
+ /* add to the end and advance */
+ current_item->next = new_item;
+ new_item->prev = current_item;
+ current_item = new_item;
+ }
+
+ /* parse next value */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_value(current_item, input_buffer))
+ {
+ goto fail; /* failed to parse value */
+ }
+ buffer_skip_whitespace(input_buffer);
+ }
+ while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+ if (cannot_access_at_index(input_buffer, 0) || buffer_at_offset(input_buffer)[0] != ']')
+ {
+ goto fail; /* expected end of array */
+ }
+
+success:
+ input_buffer->depth--;
+
+ if (head != NULL) {
+ head->prev = current_item;
+ }
+
+ item->type = cJSON_Array;
+ item->child = head;
+
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (head != NULL)
+ {
+ cJSON_Delete(head);
+ }
+
+ return false;
+}
+
+/* Render an array to text */
+static cJSON_bool print_array(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output_pointer = NULL;
+ size_t length = 0;
+ cJSON *current_element = item->child;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* Compose the output array. */
+ /* opening square bracket */
+ output_pointer = ensure(output_buffer, 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ *output_pointer = '[';
+ output_buffer->offset++;
+ output_buffer->depth++;
+
+ while (current_element != NULL)
+ {
+ if (!print_value(current_element, output_buffer))
+ {
+ return false;
+ }
+ update_offset(output_buffer);
+ if (current_element->next)
+ {
+ length = (size_t) (output_buffer->format ? 2 : 1);
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ *output_pointer++ = ',';
+ if(output_buffer->format)
+ {
+ *output_pointer++ = ' ';
+ }
+ *output_pointer = '\0';
+ output_buffer->offset += length;
+ }
+ current_element = current_element->next;
+ }
+
+ output_pointer = ensure(output_buffer, 2);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ *output_pointer++ = ']';
+ *output_pointer = '\0';
+ output_buffer->depth--;
+
+ return true;
+}
+
+/* Build an object from the text. */
+static cJSON_bool parse_object(cJSON * const item, parse_buffer * const input_buffer)
+{
+ cJSON *head = NULL; /* linked list head */
+ cJSON *current_item = NULL;
+
+ if (input_buffer->depth >= CJSON_NESTING_LIMIT)
+ {
+        return false; /* too deeply nested */
+ }
+ input_buffer->depth++;
+
+ if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '{'))
+ {
+ goto fail; /* not an object */
+ }
+
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == '}'))
+ {
+ goto success; /* empty object */
+ }
+
+ /* check if we skipped to the end of the buffer */
+ if (cannot_access_at_index(input_buffer, 0))
+ {
+ input_buffer->offset--;
+ goto fail;
+ }
+
+ /* step back to character in front of the first element */
+ input_buffer->offset--;
+    /* loop through the comma separated object members */
+ do
+ {
+ /* allocate next item */
+ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+ if (new_item == NULL)
+ {
+ goto fail; /* allocation failure */
+ }
+
+ /* attach next item to list */
+ if (head == NULL)
+ {
+ /* start the linked list */
+ current_item = head = new_item;
+ }
+ else
+ {
+ /* add to the end and advance */
+ current_item->next = new_item;
+ new_item->prev = current_item;
+ current_item = new_item;
+ }
+
+ /* parse the name of the child */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_string(current_item, input_buffer))
+ {
+ goto fail; /* failed to parse name */
+ }
+ buffer_skip_whitespace(input_buffer);
+
+ /* swap valuestring and string, because we parsed the name */
+ current_item->string = current_item->valuestring;
+ current_item->valuestring = NULL;
+
+ if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != ':'))
+ {
+ goto fail; /* invalid object */
+ }
+
+ /* parse the value */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_value(current_item, input_buffer))
+ {
+ goto fail; /* failed to parse value */
+ }
+ buffer_skip_whitespace(input_buffer);
+ }
+ while (can_access_at_index(input_buffer, 0) && (buffer_at_offset(input_buffer)[0] == ','));
+
+ if (cannot_access_at_index(input_buffer, 0) || (buffer_at_offset(input_buffer)[0] != '}'))
+ {
+ goto fail; /* expected end of object */
+ }
+
+success:
+ input_buffer->depth--;
+
+ if (head != NULL) {
+ head->prev = current_item;
+ }
+
+ item->type = cJSON_Object;
+ item->child = head;
+
+ input_buffer->offset++;
+ return true;
+
+fail:
+ if (head != NULL)
+ {
+ cJSON_Delete(head);
+ }
+
+ return false;
+}
+
+/* Render an object to text. */
+static cJSON_bool print_object(const cJSON * const item, printbuffer * const output_buffer)
+{
+ unsigned char *output_pointer = NULL;
+ size_t length = 0;
+ cJSON *current_item = item->child;
+
+ if (output_buffer == NULL)
+ {
+ return false;
+ }
+
+ /* Compose the output: */
+ length = (size_t) (output_buffer->format ? 2 : 1); /* fmt: {\n */
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+
+ *output_pointer++ = '{';
+ output_buffer->depth++;
+ if (output_buffer->format)
+ {
+ *output_pointer++ = '\n';
+ }
+ output_buffer->offset += length;
+
+ while (current_item)
+ {
+ if (output_buffer->format)
+ {
+ size_t i;
+ output_pointer = ensure(output_buffer, output_buffer->depth);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ for (i = 0; i < output_buffer->depth; i++)
+ {
+ *output_pointer++ = '\t';
+ }
+ output_buffer->offset += output_buffer->depth;
+ }
+
+ /* print key */
+ if (!print_string_ptr((unsigned char*)current_item->string, output_buffer))
+ {
+ return false;
+ }
+ update_offset(output_buffer);
+
+ length = (size_t) (output_buffer->format ? 2 : 1);
+ output_pointer = ensure(output_buffer, length);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ *output_pointer++ = ':';
+ if (output_buffer->format)
+ {
+ *output_pointer++ = '\t';
+ }
+ output_buffer->offset += length;
+
+ /* print value */
+ if (!print_value(current_item, output_buffer))
+ {
+ return false;
+ }
+ update_offset(output_buffer);
+
+ /* print comma if not last */
+ length = ((size_t)(output_buffer->format ? 1 : 0) + (size_t)(current_item->next ? 1 : 0));
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ if (current_item->next)
+ {
+ *output_pointer++ = ',';
+ }
+
+ if (output_buffer->format)
+ {
+ *output_pointer++ = '\n';
+ }
+ *output_pointer = '\0';
+ output_buffer->offset += length;
+
+ current_item = current_item->next;
+ }
+
+ output_pointer = ensure(output_buffer, output_buffer->format ? (output_buffer->depth + 1) : 2);
+ if (output_pointer == NULL)
+ {
+ return false;
+ }
+ if (output_buffer->format)
+ {
+ size_t i;
+ for (i = 0; i < (output_buffer->depth - 1); i++)
+ {
+ *output_pointer++ = '\t';
+ }
+ }
+ *output_pointer++ = '}';
+ *output_pointer = '\0';
+ output_buffer->depth--;
+
+ return true;
+}
+
+/* Get Array size/item / object item. */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array)
+{
+ cJSON *child = NULL;
+ size_t size = 0;
+
+ if (array == NULL)
+ {
+ return 0;
+ }
+
+ child = array->child;
+
+ while(child != NULL)
+ {
+ size++;
+ child = child->next;
+ }
+
+ /* FIXME: Can overflow here. Cannot be fixed without breaking the API */
+
+ return (int)size;
+}
+
+static cJSON* get_array_item(const cJSON *array, size_t index)
+{
+ cJSON *current_child = NULL;
+
+ if (array == NULL)
+ {
+ return NULL;
+ }
+
+ current_child = array->child;
+ while ((current_child != NULL) && (index > 0))
+ {
+ index--;
+ current_child = current_child->next;
+ }
+
+ return current_child;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index)
+{
+ if (index < 0)
+ {
+ return NULL;
+ }
+
+ return get_array_item(array, (size_t)index);
+}
+
+static cJSON *get_object_item(const cJSON * const object, const char * const name, const cJSON_bool case_sensitive)
+{
+ cJSON *current_element = NULL;
+
+ if ((object == NULL) || (name == NULL))
+ {
+ return NULL;
+ }
+
+ current_element = object->child;
+ if (case_sensitive)
+ {
+ while ((current_element != NULL) && (current_element->string != NULL) && (strcmp(name, current_element->string) != 0))
+ {
+ current_element = current_element->next;
+ }
+ }
+ else
+ {
+ while ((current_element != NULL) && (case_insensitive_strcmp((const unsigned char*)name, (const unsigned char*)(current_element->string)) != 0))
+ {
+ current_element = current_element->next;
+ }
+ }
+
+ if ((current_element == NULL) || (current_element->string == NULL)) {
+ return NULL;
+ }
+
+ return current_element;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItem(const cJSON * const object, const char * const string)
+{
+ return get_object_item(object, string, false);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetObjectItemCaseSensitive(const cJSON * const object, const char * const string)
+{
+ return get_object_item(object, string, true);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_HasObjectItem(const cJSON *object, const char *string)
+{
+ return cJSON_GetObjectItem(object, string) ? 1 : 0;
+}
+
+/* Utility for array list handling. */
+static void suffix_object(cJSON *prev, cJSON *item)
+{
+ prev->next = item;
+ item->prev = prev;
+}
+
+/* Utility for handling references. */
+static cJSON *create_reference(const cJSON *item, const internal_hooks * const hooks)
+{
+ cJSON *reference = NULL;
+ if (item == NULL)
+ {
+ return NULL;
+ }
+
+ reference = cJSON_New_Item(hooks);
+ if (reference == NULL)
+ {
+ return NULL;
+ }
+
+ memcpy(reference, item, sizeof(cJSON));
+ reference->string = NULL;
+ reference->type |= cJSON_IsReference;
+ reference->next = reference->prev = NULL;
+ return reference;
+}
+
+static cJSON_bool add_item_to_array(cJSON *array, cJSON *item)
+{
+ cJSON *child = NULL;
+
+ if ((item == NULL) || (array == NULL) || (array == item))
+ {
+ return false;
+ }
+
+ child = array->child;
+ /*
+ * To find the last item in array quickly, we use prev in array
+ */
+ if (child == NULL)
+ {
+ /* list is empty, start new one */
+ array->child = item;
+ item->prev = item;
+ item->next = NULL;
+ }
+ else
+ {
+ /* append to the end */
+ if (child->prev)
+ {
+ suffix_object(child->prev, item);
+ array->child->prev = item;
+ }
+ }
+
+ return true;
+}
+
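+/* Editor's note (illustrative, not from upstream): the list invariant maintained above is that
+ * the first child's prev pointer refers to the LAST element (a quasi-circular prev link) while
+ * the last element's next stays NULL. For an array [a, b, c]: a->prev == c and c->next == NULL,
+ * so appends run in O(1) without walking the list. */
+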
+/* Add item to array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item)
+{
+ return add_item_to_array(array, item);
+}
+
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+ #pragma GCC diagnostic push
+#endif
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+/* helper function to cast away const */
+static void* cast_away_const(const void* string)
+{
+ return (void*)string;
+}
+#if defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+ #pragma GCC diagnostic pop
+#endif
+
+
+static cJSON_bool add_item_to_object(cJSON * const object, const char * const string, cJSON * const item, const internal_hooks * const hooks, const cJSON_bool constant_key)
+{
+ char *new_key = NULL;
+ int new_type = cJSON_Invalid;
+
+ if ((object == NULL) || (string == NULL) || (item == NULL) || (object == item))
+ {
+ return false;
+ }
+
+ if (constant_key)
+ {
+ new_key = (char*)cast_away_const(string);
+ new_type = item->type | cJSON_StringIsConst;
+ }
+ else
+ {
+ new_key = (char*)cJSON_strdup((const unsigned char*)string, hooks);
+ if (new_key == NULL)
+ {
+ return false;
+ }
+
+ new_type = item->type & ~cJSON_StringIsConst;
+ }
+
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL))
+ {
+ hooks->deallocate(item->string);
+ }
+
+ item->string = new_key;
+ item->type = new_type;
+
+ return add_item_to_array(object, item);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item)
+{
+ return add_item_to_object(object, string, item, &global_hooks, false);
+}
+
+/* Add an item to an object with constant string as key */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item)
+{
+ return add_item_to_object(object, string, item, &global_hooks, true);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item)
+{
+ if (array == NULL)
+ {
+ return false;
+ }
+
+ return add_item_to_array(array, create_reference(item, &global_hooks));
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item)
+{
+ if ((object == NULL) || (string == NULL))
+ {
+ return false;
+ }
+
+ return add_item_to_object(object, string, create_reference(item, &global_hooks), &global_hooks, false);
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddNullToObject(cJSON * const object, const char * const name)
+{
+ cJSON *null = cJSON_CreateNull();
+ if (add_item_to_object(object, name, null, &global_hooks, false))
+ {
+ return null;
+ }
+
+ cJSON_Delete(null);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddTrueToObject(cJSON * const object, const char * const name)
+{
+ cJSON *true_item = cJSON_CreateTrue();
+ if (add_item_to_object(object, name, true_item, &global_hooks, false))
+ {
+ return true_item;
+ }
+
+ cJSON_Delete(true_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddFalseToObject(cJSON * const object, const char * const name)
+{
+ cJSON *false_item = cJSON_CreateFalse();
+ if (add_item_to_object(object, name, false_item, &global_hooks, false))
+ {
+ return false_item;
+ }
+
+ cJSON_Delete(false_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddBoolToObject(cJSON * const object, const char * const name, const cJSON_bool boolean)
+{
+ cJSON *bool_item = cJSON_CreateBool(boolean);
+ if (add_item_to_object(object, name, bool_item, &global_hooks, false))
+ {
+ return bool_item;
+ }
+
+ cJSON_Delete(bool_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddNumberToObject(cJSON * const object, const char * const name, const double number)
+{
+ cJSON *number_item = cJSON_CreateNumber(number);
+ if (add_item_to_object(object, name, number_item, &global_hooks, false))
+ {
+ return number_item;
+ }
+
+ cJSON_Delete(number_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddStringToObject(cJSON * const object, const char * const name, const char * const string)
+{
+ cJSON *string_item = cJSON_CreateString(string);
+ if (add_item_to_object(object, name, string_item, &global_hooks, false))
+ {
+ return string_item;
+ }
+
+ cJSON_Delete(string_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddRawToObject(cJSON * const object, const char * const name, const char * const raw)
+{
+ cJSON *raw_item = cJSON_CreateRaw(raw);
+ if (add_item_to_object(object, name, raw_item, &global_hooks, false))
+ {
+ return raw_item;
+ }
+
+ cJSON_Delete(raw_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddObjectToObject(cJSON * const object, const char * const name)
+{
+ cJSON *object_item = cJSON_CreateObject();
+ if (add_item_to_object(object, name, object_item, &global_hooks, false))
+ {
+ return object_item;
+ }
+
+ cJSON_Delete(object_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON*) cJSON_AddArrayToObject(cJSON * const object, const char * const name)
+{
+ cJSON *array = cJSON_CreateArray();
+ if (add_item_to_object(object, name, array, &global_hooks, false))
+ {
+ return array;
+ }
+
+ cJSON_Delete(array);
+ return NULL;
+}
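+
+/* Editor's sketch (not part of the upstream cJSON sources): a minimal example of how the
+ * cJSON_Add*ToObject convenience helpers above compose. The object "config" and its keys are
+ * purely illustrative.
+ *
+ *   cJSON *config = cJSON_CreateObject();
+ *   if (config != NULL)
+ *   {
+ *       cJSON_AddStringToObject(config, "region", "us-east-1");
+ *       cJSON_AddNumberToObject(config, "retries", 3);
+ *       cJSON_AddBoolToObject(config, "verbose", 0);
+ *       cJSON *tags = cJSON_AddArrayToObject(config, "tags");
+ *       if (tags != NULL)
+ *       {
+ *           cJSON_AddItemToArray(tags, cJSON_CreateString("prod"));
+ *       }
+ *       cJSON_Delete(config);
+ *   }
+ */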
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemViaPointer(cJSON *parent, cJSON * const item)
+{
+ if ((parent == NULL) || (item == NULL))
+ {
+ return NULL;
+ }
+
+ if (item != parent->child)
+ {
+ /* not the first element */
+ item->prev->next = item->next;
+ }
+ if (item->next != NULL)
+ {
+ /* not the last element */
+ item->next->prev = item->prev;
+ }
+
+ if (item == parent->child)
+ {
+ /* first element */
+ parent->child = item->next;
+ }
+ else if (item->next == NULL)
+ {
+ /* last element */
+ parent->child->prev = item->prev;
+ }
+
+ /* make sure the detached item doesn't point anywhere anymore */
+ item->prev = NULL;
+ item->next = NULL;
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which)
+{
+ if (which < 0)
+ {
+ return NULL;
+ }
+
+ return cJSON_DetachItemViaPointer(array, get_array_item(array, (size_t)which));
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which)
+{
+ cJSON_Delete(cJSON_DetachItemFromArray(array, which));
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObject(cJSON *object, const char *string)
+{
+ cJSON *to_detach = cJSON_GetObjectItem(object, string);
+
+ return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+ cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string);
+
+ return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObject(cJSON *object, const char *string)
+{
+ cJSON_Delete(cJSON_DetachItemFromObject(object, string));
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string)
+{
+ cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string));
+}
+
+/* Replace array/object items with new ones. */
+CJSON_PUBLIC(cJSON_bool) cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+ cJSON *after_inserted = NULL;
+
+ if (which < 0)
+ {
+ return false;
+ }
+
+ after_inserted = get_array_item(array, (size_t)which);
+ if (after_inserted == NULL)
+ {
+ return add_item_to_array(array, newitem);
+ }
+
+ newitem->next = after_inserted;
+ newitem->prev = after_inserted->prev;
+ after_inserted->prev = newitem;
+ if (after_inserted == array->child)
+ {
+ array->child = newitem;
+ }
+ else
+ {
+ newitem->prev->next = newitem;
+ }
+ return true;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemViaPointer(cJSON * const parent, cJSON * const item, cJSON * replacement)
+{
+ if ((parent == NULL) || (replacement == NULL) || (item == NULL))
+ {
+ return false;
+ }
+
+ if (replacement == item)
+ {
+ return true;
+ }
+
+ replacement->next = item->next;
+ replacement->prev = item->prev;
+
+ if (replacement->next != NULL)
+ {
+ replacement->next->prev = replacement;
+ }
+ if (parent->child == item)
+ {
+ if (parent->child->prev == parent->child)
+ {
+ replacement->prev = replacement;
+ }
+ parent->child = replacement;
+ }
+ else
+ { /*
+ * To find the last item in an array quickly, it is cached in the first child's prev pointer.
+ * This item is not the parent's first child, so relink its neighbours and, when the replacement
+ * becomes the last item, refresh that cached pointer.
+ */
+ if (replacement->prev != NULL)
+ {
+ replacement->prev->next = replacement;
+ }
+ if (replacement->next == NULL)
+ {
+ parent->child->prev = replacement;
+ }
+ }
+
+ item->next = NULL;
+ item->prev = NULL;
+ cJSON_Delete(item);
+
+ return true;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem)
+{
+ if (which < 0)
+ {
+ return false;
+ }
+
+ return cJSON_ReplaceItemViaPointer(array, get_array_item(array, (size_t)which), newitem);
+}
+
+static cJSON_bool replace_item_in_object(cJSON *object, const char *string, cJSON *replacement, cJSON_bool case_sensitive)
+{
+ if ((replacement == NULL) || (string == NULL))
+ {
+ return false;
+ }
+
+ /* replace the name in the replacement */
+ if (!(replacement->type & cJSON_StringIsConst) && (replacement->string != NULL))
+ {
+ cJSON_free(replacement->string);
+ }
+ replacement->string = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+ replacement->type &= ~cJSON_StringIsConst;
+
+ return cJSON_ReplaceItemViaPointer(object, get_object_item(object, string, case_sensitive), replacement);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem)
+{
+ return replace_item_in_object(object, string, newitem, false);
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object, const char *string, cJSON *newitem)
+{
+ return replace_item_in_object(object, string, newitem, true);
+}
+
+/* Create basic types: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_NULL;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_True;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_False;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = boolean ? cJSON_True : cJSON_False;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_Number;
+ item->valuedouble = num;
+
+ /* use saturation in case of overflow */
+ if (num >= INT_MAX)
+ {
+ item->valueint = INT_MAX;
+ }
+ else if (num <= (double)INT_MIN)
+ {
+ item->valueint = INT_MIN;
+ }
+ else
+ {
+ item->valueint = (int)num;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_String;
+ item->valuestring = (char*)cJSON_strdup((const unsigned char*)string, &global_hooks);
+ if(!item->valuestring)
+ {
+ cJSON_Delete(item);
+ return NULL;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL)
+ {
+ item->type = cJSON_String | cJSON_IsReference;
+ item->valuestring = (char*)cast_away_const(string);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_Object | cJSON_IsReference;
+ item->child = (cJSON*)cast_away_const(child);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_Array | cJSON_IsReference;
+ item->child = (cJSON*)cast_away_const(child);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type = cJSON_Raw;
+ item->valuestring = (char*)cJSON_strdup((const unsigned char*)raw, &global_hooks);
+ if(!item->valuestring)
+ {
+ cJSON_Delete(item);
+ return NULL;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if(item)
+ {
+ item->type=cJSON_Array;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void)
+{
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item)
+ {
+ item->type = cJSON_Object;
+ }
+
+ return item;
+}
+
+/* Create Arrays: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+ for(i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateNumber(numbers[i]);
+ if (!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ if (a && a->child)
+ {
+ a->child->prev = n;
+ }
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for(i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateNumber((double)numbers[i]);
+ if(!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ if (a && a->child)
+ {
+ a->child->prev = n;
+ }
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for(i = 0;a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateNumber(numbers[i]);
+ if(!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ if (a && a->child)
+ {
+ a->child->prev = n;
+ }
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringArray(const char *const *strings, int count)
+{
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (strings == NULL))
+ {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for (i = 0; a && (i < (size_t)count); i++)
+ {
+ n = cJSON_CreateString(strings[i]);
+ if(!n)
+ {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if(!i)
+ {
+ a->child = n;
+ }
+ else
+ {
+ suffix_object(p,n);
+ }
+ p = n;
+ }
+ if (a && a->child)
+ {
+ a->child->prev = n;
+ }
+
+ return a;
+}
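+
+/* Editor's sketch (not part of the upstream cJSON sources): the Create*Array helpers above copy
+ * the caller's values into a fresh cJSON array; the original C buffer is not retained.
+ *
+ *   const int values[] = { 1, 2, 3 };
+ *   cJSON *array = cJSON_CreateIntArray(values, 3);
+ *   cJSON_Delete(array);
+ */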
+
+/* Duplication */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse)
+{
+ cJSON *newitem = NULL;
+ cJSON *child = NULL;
+ cJSON *next = NULL;
+ cJSON *newchild = NULL;
+
+ /* Bail on bad ptr */
+ if (!item)
+ {
+ goto fail;
+ }
+ /* Create new item */
+ newitem = cJSON_New_Item(&global_hooks);
+ if (!newitem)
+ {
+ goto fail;
+ }
+ /* Copy over all vars */
+ newitem->type = item->type & (~cJSON_IsReference);
+ newitem->valueint = item->valueint;
+ newitem->valuedouble = item->valuedouble;
+ if (item->valuestring)
+ {
+ newitem->valuestring = (char*)cJSON_strdup((unsigned char*)item->valuestring, &global_hooks);
+ if (!newitem->valuestring)
+ {
+ goto fail;
+ }
+ }
+ if (item->string)
+ {
+ newitem->string = (item->type&cJSON_StringIsConst) ? item->string : (char*)cJSON_strdup((unsigned char*)item->string, &global_hooks);
+ if (!newitem->string)
+ {
+ goto fail;
+ }
+ }
+ /* If non-recursive, then we're done! */
+ if (!recurse)
+ {
+ return newitem;
+ }
+ /* Walk the ->next chain for the child. */
+ child = item->child;
+ while (child != NULL)
+ {
+ newchild = cJSON_Duplicate(child, true); /* Duplicate (with recurse) each item in the ->next chain */
+ if (!newchild)
+ {
+ goto fail;
+ }
+ if (next != NULL)
+ {
+ /* If newitem->child already set, then crosswire ->prev and ->next and move on */
+ next->next = newchild;
+ newchild->prev = next;
+ next = newchild;
+ }
+ else
+ {
+ /* Set newitem->child and move to it */
+ newitem->child = newchild;
+ next = newchild;
+ }
+ child = child->next;
+ }
+ if (newitem && newitem->child)
+ {
+ newitem->child->prev = newchild;
+ }
+
+ return newitem;
+
+fail:
+ if (newitem != NULL)
+ {
+ cJSON_Delete(newitem);
+ }
+
+ return NULL;
+}
+
+static void skip_oneline_comment(char **input)
+{
+ *input += static_strlen("//");
+
+ for (; (*input)[0] != '\0'; ++(*input))
+ {
+ if ((*input)[0] == '\n') {
+ *input += static_strlen("\n");
+ return;
+ }
+ }
+}
+
+static void skip_multiline_comment(char **input)
+{
+ *input += static_strlen("/*");
+
+ for (; (*input)[0] != '\0'; ++(*input))
+ {
+ if (((*input)[0] == '*') && ((*input)[1] == '/'))
+ {
+ *input += static_strlen("*/");
+ return;
+ }
+ }
+}
+
+static void minify_string(char **input, char **output) {
+ (*output)[0] = (*input)[0];
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+
+
+ for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) {
+ (*output)[0] = (*input)[0];
+
+ if ((*input)[0] == '\"') {
+ (*output)[0] = '\"';
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+ return;
+ } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) {
+ (*output)[1] = (*input)[1];
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+ }
+ }
+}
+
+CJSON_PUBLIC(void) cJSON_Minify(char *json)
+{
+ char *into = json;
+
+ if (json == NULL)
+ {
+ return;
+ }
+
+ while (json[0] != '\0')
+ {
+ switch (json[0])
+ {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ json++;
+ break;
+
+ case '/':
+ if (json[1] == '/')
+ {
+ skip_oneline_comment(&json);
+ }
+ else if (json[1] == '*')
+ {
+ skip_multiline_comment(&json);
+ } else {
+ json++;
+ }
+ break;
+
+ case '\"':
+ minify_string(&json, (char**)&into);
+ break;
+
+ default:
+ into[0] = json[0];
+ json++;
+ into++;
+ }
+ }
+
+ /* and null-terminate. */
+ *into = '\0';
+}
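+
+/* Editor's sketch (not part of the upstream cJSON sources): cJSON_Minify edits its buffer in
+ * place, stripping whitespace and the comment forms handled above, so the argument must be
+ * writable.
+ *
+ *   char json[] = "{ // a comment\n  \"key\": 1 }";
+ *   cJSON_Minify(json);
+ *   (json now reads {"key":1})
+ */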
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Invalid;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_False;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xff) == cJSON_True;
+}
+
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & (cJSON_True | cJSON_False)) != 0;
+}
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_NULL;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Number;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_String;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Array;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Object;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON * const item)
+{
+ if (item == NULL)
+ {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Raw;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_Compare(const cJSON * const a, const cJSON * const b, const cJSON_bool case_sensitive)
+{
+ if ((a == NULL) || (b == NULL) || ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a))
+ {
+ return false;
+ }
+
+ /* check if type is valid */
+ switch (a->type & 0xFF)
+ {
+ case cJSON_False:
+ case cJSON_True:
+ case cJSON_NULL:
+ case cJSON_Number:
+ case cJSON_String:
+ case cJSON_Raw:
+ case cJSON_Array:
+ case cJSON_Object:
+ break;
+
+ default:
+ return false;
+ }
+
+ /* identical objects are equal */
+ if (a == b)
+ {
+ return true;
+ }
+
+ switch (a->type & 0xFF)
+ {
+ /* in these cases an equal type is enough */
+ case cJSON_False:
+ case cJSON_True:
+ case cJSON_NULL:
+ return true;
+
+ case cJSON_Number:
+ if (compare_double(a->valuedouble, b->valuedouble))
+ {
+ return true;
+ }
+ return false;
+
+ case cJSON_String:
+ case cJSON_Raw:
+ if ((a->valuestring == NULL) || (b->valuestring == NULL))
+ {
+ return false;
+ }
+ if (strcmp(a->valuestring, b->valuestring) == 0)
+ {
+ return true;
+ }
+
+ return false;
+
+ case cJSON_Array:
+ {
+ cJSON *a_element = a->child;
+ cJSON *b_element = b->child;
+
+ for (; (a_element != NULL) && (b_element != NULL);)
+ {
+ if (!cJSON_Compare(a_element, b_element, case_sensitive))
+ {
+ return false;
+ }
+
+ a_element = a_element->next;
+ b_element = b_element->next;
+ }
+
+ /* one of the arrays is longer than the other */
+ if (a_element != b_element) {
+ return false;
+ }
+
+ return true;
+ }
+
+ case cJSON_Object:
+ {
+ cJSON *a_element = NULL;
+ cJSON *b_element = NULL;
+ cJSON_ArrayForEach(a_element, a)
+ {
+ /* TODO This has O(n^2) runtime, which is horrible! */
+ b_element = get_object_item(b, a_element->string, case_sensitive);
+ if (b_element == NULL)
+ {
+ return false;
+ }
+
+ if (!cJSON_Compare(a_element, b_element, case_sensitive))
+ {
+ return false;
+ }
+ }
+
+ /* check the other direction too (b against a), so that an object whose members are a strict
+ * subset of the other's does not compare as equal.
+ * TODO: Do this the proper way, this is just a fix for now */
+ cJSON_ArrayForEach(b_element, b)
+ {
+ a_element = get_object_item(a, b_element->string, case_sensitive);
+ if (a_element == NULL)
+ {
+ return false;
+ }
+
+ if (!cJSON_Compare(b_element, a_element, case_sensitive))
+ {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
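+
+/* Editor's sketch (not part of the upstream cJSON sources): object comparison above is
+ * order-insensitive and is checked in both directions, so permuted members still compare equal.
+ *
+ *   cJSON *a = cJSON_Parse("{\"x\": 1, \"y\": [true, null]}");
+ *   cJSON *b = cJSON_Parse("{\"y\": [true, null], \"x\": 1}");
+ *   cJSON_bool equal = cJSON_Compare(a, b, 1);   (equal is true)
+ *   cJSON_Delete(a);
+ *   cJSON_Delete(b);
+ */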
+
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size)
+{
+ return global_hooks.allocate(size);
+}
+
+CJSON_PUBLIC(void) cJSON_free(void *object)
+{
+ global_hooks.deallocate(object);
+}
+
+}
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
new file mode 100644
index 0000000000..f8ed94cefc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnection.cpp
@@ -0,0 +1,400 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Api.h>
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/crt/http/HttpProxyStrategy.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+#include <aws/crt/io/Bootstrap.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+ /* This exists to handle aws_http_connection's shutdown callback, which might fire after
+ * HttpClientConnection has been destroyed. */
+ struct ConnectionCallbackData
+ {
+ explicit ConnectionCallbackData(Allocator *allocator) : allocator(allocator) {}
+ std::weak_ptr<HttpClientConnection> connection;
+ Allocator *allocator;
+ OnConnectionSetup onConnectionSetup;
+ OnConnectionShutdown onConnectionShutdown;
+ };
+
+ class UnmanagedConnection final : public HttpClientConnection
+ {
+ public:
+ UnmanagedConnection(aws_http_connection *connection, Aws::Crt::Allocator *allocator)
+ : HttpClientConnection(connection, allocator)
+ {
+ }
+
+ ~UnmanagedConnection() override
+ {
+ if (m_connection)
+ {
+ aws_http_connection_release(m_connection);
+ m_connection = nullptr;
+ }
+ }
+ };
+
+ void HttpClientConnection::s_onClientConnectionSetup(
+ struct aws_http_connection *connection,
+ int errorCode,
+ void *user_data) noexcept
+ {
+ /**
+ * Allocate an HttpClientConnection in a shared_ptr and track it via `ConnectionCallbackData`'s weak_ptr.
+ */
+ auto *callbackData = static_cast<ConnectionCallbackData *>(user_data);
+ if (!errorCode)
+ {
+ auto connectionObj = std::allocate_shared<UnmanagedConnection>(
+ Aws::Crt::StlAllocator<UnmanagedConnection>(), connection, callbackData->allocator);
+
+ if (connectionObj)
+ {
+ callbackData->connection = connectionObj;
+ callbackData->onConnectionSetup(std::move(connectionObj), errorCode);
+ return;
+ }
+
+ aws_http_connection_release(connection);
+ errorCode = aws_last_error();
+ }
+
+ callbackData->onConnectionSetup(nullptr, errorCode);
+ Delete(callbackData, callbackData->allocator);
+ }
+
+ void HttpClientConnection::s_onClientConnectionShutdown(
+ struct aws_http_connection *connection,
+ int errorCode,
+ void *user_data) noexcept
+ {
+ (void)connection;
+ auto *callbackData = static_cast<ConnectionCallbackData *>(user_data);
+
+ /* Don't invoke callback if the connection object has expired. */
+ if (auto connectionPtr = callbackData->connection.lock())
+ {
+ callbackData->onConnectionShutdown(*connectionPtr, errorCode);
+ }
+
+ Delete(callbackData, callbackData->allocator);
+ }
+
+ bool HttpClientConnection::CreateConnection(
+ const HttpClientConnectionOptions &connectionOptions,
+ Allocator *allocator) noexcept
+ {
+ AWS_FATAL_ASSERT(connectionOptions.OnConnectionSetupCallback);
+ AWS_FATAL_ASSERT(connectionOptions.OnConnectionShutdownCallback);
+
+ if (connectionOptions.TlsOptions && !(*connectionOptions.TlsOptions))
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Cannot create HttpClientConnection: connectionOptions contains invalid TlsOptions.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return false;
+ }
+
+ if (connectionOptions.ProxyOptions)
+ {
+ const auto &proxyOpts = connectionOptions.ProxyOptions.value();
+
+ if (proxyOpts.TlsOptions && !(*proxyOpts.TlsOptions))
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Cannot create HttpClientConnection: connectionOptions has ProxyOptions that contain "
+ "invalid TlsOptions.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return false;
+ }
+ }
+
+ auto *callbackData = New<ConnectionCallbackData>(allocator, allocator);
+
+ if (!callbackData)
+ {
+ return false;
+ }
+ callbackData->onConnectionShutdown = connectionOptions.OnConnectionShutdownCallback;
+ callbackData->onConnectionSetup = connectionOptions.OnConnectionSetupCallback;
+
+ aws_http_client_connection_options options;
+ AWS_ZERO_STRUCT(options);
+ options.self_size = sizeof(aws_http_client_connection_options);
+
+ if (connectionOptions.Bootstrap != nullptr)
+ {
+ options.bootstrap = connectionOptions.Bootstrap->GetUnderlyingHandle();
+ }
+ else
+ {
+ options.bootstrap = ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ }
+
+ if (connectionOptions.TlsOptions)
+ {
+ /* This is verified earlier in this function. */
+ AWS_FATAL_ASSERT(*connectionOptions.TlsOptions);
+
+ options.tls_options =
+ const_cast<aws_tls_connection_options *>(connectionOptions.TlsOptions->GetUnderlyingHandle());
+ }
+ options.allocator = allocator;
+ options.user_data = callbackData;
+ options.host_name = aws_byte_cursor_from_c_str(connectionOptions.HostName.c_str());
+ options.port = connectionOptions.Port;
+ options.initial_window_size = connectionOptions.InitialWindowSize;
+ options.socket_options = &connectionOptions.SocketOptions.GetImpl();
+ options.on_setup = HttpClientConnection::s_onClientConnectionSetup;
+ options.on_shutdown = HttpClientConnection::s_onClientConnectionShutdown;
+ options.manual_window_management = connectionOptions.ManualWindowManagement;
+
+ aws_http_proxy_options proxyOptions;
+ AWS_ZERO_STRUCT(proxyOptions);
+ if (connectionOptions.ProxyOptions)
+ {
+ const auto &proxyOpts = connectionOptions.ProxyOptions.value();
+
+ /* This is verified earlier in this function. */
+ AWS_FATAL_ASSERT(!proxyOpts.TlsOptions || *proxyOpts.TlsOptions);
+
+ proxyOpts.InitializeRawProxyOptions(proxyOptions);
+
+ options.proxy_options = &proxyOptions;
+ }
+
+ if (aws_http_client_connect(&options))
+ {
+ Delete(callbackData, allocator);
+ return false;
+ }
+
+ return true;
+ }
+
+ HttpClientConnection::HttpClientConnection(aws_http_connection *connection, Allocator *allocator) noexcept
+ : m_connection(connection), m_allocator(allocator), m_lastError(AWS_ERROR_SUCCESS)
+ {
+ }
+
+ std::shared_ptr<HttpClientStream> HttpClientConnection::NewClientStream(
+ const HttpRequestOptions &requestOptions) noexcept
+ {
+ AWS_ASSERT(requestOptions.onIncomingHeaders);
+ AWS_ASSERT(requestOptions.onStreamComplete);
+
+ aws_http_make_request_options options;
+ AWS_ZERO_STRUCT(options);
+ options.self_size = sizeof(aws_http_make_request_options);
+ options.request = requestOptions.request->GetUnderlyingMessage();
+ options.on_response_body = HttpStream::s_onIncomingBody;
+ options.on_response_headers = HttpStream::s_onIncomingHeaders;
+ options.on_response_header_block_done = HttpStream::s_onIncomingHeaderBlockDone;
+ options.on_complete = HttpStream::s_onStreamComplete;
+
+ /* Do the same ref counting trick we did with HttpClientConnection. We need to maintain a reference
+ * internally (regardless of what the user does), until the Stream shuts down. */
+ auto *toSeat = static_cast<HttpClientStream *>(aws_mem_acquire(m_allocator, sizeof(HttpClientStream)));
+
+ if (toSeat)
+ {
+ toSeat = new (toSeat) HttpClientStream(this->shared_from_this());
+
+ Allocator *captureAllocator = m_allocator;
+ std::shared_ptr<HttpClientStream> stream(
+ toSeat,
+ [captureAllocator](HttpStream *stream) { Delete(stream, captureAllocator); },
+ StlAllocator<HttpClientStream>(captureAllocator));
+
+ stream->m_onIncomingBody = requestOptions.onIncomingBody;
+ stream->m_onIncomingHeaders = requestOptions.onIncomingHeaders;
+ stream->m_onIncomingHeadersBlockDone = requestOptions.onIncomingHeadersBlockDone;
+ stream->m_onStreamComplete = requestOptions.onStreamComplete;
+ stream->m_callbackData.allocator = m_allocator;
+
+ // we purposefully do not set m_callbackData::stream because we don't want the reference count
+ // incremented until the request is kicked off via HttpClientStream::Activate(). Activate()
+ // increments the ref count.
+ options.user_data = &stream->m_callbackData;
+ stream->m_stream = aws_http_connection_make_request(m_connection, &options);
+
+ if (!stream->m_stream)
+ {
+ stream = nullptr;
+ m_lastError = aws_last_error();
+ return nullptr;
+ }
+
+ return stream;
+ }
+
+ m_lastError = aws_last_error();
+ return nullptr;
+ }
+
+ bool HttpClientConnection::IsOpen() const noexcept { return aws_http_connection_is_open(m_connection); }
+
+ void HttpClientConnection::Close() noexcept { aws_http_connection_close(m_connection); }
+
+ HttpVersion HttpClientConnection::GetVersion() noexcept
+ {
+ return (HttpVersion)aws_http_connection_get_version(m_connection);
+ }
+
+ int HttpStream::s_onIncomingHeaders(
+ struct aws_http_stream *,
+ enum aws_http_header_block headerBlock,
+ const struct aws_http_header *headerArray,
+ size_t numHeaders,
+ void *userData) noexcept
+ {
+ auto callbackData = static_cast<ClientStreamCallbackData *>(userData);
+ callbackData->stream->m_onIncomingHeaders(*callbackData->stream, headerBlock, headerArray, numHeaders);
+
+ return AWS_OP_SUCCESS;
+ }
+
+ int HttpStream::s_onIncomingHeaderBlockDone(
+ struct aws_http_stream *,
+ enum aws_http_header_block headerBlock,
+ void *userData) noexcept
+ {
+ auto callbackData = static_cast<ClientStreamCallbackData *>(userData);
+
+ if (callbackData->stream->m_onIncomingHeadersBlockDone)
+ {
+ callbackData->stream->m_onIncomingHeadersBlockDone(*callbackData->stream, headerBlock);
+ }
+
+ return AWS_OP_SUCCESS;
+ }
+
+ int HttpStream::s_onIncomingBody(
+ struct aws_http_stream *,
+ const struct aws_byte_cursor *data,
+ void *userData) noexcept
+ {
+ auto callbackData = static_cast<ClientStreamCallbackData *>(userData);
+
+ if (callbackData->stream->m_onIncomingBody)
+ {
+ callbackData->stream->m_onIncomingBody(*callbackData->stream, *data);
+ }
+
+ return AWS_OP_SUCCESS;
+ }
+
+ void HttpStream::s_onStreamComplete(struct aws_http_stream *, int errorCode, void *userData) noexcept
+ {
+ auto callbackData = static_cast<ClientStreamCallbackData *>(userData);
+ callbackData->stream->m_onStreamComplete(*callbackData->stream, errorCode);
+ callbackData->stream = nullptr;
+ }
+
+ HttpStream::HttpStream(const std::shared_ptr<HttpClientConnection> &connection) noexcept
+ : m_stream(nullptr), m_connection(connection)
+ {
+ }
+
+ HttpStream::~HttpStream()
+ {
+ if (m_stream)
+ {
+ aws_http_stream_release(m_stream);
+ }
+
+ if (m_connection)
+ {
+ m_connection = nullptr;
+ }
+ }
+
+ HttpClientConnection &HttpStream::GetConnection() const noexcept { return *m_connection; }
+
+ HttpClientStream::HttpClientStream(const std::shared_ptr<HttpClientConnection> &connection) noexcept
+ : HttpStream(connection)
+ {
+ }
+
+ HttpClientStream::~HttpClientStream() {}
+
+ int HttpClientStream::GetResponseStatusCode() const noexcept
+ {
+ int status = 0;
+ if (!aws_http_stream_get_incoming_response_status(m_stream, &status))
+ {
+ return status;
+ }
+
+ return -1;
+ }
+
+ bool HttpClientStream::Activate() noexcept
+ {
+ m_callbackData.stream = shared_from_this();
+ if (aws_http_stream_activate(m_stream))
+ {
+ m_callbackData.stream = nullptr;
+ return false;
+ }
+
+ return true;
+ }
+
+ void HttpStream::UpdateWindow(std::size_t incrementSize) noexcept
+ {
+ aws_http_stream_update_window(m_stream, incrementSize);
+ }
+
+ HttpClientConnectionProxyOptions::HttpClientConnectionProxyOptions()
+ : HostName(), Port(0), TlsOptions(), ProxyConnectionType(AwsHttpProxyConnectionType::Legacy),
+ ProxyStrategy(), AuthType(AwsHttpProxyAuthenticationType::None)
+ {
+ }
+
+ void HttpClientConnectionProxyOptions::InitializeRawProxyOptions(
+ struct aws_http_proxy_options &rawOptions) const
+ {
+ AWS_ZERO_STRUCT(rawOptions);
+ rawOptions.connection_type = (enum aws_http_proxy_connection_type)ProxyConnectionType;
+ rawOptions.host = aws_byte_cursor_from_c_str(HostName.c_str());
+ rawOptions.port = Port;
+
+ if (TlsOptions.has_value())
+ {
+ rawOptions.tls_options = TlsOptions->GetUnderlyingHandle();
+ }
+
+ if (ProxyStrategy)
+ {
+ rawOptions.proxy_strategy = ProxyStrategy->GetUnderlyingHandle();
+ }
+
+ if (AuthType == AwsHttpProxyAuthenticationType::Basic)
+ {
+ rawOptions.auth_type = AWS_HPAT_BASIC;
+ rawOptions.auth_username = ByteCursorFromCString(BasicAuthUsername.c_str());
+ rawOptions.auth_password = ByteCursorFromCString(BasicAuthPassword.c_str());
+ }
+ }
+
+ HttpClientConnectionOptions::HttpClientConnectionOptions()
+ : Bootstrap(nullptr), InitialWindowSize(SIZE_MAX), OnConnectionSetupCallback(),
+ OnConnectionShutdownCallback(), HostName(), Port(0), SocketOptions(), TlsOptions(), ProxyOptions(),
+ ManualWindowManagement(false)
+ {
+ }
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
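+
+// Editor's sketch (not part of the upstream file): a minimal asynchronous use of
+// HttpClientConnection::CreateConnection defined above. The allocator, host and callback bodies
+// are illustrative, and an ApiHandle is assumed to be alive so the static default bootstrap
+// exists.
+//
+//     Aws::Crt::Http::HttpClientConnectionOptions options;
+//     options.HostName = "example.com";
+//     options.Port = 80;
+//     options.OnConnectionSetupCallback =
+//         [](const std::shared_ptr<Aws::Crt::Http::HttpClientConnection> &connection, int errorCode) {
+//             // connection is non-null only when errorCode == 0
+//         };
+//     options.OnConnectionShutdownCallback = [](Aws::Crt::Http::HttpClientConnection &, int) {};
+//     Aws::Crt::Http::HttpClientConnection::CreateConnection(options, allocator);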
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
new file mode 100644
index 0000000000..7a13a2bdb1
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpConnectionManager.cpp
@@ -0,0 +1,236 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Api.h>
+#include <aws/crt/http/HttpConnectionManager.h>
+#include <aws/crt/http/HttpProxyStrategy.h>
+
+#include <algorithm>
+#include <aws/http/connection_manager.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+ struct ConnectionManagerCallbackArgs
+ {
+ ConnectionManagerCallbackArgs() = default;
+ OnClientConnectionAvailable m_onClientConnectionAvailable;
+ std::shared_ptr<HttpClientConnectionManager> m_connectionManager;
+ };
+
+ void HttpClientConnectionManager::s_shutdownCompleted(void *userData) noexcept
+ {
+ HttpClientConnectionManager *connectionManager =
+ reinterpret_cast<HttpClientConnectionManager *>(userData);
+ connectionManager->m_shutdownPromise.set_value();
+ }
+
+ HttpClientConnectionManagerOptions::HttpClientConnectionManagerOptions() noexcept
+ : ConnectionOptions(), MaxConnections(1), EnableBlockingShutdown(false)
+ {
+ }
+
+ std::shared_ptr<HttpClientConnectionManager> HttpClientConnectionManager::NewClientConnectionManager(
+ const HttpClientConnectionManagerOptions &connectionManagerOptions,
+ Allocator *allocator) noexcept
+ {
+ const Optional<Io::TlsConnectionOptions> &tlsOptions =
+ connectionManagerOptions.ConnectionOptions.TlsOptions;
+
+ if (tlsOptions && !(*tlsOptions))
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Cannot create HttpClientConnectionManager: ConnectionOptions contain invalid TLSOptions.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return nullptr;
+ }
+
+ const Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> &proxyOptions =
+ connectionManagerOptions.ConnectionOptions.ProxyOptions;
+
+ if (proxyOptions && proxyOptions->TlsOptions && !(*proxyOptions->TlsOptions))
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_HTTP_GENERAL,
+ "Cannot create HttpClientConnectionManager: ProxyOptions has ConnectionOptions that contain "
+ "invalid TLSOptions.");
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return nullptr;
+ }
+
+ auto *toSeat = static_cast<HttpClientConnectionManager *>(
+ aws_mem_acquire(allocator, sizeof(HttpClientConnectionManager)));
+ if (toSeat)
+ {
+ toSeat = new (toSeat) HttpClientConnectionManager(connectionManagerOptions, allocator);
+ return std::shared_ptr<HttpClientConnectionManager>(
+ toSeat, [allocator](HttpClientConnectionManager *manager) { Delete(manager, allocator); });
+ }
+
+ return nullptr;
+ }
+
+ HttpClientConnectionManager::HttpClientConnectionManager(
+ const HttpClientConnectionManagerOptions &options,
+ Allocator *allocator) noexcept
+ : m_allocator(allocator), m_connectionManager(nullptr), m_options(options), m_releaseInvoked(false)
+ {
+ const auto &connectionOptions = m_options.ConnectionOptions;
+ AWS_FATAL_ASSERT(connectionOptions.HostName.size() > 0);
+ AWS_FATAL_ASSERT(connectionOptions.Port > 0);
+
+ aws_http_connection_manager_options managerOptions;
+ AWS_ZERO_STRUCT(managerOptions);
+
+ if (connectionOptions.Bootstrap != nullptr)
+ {
+ managerOptions.bootstrap = connectionOptions.Bootstrap->GetUnderlyingHandle();
+ }
+ else
+ {
+ managerOptions.bootstrap =
+ ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ }
+
+ managerOptions.port = connectionOptions.Port;
+ managerOptions.max_connections = m_options.MaxConnections;
+ managerOptions.socket_options = &connectionOptions.SocketOptions.GetImpl();
+ managerOptions.initial_window_size = connectionOptions.InitialWindowSize;
+
+ if (options.EnableBlockingShutdown)
+ {
+ managerOptions.shutdown_complete_callback = s_shutdownCompleted;
+ managerOptions.shutdown_complete_user_data = this;
+ }
+ else
+ {
+ m_shutdownPromise.set_value();
+ }
+
+ aws_http_proxy_options proxyOptions;
+ AWS_ZERO_STRUCT(proxyOptions);
+ if (connectionOptions.ProxyOptions)
+ {
+ /* This is verified by HttpClientConnectionManager::NewClientConnectionManager */
+ AWS_FATAL_ASSERT(
+ !connectionOptions.ProxyOptions->TlsOptions || *connectionOptions.ProxyOptions->TlsOptions);
+
+ const auto &proxyOpts = connectionOptions.ProxyOptions.value();
+ proxyOpts.InitializeRawProxyOptions(proxyOptions);
+
+ managerOptions.proxy_options = &proxyOptions;
+ }
+
+ if (connectionOptions.TlsOptions)
+ {
+ /* This is verified by HttpClientConnectionManager::NewClientConnectionManager */
+ AWS_FATAL_ASSERT(*connectionOptions.TlsOptions);
+
+ managerOptions.tls_connection_options =
+ const_cast<aws_tls_connection_options *>(connectionOptions.TlsOptions->GetUnderlyingHandle());
+ }
+ managerOptions.host = aws_byte_cursor_from_c_str(connectionOptions.HostName.c_str());
+
+ m_connectionManager = aws_http_connection_manager_new(allocator, &managerOptions);
+ }
+
+ HttpClientConnectionManager::~HttpClientConnectionManager()
+ {
+ if (!m_releaseInvoked)
+ {
+ aws_http_connection_manager_release(m_connectionManager);
+ m_shutdownPromise.get_future().get();
+ }
+ m_connectionManager = nullptr;
+ }
+
+ bool HttpClientConnectionManager::AcquireConnection(
+ const OnClientConnectionAvailable &onClientConnectionAvailable) noexcept
+ {
+ auto connectionManagerCallbackArgs = Aws::Crt::New<ConnectionManagerCallbackArgs>(m_allocator);
+ if (!connectionManagerCallbackArgs)
+ {
+ return false;
+ }
+
+ connectionManagerCallbackArgs->m_connectionManager = shared_from_this();
+ connectionManagerCallbackArgs->m_onClientConnectionAvailable = onClientConnectionAvailable;
+
+ aws_http_connection_manager_acquire_connection(
+ m_connectionManager, s_onConnectionSetup, connectionManagerCallbackArgs);
+ return true;
+ }
+
+ std::future<void> HttpClientConnectionManager::InitiateShutdown() noexcept
+ {
+ m_releaseInvoked = true;
+ aws_http_connection_manager_release(m_connectionManager);
+ return m_shutdownPromise.get_future();
+ }
+
+ class ManagedConnection final : public HttpClientConnection
+ {
+ public:
+ ManagedConnection(
+ aws_http_connection *connection,
+ std::shared_ptr<HttpClientConnectionManager> connectionManager)
+ : HttpClientConnection(connection, connectionManager->m_allocator),
+ m_connectionManager(std::move(connectionManager))
+ {
+ }
+
+ ~ManagedConnection() override
+ {
+ if (m_connection)
+ {
+ aws_http_connection_manager_release_connection(
+ m_connectionManager->m_connectionManager, m_connection);
+ m_connection = nullptr;
+ }
+ }
+
+ private:
+ std::shared_ptr<HttpClientConnectionManager> m_connectionManager;
+ };
+
+ void HttpClientConnectionManager::s_onConnectionSetup(
+ aws_http_connection *connection,
+ int errorCode,
+ void *userData) noexcept
+ {
+ auto callbackArgs = static_cast<ConnectionManagerCallbackArgs *>(userData);
+ std::shared_ptr<HttpClientConnectionManager> manager = callbackArgs->m_connectionManager;
+ auto callback = std::move(callbackArgs->m_onClientConnectionAvailable);
+
+ Delete(callbackArgs, manager->m_allocator);
+
+ if (errorCode)
+ {
+ callback(nullptr, errorCode);
+ return;
+ }
+
+ auto allocator = manager->m_allocator;
+ auto connectionRawObj = Aws::Crt::New<ManagedConnection>(manager->m_allocator, connection, manager);
+
+ if (!connectionRawObj)
+ {
+ aws_http_connection_manager_release_connection(manager->m_connectionManager, connection);
+ callback(nullptr, AWS_ERROR_OOM);
+ return;
+ }
+ auto connectionObj = std::shared_ptr<ManagedConnection>(
+ connectionRawObj,
+ [allocator](ManagedConnection *managedConnection) { Delete(managedConnection, allocator); });
+
+ callback(connectionObj, AWS_OP_SUCCESS);
+ }
+
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
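+
+// Editor's sketch (not part of the upstream file): pooled connections through the manager defined
+// above. "connectionOptions" is assumed to be a populated HttpClientConnectionOptions and
+// "allocator" a valid allocator.
+//
+//     Aws::Crt::Http::HttpClientConnectionManagerOptions managerOptions;
+//     managerOptions.ConnectionOptions = connectionOptions;
+//     managerOptions.MaxConnections = 4;
+//     auto manager = Aws::Crt::Http::HttpClientConnectionManager::NewClientConnectionManager(
+//         managerOptions, allocator);
+//     if (manager)
+//     {
+//         manager->AcquireConnection(
+//             [](std::shared_ptr<Aws::Crt::Http::HttpClientConnection> connection, int errorCode) {
+//                 // the connection returns to the pool when the last reference to it drops
+//             });
+//     }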
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
new file mode 100644
index 0000000000..7b9ef2c3f9
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpProxyStrategy.cpp
@@ -0,0 +1,196 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/http/HttpProxyStrategy.h>
+
+#include <aws/common/string.h>
+#include <aws/crt/http/HttpConnection.h>
+#include <aws/http/proxy.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+ HttpProxyStrategy::HttpProxyStrategy(struct aws_http_proxy_strategy *strategy) : m_strategy(strategy) {}
+
+ HttpProxyStrategy::~HttpProxyStrategy() { aws_http_proxy_strategy_release(m_strategy); }
+
+ HttpProxyStrategyBasicAuthConfig::HttpProxyStrategyBasicAuthConfig()
+ : ConnectionType(AwsHttpProxyConnectionType::Legacy), Username(), Password()
+ {
+ }
+
+ std::shared_ptr<HttpProxyStrategy> HttpProxyStrategy::CreateBasicHttpProxyStrategy(
+ const HttpProxyStrategyBasicAuthConfig &config,
+ Allocator *allocator)
+ {
+ struct aws_http_proxy_strategy_basic_auth_options basicConfig;
+ AWS_ZERO_STRUCT(basicConfig);
+ basicConfig.proxy_connection_type = (enum aws_http_proxy_connection_type)config.ConnectionType;
+ basicConfig.user_name = aws_byte_cursor_from_c_str(config.Username.c_str());
+ basicConfig.password = aws_byte_cursor_from_c_str(config.Password.c_str());
+
+ struct aws_http_proxy_strategy *strategy =
+ aws_http_proxy_strategy_new_basic_auth(allocator, &basicConfig);
+ if (strategy == NULL)
+ {
+ return NULL;
+ }
+
+ return Aws::Crt::MakeShared<HttpProxyStrategy>(allocator, strategy);
+ }
+
+ class AdaptiveHttpProxyStrategy : public HttpProxyStrategy
+ {
+ public:
+ AdaptiveHttpProxyStrategy(
+ Allocator *allocator,
+ const KerberosGetTokenFunction &kerberosGetToken,
+ const KerberosGetTokenFunction &ntlmGetCredential,
+ const NtlmGetTokenFunction &ntlmGetToken)
+ : HttpProxyStrategy(nullptr), m_Allocator(allocator), m_KerberosGetToken(kerberosGetToken),
+ m_NtlmGetCredential(ntlmGetCredential), m_NtlmGetToken(ntlmGetToken)
+ {
+ }
+
+ void SetStrategy(struct aws_http_proxy_strategy *strategy)
+ {
+ aws_http_proxy_strategy_release(m_strategy);
+ m_strategy = strategy;
+ }
+
+ static struct aws_string *NtlmGetCredential(void *user_data, int *error_code)
+ {
+ AdaptiveHttpProxyStrategy *strategy = reinterpret_cast<AdaptiveHttpProxyStrategy *>(user_data);
+
+ String ntlmCredential;
+ if (strategy->m_NtlmGetCredential(ntlmCredential))
+ {
+ struct aws_string *token =
+ aws_string_new_from_c_str(strategy->m_Allocator, ntlmCredential.c_str());
+
+ if (token != NULL)
+ {
+ return token;
+ }
+
+ *error_code = aws_last_error();
+ }
+ else
+ {
+ *error_code = AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE;
+ }
+
+ return NULL;
+ }
+
+ static struct aws_string *KerberosGetToken(void *user_data, int *error_code)
+ {
+ AdaptiveHttpProxyStrategy *strategy = reinterpret_cast<AdaptiveHttpProxyStrategy *>(user_data);
+
+ String kerberosToken;
+ if (strategy->m_KerberosGetToken(kerberosToken))
+ {
+ struct aws_string *token =
+ aws_string_new_from_c_str(strategy->m_Allocator, kerberosToken.c_str());
+
+ if (token != NULL)
+ {
+ return token;
+ }
+
+ *error_code = aws_last_error();
+ }
+ else
+ {
+ *error_code = AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE;
+ }
+
+ return NULL;
+ }
+
+ static struct aws_string *NtlmGetToken(
+ void *user_data,
+ const struct aws_byte_cursor *challenge_cursor,
+ int *error_code)
+ {
+ AdaptiveHttpProxyStrategy *strategy = reinterpret_cast<AdaptiveHttpProxyStrategy *>(user_data);
+
+ String ntlmToken;
+ String challengeToken((const char *)challenge_cursor->ptr, challenge_cursor->len);
+ if (strategy->m_NtlmGetToken(challengeToken, ntlmToken))
+ {
+ struct aws_string *token = aws_string_new_from_c_str(strategy->m_Allocator, ntlmToken.c_str());
+
+ if (token != NULL)
+ {
+ return token;
+ }
+
+ *error_code = aws_last_error();
+ }
+ else
+ {
+ *error_code = AWS_ERROR_HTTP_PROXY_STRATEGY_TOKEN_RETRIEVAL_FAILURE;
+ }
+
+ return NULL;
+ }
+
+ private:
+ Allocator *m_Allocator;
+
+ KerberosGetTokenFunction m_KerberosGetToken;
+ KerberosGetTokenFunction m_NtlmGetCredential;
+ NtlmGetTokenFunction m_NtlmGetToken;
+ };
+
+ std::shared_ptr<HttpProxyStrategy> HttpProxyStrategy::CreateAdaptiveHttpProxyStrategy(
+ const HttpProxyStrategyAdaptiveConfig &config,
+ Allocator *allocator)
+ {
+ std::shared_ptr<AdaptiveHttpProxyStrategy> adaptiveStrategy =
+ Aws::Crt::MakeShared<AdaptiveHttpProxyStrategy>(
+ allocator, allocator, config.KerberosGetToken, config.NtlmGetCredential, config.NtlmGetToken);
+
+ struct aws_http_proxy_strategy_tunneling_kerberos_options kerberosConfig;
+ AWS_ZERO_STRUCT(kerberosConfig);
+ kerberosConfig.get_token = AdaptiveHttpProxyStrategy::KerberosGetToken;
+ kerberosConfig.get_token_user_data = adaptiveStrategy.get();
+
+ struct aws_http_proxy_strategy_tunneling_ntlm_options ntlmConfig;
+ AWS_ZERO_STRUCT(ntlmConfig);
+ ntlmConfig.get_challenge_token = AdaptiveHttpProxyStrategy::NtlmGetToken;
+ ntlmConfig.get_token = AdaptiveHttpProxyStrategy::NtlmGetCredential;
+ ntlmConfig.get_challenge_token_user_data = adaptiveStrategy.get();
+
+ struct aws_http_proxy_strategy_tunneling_adaptive_options adaptiveConfig;
+ AWS_ZERO_STRUCT(adaptiveConfig);
+
+ if (config.KerberosGetToken)
+ {
+ adaptiveConfig.kerberos_options = &kerberosConfig;
+ }
+
+ if (config.NtlmGetToken)
+ {
+ adaptiveConfig.ntlm_options = &ntlmConfig;
+ }
+
+ struct aws_http_proxy_strategy *strategy =
+ aws_http_proxy_strategy_new_tunneling_adaptive(allocator, &adaptiveConfig);
+ if (strategy == NULL)
+ {
+ return NULL;
+ }
+
+ adaptiveStrategy->SetStrategy(strategy);
+
+ return adaptiveStrategy;
+ }
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
new file mode 100644
index 0000000000..a80a582ac8
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/http/HttpRequestResponse.cpp
@@ -0,0 +1,151 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/http/HttpRequestResponse.h>
+
+#include <aws/crt/io/Stream.h>
+#include <aws/http/request_response.h>
+#include <aws/io/stream.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Http
+ {
+
+ HttpMessage::HttpMessage(Allocator *allocator, struct aws_http_message *message) noexcept
+ : m_allocator(allocator), m_message(message), m_bodyStream(nullptr)
+ {
+ if (message)
+ {
+ // Acquire a refcount to keep the message alive until this object dies.
+ aws_http_message_acquire(this->m_message);
+ }
+ }
+
+ HttpMessage::~HttpMessage() { m_message = aws_http_message_release(m_message); }
+
+ std::shared_ptr<Aws::Crt::Io::InputStream> HttpMessage::GetBody() const noexcept { return m_bodyStream; }
+
+ bool HttpMessage::SetBody(const std::shared_ptr<Aws::Crt::Io::IStream> &body) noexcept
+ {
+ aws_http_message_set_body_stream(m_message, nullptr);
+ m_bodyStream = nullptr;
+
+ if (body != nullptr)
+ {
+ m_bodyStream = MakeShared<Io::StdIOStreamInputStream>(m_allocator, body, m_allocator);
+ if (m_bodyStream == nullptr || !(*m_bodyStream))
+ {
+ return false;
+ }
+ aws_http_message_set_body_stream(m_message, m_bodyStream->GetUnderlyingStream());
+ }
+
+ return true;
+ }
+
+ bool HttpMessage::SetBody(const std::shared_ptr<Aws::Crt::Io::InputStream> &body) noexcept
+ {
+ m_bodyStream = body;
+ aws_http_message_set_body_stream(
+ m_message, m_bodyStream && *m_bodyStream ? m_bodyStream->GetUnderlyingStream() : nullptr);
+
+ return true;
+ }
+
+ size_t HttpMessage::GetHeaderCount() const noexcept { return aws_http_message_get_header_count(m_message); }
+
+ Optional<HttpHeader> HttpMessage::GetHeader(size_t index) const noexcept
+ {
+ HttpHeader header;
+ if (aws_http_message_get_header(m_message, &header, index) != AWS_OP_SUCCESS)
+ {
+ return Optional<HttpHeader>();
+ }
+
+ return Optional<HttpHeader>(header);
+ }
+
+ bool HttpMessage::AddHeader(const HttpHeader &header) noexcept
+ {
+ return aws_http_message_add_header(m_message, header) == AWS_OP_SUCCESS;
+ }
+
+ bool HttpMessage::EraseHeader(size_t index) noexcept
+ {
+ return aws_http_message_erase_header(m_message, index) == AWS_OP_SUCCESS;
+ }
+
+ HttpRequest::HttpRequest(Allocator *allocator)
+ : HttpMessage(allocator, aws_http_message_new_request(allocator))
+ {
+ // Release the refcount acquired at creation, since HttpMessage takes ownership.
+ aws_http_message_release(this->m_message);
+ }
+
+ HttpRequest::HttpRequest(Allocator *allocator, struct aws_http_message *message)
+ : HttpMessage(allocator, message)
+ {
+ }
+
+ Optional<ByteCursor> HttpRequest::GetMethod() const noexcept
+ {
+ ByteCursor method;
+ if (aws_http_message_get_request_method(m_message, &method) != AWS_OP_SUCCESS)
+ {
+ return Optional<ByteCursor>();
+ }
+
+ return Optional<ByteCursor>(method);
+ }
+
+ bool HttpRequest::SetMethod(ByteCursor method) noexcept
+ {
+ return aws_http_message_set_request_method(m_message, method) == AWS_OP_SUCCESS;
+ }
+
+ Optional<ByteCursor> HttpRequest::GetPath() const noexcept
+ {
+ ByteCursor path;
+ if (aws_http_message_get_request_path(m_message, &path) != AWS_OP_SUCCESS)
+ {
+ return Optional<ByteCursor>();
+ }
+
+ return Optional<ByteCursor>(path);
+ }
+
+ bool HttpRequest::SetPath(ByteCursor path) noexcept
+ {
+ return aws_http_message_set_request_path(m_message, path) == AWS_OP_SUCCESS;
+ }
+
+ HttpResponse::HttpResponse(Allocator *allocator)
+ : HttpMessage(allocator, aws_http_message_new_response(allocator))
+ {
+ // Release the refcount acquired at creation, since HttpMessage takes ownership.
+ aws_http_message_release(this->m_message);
+ }
+
+ Optional<int> HttpResponse::GetResponseCode() const noexcept
+ {
+ int response = 0;
+ if (aws_http_message_get_response_status(m_message, &response) != AWS_OP_SUCCESS)
+ {
+ return Optional<int>();
+ }
+
+ return response;
+ }
+
+ bool HttpResponse::SetResponseCode(int response) noexcept
+ {
+ return aws_http_message_set_response_status(m_message, response) == AWS_OP_SUCCESS;
+ }
+ } // namespace Http
+ } // namespace Crt
+} // namespace Aws
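+
+// Editor's sketch (not part of the upstream file): building a request with the wrappers above.
+// "allocator" is assumed to be a valid allocator; the method, path and header values are
+// illustrative.
+//
+//     Aws::Crt::Http::HttpRequest request(allocator);
+//     request.SetMethod(Aws::Crt::ByteCursorFromCString("GET"));
+//     request.SetPath(Aws::Crt::ByteCursorFromCString("/index.html"));
+//     Aws::Crt::Http::HttpHeader header;
+//     header.name = Aws::Crt::ByteCursorFromCString("host");
+//     header.value = Aws::Crt::ByteCursorFromCString("example.com");
+//     request.AddHeader(header);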
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
new file mode 100644
index 0000000000..84005a41dc
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/Bootstrap.cpp
@@ -0,0 +1,122 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/Api.h>
+#include <aws/crt/io/Bootstrap.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+
+ /**
+ * @private
+ * Holds the bootstrap's shutdown promise.
+ * Lives until the bootstrap's shutdown-complete callback fires.
+ */
+ class ClientBootstrapCallbackData
+ {
+ private:
+ Allocator *m_allocator;
+
+ public:
+ ClientBootstrapCallbackData(Allocator *allocator) : m_allocator(allocator) {}
+ /**
+ * Promise for bootstrap's shutdown.
+ */
+ std::promise<void> ShutdownPromise;
+ /**
+ * User callback of bootstrap's shutdown-complete.
+ */
+ OnClientBootstrapShutdownComplete ShutdownCallback;
+
+ /**
+ * Internal callback of bootstrap's shutdown-complete
+ */
+ static void OnShutdownComplete(void *userData)
+ {
+ auto callbackData = static_cast<ClientBootstrapCallbackData *>(userData);
+
+ callbackData->ShutdownPromise.set_value();
+ if (callbackData->ShutdownCallback)
+ {
+ callbackData->ShutdownCallback();
+ }
+
+ Crt::Delete(callbackData, callbackData->m_allocator);
+ }
+ };
+
+ ClientBootstrap::ClientBootstrap(
+ EventLoopGroup &elGroup,
+ HostResolver &resolver,
+ Allocator *allocator) noexcept
+ : m_bootstrap(nullptr), m_lastError(AWS_ERROR_SUCCESS),
+ m_callbackData(Crt::New<ClientBootstrapCallbackData>(allocator, allocator)),
+ m_enableBlockingShutdown(false)
+ {
+ m_shutdownFuture = m_callbackData->ShutdownPromise.get_future();
+
+ aws_client_bootstrap_options options;
+ options.event_loop_group = elGroup.GetUnderlyingHandle();
+ options.host_resolution_config = resolver.GetConfig();
+ options.host_resolver = resolver.GetUnderlyingHandle();
+ options.on_shutdown_complete = ClientBootstrapCallbackData::OnShutdownComplete;
+ options.user_data = m_callbackData.get();
+ m_bootstrap = aws_client_bootstrap_new(allocator, &options);
+ if (!m_bootstrap)
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ ClientBootstrap::ClientBootstrap(Allocator *allocator) noexcept
+ : ClientBootstrap(
+ *Crt::ApiHandle::GetOrCreateStaticDefaultEventLoopGroup(),
+ *Crt::ApiHandle::GetOrCreateStaticDefaultHostResolver(),
+ allocator)
+ {
+ }
+
+ ClientBootstrap::~ClientBootstrap()
+ {
+ if (m_bootstrap)
+ {
+ // Release m_callbackData; it deletes itself when the shutdown-complete callback fires.
+ m_callbackData.release();
+
+ aws_client_bootstrap_release(m_bootstrap);
+ if (m_enableBlockingShutdown)
+ {
+ // If your program is stuck here, stop using EnableBlockingShutdown()
+ m_shutdownFuture.wait();
+ }
+ }
+ }
+
+ ClientBootstrap::operator bool() const noexcept { return m_lastError == AWS_ERROR_SUCCESS; }
+
+ int ClientBootstrap::LastError() const noexcept { return m_lastError; }
+
+ void ClientBootstrap::SetShutdownCompleteCallback(OnClientBootstrapShutdownComplete callback)
+ {
+ m_callbackData->ShutdownCallback = std::move(callback);
+ }
+
+ void ClientBootstrap::EnableBlockingShutdown() noexcept { m_enableBlockingShutdown = true; }
+
+ aws_client_bootstrap *ClientBootstrap::GetUnderlyingHandle() const noexcept
+ {
+ if (*this)
+ {
+ return m_bootstrap;
+ }
+
+ return nullptr;
+ }
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
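+
+// Editor's sketch (not part of the upstream file): the usual way to obtain a bootstrap with the
+// constructors above. Assumes an ApiHandle is alive so the static default event loop group and
+// host resolver exist, and that "allocator" is a valid allocator.
+//
+//     Aws::Crt::Io::ClientBootstrap bootstrap(allocator);
+//     if (!bootstrap)
+//     {
+//         // creation failed; inspect bootstrap.LastError()
+//     }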
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
new file mode 100644
index 0000000000..fcbc443170
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/ChannelHandler.cpp
@@ -0,0 +1,217 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/ChannelHandler.h>
+
+#include <chrono>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ int ChannelHandler::s_ProcessReadMessage(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *,
+ struct aws_io_message *message)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+
+ return channelHandler->ProcessReadMessage(message);
+ }
+
+ int ChannelHandler::s_ProcessWriteMessage(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *,
+ struct aws_io_message *message)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+
+ return channelHandler->ProcessWriteMessage(message);
+ }
+
+ int ChannelHandler::s_IncrementReadWindow(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *,
+ size_t size)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+
+ return channelHandler->IncrementReadWindow(size);
+ }
+
+ int ChannelHandler::s_ProcessShutdown(
+ struct aws_channel_handler *handler,
+ struct aws_channel_slot *,
+ enum aws_channel_direction dir,
+ int errorCode,
+ bool freeScarceResourcesImmediately)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+
+ channelHandler->ProcessShutdown(
+ static_cast<ChannelDirection>(dir), errorCode, freeScarceResourcesImmediately);
+ return AWS_OP_SUCCESS;
+ }
+
+ size_t ChannelHandler::s_InitialWindowSize(struct aws_channel_handler *handler)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+ return channelHandler->InitialWindowSize();
+ }
+
+ size_t ChannelHandler::s_MessageOverhead(struct aws_channel_handler *handler)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+ return channelHandler->MessageOverhead();
+ }
+
+ void ChannelHandler::s_ResetStatistics(struct aws_channel_handler *handler)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+ channelHandler->ResetStatistics();
+ }
+
+ void ChannelHandler::s_GatherStatistics(
+ struct aws_channel_handler *handler,
+ struct aws_array_list *statsList)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+ channelHandler->GatherStatistics(statsList);
+ }
+
+ void ChannelHandler::s_Destroy(struct aws_channel_handler *handler)
+ {
+ auto *channelHandler = reinterpret_cast<ChannelHandler *>(handler->impl);
+ channelHandler->m_selfReference = nullptr;
+ }
+
+ struct aws_channel_handler_vtable ChannelHandler::s_vtable = {
+ s_ProcessReadMessage,
+ s_ProcessWriteMessage,
+ s_IncrementReadWindow,
+ s_ProcessShutdown,
+ s_InitialWindowSize,
+ s_MessageOverhead,
+ ChannelHandler::s_Destroy,
+ s_ResetStatistics,
+ s_GatherStatistics,
+ };
+
+ ChannelHandler::ChannelHandler(Allocator *allocator) : m_allocator(allocator)
+ {
+ AWS_ZERO_STRUCT(m_handler);
+ m_handler.alloc = allocator;
+ m_handler.impl = reinterpret_cast<void *>(this);
+ m_handler.vtable = &ChannelHandler::s_vtable;
+ }
+
+ struct aws_channel_handler *ChannelHandler::SeatForCInterop(const std::shared_ptr<ChannelHandler> &selfRef)
+ {
+ AWS_FATAL_ASSERT(this == selfRef.get());
+ m_selfReference = selfRef;
+ return &m_handler;
+ }
+
+ struct aws_io_message *ChannelHandler::AcquireMessageFromPool(MessageType messageType, size_t sizeHint)
+ {
+ return aws_channel_acquire_message_from_pool(
+ GetSlot()->channel, static_cast<aws_io_message_type>(messageType), sizeHint);
+ }
+
+ struct aws_io_message *ChannelHandler::AcquireMaxSizeMessageForWrite()
+ {
+ return aws_channel_slot_acquire_max_message_for_write(GetSlot());
+ }
+
+ void ChannelHandler::ShutDownChannel(int errorCode) { aws_channel_shutdown(GetSlot()->channel, errorCode); }
+
+ bool ChannelHandler::ChannelsThreadIsCallersThread() const
+ {
+ return aws_channel_thread_is_callers_thread(GetSlot()->channel);
+ }
+
+ bool ChannelHandler::SendMessage(struct aws_io_message *message, ChannelDirection direction)
+ {
+ return aws_channel_slot_send_message(
+ GetSlot(), message, static_cast<aws_channel_direction>(direction)) == AWS_OP_SUCCESS;
+ }
+
+ bool ChannelHandler::IncrementUpstreamReadWindow(size_t windowUpdateSize)
+ {
+ return aws_channel_slot_increment_read_window(GetSlot(), windowUpdateSize) == AWS_OP_SUCCESS;
+ }
+
+ void ChannelHandler::OnShutdownComplete(
+ ChannelDirection direction,
+ int errorCode,
+ bool freeScarceResourcesImmediately)
+ {
+ aws_channel_slot_on_handler_shutdown_complete(
+ GetSlot(),
+ static_cast<aws_channel_direction>(direction),
+ errorCode,
+ freeScarceResourcesImmediately);
+ }
+
+ size_t ChannelHandler::DownstreamReadWindow() const
+ {
+ if (!GetSlot()->adj_right)
+ {
+ return 0;
+ }
+ return aws_channel_slot_downstream_read_window(GetSlot());
+ }
+
+ size_t ChannelHandler::UpstreamMessageOverhead() const
+ {
+ return aws_channel_slot_upstream_message_overhead(GetSlot());
+ }
+
+ struct aws_channel_slot *ChannelHandler::GetSlot() const { return m_handler.slot; }
+
+ struct TaskWrapper
+ {
+ struct aws_channel_task task{};
+ Allocator *allocator{};
+ std::function<void(TaskStatus)> wrappingFn;
+ };
+
+ static void s_ChannelTaskCallback(struct aws_channel_task *, void *arg, enum aws_task_status status)
+ {
+ auto *taskWrapper = reinterpret_cast<TaskWrapper *>(arg);
+ taskWrapper->wrappingFn(static_cast<TaskStatus>(status));
+ Delete(taskWrapper, taskWrapper->allocator);
+ }
+
+ void ChannelHandler::ScheduleTask(std::function<void(TaskStatus)> &&task, std::chrono::nanoseconds run_in)
+ {
+ auto *wrapper = New<TaskWrapper>(m_allocator);
+ wrapper->wrappingFn = std::move(task);
+ wrapper->allocator = m_allocator;
+ aws_channel_task_init(
+ &wrapper->task, s_ChannelTaskCallback, wrapper, "cpp-crt-custom-channel-handler-task");
+
+ uint64_t currentTimestamp = 0;
+ aws_channel_current_clock_time(GetSlot()->channel, &currentTimestamp);
+ aws_channel_schedule_task_future(GetSlot()->channel, &wrapper->task, currentTimestamp + run_in.count());
+ }
+
+ void ChannelHandler::ScheduleTask(std::function<void(TaskStatus)> &&task)
+ {
+ auto *wrapper = New<TaskWrapper>(m_allocator);
+ wrapper->wrappingFn = std::move(task);
+ wrapper->allocator = m_allocator;
+ aws_channel_task_init(
+ &wrapper->task, s_ChannelTaskCallback, wrapper, "cpp-crt-custom-channel-handler-task");
+
+ aws_channel_schedule_task_now(GetSlot()->channel, &wrapper->task);
+ }
+
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
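
The ChannelHandler code above bridges aws-c-io's C vtable to C++ virtual methods through the handler's impl pointer. The following self-contained sketch (hypothetical Handler/c_handler names, not part of this patch) illustrates the same trampoline pattern in isolation; SeatForCInterop above adds one further twist in that the object holds a shared_ptr to itself until s_Destroy clears it, so the C channel controls the object's lifetime.

#include <cstddef>
#include <cstdio>

// Stand-in for aws_channel_handler: a C-style struct with an impl pointer and one vtable entry.
struct c_handler
{
    void *impl;
    std::size_t (*initial_window_size)(c_handler *);
};

class Handler
{
  public:
    Handler()
    {
        m_c.impl = this;
        m_c.initial_window_size = &Handler::s_InitialWindowSize;
    }
    virtual ~Handler() = default;
    virtual std::size_t InitialWindowSize() { return 16 * 1024; }
    c_handler *CStruct() { return &m_c; }

  private:
    // Static trampoline: recover the C++ object from impl and forward to the virtual method,
    // mirroring what ChannelHandler::s_InitialWindowSize does above.
    static std::size_t s_InitialWindowSize(c_handler *h)
    {
        return static_cast<Handler *>(h->impl)->InitialWindowSize();
    }
    c_handler m_c{};
};

int main()
{
    Handler handler;
    std::printf("window size: %zu\n", handler.CStruct()->initial_window_size(handler.CStruct()));
    return 0;
}
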
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
new file mode 100644
index 0000000000..000c08513b
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/EventLoopGroup.cpp
@@ -0,0 +1,71 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/EventLoopGroup.h>
+#include <iostream>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ EventLoopGroup::EventLoopGroup(uint16_t threadCount, Allocator *allocator) noexcept
+ : m_eventLoopGroup(nullptr), m_lastError(AWS_ERROR_SUCCESS)
+ {
+ m_eventLoopGroup = aws_event_loop_group_new_default(allocator, threadCount, NULL);
+ if (m_eventLoopGroup == nullptr)
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ EventLoopGroup::EventLoopGroup(uint16_t cpuGroup, uint16_t threadCount, Allocator *allocator) noexcept
+ : m_eventLoopGroup(nullptr), m_lastError(AWS_ERROR_SUCCESS)
+ {
+ m_eventLoopGroup =
+ aws_event_loop_group_new_default_pinned_to_cpu_group(allocator, threadCount, cpuGroup, NULL);
+ if (m_eventLoopGroup == nullptr)
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ EventLoopGroup::~EventLoopGroup() { aws_event_loop_group_release(m_eventLoopGroup); }
+
+ EventLoopGroup::EventLoopGroup(EventLoopGroup &&toMove) noexcept
+ : m_eventLoopGroup(toMove.m_eventLoopGroup), m_lastError(toMove.m_lastError)
+ {
+ toMove.m_lastError = AWS_ERROR_UNKNOWN;
+ toMove.m_eventLoopGroup = nullptr;
+ }
+
+ EventLoopGroup &EventLoopGroup::operator=(EventLoopGroup &&toMove) noexcept
+ {
+ m_eventLoopGroup = toMove.m_eventLoopGroup;
+ m_lastError = toMove.m_lastError;
+ toMove.m_lastError = AWS_ERROR_UNKNOWN;
+ toMove.m_eventLoopGroup = nullptr;
+
+ return *this;
+ }
+
+ int EventLoopGroup::LastError() const { return m_lastError; }
+
+ EventLoopGroup::operator bool() const { return m_lastError == AWS_ERROR_SUCCESS; }
+
+ aws_event_loop_group *EventLoopGroup::GetUnderlyingHandle() noexcept
+ {
+ if (*this)
+ {
+ return m_eventLoopGroup;
+ }
+
+ return nullptr;
+ }
+
+ } // namespace Io
+
+ } // namespace Crt
+} // namespace Aws
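
A minimal usage sketch for the class above, assuming the CRT is initialized through Aws::Crt::ApiHandle and that Aws::Crt::ApiAllocator() (used later in this patch) returns the default allocator:

#include <aws/crt/Api.h>
#include <aws/crt/io/EventLoopGroup.h>

int main()
{
    Aws::Crt::ApiHandle apiHandle; // global CRT init/cleanup

    // Two event-loop threads; failure is reported via operator bool / LastError().
    Aws::Crt::Io::EventLoopGroup elGroup(2, Aws::Crt::ApiAllocator());
    if (!elGroup)
    {
        return elGroup.LastError();
    }
    return 0;
}
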
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
new file mode 100644
index 0000000000..18173fc413
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/HostResolver.cpp
@@ -0,0 +1,121 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/HostResolver.h>
+
+#include <aws/crt/io/EventLoopGroup.h>
+
+#include <aws/common/string.h>
+#include <aws/crt/Api.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ HostResolver::~HostResolver() {}
+
+ DefaultHostResolver::DefaultHostResolver(
+ EventLoopGroup &elGroup,
+ size_t maxHosts,
+ size_t maxTTL,
+ Allocator *allocator) noexcept
+ : m_resolver(nullptr), m_allocator(allocator), m_initialized(false)
+ {
+ AWS_ZERO_STRUCT(m_config);
+
+ struct aws_host_resolver_default_options resolver_options;
+ AWS_ZERO_STRUCT(resolver_options);
+ resolver_options.max_entries = maxHosts;
+ resolver_options.el_group = elGroup.GetUnderlyingHandle();
+
+ m_resolver = aws_host_resolver_new_default(allocator, &resolver_options);
+ if (m_resolver != nullptr)
+ {
+ m_initialized = true;
+ }
+
+ m_config.impl = aws_default_dns_resolve;
+ m_config.impl_data = nullptr;
+ m_config.max_ttl = maxTTL;
+ }
+
+ DefaultHostResolver::DefaultHostResolver(size_t maxHosts, size_t maxTTL, Allocator *allocator) noexcept
+ : DefaultHostResolver(
+ *Crt::ApiHandle::GetOrCreateStaticDefaultEventLoopGroup(),
+ maxHosts,
+ maxTTL,
+ allocator)
+ {
+ }
+
+ DefaultHostResolver::~DefaultHostResolver()
+ {
+ aws_host_resolver_release(m_resolver);
+ m_initialized = false;
+ }
+
+ /**
+ * @private
+ */
+ struct DefaultHostResolveArgs
+ {
+ Allocator *allocator;
+ HostResolver *resolver;
+ OnHostResolved onResolved;
+ aws_string *host;
+ };
+
+ void DefaultHostResolver::s_onHostResolved(
+ struct aws_host_resolver *,
+ const struct aws_string *hostName,
+ int errCode,
+ const struct aws_array_list *hostAddresses,
+ void *userData)
+ {
+ DefaultHostResolveArgs *args = static_cast<DefaultHostResolveArgs *>(userData);
+
+ size_t len = aws_array_list_length(hostAddresses);
+ Vector<HostAddress> addresses;
+
+ for (size_t i = 0; i < len; ++i)
+ {
+ HostAddress *address_ptr = NULL;
+ aws_array_list_get_at_ptr(hostAddresses, reinterpret_cast<void **>(&address_ptr), i);
+ addresses.push_back(*address_ptr);
+ }
+
+ String host(aws_string_c_str(hostName), hostName->len);
+ args->onResolved(*args->resolver, addresses, errCode);
+ aws_string_destroy(args->host);
+ Delete(args, args->allocator);
+ }
+
+ bool DefaultHostResolver::ResolveHost(const String &host, const OnHostResolved &onResolved) noexcept
+ {
+ DefaultHostResolveArgs *args = New<DefaultHostResolveArgs>(m_allocator);
+ if (!args)
+ {
+ return false;
+ }
+
+ args->host = aws_string_new_from_array(
+ m_allocator, reinterpret_cast<const uint8_t *>(host.data()), host.length());
+ args->onResolved = onResolved;
+ args->resolver = this;
+ args->allocator = m_allocator;
+
+ if (!args->host ||
+ aws_host_resolver_resolve_host(m_resolver, args->host, s_onHostResolved, &m_config, args))
+ {
+ Delete(args, m_allocator);
+ return false;
+ }
+
+ return true;
+ }
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
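
A usage sketch for DefaultHostResolver above. The OnHostResolved signature is inferred from s_onHostResolved (resolver, address list, error code) and should be treated as an assumption:

#include <aws/crt/Api.h>
#include <aws/crt/io/EventLoopGroup.h>
#include <aws/crt/io/HostResolver.h>

int main()
{
    Aws::Crt::ApiHandle apiHandle;
    Aws::Crt::Io::EventLoopGroup elGroup(1, Aws::Crt::ApiAllocator());

    // Cache up to 8 hosts with a 30-second TTL.
    Aws::Crt::Io::DefaultHostResolver resolver(elGroup, 8, 30, Aws::Crt::ApiAllocator());

    resolver.ResolveHost(
        "example.com", // hypothetical host name
        [](Aws::Crt::Io::HostResolver &,
           const Aws::Crt::Vector<Aws::Crt::Io::HostAddress> &addresses,
           int errorCode) {
            // addresses is empty when errorCode is non-zero.
            (void)addresses;
            (void)errorCode;
        });

    // A real program must keep the resolver and event loop group alive until the callback fires.
    return 0;
}
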
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
new file mode 100644
index 0000000000..6e71287387
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/Pkcs11.cpp
@@ -0,0 +1,69 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/Pkcs11.h>
+
+#include <aws/io/logging.h>
+#include <aws/io/pkcs11.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ std::shared_ptr<Pkcs11Lib> Pkcs11Lib::Create(const String &filename, Allocator *allocator)
+ {
+ return Create(filename, InitializeFinalizeBehavior::Default, allocator);
+ }
+
+ std::shared_ptr<Pkcs11Lib> Pkcs11Lib::Create(
+ const String &filename,
+ InitializeFinalizeBehavior initializeFinalizeBehavior,
+ Allocator *allocator)
+ {
+ aws_pkcs11_lib_options options;
+ AWS_ZERO_STRUCT(options);
+
+ if (!filename.empty())
+ {
+ options.filename = ByteCursorFromString(filename);
+ }
+
+ switch (initializeFinalizeBehavior)
+ {
+ case InitializeFinalizeBehavior::Default:
+ options.initialize_finalize_behavior = AWS_PKCS11_LIB_DEFAULT_BEHAVIOR;
+ break;
+ case InitializeFinalizeBehavior::Omit:
+ options.initialize_finalize_behavior = AWS_PKCS11_LIB_OMIT_INITIALIZE;
+ break;
+ case InitializeFinalizeBehavior::Strict:
+ options.initialize_finalize_behavior = AWS_PKCS11_LIB_STRICT_INITIALIZE_FINALIZE;
+ break;
+ default:
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_PKCS11,
+ "Cannot create Pkcs11Lib. Invalid InitializeFinalizeBehavior %d",
+ (int)initializeFinalizeBehavior);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return nullptr;
+ }
+
+ struct aws_pkcs11_lib *impl = aws_pkcs11_lib_new(allocator, &options);
+ if (impl == nullptr)
+ {
+ return nullptr;
+ }
+
+ return MakeShared<Pkcs11Lib>(allocator, *impl);
+ }
+
+ Pkcs11Lib::Pkcs11Lib(aws_pkcs11_lib &impl) : impl(&impl) {}
+
+ Pkcs11Lib::~Pkcs11Lib() { aws_pkcs11_lib_release(impl); }
+
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
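
A brief sketch of the two-argument factory above; the shared library path is hypothetical:

#include <aws/crt/Api.h>
#include <aws/crt/io/Pkcs11.h>

int main()
{
    Aws::Crt::ApiHandle apiHandle;

    // Create() returns nullptr and raises an AWS error on failure.
    auto pkcs11Lib = Aws::Crt::Io::Pkcs11Lib::Create(
        "/usr/lib/softhsm/libsofthsm2.so", Aws::Crt::ApiAllocator()); // hypothetical path
    return pkcs11Lib ? 0 : 1;
}
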
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
new file mode 100644
index 0000000000..339b81c087
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/SocketOptions.cpp
@@ -0,0 +1,28 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/SocketOptions.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+
+ static const uint32_t DEFAULT_SOCKET_TIME_MSEC = 3000;
+
+ SocketOptions::SocketOptions()
+ {
+ options.type = AWS_SOCKET_STREAM;
+ options.domain = AWS_SOCKET_IPV4;
+ options.connect_timeout_ms = DEFAULT_SOCKET_TIME_MSEC;
+ options.keep_alive_max_failed_probes = 0;
+ options.keep_alive_timeout_sec = 0;
+ options.keep_alive_interval_sec = 0;
+ options.keepalive = false;
+ }
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
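
The constructor above defaults to a TCP/IPv4 socket with a 3000 ms connect timeout and keep-alive disabled. A small sketch using the setters that appear later in this patch:

#include <aws/crt/io/SocketOptions.h>

int main()
{
    Aws::Crt::Io::SocketOptions socketOptions; // AWS_SOCKET_STREAM, AWS_SOCKET_IPV4, 3000 ms
    socketOptions.SetConnectTimeoutMs(5000);   // same setter MqttClientConnectionConfigBuilder uses
    socketOptions.SetKeepAlive(true);
    return 0;
}
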
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
new file mode 100644
index 0000000000..cf3d6d1cf6
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/Stream.cpp
@@ -0,0 +1,211 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/io/Stream.h>
+
+#include <aws/io/stream.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ InputStream::~InputStream()
+ {
+ // Intentionally empty for now. Keep this defined here because the destructor has to be
+ // virtual, and we may have resources to clean up in the future.
+ }
+
+ int InputStream::s_Seek(aws_input_stream *stream, int64_t offset, enum aws_stream_seek_basis basis)
+ {
+ auto impl = static_cast<InputStream *>(stream->impl);
+
+ // Reset the error state so we can detect whether the implementation raised an error when
+ // reporting failure. Docs for the C++ SeekImpl API say you "SHOULD" raise an error, but the
+ // C API requires one, so a generic error is raised below if the implementation did not.
+ aws_reset_error();
+
+ if (impl->SeekImpl(offset, static_cast<StreamSeekBasis>(basis)))
+ {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (aws_last_error() == 0)
+ {
+ aws_raise_error(AWS_IO_STREAM_SEEK_FAILED);
+ }
+
+ return AWS_OP_ERR;
+ }
+
+ int InputStream::s_Read(aws_input_stream *stream, aws_byte_buf *dest)
+ {
+ auto impl = static_cast<InputStream *>(stream->impl);
+
+ // Reset the error state so we can detect whether the implementation raised an error when
+ // reporting failure. Docs for the C++ ReadImpl API say you "SHOULD" raise an error, but the
+ // C API requires one, so a generic error is raised below if the implementation did not.
+ aws_reset_error();
+
+ if (impl->ReadImpl(*dest))
+ {
+ return AWS_OP_SUCCESS;
+ }
+
+ if (aws_last_error() == 0)
+ {
+ aws_raise_error(AWS_IO_STREAM_READ_FAILED);
+ }
+
+ return AWS_OP_ERR;
+ }
+
+ int InputStream::s_GetStatus(aws_input_stream *stream, aws_stream_status *status)
+ {
+ auto impl = static_cast<InputStream *>(stream->impl);
+
+ *status = impl->GetStatusImpl();
+ return AWS_OP_SUCCESS;
+ }
+
+ int InputStream::s_GetLength(struct aws_input_stream *stream, int64_t *out_length)
+ {
+ auto impl = static_cast<InputStream *>(stream->impl);
+
+ int64_t length = impl->GetLengthImpl();
+
+ if (length >= 0)
+ {
+ *out_length = length;
+ return AWS_OP_SUCCESS;
+ }
+
+ aws_raise_error(AWS_IO_STREAM_READ_FAILED);
+ return AWS_OP_ERR;
+ }
+
+ void InputStream::s_Acquire(aws_input_stream *stream)
+ {
+ auto impl = static_cast<InputStream *>(stream->impl);
+ impl->AcquireRef();
+ }
+
+ void InputStream::s_Release(aws_input_stream *stream)
+ {
+ auto impl = static_cast<InputStream *>(stream->impl);
+ impl->ReleaseRef();
+ }
+
+ aws_input_stream_vtable InputStream::s_vtable = {
+ InputStream::s_Seek,
+ InputStream::s_Read,
+ InputStream::s_GetStatus,
+ InputStream::s_GetLength,
+ InputStream::s_Acquire,
+ InputStream::s_Release,
+ };
+
+ InputStream::InputStream(Aws::Crt::Allocator *allocator)
+ {
+ m_allocator = allocator;
+ AWS_ZERO_STRUCT(m_underlying_stream);
+
+ m_underlying_stream.impl = this;
+ m_underlying_stream.vtable = &s_vtable;
+ }
+
+ StdIOStreamInputStream::StdIOStreamInputStream(
+ std::shared_ptr<Aws::Crt::Io::IStream> stream,
+ Aws::Crt::Allocator *allocator) noexcept
+ : InputStream(allocator), m_stream(std::move(stream))
+ {
+ }
+
+ bool StdIOStreamInputStream::IsValid() const noexcept
+ {
+ auto status = GetStatusImpl();
+ return status.is_valid;
+ }
+
+ bool StdIOStreamInputStream::ReadImpl(ByteBuf &buffer) noexcept
+ {
+ // This read blocks, but readsome() doesn't work reliably, so this is the best we've got.
+ // If blocking is unacceptable, don't wrap a std::istream here; implement your own
+ // subclass of Aws::Crt::Io::InputStream instead.
+ m_stream->read(reinterpret_cast<char *>(buffer.buffer + buffer.len), buffer.capacity - buffer.len);
+ auto read = m_stream->gcount();
+ buffer.len += static_cast<size_t>(read);
+
+ if (read > 0 || (read == 0 && m_stream->eof()))
+ {
+ return true;
+ }
+
+ auto status = GetStatusImpl();
+
+ return status.is_valid && !status.is_end_of_stream;
+ }
+
+ StreamStatus StdIOStreamInputStream::GetStatusImpl() const noexcept
+ {
+ StreamStatus status;
+ status.is_end_of_stream = m_stream->eof();
+ status.is_valid = static_cast<bool>(*m_stream);
+
+ return status;
+ }
+
+ int64_t StdIOStreamInputStream::GetLengthImpl() const noexcept
+ {
+ auto currentPosition = m_stream->tellg();
+
+ m_stream->seekg(0, std::ios_base::end);
+ int64_t retVal = -1;
+
+ if (*m_stream)
+ {
+ retVal = static_cast<int64_t>(m_stream->tellg());
+ }
+
+ m_stream->seekg(currentPosition);
+
+ return retVal;
+ }
+
+ bool StdIOStreamInputStream::SeekImpl(int64_t offset, StreamSeekBasis seekBasis) noexcept
+ {
+ // Very important: clear the error/EOF flags, otherwise the stream can't be reused after it has been read to the end once.
+ m_stream->clear();
+
+ auto seekDir = std::ios_base::beg;
+ switch (seekBasis)
+ {
+ case StreamSeekBasis::Begin:
+ seekDir = std::ios_base::beg;
+ break;
+ case StreamSeekBasis::End:
+ seekDir = std::ios_base::end;
+ break;
+ default:
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return false;
+ }
+
+ using stdOffType = Aws::Crt::Io::IStream::off_type;
+ if (offset < std::numeric_limits<stdOffType>::min() || offset > std::numeric_limits<stdOffType>::max())
+ {
+ aws_raise_error(AWS_IO_STREAM_INVALID_SEEK_POSITION);
+ return false;
+ }
+
+ m_stream->seekg(static_cast<stdOffType>(offset), seekDir);
+
+ return true;
+ }
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
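
A sketch of wrapping an in-memory stream with the adapter above, assuming Aws::Crt::Io::IStream is a std::istream-compatible alias (as the read()/gcount()/seekg() calls above imply):

#include <aws/crt/Api.h>
#include <aws/crt/io/Stream.h>

#include <memory>
#include <sstream>

int main()
{
    Aws::Crt::ApiHandle apiHandle;

    auto backing = std::make_shared<std::stringstream>("hello stream");
    Aws::Crt::Io::StdIOStreamInputStream inputStream(backing, Aws::Crt::ApiAllocator());

    // IsValid() consults GetStatusImpl(), i.e. the wrapped stream's state.
    return inputStream.IsValid() ? 0 : 1;
}
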
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
new file mode 100644
index 0000000000..6077912c9a
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/TlsOptions.cpp
@@ -0,0 +1,520 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/TlsOptions.h>
+
+#include <aws/crt/io/Pkcs11.h>
+
+#include <aws/crt/Api.h>
+#include <aws/io/logging.h>
+#include <aws/io/tls_channel_handler.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ TlsContextOptions::~TlsContextOptions()
+ {
+ if (m_isInit)
+ {
+ aws_tls_ctx_options_clean_up(&m_options);
+ }
+ }
+
+ TlsContextOptions::TlsContextOptions() noexcept : m_isInit(false) { AWS_ZERO_STRUCT(m_options); }
+
+ TlsContextOptions::TlsContextOptions(TlsContextOptions &&other) noexcept
+ {
+ m_options = other.m_options;
+ m_isInit = other.m_isInit;
+ AWS_ZERO_STRUCT(other.m_options);
+ other.m_isInit = false;
+ }
+
+ TlsContextOptions &TlsContextOptions::operator=(TlsContextOptions &&other) noexcept
+ {
+ if (&other != this)
+ {
+ if (m_isInit)
+ {
+ aws_tls_ctx_options_clean_up(&m_options);
+ }
+
+ m_options = other.m_options;
+ m_isInit = other.m_isInit;
+ AWS_ZERO_STRUCT(other.m_options);
+ other.m_isInit = false;
+ }
+
+ return *this;
+ }
+
+ TlsContextOptions TlsContextOptions::InitDefaultClient(Allocator *allocator) noexcept
+ {
+ TlsContextOptions ctxOptions;
+ aws_tls_ctx_options_init_default_client(&ctxOptions.m_options, allocator);
+ ctxOptions.m_isInit = true;
+ return ctxOptions;
+ }
+
+ TlsContextOptions TlsContextOptions::InitClientWithMtls(
+ const char *certPath,
+ const char *pKeyPath,
+ Allocator *allocator) noexcept
+ {
+ TlsContextOptions ctxOptions;
+ if (!aws_tls_ctx_options_init_client_mtls_from_path(
+ &ctxOptions.m_options, allocator, certPath, pKeyPath))
+ {
+ ctxOptions.m_isInit = true;
+ }
+ return ctxOptions;
+ }
+
+ TlsContextOptions TlsContextOptions::InitClientWithMtls(
+ const ByteCursor &cert,
+ const ByteCursor &pkey,
+ Allocator *allocator) noexcept
+ {
+ TlsContextOptions ctxOptions;
+ if (!aws_tls_ctx_options_init_client_mtls(
+ &ctxOptions.m_options,
+ allocator,
+ const_cast<ByteCursor *>(&cert),
+ const_cast<ByteCursor *>(&pkey)))
+ {
+ ctxOptions.m_isInit = true;
+ }
+ return ctxOptions;
+ }
+
+ TlsContextOptions TlsContextOptions::InitClientWithMtlsPkcs11(
+ const TlsContextPkcs11Options &pkcs11Options,
+ Allocator *allocator) noexcept
+ {
+ TlsContextOptions ctxOptions;
+ aws_tls_ctx_pkcs11_options nativePkcs11Options = pkcs11Options.GetUnderlyingHandle();
+ if (!aws_tls_ctx_options_init_client_mtls_with_pkcs11(
+ &ctxOptions.m_options, allocator, &nativePkcs11Options))
+ {
+ ctxOptions.m_isInit = true;
+ }
+ return ctxOptions;
+ }
+
+ TlsContextOptions TlsContextOptions::InitClientWithMtlsPkcs12(
+ const char *pkcs12Path,
+ const char *pkcs12Pwd,
+ Allocator *allocator) noexcept
+ {
+ TlsContextOptions ctxOptions;
+ struct aws_byte_cursor password = aws_byte_cursor_from_c_str(pkcs12Pwd);
+ if (!aws_tls_ctx_options_init_client_mtls_pkcs12_from_path(
+ &ctxOptions.m_options, allocator, pkcs12Path, &password))
+ {
+ ctxOptions.m_isInit = true;
+ }
+ return ctxOptions;
+ }
+
+ bool TlsContextOptions::SetKeychainPath(ByteCursor &keychain_path) noexcept
+ {
+ AWS_ASSERT(m_isInit);
+ return aws_tls_ctx_options_set_keychain_path(&m_options, &keychain_path) == 0;
+ }
+
+ TlsContextOptions TlsContextOptions::InitClientWithMtlsSystemPath(
+ const char *windowsCertStorePath,
+ Allocator *allocator) noexcept
+ {
+ TlsContextOptions ctxOptions;
+ if (!aws_tls_ctx_options_init_client_mtls_from_system_path(
+ &ctxOptions.m_options, allocator, windowsCertStorePath))
+ {
+ ctxOptions.m_isInit = true;
+ }
+ return ctxOptions;
+ }
+
+ int TlsContextOptions::LastError() const noexcept { return LastErrorOrUnknown(); }
+
+ bool TlsContextOptions::IsAlpnSupported() noexcept { return aws_tls_is_alpn_available(); }
+
+ bool TlsContextOptions::SetAlpnList(const char *alpn_list) noexcept
+ {
+ AWS_ASSERT(m_isInit);
+ return aws_tls_ctx_options_set_alpn_list(&m_options, alpn_list) == 0;
+ }
+
+ void TlsContextOptions::SetVerifyPeer(bool verify_peer) noexcept
+ {
+ AWS_ASSERT(m_isInit);
+ aws_tls_ctx_options_set_verify_peer(&m_options, verify_peer);
+ }
+
+ void TlsContextOptions::SetMinimumTlsVersion(aws_tls_versions minimumTlsVersion)
+ {
+ AWS_ASSERT(m_isInit);
+ aws_tls_ctx_options_set_minimum_tls_version(&m_options, minimumTlsVersion);
+ }
+
+ void TlsContextOptions::SetTlsCipherPreference(aws_tls_cipher_pref cipher_pref)
+ {
+ AWS_ASSERT(m_isInit);
+ aws_tls_ctx_options_set_tls_cipher_preference(&m_options, cipher_pref);
+ }
+
+ bool TlsContextOptions::OverrideDefaultTrustStore(const char *caPath, const char *caFile) noexcept
+ {
+ AWS_ASSERT(m_isInit);
+ return aws_tls_ctx_options_override_default_trust_store_from_path(&m_options, caPath, caFile) == 0;
+ }
+
+ bool TlsContextOptions::OverrideDefaultTrustStore(const ByteCursor &ca) noexcept
+ {
+ AWS_ASSERT(m_isInit);
+ return aws_tls_ctx_options_override_default_trust_store(&m_options, const_cast<ByteCursor *>(&ca)) == 0;
+ }
+
+ TlsContextPkcs11Options::TlsContextPkcs11Options(
+ const std::shared_ptr<Pkcs11Lib> &pkcs11Lib,
+ Allocator *) noexcept
+ : m_pkcs11Lib{pkcs11Lib}
+ {
+ }
+
+ void TlsContextPkcs11Options::SetUserPin(const String &pin) noexcept { m_userPin = pin; }
+
+ void TlsContextPkcs11Options::SetSlotId(const uint64_t id) noexcept { m_slotId = id; }
+
+ void TlsContextPkcs11Options::SetTokenLabel(const String &label) noexcept { m_tokenLabel = label; }
+
+ void TlsContextPkcs11Options::SetPrivateKeyObjectLabel(const String &label) noexcept
+ {
+ m_privateKeyObjectLabel = label;
+ }
+
+ void TlsContextPkcs11Options::SetCertificateFilePath(const String &path) noexcept
+ {
+ m_certificateFilePath = path;
+ }
+
+ void TlsContextPkcs11Options::SetCertificateFileContents(const String &contents) noexcept
+ {
+ m_certificateFileContents = contents;
+ }
+
+ aws_tls_ctx_pkcs11_options TlsContextPkcs11Options::GetUnderlyingHandle() const noexcept
+ {
+ aws_tls_ctx_pkcs11_options options;
+ AWS_ZERO_STRUCT(options);
+
+ if (m_pkcs11Lib)
+ {
+ options.pkcs11_lib = m_pkcs11Lib->GetNativeHandle();
+ }
+
+ if (m_slotId)
+ {
+ options.slot_id = &(*m_slotId);
+ }
+
+ if (m_userPin)
+ {
+ options.user_pin = ByteCursorFromString(*m_userPin);
+ }
+
+ if (m_tokenLabel)
+ {
+ options.token_label = ByteCursorFromString(*m_tokenLabel);
+ }
+
+ if (m_privateKeyObjectLabel)
+ {
+ options.private_key_object_label = ByteCursorFromString(*m_privateKeyObjectLabel);
+ }
+
+ if (m_certificateFilePath)
+ {
+ options.cert_file_path = ByteCursorFromString(*m_certificateFilePath);
+ }
+
+ if (m_certificateFileContents)
+ {
+ options.cert_file_contents = ByteCursorFromString(*m_certificateFileContents);
+ }
+
+ return options;
+ }
+
+ TlsConnectionOptions::TlsConnectionOptions() noexcept : m_lastError(AWS_ERROR_SUCCESS), m_isInit(false) {}
+
+ TlsConnectionOptions::TlsConnectionOptions(aws_tls_ctx *ctx, Allocator *allocator) noexcept
+ : m_allocator(allocator), m_lastError(AWS_ERROR_SUCCESS), m_isInit(true)
+ {
+ aws_tls_connection_options_init_from_ctx(&m_tls_connection_options, ctx);
+ }
+
+ TlsConnectionOptions::~TlsConnectionOptions()
+ {
+ if (m_isInit)
+ {
+ aws_tls_connection_options_clean_up(&m_tls_connection_options);
+ m_isInit = false;
+ }
+ }
+
+ TlsConnectionOptions::TlsConnectionOptions(const TlsConnectionOptions &options) noexcept
+ {
+ m_isInit = false;
+ AWS_ZERO_STRUCT(m_tls_connection_options);
+
+ if (options.m_isInit)
+ {
+ m_allocator = options.m_allocator;
+
+ if (!aws_tls_connection_options_copy(&m_tls_connection_options, &options.m_tls_connection_options))
+ {
+ m_isInit = true;
+ }
+ else
+ {
+ m_lastError = LastErrorOrUnknown();
+ }
+ }
+ }
+
+ TlsConnectionOptions &TlsConnectionOptions::operator=(const TlsConnectionOptions &options) noexcept
+ {
+ if (this != &options)
+ {
+ if (m_isInit)
+ {
+ aws_tls_connection_options_clean_up(&m_tls_connection_options);
+ }
+
+ m_isInit = false;
+ AWS_ZERO_STRUCT(m_tls_connection_options);
+
+ if (options.m_isInit)
+ {
+ m_allocator = options.m_allocator;
+ if (!aws_tls_connection_options_copy(
+ &m_tls_connection_options, &options.m_tls_connection_options))
+ {
+ m_isInit = true;
+ }
+ else
+ {
+ m_lastError = LastErrorOrUnknown();
+ }
+ }
+ }
+
+ return *this;
+ }
+
+ TlsConnectionOptions::TlsConnectionOptions(TlsConnectionOptions &&options) noexcept
+ : m_isInit(options.m_isInit)
+ {
+ if (options.m_isInit)
+ {
+ m_tls_connection_options = options.m_tls_connection_options;
+ m_allocator = options.m_allocator;
+ AWS_ZERO_STRUCT(options.m_tls_connection_options);
+ options.m_isInit = false;
+ }
+ }
+
+ TlsConnectionOptions &TlsConnectionOptions::operator=(TlsConnectionOptions &&options) noexcept
+ {
+ if (this != &options)
+ {
+ if (m_isInit)
+ {
+ aws_tls_connection_options_clean_up(&m_tls_connection_options);
+ }
+
+ m_isInit = false;
+
+ if (options.m_isInit)
+ {
+ m_tls_connection_options = options.m_tls_connection_options;
+ AWS_ZERO_STRUCT(options.m_tls_connection_options);
+ options.m_isInit = false;
+ m_isInit = true;
+ m_allocator = options.m_allocator;
+ }
+ }
+
+ return *this;
+ }
+
+ bool TlsConnectionOptions::SetServerName(ByteCursor &serverName) noexcept
+ {
+ if (!isValid())
+ {
+ m_lastError = LastErrorOrUnknown();
+ return false;
+ }
+
+ if (aws_tls_connection_options_set_server_name(&m_tls_connection_options, m_allocator, &serverName))
+ {
+ m_lastError = LastErrorOrUnknown();
+ return false;
+ }
+
+ return true;
+ }
+
+ bool TlsConnectionOptions::SetAlpnList(const char *alpnList) noexcept
+ {
+ if (!isValid())
+ {
+ m_lastError = LastErrorOrUnknown();
+ return false;
+ }
+
+ if (aws_tls_connection_options_set_alpn_list(&m_tls_connection_options, m_allocator, alpnList))
+ {
+ m_lastError = LastErrorOrUnknown();
+ return false;
+ }
+
+ return true;
+ }
+
+ TlsContext::TlsContext() noexcept : m_ctx(nullptr), m_initializationError(AWS_ERROR_SUCCESS) {}
+
+ TlsContext::TlsContext(TlsContextOptions &options, TlsMode mode, Allocator *allocator) noexcept
+ : m_ctx(nullptr), m_initializationError(AWS_ERROR_SUCCESS)
+ {
+#if BYO_CRYPTO
+ if (!ApiHandle::GetBYOCryptoNewTlsContextImplCallback() ||
+ !ApiHandle::GetBYOCryptoDeleteTlsContextImplCallback())
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS,
+ "Must call ApiHandle::SetBYOCryptoTlsContextCallbacks() before TlsContext can be created");
+ m_initializationError = AWS_IO_TLS_CTX_ERROR;
+ return;
+ }
+
+ void *impl = ApiHandle::GetBYOCryptoNewTlsContextImplCallback()(options, mode, allocator);
+ if (!impl)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS, "Creation callback from ApiHandle::SetBYOCryptoTlsContextCallbacks() failed");
+ m_initializationError = AWS_IO_TLS_CTX_ERROR;
+ return;
+ }
+
+ auto underlying_tls_ctx = static_cast<aws_tls_ctx *>(aws_mem_calloc(allocator, 1, sizeof(aws_tls_ctx)));
+ underlying_tls_ctx->alloc = allocator;
+ underlying_tls_ctx->impl = impl;
+
+ aws_ref_count_init(&underlying_tls_ctx->ref_count, underlying_tls_ctx, [](void *userdata) {
+ auto dying_ctx = static_cast<aws_tls_ctx *>(userdata);
+ ApiHandle::GetBYOCryptoDeleteTlsContextImplCallback()(dying_ctx->impl);
+ aws_mem_release(dying_ctx->alloc, dying_ctx);
+ });
+
+ m_ctx.reset(underlying_tls_ctx, aws_tls_ctx_release);
+#else
+ if (mode == TlsMode::CLIENT)
+ {
+ aws_tls_ctx *underlying_tls_ctx = aws_tls_client_ctx_new(allocator, &options.m_options);
+ if (underlying_tls_ctx != nullptr)
+ {
+ m_ctx.reset(underlying_tls_ctx, aws_tls_ctx_release);
+ }
+ }
+ else
+ {
+ aws_tls_ctx *underlying_tls_ctx = aws_tls_server_ctx_new(allocator, &options.m_options);
+ if (underlying_tls_ctx != nullptr)
+ {
+ m_ctx.reset(underlying_tls_ctx, aws_tls_ctx_release);
+ }
+ }
+ if (!m_ctx)
+ {
+ m_initializationError = Aws::Crt::LastErrorOrUnknown();
+ }
+#endif // BYO_CRYPTO
+ }
+
+ TlsConnectionOptions TlsContext::NewConnectionOptions() const noexcept
+ {
+ if (!isValid())
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS, "Trying to call TlsContext::NewConnectionOptions from an invalid TlsContext.");
+ return TlsConnectionOptions();
+ }
+
+ return TlsConnectionOptions(m_ctx.get(), m_ctx->alloc);
+ }
+
+ TlsChannelHandler::TlsChannelHandler(
+ struct aws_channel_slot *,
+ const struct aws_tls_connection_options &options,
+ Allocator *allocator)
+ : ChannelHandler(allocator)
+ {
+ m_OnNegotiationResult = options.on_negotiation_result;
+ m_userData = options.user_data;
+ aws_byte_buf_init(&m_protocolByteBuf, allocator, 16);
+ }
+
+ TlsChannelHandler::~TlsChannelHandler() { aws_byte_buf_clean_up(&m_protocolByteBuf); }
+
+ void TlsChannelHandler::CompleteTlsNegotiation(int errorCode)
+ {
+ m_OnNegotiationResult(&this->m_handler, GetSlot(), errorCode, m_userData);
+ }
+
+ ClientTlsChannelHandler::ClientTlsChannelHandler(
+ struct aws_channel_slot *slot,
+ const struct aws_tls_connection_options &options,
+ Allocator *allocator)
+ : TlsChannelHandler(slot, options, allocator)
+ {
+ }
+
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
+
+#if BYO_CRYPTO
+AWS_EXTERN_C_BEGIN
+
+bool aws_tls_is_alpn_available(void)
+{
+ const auto &callback = Aws::Crt::ApiHandle::GetBYOCryptoIsTlsAlpnSupportedCallback();
+ if (!callback)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_IO_TLS, "Must call ApiHandle::SetBYOCryptoTlsContextCallbacks() before ALPN can be queried");
+ return false;
+ }
+ return callback();
+}
+
+struct aws_byte_buf aws_tls_handler_protocol(struct aws_channel_handler *handler)
+{
+ auto *channelHandler = reinterpret_cast<Aws::Crt::Io::ChannelHandler *>(handler->impl);
+ auto *tlsHandler = static_cast<Aws::Crt::Io::TlsChannelHandler *>(channelHandler);
+ Aws::Crt::String protocolString = const_cast<const Aws::Crt::Io::TlsChannelHandler *>(tlsHandler)->GetProtocol();
+
+ tlsHandler->m_protocolByteBuf.len = 0;
+ aws_byte_cursor protocolCursor = Aws::Crt::ByteCursorFromString(protocolString);
+ aws_byte_buf_append_dynamic(&tlsHandler->m_protocolByteBuf, &protocolCursor);
+ return tlsHandler->m_protocolByteBuf;
+}
+
+AWS_EXTERN_C_END
+#endif /* BYO_CRYPTO */
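
A sketch tying the pieces above together: default client TLS options, a client-mode TlsContext, and per-connection options with the server name set (the host name is hypothetical):

#include <aws/crt/Api.h>
#include <aws/crt/io/TlsOptions.h>

int main()
{
    Aws::Crt::ApiHandle apiHandle;

    auto ctxOptions = Aws::Crt::Io::TlsContextOptions::InitDefaultClient(Aws::Crt::ApiAllocator());
    Aws::Crt::Io::TlsContext tlsContext(ctxOptions, Aws::Crt::Io::TlsMode::CLIENT, Aws::Crt::ApiAllocator());
    if (!tlsContext) // validity check, also used later in this patch by Mqtt5ClientBuilder::Build()
    {
        return 1;
    }

    auto connectionOptions = tlsContext.NewConnectionOptions();
    Aws::Crt::String host = "example.com"; // hypothetical endpoint
    auto serverName = Aws::Crt::ByteCursorFromString(host);
    return connectionOptions.SetServerName(serverName) ? 0 : 1;
}
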
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp b/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
new file mode 100644
index 0000000000..dd43f3249e
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/io/Uri.cpp
@@ -0,0 +1,145 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/io/Uri.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Io
+ {
+ Uri::Uri() noexcept : m_lastError(AWS_ERROR_SUCCESS), m_isInit(false) { AWS_ZERO_STRUCT(m_uri); }
+
+ Uri::~Uri()
+ {
+ if (m_isInit)
+ {
+ aws_uri_clean_up(&m_uri);
+ m_isInit = false;
+ }
+ }
+
+ Uri::Uri(const ByteCursor &cursor, Allocator *allocator) noexcept
+ : m_lastError(AWS_ERROR_SUCCESS), m_isInit(false)
+ {
+ if (!aws_uri_init_parse(&m_uri, allocator, &cursor))
+ {
+ m_isInit = true;
+ }
+ else
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ Uri::Uri(aws_uri_builder_options &builderOptions, Allocator *allocator) noexcept
+ : m_lastError(AWS_ERROR_SUCCESS), m_isInit(false)
+ {
+ if (!aws_uri_init_from_builder_options(&m_uri, allocator, &builderOptions))
+ {
+ m_isInit = true;
+ }
+ else
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+
+ Uri::Uri(const Uri &other) : m_lastError(AWS_ERROR_SUCCESS), m_isInit(false)
+ {
+ if (other.m_isInit)
+ {
+ ByteCursor uriCursor = other.GetFullUri();
+
+ if (!aws_uri_init_parse(&m_uri, other.m_uri.allocator, &uriCursor))
+ {
+ m_isInit = true;
+ }
+ else
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+ }
+
+ Uri &Uri::operator=(const Uri &other)
+ {
+ if (this != &other)
+ {
+ m_isInit = false;
+ m_lastError = AWS_ERROR_SUCCESS;
+
+ if (other.m_isInit)
+ {
+ ByteCursor uriCursor = other.GetFullUri();
+
+ if (!aws_uri_init_parse(&m_uri, other.m_uri.allocator, &uriCursor))
+ {
+ m_isInit = true;
+ }
+ else
+ {
+ m_lastError = aws_last_error();
+ }
+ }
+ }
+
+ return *this;
+ }
+
+ Uri::Uri(Uri &&uri) noexcept : m_lastError(AWS_ERROR_SUCCESS), m_isInit(uri.m_isInit)
+ {
+ if (uri.m_isInit)
+ {
+ m_uri = uri.m_uri;
+ AWS_ZERO_STRUCT(uri.m_uri);
+ uri.m_isInit = false;
+ }
+ }
+
+ Uri &Uri::operator=(Uri &&uri) noexcept
+ {
+ if (this != &uri)
+ {
+ if (m_isInit)
+ {
+ aws_uri_clean_up(&m_uri);
+ }
+
+ if (uri.m_isInit)
+ {
+ m_uri = uri.m_uri;
+ AWS_ZERO_STRUCT(uri.m_uri);
+ uri.m_isInit = false;
+ m_isInit = true;
+ m_lastError = AWS_ERROR_SUCCESS;
+ }
+ else
+ {
+ m_lastError = uri.m_lastError;
+ }
+ }
+
+ return *this;
+ }
+
+ ByteCursor Uri::GetScheme() const noexcept { return m_uri.scheme; }
+
+ ByteCursor Uri::GetAuthority() const noexcept { return m_uri.authority; }
+
+ ByteCursor Uri::GetPath() const noexcept { return m_uri.path; }
+
+ ByteCursor Uri::GetQueryString() const noexcept { return m_uri.query_string; }
+
+ ByteCursor Uri::GetHostName() const noexcept { return m_uri.host_name; }
+
+ uint16_t Uri::GetPort() const noexcept { return m_uri.port; }
+
+ ByteCursor Uri::GetPathAndQuery() const noexcept { return m_uri.path_and_query; }
+
+ ByteCursor Uri::GetFullUri() const noexcept { return ByteCursorFromByteBuf(m_uri.uri_str); }
+ } // namespace Io
+ } // namespace Crt
+} // namespace Aws
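
A parsing sketch for the wrapper above. The URL is hypothetical, and the boolean validity check is an assumption (the diff shows m_isInit/m_lastError but not their accessors):

#include <aws/crt/Api.h>
#include <aws/crt/io/Uri.h>

#include <cstdio>

int main()
{
    Aws::Crt::ApiHandle apiHandle;

    Aws::Crt::String url = "https://example.com:8443/index.html?a=1"; // hypothetical
    Aws::Crt::Io::Uri uri(Aws::Crt::ByteCursorFromString(url), Aws::Crt::ApiAllocator());
    if (!uri) // assumption: operator bool reflects the parse result
    {
        return 1;
    }

    auto hostName = uri.GetHostName();
    std::printf("host=%.*s port=%u\n", (int)hostName.len, (const char *)hostName.ptr, (unsigned)uri.GetPort());
    return 0;
}
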
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp b/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
new file mode 100644
index 0000000000..1c69203a40
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/iot/Mqtt5Client.cpp
@@ -0,0 +1,641 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+
+#include <aws/crt/Api.h>
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/auth/Sigv4Signing.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+#include <aws/crt/mqtt/Mqtt5Packets.h>
+
+#include <aws/iot/Mqtt5Client.h>
+
+#if !BYO_CRYPTO
+
+namespace Aws
+{
+ namespace Iot
+ {
+ static Crt::String AddToUsernameParameter(
+ Crt::String currentUsername,
+ Crt::String parameterValue,
+ Crt::String parameterPreText)
+ {
+ Crt::String return_string = currentUsername;
+ if (return_string.find("?") != Crt::String::npos)
+ {
+ return_string += "&";
+ }
+ else
+ {
+ return_string += "?";
+ }
+
+ if (parameterValue.find(parameterPreText) != Crt::String::npos)
+ {
+ return return_string + parameterValue;
+ }
+ else
+ {
+ return return_string + parameterPreText + parameterValue;
+ }
+ }
+
+ static bool buildMqtt5FinalUsername(
+ Crt::Optional<Mqtt5CustomAuthConfig> customAuthConfig,
+ Crt::String &username)
+ {
+ if (customAuthConfig.has_value())
+ {
+ /* If we're using token-signing authentication, then all token properties must be set */
+ bool usingSigning = false;
+ if (customAuthConfig->GetTokenValue().has_value() || customAuthConfig->GetTokenKeyName().has_value() ||
+ customAuthConfig->GetTokenSignature().has_value())
+ {
+ usingSigning = true;
+ if (!customAuthConfig->GetTokenValue().has_value() ||
+ !customAuthConfig->GetTokenKeyName().has_value() ||
+ !customAuthConfig->GetTokenSignature().has_value())
+ {
+ return false;
+ }
+ }
+ Crt::String usernameString = "";
+
+ if (!customAuthConfig->GetUsername().has_value())
+ {
+ if (!username.empty())
+ {
+ usernameString += username;
+ }
+ }
+ else
+ {
+ usernameString += customAuthConfig->GetUsername().value();
+ }
+
+ if (customAuthConfig->GetAuthorizerName().has_value())
+ {
+ usernameString = AddToUsernameParameter(
+ usernameString, customAuthConfig->GetAuthorizerName().value(), "x-amz-customauthorizer-name=");
+ }
+ if (usingSigning)
+ {
+ usernameString = AddToUsernameParameter(
+ usernameString,
+ customAuthConfig->GetTokenValue().value(),
+ customAuthConfig->GetTokenKeyName().value() + "=");
+ usernameString = AddToUsernameParameter(
+ usernameString,
+ customAuthConfig->GetTokenSignature().value(),
+ "x-amz-customauthorizer-signature=");
+ }
+
+ username = usernameString;
+ }
+ return true;
+ }
+
+ /*****************************************************
+ *
+ * Mqtt5ClientOptionsBuilder
+ *
+ *****************************************************/
+
+ Mqtt5ClientBuilder::Mqtt5ClientBuilder(Crt::Allocator *allocator) noexcept
+ : m_allocator(allocator), m_port(0), m_lastError(0), m_enableMetricsCollection(true)
+ {
+ m_options = new Crt::Mqtt5::Mqtt5ClientOptions(allocator);
+ }
+
+ Mqtt5ClientBuilder::Mqtt5ClientBuilder(int error, Crt::Allocator *allocator) noexcept
+ : m_allocator(allocator), m_options(nullptr), m_lastError(error)
+ {
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithMtlsFromPath(
+ const Crt::String hostName,
+ const char *certPath,
+ const char *pkeyPath,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions =
+ Crt::Io::TlsContextOptions::InitClientWithMtls(certPath, pkeyPath, allocator);
+ if (!result->m_tlsConnectionOptions.value())
+ {
+ result->m_lastError = result->m_tlsConnectionOptions->LastError();
+ return result;
+ }
+ result->withHostName(hostName);
+ return result;
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithMtlsFromMemory(
+ const Crt::String hostName,
+ const Crt::ByteCursor &cert,
+ const Crt::ByteCursor &pkey,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions = Crt::Io::TlsContextOptions::InitClientWithMtls(cert, pkey, allocator);
+ if (!result->m_tlsConnectionOptions.value())
+ {
+ result->m_lastError = result->m_tlsConnectionOptions->LastError();
+ return result;
+ }
+ result->withHostName(hostName);
+ return result;
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithMtlsPkcs11(
+ const Crt::String hostName,
+ const Crt::Io::TlsContextPkcs11Options &pkcs11Options,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions =
+ Crt::Io::TlsContextOptions::InitClientWithMtlsPkcs11(pkcs11Options, allocator);
+ if (!result->m_tlsConnectionOptions.value())
+ {
+ result->m_lastError = result->m_tlsConnectionOptions->LastError();
+ return result;
+ }
+ result->withHostName(hostName);
+ return result;
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithWindowsCertStorePath(
+ const Crt::String hostName,
+ const char *windowsCertStorePath,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions =
+ Crt::Io::TlsContextOptions::InitClientWithMtlsSystemPath(windowsCertStorePath, allocator);
+ if (!result->m_tlsConnectionOptions.value())
+ {
+ result->m_lastError = result->m_tlsConnectionOptions->LastError();
+ return result;
+ }
+ result->withHostName(hostName);
+ return result;
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithWebsocket(
+ const Crt::String hostName,
+ const WebsocketConfig &config,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions = Crt::Io::TlsContextOptions::InitDefaultClient();
+ result->withHostName(hostName);
+ result->m_websocketConfig = config;
+ return result;
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithCustomAuthorizer(
+ const Crt::String hostName,
+ const Mqtt5CustomAuthConfig &customAuthConfig,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions = Crt::Io::TlsContextOptions::InitDefaultClient();
+ result->withHostName(hostName);
+ result->WithCustomAuthorizer(customAuthConfig);
+ return result;
+ }
+
+ Mqtt5ClientBuilder *Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithCustomAuthorizerWebsocket(
+ const Crt::String hostName,
+ const Mqtt5CustomAuthConfig &customAuthConfig,
+ const WebsocketConfig &config,
+ Crt::Allocator *allocator) noexcept
+ {
+ Mqtt5ClientBuilder *result = new Mqtt5ClientBuilder(allocator);
+ result->m_tlsConnectionOptions = Crt::Io::TlsContextOptions::InitDefaultClient();
+ result->withHostName(hostName);
+ result->m_websocketConfig = config;
+ result->WithCustomAuthorizer(customAuthConfig);
+ return result;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withHostName(const Crt::String hostName)
+ {
+ m_options->withHostName(hostName);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withPort(uint16_t port) noexcept
+ {
+ m_port = port;
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::WithCertificateAuthority(const char *caPath) noexcept
+ {
+ if (m_tlsConnectionOptions)
+ {
+ if (!m_tlsConnectionOptions->OverrideDefaultTrustStore(nullptr, caPath))
+ {
+ m_lastError = m_tlsConnectionOptions->LastError();
+ }
+ }
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::WithCertificateAuthority(const Crt::ByteCursor &cert) noexcept
+ {
+ if (m_tlsConnectionOptions)
+ {
+ if (!m_tlsConnectionOptions->OverrideDefaultTrustStore(cert))
+ {
+ m_lastError = m_tlsConnectionOptions->LastError();
+ }
+ }
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withHttpProxyOptions(
+ const Crt::Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept
+ {
+ m_proxyOptions = proxyOptions;
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::WithCustomAuthorizer(const Iot::Mqtt5CustomAuthConfig &config) noexcept
+ {
+ m_customAuthConfig = config;
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withConnectOptions(
+ std::shared_ptr<ConnectPacket> packetConnect) noexcept
+ {
+ m_connectOptions = packetConnect;
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withSessionBehavior(ClientSessionBehaviorType sessionBehavior) noexcept
+ {
+ m_options->withSessionBehavior(sessionBehavior);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withClientExtendedValidationAndFlowControl(
+ ClientExtendedValidationAndFlowControl clientExtendedValidationAndFlowControl) noexcept
+ {
+ m_options->withClientExtendedValidationAndFlowControl(clientExtendedValidationAndFlowControl);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withOfflineQueueBehavior(
+ ClientOperationQueueBehaviorType operationQueueBehavior) noexcept
+ {
+ m_options->withOfflineQueueBehavior(operationQueueBehavior);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withReconnectOptions(ReconnectOptions reconnectOptions) noexcept
+ {
+ m_options->withReconnectOptions(reconnectOptions);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withPingTimeoutMs(uint32_t pingTimeoutMs) noexcept
+ {
+ m_options->withPingTimeoutMs(pingTimeoutMs);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withConnackTimeoutMs(uint32_t connackTimeoutMs) noexcept
+ {
+ m_options->withConnackTimeoutMs(connackTimeoutMs);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withAckTimeoutSeconds(uint32_t ackTimeoutSeconds) noexcept
+ {
+ m_options->withAckTimeoutSeconds(ackTimeoutSeconds);
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::WithSdkName(const Crt::String &sdkName)
+ {
+ m_sdkName = sdkName;
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::WithSdkVersion(const Crt::String &sdkVersion)
+ {
+ m_sdkVersion = sdkVersion;
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withClientConnectionSuccessCallback(
+ OnConnectionSuccessHandler callback) noexcept
+ {
+ m_options->withClientConnectionSuccessCallback(std::move(callback));
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withClientConnectionFailureCallback(
+ OnConnectionFailureHandler callback) noexcept
+ {
+ m_options->withClientConnectionFailureCallback(std::move(callback));
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withClientDisconnectionCallback(
+ OnDisconnectionHandler callback) noexcept
+ {
+ m_options->withClientDisconnectionCallback(std::move(callback));
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withClientStoppedCallback(OnStoppedHandler callback) noexcept
+ {
+ m_options->withClientStoppedCallback(std::move(callback));
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withClientAttemptingConnectCallback(
+ OnAttemptingConnectHandler callback) noexcept
+ {
+ m_options->withClientAttemptingConnectCallback(std::move(callback));
+ return *this;
+ }
+
+ Mqtt5ClientBuilder &Mqtt5ClientBuilder::withPublishReceivedCallback(OnPublishReceivedHandler callback) noexcept
+ {
+ m_options->withPublishReceivedCallback(std::move(callback));
+ return *this;
+ }
+
+ std::shared_ptr<Mqtt5Client> Mqtt5ClientBuilder::Build() noexcept
+ {
+ if (m_lastError != 0)
+ {
+ return nullptr;
+ }
+
+ uint16_t port = m_port;
+
+ if (!port) // port defaults to 0
+ {
+ if (m_websocketConfig || Crt::Io::TlsContextOptions::IsAlpnSupported())
+ {
+ port = 443;
+ }
+ else
+ {
+ port = 8883;
+ }
+ }
+
+ if (port == 443 && !m_websocketConfig && Crt::Io::TlsContextOptions::IsAlpnSupported() &&
+ !m_customAuthConfig.has_value())
+ {
+ if (!m_tlsConnectionOptions->SetAlpnList("x-amzn-mqtt-ca"))
+ {
+ return nullptr;
+ }
+ }
+
+ if (m_customAuthConfig.has_value())
+ {
+ if (port != 443)
+ {
+ AWS_LOGF_WARN(
+ AWS_LS_MQTT_GENERAL,
+ "Attempting to connect to authorizer with unsupported port. Port is not 443...");
+ }
+ if (!m_websocketConfig)
+ {
+ if (!m_tlsConnectionOptions->SetAlpnList("mqtt"))
+ {
+ return nullptr;
+ }
+ }
+ }
+
+ // add metrics string to username (if metrics enabled)
+ if (m_enableMetricsCollection || m_customAuthConfig.has_value())
+ {
+ Crt::String username = "";
+ if (m_connectOptions != nullptr)
+ {
+ if (m_connectOptions->getUsername().has_value())
+ username = m_connectOptions->getUsername().value();
+ }
+ else
+ {
+ m_connectOptions = std::make_shared<ConnectPacket>(m_allocator);
+ }
+
+ if (m_customAuthConfig.has_value())
+ {
+ if (!buildMqtt5FinalUsername(m_customAuthConfig, username))
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT5_CLIENT,
+ "Failed to set up CustomAuthorizerConfig, please check that the parameters are set "
+ "correctly.");
+ return nullptr;
+ }
+ if (m_customAuthConfig->GetPassword().has_value())
+ {
+ m_connectOptions->withPassword(m_customAuthConfig->GetPassword().value());
+ }
+ }
+
+ if (m_enableMetricsCollection)
+ {
+ username = AddToUsernameParameter(username, "SDK", m_sdkName);
+ username = AddToUsernameParameter(username, "Version", m_sdkVersion);
+ }
+ m_connectOptions->withUserName(username);
+ }
+
+ auto tlsContext =
+ Crt::Io::TlsContext(m_tlsConnectionOptions.value(), Crt::Io::TlsMode::CLIENT, m_allocator);
+ if (!tlsContext)
+ {
+ return nullptr;
+ }
+
+ m_options->withPort(port).withTlsConnectionOptions(tlsContext.NewConnectionOptions());
+
+ if (m_connectOptions != nullptr)
+ {
+ m_options->withConnectOptions(m_connectOptions);
+ }
+
+ if (m_websocketConfig.has_value())
+ {
+ auto websocketConfig = m_websocketConfig.value();
+ auto signerTransform = [websocketConfig](
+ std::shared_ptr<Crt::Http::HttpRequest> req,
+ const Crt::Mqtt::OnWebSocketHandshakeInterceptComplete &onComplete) {
+ // It's only a happy coincidence that these function signatures match. This callback runs
+ // when signing completes, and it invokes the callback that completes the websocket handshake.
+ auto signingComplete =
+ [onComplete](const std::shared_ptr<Aws::Crt::Http::HttpRequest> &req1, int errorCode) {
+ onComplete(req1, errorCode);
+ };
+
+ auto signerConfig = websocketConfig.CreateSigningConfigCb();
+
+ websocketConfig.Signer->SignRequest(req, *signerConfig, signingComplete);
+ };
+
+ m_options->withWebsocketHandshakeTransformCallback(signerTransform);
+ bool useWebsocketProxyOptions =
+ m_websocketConfig->ProxyOptions.has_value() && !m_proxyOptions.has_value();
+ if (useWebsocketProxyOptions)
+ {
+ m_options->withHttpProxyOptions(m_websocketConfig->ProxyOptions.value());
+ }
+ else if (m_proxyOptions.has_value())
+ {
+ m_options->withHttpProxyOptions(m_proxyOptions.value());
+ }
+ }
+
+ return Crt::Mqtt5::Mqtt5Client::NewMqtt5Client(*m_options, m_allocator);
+ }
+
+ Aws::Iot::Mqtt5CustomAuthConfig::Mqtt5CustomAuthConfig(Crt::Allocator *allocator) noexcept
+ : m_allocator(allocator)
+ {
+ AWS_ZERO_STRUCT(m_passwordStorage);
+ }
+
+ Aws::Iot::Mqtt5CustomAuthConfig::~Mqtt5CustomAuthConfig() { aws_byte_buf_clean_up(&m_passwordStorage); }
+
+ Aws::Iot::Mqtt5CustomAuthConfig::Mqtt5CustomAuthConfig(const Mqtt5CustomAuthConfig &rhs)
+ {
+ if (&rhs != this)
+ {
+ m_allocator = rhs.m_allocator;
+ if (rhs.m_authorizerName.has_value())
+ {
+ m_authorizerName = rhs.m_authorizerName.value();
+ }
+ if (rhs.m_tokenKeyName.has_value())
+ {
+ m_tokenKeyName = rhs.m_tokenKeyName.value();
+ }
+ if (rhs.m_tokenSignature.has_value())
+ {
+ m_tokenSignature = rhs.m_tokenSignature.value();
+ }
+ if (rhs.m_tokenValue.has_value())
+ {
+ m_tokenValue = rhs.m_tokenValue.value();
+ }
+ if (rhs.m_username.has_value())
+ {
+ m_username = rhs.m_username.value();
+ }
+ if (rhs.m_password.has_value())
+ {
+ AWS_ZERO_STRUCT(m_passwordStorage);
+ aws_byte_buf_init_copy_from_cursor(&m_passwordStorage, m_allocator, rhs.m_password.value());
+ m_password = aws_byte_cursor_from_buf(&m_passwordStorage);
+ }
+ }
+ }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::operator=(const Mqtt5CustomAuthConfig &rhs)
+ {
+ if (&rhs != this)
+ {
+ m_allocator = rhs.m_allocator;
+ if (rhs.m_authorizerName.has_value())
+ {
+ m_authorizerName = rhs.m_authorizerName.value();
+ }
+ if (rhs.m_tokenKeyName.has_value())
+ {
+ m_tokenKeyName = rhs.m_tokenKeyName.value();
+ }
+ if (rhs.m_tokenSignature.has_value())
+ {
+ m_tokenSignature = rhs.m_tokenSignature.value();
+ }
+ if (rhs.m_tokenValue.has_value())
+ {
+ m_tokenValue = rhs.m_tokenValue.value();
+ }
+ if (rhs.m_username.has_value())
+ {
+ m_username = rhs.m_username.value();
+ }
+ if (rhs.m_password.has_value())
+ {
+ aws_byte_buf_clean_up(&m_passwordStorage);
+ AWS_ZERO_STRUCT(m_passwordStorage);
+ aws_byte_buf_init_copy_from_cursor(&m_passwordStorage, m_allocator, rhs.m_password.value());
+ m_password = aws_byte_cursor_from_buf(&m_passwordStorage);
+ }
+ }
+ return *this;
+ }
+
+ const Crt::Optional<Crt::String> &Mqtt5CustomAuthConfig::GetAuthorizerName() { return m_authorizerName; }
+
+ const Crt::Optional<Crt::String> &Mqtt5CustomAuthConfig::GetUsername() { return m_username; }
+
+ const Crt::Optional<Crt::ByteCursor> &Mqtt5CustomAuthConfig::GetPassword() { return m_password; }
+
+ const Crt::Optional<Crt::String> &Mqtt5CustomAuthConfig::GetTokenKeyName() { return m_tokenKeyName; }
+
+ const Crt::Optional<Crt::String> &Mqtt5CustomAuthConfig::GetTokenValue() { return m_tokenValue; }
+
+ const Crt::Optional<Crt::String> &Mqtt5CustomAuthConfig::GetTokenSignature() { return m_tokenSignature; }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::WithAuthorizerName(Crt::String authName)
+ {
+ m_authorizerName = std::move(authName);
+ return *this;
+ }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::WithUsername(Crt::String username)
+ {
+ m_username = std::move(username);
+ return *this;
+ }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::WithPassword(Crt::ByteCursor password)
+ {
+ aws_byte_buf_clean_up(&m_passwordStorage);
+ AWS_ZERO_STRUCT(m_passwordStorage);
+ aws_byte_buf_init_copy_from_cursor(&m_passwordStorage, m_allocator, password);
+ m_password = aws_byte_cursor_from_buf(&m_passwordStorage);
+ return *this;
+ }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::WithTokenKeyName(Crt::String tokenKeyName)
+ {
+ m_tokenKeyName = std::move(tokenKeyName);
+ return *this;
+ }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::WithTokenValue(Crt::String tokenValue)
+ {
+ m_tokenValue = std::move(tokenValue);
+ return *this;
+ }
+
+ Mqtt5CustomAuthConfig &Aws::Iot::Mqtt5CustomAuthConfig::WithTokenSignature(Crt::String tokenSignature)
+ {
+ m_tokenSignature = std::move(tokenSignature);
+ return *this;
+ }
+
+ } // namespace Iot
+} // namespace Aws
+
+#endif // !BYO_CRYPTO
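
A sketch of the builder flow above; the endpoint and credential paths are hypothetical:

#include <aws/crt/Api.h>
#include <aws/iot/Mqtt5Client.h>

#include <memory>

int main()
{
    Aws::Crt::ApiHandle apiHandle;

    // Hypothetical endpoint and mTLS credential files.
    std::unique_ptr<Aws::Iot::Mqtt5ClientBuilder> builder(
        Aws::Iot::Mqtt5ClientBuilder::NewMqtt5ClientBuilderWithMtlsFromPath(
            "example-ats.iot.us-east-1.amazonaws.com", "cert.pem", "key.pem", Aws::Crt::ApiAllocator()));

    builder->withPort(8883).withPingTimeoutMs(30000);

    auto client = builder->Build(); // returns nullptr if any earlier step recorded an error
    return client ? 0 : 1;
}
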
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp b/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
new file mode 100644
index 0000000000..3f80782b3f
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttClient.cpp
@@ -0,0 +1,541 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/iot/MqttClient.h>
+
+#include <aws/crt/Api.h>
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/auth/Sigv4Signing.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+
+#if !BYO_CRYPTO
+
+namespace Aws
+{
+ namespace Iot
+ {
+
+ MqttClientConnectionConfig::MqttClientConnectionConfig(int lastError) noexcept
+ : m_port(0), m_lastError(lastError)
+ {
+ }
+
+ MqttClientConnectionConfig MqttClientConnectionConfig::CreateInvalid(int lastError) noexcept
+ {
+ return MqttClientConnectionConfig(lastError);
+ }
+
+ MqttClientConnectionConfig::MqttClientConnectionConfig(
+ const Crt::String &endpoint,
+ uint16_t port,
+ const Crt::Io::SocketOptions &socketOptions,
+ Crt::Io::TlsContext &&tlsContext)
+ : m_endpoint(endpoint), m_port(port), m_context(std::move(tlsContext)), m_socketOptions(socketOptions),
+ m_lastError(0)
+ {
+ }
+
+ MqttClientConnectionConfig::MqttClientConnectionConfig(
+ const Crt::String &endpoint,
+ uint16_t port,
+ const Crt::Io::SocketOptions &socketOptions,
+ Crt::Io::TlsContext &&tlsContext,
+ Crt::Mqtt::OnWebSocketHandshakeIntercept &&interceptor,
+ const Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> &proxyOptions)
+ : m_endpoint(endpoint), m_port(port), m_context(std::move(tlsContext)), m_socketOptions(socketOptions),
+ m_webSocketInterceptor(std::move(interceptor)), m_proxyOptions(proxyOptions), m_lastError(0)
+ {
+ }
+
+ MqttClientConnectionConfig::MqttClientConnectionConfig(
+ const Crt::String &endpoint,
+ uint16_t port,
+ const Crt::Io::SocketOptions &socketOptions,
+ Crt::Io::TlsContext &&tlsContext,
+ const Crt::Optional<Crt::Http::HttpClientConnectionProxyOptions> &proxyOptions)
+ : m_endpoint(endpoint), m_port(port), m_context(std::move(tlsContext)), m_socketOptions(socketOptions),
+ m_proxyOptions(proxyOptions), m_lastError(0)
+ {
+ }
+
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder()
+ : MqttClientConnectionConfigBuilder(Crt::ApiAllocator())
+ {
+ m_lastError = AWS_ERROR_INVALID_STATE;
+ }
+
+ // Common setup shared by all valid constructors
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder(Crt::Allocator *allocator) noexcept
+ : m_allocator(allocator), m_portOverride(0),
+# ifdef AWS_IOT_SDK_VERSION
+ m_sdkVersion(AWS_IOT_SDK_VERSION),
+# else
+ m_sdkVersion(AWS_CRT_CPP_VERSION),
+# endif
+ m_lastError(0)
+ {
+ m_socketOptions.SetConnectTimeoutMs(3000);
+ }
+
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder(
+ const char *certPath,
+ const char *pkeyPath,
+ Crt::Allocator *allocator) noexcept
+ : MqttClientConnectionConfigBuilder(allocator)
+ {
+ m_contextOptions = Crt::Io::TlsContextOptions::InitClientWithMtls(certPath, pkeyPath, allocator);
+ if (!m_contextOptions)
+ {
+ m_lastError = m_contextOptions.LastError();
+ return;
+ }
+ }
+
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder(
+ const Crt::ByteCursor &cert,
+ const Crt::ByteCursor &pkey,
+ Crt::Allocator *allocator) noexcept
+ : MqttClientConnectionConfigBuilder(allocator)
+ {
+ m_contextOptions = Crt::Io::TlsContextOptions::InitClientWithMtls(cert, pkey, allocator);
+ if (!m_contextOptions)
+ {
+ m_lastError = m_contextOptions.LastError();
+ return;
+ }
+ }
+
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder(
+ const Crt::Io::TlsContextPkcs11Options &pkcs11Options,
+ Crt::Allocator *allocator) noexcept
+ : MqttClientConnectionConfigBuilder(allocator)
+ {
+ m_contextOptions = Crt::Io::TlsContextOptions::InitClientWithMtlsPkcs11(pkcs11Options, allocator);
+ if (!m_contextOptions)
+ {
+ m_lastError = m_contextOptions.LastError();
+ return;
+ }
+ }
+
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder(
+ const char *windowsCertStorePath,
+ Crt::Allocator *allocator) noexcept
+ : MqttClientConnectionConfigBuilder(allocator)
+ {
+ m_contextOptions =
+ Crt::Io::TlsContextOptions::InitClientWithMtlsSystemPath(windowsCertStorePath, allocator);
+ if (!m_contextOptions)
+ {
+ m_lastError = m_contextOptions.LastError();
+ return;
+ }
+ }
+
+ MqttClientConnectionConfigBuilder::MqttClientConnectionConfigBuilder(
+ const WebsocketConfig &config,
+ Crt::Allocator *allocator) noexcept
+ : MqttClientConnectionConfigBuilder(allocator)
+ {
+ m_contextOptions = Crt::Io::TlsContextOptions::InitDefaultClient(allocator);
+ if (!m_contextOptions)
+ {
+ m_lastError = m_contextOptions.LastError();
+ return;
+ }
+
+ m_websocketConfig = config;
+ }
+
+ MqttClientConnectionConfigBuilder MqttClientConnectionConfigBuilder::NewDefaultBuilder() noexcept
+ {
+ MqttClientConnectionConfigBuilder return_value =
+ MqttClientConnectionConfigBuilder(Aws::Crt::ApiAllocator());
+ return_value.m_contextOptions = Crt::Io::TlsContextOptions::InitDefaultClient();
+ return return_value;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithEndpoint(const Crt::String &endpoint)
+ {
+ m_endpoint = endpoint;
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithEndpoint(Crt::String &&endpoint)
+ {
+ m_endpoint = std::move(endpoint);
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithMetricsCollection(bool enabled)
+ {
+ m_enableMetricsCollection = enabled;
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithSdkName(const Crt::String &sdkName)
+ {
+ m_sdkName = sdkName;
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithSdkVersion(
+ const Crt::String &sdkVersion)
+ {
+ m_sdkVersion = sdkVersion;
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithPortOverride(uint16_t port) noexcept
+ {
+ m_portOverride = port;
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithCertificateAuthority(
+ const char *caPath) noexcept
+ {
+ if (m_contextOptions)
+ {
+ if (!m_contextOptions.OverrideDefaultTrustStore(nullptr, caPath))
+ {
+ m_lastError = m_contextOptions.LastError();
+ }
+ }
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithCertificateAuthority(
+ const Crt::ByteCursor &cert) noexcept
+ {
+ if (m_contextOptions)
+ {
+ if (!m_contextOptions.OverrideDefaultTrustStore(cert))
+ {
+ m_lastError = m_contextOptions.LastError();
+ }
+ }
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithTcpKeepAlive() noexcept
+ {
+ m_socketOptions.SetKeepAlive(true);
+ return *this;
+ }
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithTcpConnectTimeout(
+ uint32_t connectTimeoutMs) noexcept
+ {
+ m_socketOptions.SetConnectTimeoutMs(connectTimeoutMs);
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithTcpKeepAliveTimeout(
+ uint16_t keepAliveTimeoutSecs) noexcept
+ {
+ m_socketOptions.SetKeepAliveTimeoutSec(keepAliveTimeoutSecs);
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithTcpKeepAliveInterval(
+ uint16_t keepAliveIntervalSecs) noexcept
+ {
+ m_socketOptions.SetKeepAliveIntervalSec(keepAliveIntervalSecs);
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithTcpKeepAliveMaxProbes(
+ uint16_t maxProbes) noexcept
+ {
+ m_socketOptions.SetKeepAliveMaxFailedProbes(maxProbes);
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithMinimumTlsVersion(
+ aws_tls_versions minimumTlsVersion) noexcept
+ {
+ m_contextOptions.SetMinimumTlsVersion(minimumTlsVersion);
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithHttpProxyOptions(
+ const Crt::Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept
+ {
+ m_proxyOptions = proxyOptions;
+ return *this;
+ }
+
+ Crt::String MqttClientConnectionConfigBuilder::AddToUsernameParameter(
+ Crt::String currentUsername,
+ Crt::String parameterValue,
+ Crt::String parameterPreText)
+ {
+ Crt::String return_string = currentUsername;
+ if (return_string.find("?") != Crt::String::npos)
+ {
+ return_string += "&";
+ }
+ else
+ {
+ return_string += "?";
+ }
+
+ if (parameterValue.find(parameterPreText) != Crt::String::npos)
+ {
+ return return_string + parameterValue;
+ }
+ else
+ {
+ return return_string + parameterPreText + parameterValue;
+ }
+ }
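+
+ // Illustrative note (not part of the upstream sources): a worked example of the
+ // concatenation performed above, assuming a username that already carries one
+ // query parameter:
+ //
+ //   AddToUsernameParameter("client?foo=bar", "MyAuthorizer", "x-amz-customauthorizer-name=")
+ //     returns "client?foo=bar&x-amz-customauthorizer-name=MyAuthorizer"
+ //
+ // If parameterValue already contains parameterPreText, only the value is appended
+ // after the '?' or '&' separator.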
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithCustomAuthorizer(
+ const Crt::String &username,
+ const Crt::String &authorizerName,
+ const Crt::String &authorizerSignature,
+ const Crt::String &password) noexcept
+ {
+ if (!m_contextOptions.IsAlpnSupported())
+ {
+ m_lastError = AWS_ERROR_INVALID_STATE;
+ return *this;
+ }
+
+ m_isUsingCustomAuthorizer = true;
+ Crt::String usernameString = "";
+
+ if (username.empty())
+ {
+ if (!m_username.empty())
+ {
+ usernameString += m_username;
+ }
+ }
+ else
+ {
+ usernameString += username;
+ }
+
+ if (!authorizerName.empty())
+ {
+ usernameString = AddToUsernameParameter(usernameString, authorizerName, "x-amz-customauthorizer-name=");
+ }
+ if (!authorizerSignature.empty())
+ {
+ usernameString =
+ AddToUsernameParameter(usernameString, authorizerSignature, "x-amz-customauthorizer-signature=");
+ }
+
+ m_username = usernameString;
+ m_password = password;
+
+ if (!m_websocketConfig)
+ {
+ if (!m_contextOptions.SetAlpnList("mqtt"))
+ {
+ m_lastError = m_contextOptions.LastError();
+ }
+ m_portOverride = 443;
+ }
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithUsername(
+ const Crt::String &username) noexcept
+ {
+ m_username = username;
+ return *this;
+ }
+
+ MqttClientConnectionConfigBuilder &MqttClientConnectionConfigBuilder::WithPassword(
+ const Crt::String &password) noexcept
+ {
+ m_password = password;
+ return *this;
+ }
+
+ MqttClientConnectionConfig MqttClientConnectionConfigBuilder::Build() noexcept
+ {
+ if (m_lastError != 0)
+ {
+ return MqttClientConnectionConfig::CreateInvalid(m_lastError);
+ }
+
+ uint16_t port = m_portOverride;
+
+ if (!m_portOverride)
+ {
+ if (m_websocketConfig || Crt::Io::TlsContextOptions::IsAlpnSupported())
+ {
+ port = 443;
+ }
+ else
+ {
+ port = 8883;
+ }
+ }
+
+ Crt::String username = m_username;
+ Crt::String password = m_password;
+
+ // Check to see if a custom authorizer is being used but not through the builder
+ if (!m_isUsingCustomAuthorizer)
+ {
+ if (!m_username.empty())
+ {
+ if (m_username.find_first_of("x-amz-customauthorizer-name=") != Crt::String::npos ||
+ m_username.find_first_of("x-amz-customauthorizer-signature=") != Crt::String::npos)
+ {
+ m_isUsingCustomAuthorizer = true;
+ }
+ }
+ }
+
+ if (port == 443 && !m_websocketConfig && Crt::Io::TlsContextOptions::IsAlpnSupported() &&
+ !m_isUsingCustomAuthorizer)
+ {
+ if (!m_contextOptions.SetAlpnList("x-amzn-mqtt-ca"))
+ {
+ return MqttClientConnectionConfig::CreateInvalid(m_contextOptions.LastError());
+ }
+ }
+
+ // Is the user trying to connect using a custom authorizer?
+ if (m_isUsingCustomAuthorizer)
+ {
+ if (port != 443)
+ {
+ AWS_LOGF_WARN(
+ AWS_LS_MQTT_GENERAL,
+ "Attempting to connect to authorizer with unsupported port. Port is not 443...");
+ }
+ }
+
+ // add metrics string to username (if metrics enabled)
+ if (m_enableMetricsCollection)
+ {
+ if (username.find('?') != Crt::String::npos)
+ {
+ username += "&";
+ }
+ else
+ {
+ username += "?";
+ }
+ username += "SDK=";
+ username += m_sdkName;
+ username += "&Version=";
+ username += m_sdkVersion;
+ }
+
+ auto tlsContext = Crt::Io::TlsContext(m_contextOptions, Crt::Io::TlsMode::CLIENT, m_allocator);
+ if (!tlsContext)
+ {
+ return MqttClientConnectionConfig::CreateInvalid(tlsContext.GetInitializationError());
+ }
+
+ if (!m_websocketConfig)
+ {
+ auto config = MqttClientConnectionConfig(
+ m_endpoint, port, m_socketOptions, std::move(tlsContext), m_proxyOptions);
+ config.m_username = username;
+ config.m_password = password;
+ return config;
+ }
+
+ auto websocketConfig = m_websocketConfig.value();
+ auto signerTransform = [websocketConfig](
+ std::shared_ptr<Crt::Http::HttpRequest> req,
+ const Crt::Mqtt::OnWebSocketHandshakeInterceptComplete &onComplete) {
+ // It is only a happy coincidence that these function signatures match. This is the
+ // callback invoked when signing completes; it in turn invokes the callback that marks
+ // the websocket handshake as complete.
+ auto signingComplete =
+ [onComplete](const std::shared_ptr<Aws::Crt::Http::HttpRequest> &req1, int errorCode) {
+ onComplete(req1, errorCode);
+ };
+
+ auto signerConfig = websocketConfig.CreateSigningConfigCb();
+
+ websocketConfig.Signer->SignRequest(req, *signerConfig, signingComplete);
+ };
+
+ bool useWebsocketProxyOptions = m_websocketConfig->ProxyOptions.has_value() && !m_proxyOptions.has_value();
+
+ auto config = MqttClientConnectionConfig(
+ m_endpoint,
+ port,
+ m_socketOptions,
+ std::move(tlsContext),
+ signerTransform,
+ useWebsocketProxyOptions ? m_websocketConfig->ProxyOptions : m_proxyOptions);
+ config.m_username = username;
+ config.m_password = password;
+ return config;
+ }
+
+ MqttClient::MqttClient(Crt::Io::ClientBootstrap &bootstrap, Crt::Allocator *allocator) noexcept
+ : m_client(bootstrap, allocator), m_lastError(0)
+ {
+ if (!m_client)
+ {
+ m_lastError = m_client.LastError();
+ }
+ }
+
+ MqttClient::MqttClient(Crt::Allocator *allocator) noexcept
+ : MqttClient(*Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(), allocator)
+ {
+ }
+
+ std::shared_ptr<Crt::Mqtt::MqttConnection> MqttClient::NewConnection(
+ const MqttClientConnectionConfig &config) noexcept
+ {
+ if (!config)
+ {
+ m_lastError = config.LastError();
+ return nullptr;
+ }
+
+ bool useWebsocket = config.m_webSocketInterceptor.operator bool();
+ auto newConnection = m_client.NewConnection(
+ config.m_endpoint.c_str(), config.m_port, config.m_socketOptions, config.m_context, useWebsocket);
+
+ if (!newConnection)
+ {
+ m_lastError = m_client.LastError();
+ return nullptr;
+ }
+
+ if (!(*newConnection))
+ {
+ m_lastError = newConnection->LastError();
+ return nullptr;
+ }
+
+ if (!config.m_username.empty() || !config.m_password.empty())
+ {
+ if (!newConnection->SetLogin(config.m_username.c_str(), config.m_password.c_str()))
+ {
+ m_lastError = newConnection->LastError();
+ return nullptr;
+ }
+ }
+
+ if (useWebsocket)
+ {
+ newConnection->WebsocketInterceptor = config.m_webSocketInterceptor;
+ }
+
+ if (config.m_proxyOptions)
+ {
+ newConnection->SetHttpProxyOptions(config.m_proxyOptions.value());
+ }
+
+ return newConnection;
+ }
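+
+ // Illustrative usage sketch (not part of the upstream sources), assuming the CRT
+ // ApiHandle has been initialized and a device certificate/private key pair exists
+ // on disk; the endpoint string is a placeholder:
+ //
+ //   Aws::Iot::MqttClientConnectionConfigBuilder builder(
+ //       "certificate.pem", "private-key.pem", Aws::Crt::ApiAllocator());
+ //   builder.WithEndpoint("example-endpoint.iot.us-east-1.amazonaws.com");
+ //   Aws::Iot::MqttClientConnectionConfig config = builder.Build();
+ //
+ //   Aws::Iot::MqttClient client(Aws::Crt::ApiAllocator());
+ //   auto connection = client.NewConnection(config);
+ //   // 'connection' is nullptr on failure (see MqttClientConnectionConfig::LastError()).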
+ } // namespace Iot
+} // namespace Aws
+
+#endif // !BYO_CRYPTO
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp b/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
new file mode 100644
index 0000000000..e96c7b5e79
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/iot/MqttCommon.cpp
@@ -0,0 +1,88 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/iot/MqttClient.h>
+
+#include <aws/crt/Api.h>
+#include <aws/crt/auth/Credentials.h>
+#include <aws/crt/auth/Sigv4Signing.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+
+#if !BYO_CRYPTO
+
+namespace Aws
+{
+ namespace Iot
+ {
+ WebsocketConfig::WebsocketConfig(
+ const Crt::String &signingRegion,
+ Crt::Io::ClientBootstrap *bootstrap,
+ Crt::Allocator *allocator) noexcept
+ : SigningRegion(signingRegion), ServiceName("iotdevicegateway")
+ {
+ Crt::Auth::CredentialsProviderChainDefaultConfig config;
+ config.Bootstrap = bootstrap;
+
+ CredentialsProvider =
+ Crt::Auth::CredentialsProvider::CreateCredentialsProviderChainDefault(config, allocator);
+
+ Signer = Aws::Crt::MakeShared<Crt::Auth::Sigv4HttpRequestSigner>(allocator, allocator);
+
+ auto credsProviderRef = CredentialsProvider;
+ auto signingRegionCopy = SigningRegion;
+ auto serviceNameCopy = ServiceName;
+ CreateSigningConfigCb = [allocator, credsProviderRef, signingRegionCopy, serviceNameCopy]() {
+ auto signerConfig = Aws::Crt::MakeShared<Crt::Auth::AwsSigningConfig>(allocator);
+ signerConfig->SetRegion(signingRegionCopy);
+ signerConfig->SetService(serviceNameCopy);
+ signerConfig->SetSigningAlgorithm(Crt::Auth::SigningAlgorithm::SigV4);
+ signerConfig->SetSignatureType(Crt::Auth::SignatureType::HttpRequestViaQueryParams);
+ signerConfig->SetOmitSessionToken(true);
+ signerConfig->SetCredentialsProvider(credsProviderRef);
+
+ return signerConfig;
+ };
+ }
+
+ WebsocketConfig::WebsocketConfig(const Crt::String &signingRegion, Crt::Allocator *allocator) noexcept
+ : WebsocketConfig(signingRegion, Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap(), allocator)
+ {
+ }
+
+ WebsocketConfig::WebsocketConfig(
+ const Crt::String &signingRegion,
+ const std::shared_ptr<Crt::Auth::ICredentialsProvider> &credentialsProvider,
+ Crt::Allocator *allocator) noexcept
+ : CredentialsProvider(credentialsProvider),
+ Signer(Aws::Crt::MakeShared<Crt::Auth::Sigv4HttpRequestSigner>(allocator, allocator)),
+ SigningRegion(signingRegion), ServiceName("iotdevicegateway")
+ {
+ auto credsProviderRef = CredentialsProvider;
+ auto signingRegionCopy = SigningRegion;
+ auto serviceNameCopy = ServiceName;
+ CreateSigningConfigCb = [allocator, credsProviderRef, signingRegionCopy, serviceNameCopy]() {
+ auto signerConfig = Aws::Crt::MakeShared<Crt::Auth::AwsSigningConfig>(allocator);
+ signerConfig->SetRegion(signingRegionCopy);
+ signerConfig->SetService(serviceNameCopy);
+ signerConfig->SetSigningAlgorithm(Crt::Auth::SigningAlgorithm::SigV4);
+ signerConfig->SetSignatureType(Crt::Auth::SignatureType::HttpRequestViaQueryParams);
+ signerConfig->SetOmitSessionToken(true);
+ signerConfig->SetCredentialsProvider(credsProviderRef);
+
+ return signerConfig;
+ };
+ }
+
+ WebsocketConfig::WebsocketConfig(
+ const std::shared_ptr<Crt::Auth::ICredentialsProvider> &credentialsProvider,
+ const std::shared_ptr<Crt::Auth::IHttpRequestSigner> &signer,
+ Iot::CreateSigningConfig createSigningConfig) noexcept
+ : CredentialsProvider(credentialsProvider), Signer(signer),
+ CreateSigningConfigCb(std::move(createSigningConfig)), ServiceName("iotdevicegateway")
+ {
+ }
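+
+ // Illustrative usage sketch (not part of the upstream sources): a SigV4-signed
+ // MQTT-over-websocket configuration, assuming the default credentials provider
+ // chain can resolve credentials from the environment; the endpoint is a placeholder:
+ //
+ //   Aws::Iot::WebsocketConfig wsConfig("us-east-1", Aws::Crt::ApiAllocator());
+ //   Aws::Iot::MqttClientConnectionConfigBuilder builder(wsConfig, Aws::Crt::ApiAllocator());
+ //   builder.WithEndpoint("example-endpoint.iot.us-east-1.amazonaws.com");
+ //   auto config = builder.Build();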
+ } // namespace Iot
+} // namespace Aws
+
+#endif // !BYO_CRYPTO
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp b/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
new file mode 100644
index 0000000000..b8e1217d73
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Client.cpp
@@ -0,0 +1,743 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/mqtt/Mqtt5Client.h>
+#include <aws/crt/mqtt/Mqtt5Packets.h>
+
+#include <aws/crt/Api.h>
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/http/HttpProxyStrategy.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+#include <aws/crt/io/Bootstrap.h>
+#include <aws/iot/MqttClient.h>
+
+#include <utility>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt5
+ {
+ struct PubAckCallbackData : public std::enable_shared_from_this<PubAckCallbackData>
+ {
+ PubAckCallbackData(Allocator *alloc = ApiAllocator()) : client(nullptr), allocator(alloc) {}
+
+ std::shared_ptr<Mqtt5Client> client;
+ OnPublishCompletionHandler onPublishCompletion;
+ Allocator *allocator;
+ };
+
+ struct SubAckCallbackData
+ {
+ SubAckCallbackData(Allocator *alloc = ApiAllocator()) : client(nullptr), allocator(alloc) {}
+
+ std::shared_ptr<Mqtt5Client> client;
+ OnSubscribeCompletionHandler onSubscribeCompletion;
+ Allocator *allocator;
+ };
+
+ struct UnSubAckCallbackData
+ {
+ UnSubAckCallbackData(Allocator *alloc = ApiAllocator()) : client(nullptr), allocator(alloc) {}
+
+ std::shared_ptr<Mqtt5Client> client;
+ OnUnsubscribeCompletionHandler onUnsubscribeCompletion;
+ Allocator *allocator;
+ };
+
+ void Mqtt5Client::s_lifeCycleEventCallback(const struct aws_mqtt5_client_lifecycle_event *event)
+ {
+ Mqtt5Client *client = reinterpret_cast<Mqtt5Client *>(event->user_data);
+ switch (event->event_type)
+ {
+ case AWS_MQTT5_CLET_STOPPED:
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "Lifecycle event: Client Stopped!");
+ if (client->onStopped)
+ {
+ OnStoppedEventData eventData;
+ client->onStopped(*client, eventData);
+ }
+ break;
+
+ case AWS_MQTT5_CLET_ATTEMPTING_CONNECT:
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "Lifecycle event: Attempting Connect!");
+ if (client->onAttemptingConnect)
+ {
+ OnAttemptingConnectEventData eventData;
+ client->onAttemptingConnect(*client, eventData);
+ }
+ break;
+
+ case AWS_MQTT5_CLET_CONNECTION_FAILURE:
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "Lifecycle event: Connection Failure!");
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ " Error Code: %d(%s)",
+ event->error_code,
+ aws_error_debug_str(event->error_code));
+ if (client->onConnectionFailure)
+ {
+ OnConnectionFailureEventData eventData;
+ eventData.errorCode = event->error_code;
+ std::shared_ptr<ConnAckPacket> packet = nullptr;
+ if (event->connack_data != NULL)
+ {
+ packet = Aws::Crt::MakeShared<ConnAckPacket>(
+ client->m_allocator, *event->connack_data, client->m_allocator);
+ eventData.connAckPacket = packet;
+ }
+ client->onConnectionFailure(*client, eventData);
+ }
+ break;
+
+ case AWS_MQTT5_CLET_CONNECTION_SUCCESS:
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "Lifecycle event: Connection Success!");
+ if (client->onConnectionSuccess)
+ {
+ OnConnectionSuccessEventData eventData;
+
+ std::shared_ptr<ConnAckPacket> packet = nullptr;
+ if (event->connack_data != NULL)
+ {
+ packet = Aws::Crt::MakeShared<ConnAckPacket>(ApiAllocator(), *event->connack_data);
+ }
+
+ std::shared_ptr<NegotiatedSettings> neg_settings = nullptr;
+ if (event->settings != NULL)
+ {
+ neg_settings =
+ Aws::Crt::MakeShared<NegotiatedSettings>(ApiAllocator(), *event->settings);
+ }
+
+ eventData.connAckPacket = packet;
+ eventData.negotiatedSettings = neg_settings;
+ client->onConnectionSuccess(*client, eventData);
+ }
+ break;
+
+ case AWS_MQTT5_CLET_DISCONNECTION:
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ " Error Code: %d(%s)",
+ event->error_code,
+ aws_error_debug_str(event->error_code));
+ if (client->onDisconnection)
+ {
+ OnDisconnectionEventData eventData;
+ std::shared_ptr<DisconnectPacket> disconnection = nullptr;
+ if (event->disconnect_data != nullptr)
+ {
+ disconnection = Aws::Crt::MakeShared<DisconnectPacket>(
+ client->m_allocator, *event->disconnect_data, client->m_allocator);
+ }
+ eventData.errorCode = event->error_code;
+ eventData.disconnectPacket = disconnection;
+ client->onDisconnection(*client, eventData);
+ }
+ break;
+ }
+ }
+
+ void Mqtt5Client::s_publishReceivedCallback(
+ const struct aws_mqtt5_packet_publish_view *publish,
+ void *user_data)
+ {
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "on publish recieved callback");
+ Mqtt5Client *client = reinterpret_cast<Mqtt5Client *>(user_data);
+ if (client != nullptr && client->onPublishReceived != nullptr)
+ {
+ if (publish != NULL)
+ {
+ std::shared_ptr<PublishPacket> packet =
+ std::make_shared<PublishPacket>(*publish, client->m_allocator);
+ PublishReceivedEventData eventData;
+ eventData.publishPacket = packet;
+ client->onPublishReceived(*client, eventData);
+ }
+ else
+ {
+ AWS_LOGF_ERROR(AWS_LS_MQTT5_CLIENT, "Failed to access Publish packet view.");
+ }
+ }
+ }
+
+ void Mqtt5Client::s_publishCompletionCallback(
+ enum aws_mqtt5_packet_type packet_type,
+ const void *publishCompletionPacket,
+ int error_code,
+ void *complete_ctx)
+ {
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "Publish completion callback triggered.");
+ auto callbackData = reinterpret_cast<PubAckCallbackData *>(complete_ctx);
+
+ if (callbackData)
+ {
+ std::shared_ptr<PublishResult> publish = nullptr;
+ switch (packet_type)
+ {
+ case aws_mqtt5_packet_type::AWS_MQTT5_PT_PUBACK:
+ {
+ if (publishCompletionPacket != NULL)
+ {
+ std::shared_ptr<PubAckPacket> packet = std::make_shared<PubAckPacket>(
+ *(aws_mqtt5_packet_puback_view *)publishCompletionPacket, callbackData->allocator);
+ publish = std::make_shared<PublishResult>(std::move(packet));
+ }
+ else // This should never happen.
+ {
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "The PubAck Packet is invalid.");
+ publish = std::make_shared<PublishResult>(AWS_ERROR_INVALID_ARGUMENT);
+ }
+ break;
+ }
+ case aws_mqtt5_packet_type::AWS_MQTT5_PT_NONE:
+ {
+ publish = std::make_shared<PublishResult>(error_code);
+ break;
+ }
+ default: // Invalid packet type
+ {
+ AWS_LOGF_INFO(AWS_LS_MQTT5_CLIENT, "Invalid Packet Type.");
+ publish = std::make_shared<PublishResult>(AWS_ERROR_INVALID_ARGUMENT);
+ break;
+ }
+ }
+ if (callbackData->onPublishCompletion != NULL)
+ {
+ callbackData->onPublishCompletion(callbackData->client, error_code, publish);
+ }
+
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+ }
+
+ void Mqtt5Client::s_onWebsocketHandshake(
+ struct aws_http_message *rawRequest,
+ void *user_data,
+ aws_mqtt5_transform_websocket_handshake_complete_fn *complete_fn,
+ void *complete_ctx)
+ {
+ auto client = reinterpret_cast<Mqtt5Client *>(user_data);
+
+ Allocator *allocator = client->m_allocator;
+ // we have to do this because of private constructors.
+ auto toSeat =
+ reinterpret_cast<Http::HttpRequest *>(aws_mem_acquire(allocator, sizeof(Http::HttpRequest)));
+ toSeat = new (toSeat) Http::HttpRequest(allocator, rawRequest);
+
+ std::shared_ptr<Http::HttpRequest> request = std::shared_ptr<Http::HttpRequest>(
+ toSeat, [allocator](Http::HttpRequest *ptr) { Crt::Delete(ptr, allocator); });
+
+ auto onInterceptComplete =
+ [complete_fn,
+ complete_ctx](const std::shared_ptr<Http::HttpRequest> &transformedRequest, int errorCode) {
+ complete_fn(transformedRequest->GetUnderlyingMessage(), errorCode, complete_ctx);
+ };
+
+ client->websocketInterceptor(request, onInterceptComplete);
+ }
+
+ void Mqtt5Client::s_clientTerminationCompletion(void *complete_ctx)
+ {
+ Mqtt5Client *client = reinterpret_cast<Mqtt5Client *>(complete_ctx);
+ std::unique_lock<std::mutex> lock(client->m_terminationMutex);
+ client->m_terminationPredicate = true;
+ client->m_terminationCondition.notify_all();
+ }
+
+ void Mqtt5Client::s_subscribeCompletionCallback(
+ const aws_mqtt5_packet_suback_view *suback,
+ int error_code,
+ void *complete_ctx)
+ {
+ SubAckCallbackData *callbackData = reinterpret_cast<SubAckCallbackData *>(complete_ctx);
+ AWS_ASSERT(callbackData != nullptr);
+
+ std::shared_ptr<SubAckPacket> packet = nullptr;
+ if (suback != nullptr)
+ {
+ packet = std::make_shared<SubAckPacket>(*suback, callbackData->allocator);
+ }
+
+ if (error_code != 0)
+ {
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "SubscribeCompletion Failed with Error Code: %d(%s)",
+ error_code,
+ aws_error_debug_str(error_code));
+ }
+
+ if (callbackData->onSubscribeCompletion)
+ {
+ callbackData->onSubscribeCompletion(callbackData->client, error_code, packet);
+ }
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+
+ void Mqtt5Client::s_unsubscribeCompletionCallback(
+ const aws_mqtt5_packet_unsuback_view *unsuback,
+ int error_code,
+ void *complete_ctx)
+ {
+ UnSubAckCallbackData *callbackData = reinterpret_cast<UnSubAckCallbackData *>(complete_ctx);
+ AWS_ASSERT(callbackData != nullptr);
+
+ std::shared_ptr<UnSubAckPacket> packet = nullptr;
+ if (unsuback != nullptr)
+ {
+ packet = std::make_shared<UnSubAckPacket>(*unsuback, callbackData->allocator);
+ }
+
+ if (error_code != 0)
+ {
+ AWS_LOGF_INFO(
+ AWS_LS_MQTT5_CLIENT,
+ "UnsubscribeCompletion Failed with Error Code: %d(%s)",
+ error_code,
+ aws_error_debug_str(error_code));
+ }
+
+ if (callbackData->onUnsubscribeCompletion != NULL)
+ {
+ callbackData->onUnsubscribeCompletion(callbackData->client, error_code, packet);
+ }
+
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+
+ Mqtt5Client::Mqtt5Client(const Mqtt5ClientOptions &options, Allocator *allocator) noexcept
+ : m_client(nullptr), m_allocator(allocator)
+ {
+ aws_mqtt5_client_options clientOptions;
+
+ options.initializeRawOptions(clientOptions);
+
+ /* Setup Callbacks */
+ if (options.websocketHandshakeTransform)
+ {
+ this->websocketInterceptor = options.websocketHandshakeTransform;
+ clientOptions.websocket_handshake_transform = &Mqtt5Client::s_onWebsocketHandshake;
+ clientOptions.websocket_handshake_transform_user_data = this;
+ }
+
+ if (options.onConnectionFailure)
+ {
+ this->onConnectionFailure = options.onConnectionFailure;
+ }
+
+ if (options.onConnectionSuccess)
+ {
+ this->onConnectionSuccess = options.onConnectionSuccess;
+ }
+
+ if (options.onDisconnection)
+ {
+ this->onDisconnection = options.onDisconnection;
+ }
+
+ if (options.onPublishReceived)
+ {
+ this->onPublishReceived = options.onPublishReceived;
+ }
+
+ if (options.onStopped)
+ {
+ this->onStopped = options.onStopped;
+ }
+
+ if (options.onAttemptingConnect)
+ {
+ this->onAttemptingConnect = options.onAttemptingConnect;
+ }
+
+ clientOptions.publish_received_handler_user_data = this;
+ clientOptions.publish_received_handler = &Mqtt5Client::s_publishReceivedCallback;
+
+ clientOptions.lifecycle_event_handler = &Mqtt5Client::s_lifeCycleEventCallback;
+ clientOptions.lifecycle_event_handler_user_data = this;
+
+ clientOptions.client_termination_handler = &Mqtt5Client::s_clientTerminationCompletion;
+ clientOptions.client_termination_handler_user_data = this;
+
+ m_client = aws_mqtt5_client_new(allocator, &clientOptions);
+ }
+
+ Mqtt5Client::~Mqtt5Client()
+ {
+ if (m_client != nullptr)
+ {
+ aws_mqtt5_client_release(m_client);
+ std::unique_lock<std::mutex> lock(m_terminationMutex);
+ m_terminationCondition.wait(lock, [this] { return m_terminationPredicate == true; });
+ m_client = nullptr;
+ }
+ }
+
+ std::shared_ptr<Mqtt5Client> Mqtt5Client::NewMqtt5Client(
+ const Mqtt5ClientOptions &options,
+ Allocator *allocator) noexcept
+ {
+ /* Copied from MqttClient.cpp:ln754 */
+ // As the constructor is private, std::make_shared would not work here, so we do the equivalent manually.
+ Mqtt5Client *toSeat = reinterpret_cast<Mqtt5Client *>(aws_mem_acquire(allocator, sizeof(Mqtt5Client)));
+ if (!toSeat)
+ {
+ return nullptr;
+ }
+
+ toSeat = new (toSeat) Mqtt5Client(options, allocator);
+ return std::shared_ptr<Mqtt5Client>(
+ toSeat, [allocator](Mqtt5Client *client) { Crt::Delete(client, allocator); });
+ }
+
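+ // Illustrative usage sketch (not part of the upstream sources), assuming a broker
+ // reachable over TCP on port 8883 and TLS configured through withTlsConnectionOptions
+ // elsewhere; the host name is a placeholder:
+ //
+ //   Mqtt5ClientOptions options(ApiAllocator());
+ //   options.withHostName("broker.example.com").withPort(8883);
+ //   std::shared_ptr<Mqtt5Client> client = Mqtt5Client::NewMqtt5Client(options, ApiAllocator());
+ //   if (client && *client)
+ //   {
+ //       client->Start();
+ //   }
+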
+ Mqtt5Client::operator bool() const noexcept { return m_client != nullptr; }
+
+ int Mqtt5Client::LastError() const noexcept { return aws_last_error(); }
+
+ bool Mqtt5Client::Start() const noexcept { return aws_mqtt5_client_start(m_client) == AWS_OP_SUCCESS; }
+
+ bool Mqtt5Client::Stop() noexcept { return aws_mqtt5_client_stop(m_client, NULL, NULL) == AWS_OP_SUCCESS; }
+
+ bool Mqtt5Client::Stop(std::shared_ptr<DisconnectPacket> disconnectOptions) noexcept
+ {
+ if (disconnectOptions == nullptr)
+ {
+ return Stop();
+ }
+
+ aws_mqtt5_packet_disconnect_view disconnect_packet;
+ AWS_ZERO_STRUCT(disconnect_packet);
+ if (disconnectOptions->initializeRawOptions(disconnect_packet) == false)
+ {
+ return false;
+ }
+ return aws_mqtt5_client_stop(m_client, &disconnect_packet, NULL) == AWS_OP_SUCCESS;
+ }
+
+ bool Mqtt5Client::Publish(
+ std::shared_ptr<PublishPacket> publishOptions,
+ OnPublishCompletionHandler onPublishCompletionCallback) noexcept
+ {
+ if (publishOptions == nullptr)
+ {
+ return false;
+ }
+
+ aws_mqtt5_packet_publish_view publish;
+ publishOptions->initializeRawOptions(publish);
+
+ PubAckCallbackData *pubCallbackData = Aws::Crt::New<PubAckCallbackData>(m_allocator);
+
+ pubCallbackData->client = this->getptr();
+ pubCallbackData->allocator = m_allocator;
+ pubCallbackData->onPublishCompletion = onPublishCompletionCallback;
+
+ aws_mqtt5_publish_completion_options options;
+
+ options.completion_callback = Mqtt5Client::s_publishCompletionCallback;
+ options.completion_user_data = pubCallbackData;
+
+ int result = aws_mqtt5_client_publish(m_client, &publish, &options);
+ if (result != AWS_OP_SUCCESS)
+ {
+ Crt::Delete(pubCallbackData, pubCallbackData->allocator);
+ return false;
+ }
+ return true;
+ }
+
+ bool Mqtt5Client::Subscribe(
+ std::shared_ptr<SubscribePacket> subscribeOptions,
+ OnSubscribeCompletionHandler onSubscribeCompletionCallback) noexcept
+ {
+ if (subscribeOptions == nullptr)
+ {
+ return false;
+ }
+ /* Setup packet_subscribe */
+ aws_mqtt5_packet_subscribe_view subscribe;
+
+ subscribeOptions->initializeRawOptions(subscribe);
+
+ /* Setup subscription completion callback */
+ SubAckCallbackData *subCallbackData = Aws::Crt::New<SubAckCallbackData>(m_allocator);
+
+ subCallbackData->client = this->getptr();
+ subCallbackData->allocator = m_allocator;
+ subCallbackData->onSubscribeCompletion = onSubscribeCompletionCallback;
+
+ aws_mqtt5_subscribe_completion_options options;
+
+ options.completion_callback = Mqtt5Client::s_subscribeCompletionCallback;
+ options.completion_user_data = subCallbackData;
+
+ /* Subscribe to topic */
+ int result = aws_mqtt5_client_subscribe(m_client, &subscribe, &options);
+ if (result != AWS_OP_SUCCESS)
+ {
+ Crt::Delete(subCallbackData, subCallbackData->allocator);
+ return false;
+ }
+ return result == AWS_OP_SUCCESS;
+ }
+
+ bool Mqtt5Client::Unsubscribe(
+ std::shared_ptr<UnsubscribePacket> unsubscribeOptions,
+ OnUnsubscribeCompletionHandler onUnsubscribeCompletionCallback) noexcept
+ {
+ if (unsubscribeOptions == nullptr)
+ {
+ return false;
+ }
+
+ aws_mqtt5_packet_unsubscribe_view unsubscribe;
+ unsubscribeOptions->initializeRawOptions(unsubscribe);
+
+ UnSubAckCallbackData *unSubCallbackData = Aws::Crt::New<UnSubAckCallbackData>(m_allocator);
+
+ unSubCallbackData->client = this->getptr();
+ unSubCallbackData->allocator = m_allocator;
+ unSubCallbackData->onUnsubscribeCompletion = onUnsubscribeCompletionCallback;
+
+ aws_mqtt5_unsubscribe_completion_options options;
+
+ options.completion_callback = Mqtt5Client::s_unsubscribeCompletionCallback;
+ options.completion_user_data = unSubCallbackData;
+
+ int result = aws_mqtt5_client_unsubscribe(m_client, &unsubscribe, &options);
+ if (result != AWS_OP_SUCCESS)
+ {
+ Crt::Delete(unSubCallbackData, unSubCallbackData->allocator);
+ return false;
+ }
+ return result == AWS_OP_SUCCESS;
+ }
+
+ const Mqtt5ClientOperationStatistics &Mqtt5Client::GetOperationStatistics() noexcept
+ {
+ aws_mqtt5_client_operation_statistics m_operationStatisticsNative = {0, 0, 0, 0};
+ if (m_client != nullptr)
+ {
+ aws_mqtt5_client_get_stats(m_client, &m_operationStatisticsNative);
+ m_operationStatistics.incompleteOperationCount =
+ m_operationStatisticsNative.incomplete_operation_count;
+ m_operationStatistics.incompleteOperationSize =
+ m_operationStatisticsNative.incomplete_operation_size;
+ m_operationStatistics.unackedOperationCount = m_operationStatisticsNative.unacked_operation_count;
+ m_operationStatistics.unackedOperationSize = m_operationStatisticsNative.unacked_operation_size;
+ }
+ return m_operationStatistics;
+ }
+
+ /*****************************************************
+ *
+ * Mqtt5ClientOptions
+ *
+ *****************************************************/
+
+ /**
+ * Mqtt5ClientOptions
+ */
+ Mqtt5ClientOptions::Mqtt5ClientOptions(Crt::Allocator *allocator) noexcept
+ : m_bootstrap(nullptr), m_sessionBehavior(ClientSessionBehaviorType::AWS_MQTT5_CSBT_DEFAULT),
+ m_extendedValidationAndFlowControlOptions(AWS_MQTT5_EVAFCO_AWS_IOT_CORE_DEFAULTS),
+ m_offlineQueueBehavior(AWS_MQTT5_COQBT_DEFAULT),
+ m_reconnectionOptions({AWS_EXPONENTIAL_BACKOFF_JITTER_DEFAULT, 0, 0, 0}), m_pingTimeoutMs(0),
+ m_connackTimeoutMs(0), m_ackTimeoutSec(0), m_allocator(allocator)
+ {
+ m_socketOptions.SetSocketType(Io::SocketType::Stream);
+ AWS_ZERO_STRUCT(m_packetConnectViewStorage);
+ AWS_ZERO_STRUCT(m_httpProxyOptionsStorage);
+ }
+
+ bool Mqtt5ClientOptions::initializeRawOptions(aws_mqtt5_client_options &raw_options) const noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+
+ raw_options.host_name = ByteCursorFromString(m_hostName);
+ raw_options.port = m_port;
+
+ if (m_bootstrap == nullptr)
+ {
+ raw_options.bootstrap = ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle();
+ }
+ else
+ {
+ raw_options.bootstrap = m_bootstrap->GetUnderlyingHandle();
+ }
+ raw_options.socket_options = &m_socketOptions.GetImpl();
+ if (m_tlsConnectionOptions.has_value())
+ {
+ raw_options.tls_options = m_tlsConnectionOptions.value().GetUnderlyingHandle();
+ }
+
+ if (m_proxyOptions.has_value())
+ {
+ raw_options.http_proxy_options = &m_httpProxyOptionsStorage;
+ }
+
+ raw_options.connect_options = &m_packetConnectViewStorage;
+ raw_options.session_behavior = m_sessionBehavior;
+ raw_options.extended_validation_and_flow_control_options = m_extendedValidationAndFlowControlOptions;
+ raw_options.offline_queue_behavior = m_offlineQueueBehavior;
+ raw_options.retry_jitter_mode = m_reconnectionOptions.m_reconnectMode;
+ raw_options.max_reconnect_delay_ms = m_reconnectionOptions.m_maxReconnectDelayMs;
+ raw_options.min_reconnect_delay_ms = m_reconnectionOptions.m_minReconnectDelayMs;
+ raw_options.min_connected_time_to_reset_reconnect_delay_ms =
+ m_reconnectionOptions.m_minConnectedTimeToResetReconnectDelayMs;
+ raw_options.ping_timeout_ms = m_pingTimeoutMs;
+ raw_options.connack_timeout_ms = m_connackTimeoutMs;
+ raw_options.ack_timeout_seconds = m_ackTimeoutSec;
+
+ return true;
+ }
+
+ Mqtt5ClientOptions::~Mqtt5ClientOptions() {}
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withHostName(Crt::String hostname)
+ {
+ m_hostName = std::move(hostname);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withPort(uint16_t port) noexcept
+ {
+ m_port = port;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withBootstrap(Io::ClientBootstrap *bootStrap) noexcept
+ {
+ m_bootstrap = bootStrap;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withSocketOptions(Io::SocketOptions socketOptions) noexcept
+ {
+ m_socketOptions = std::move(socketOptions);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withTlsConnectionOptions(
+ const Io::TlsConnectionOptions &tlsOptions) noexcept
+ {
+ m_tlsConnectionOptions = tlsOptions;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withHttpProxyOptions(
+ const Crt::Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept
+ {
+ m_proxyOptions = proxyOptions;
+ m_proxyOptions->InitializeRawProxyOptions(m_httpProxyOptionsStorage);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withConnectOptions(
+ std::shared_ptr<ConnectPacket> packetConnect) noexcept
+ {
+ m_connectOptions = packetConnect;
+ m_connectOptions->initializeRawOptions(m_packetConnectViewStorage, m_allocator);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withSessionBehavior(
+ ClientSessionBehaviorType sessionBehavior) noexcept
+ {
+ m_sessionBehavior = sessionBehavior;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withClientExtendedValidationAndFlowControl(
+ ClientExtendedValidationAndFlowControl clientExtendedValidationAndFlowControl) noexcept
+ {
+ m_extendedValidationAndFlowControlOptions = clientExtendedValidationAndFlowControl;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withOfflineQueueBehavior(
+ ClientOperationQueueBehaviorType offlineQueueBehavior) noexcept
+ {
+ m_offlineQueueBehavior = offlineQueueBehavior;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withReconnectOptions(ReconnectOptions reconnectOptions) noexcept
+ {
+ m_reconnectionOptions = reconnectOptions;
+
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withPingTimeoutMs(uint32_t pingTimeoutMs) noexcept
+ {
+ m_pingTimeoutMs = pingTimeoutMs;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withConnackTimeoutMs(uint32_t connackTimeoutMs) noexcept
+ {
+ m_connackTimeoutMs = connackTimeoutMs;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withAckTimeoutSeconds(uint32_t ackTimeoutSeconds) noexcept
+ {
+ m_ackTimeoutSec = ackTimeoutSeconds;
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withWebsocketHandshakeTransformCallback(
+ OnWebSocketHandshakeIntercept callback) noexcept
+ {
+ websocketHandshakeTransform = std::move(callback);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withClientConnectionSuccessCallback(
+ OnConnectionSuccessHandler callback) noexcept
+ {
+ onConnectionSuccess = std::move(callback);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withClientConnectionFailureCallback(
+ OnConnectionFailureHandler callback) noexcept
+ {
+ onConnectionFailure = std::move(callback);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withClientDisconnectionCallback(
+ OnDisconnectionHandler callback) noexcept
+ {
+ onDisconnection = std::move(callback);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withClientStoppedCallback(OnStoppedHandler callback) noexcept
+ {
+ onStopped = std::move(callback);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withClientAttemptingConnectCallback(
+ OnAttemptingConnectHandler callback) noexcept
+ {
+ onAttemptingConnect = std::move(callback);
+ return *this;
+ }
+
+ Mqtt5ClientOptions &Mqtt5ClientOptions::withPublishReceivedCallback(
+ OnPublishReceivedHandler callback) noexcept
+ {
+ onPublishReceived = std::move(callback);
+ return *this;
+ }
+
+ } // namespace Mqtt5
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp b/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
new file mode 100644
index 0000000000..a59bebb635
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/mqtt/Mqtt5Packets.cpp
@@ -0,0 +1,1236 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/mqtt/Mqtt5Client.h>
+#include <aws/crt/mqtt/Mqtt5Packets.h>
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt5
+ {
+ template <typename T> void setPacketVector(Vector<T> &vector, const T *values, size_t length)
+ {
+ vector.clear();
+ for (size_t i = 0; i < length; ++i)
+ {
+ vector.push_back(values[i]);
+ }
+ }
+ template <typename T> void setPacketOptional(Optional<T> &optional, const T *value)
+ {
+ if (value != nullptr)
+ {
+ optional = *value;
+ }
+ else
+ {
+ optional.reset();
+ }
+ }
+
+ void setPacketStringOptional(
+ Optional<aws_byte_cursor> &optional,
+ Crt::String &optionalStorage,
+ const aws_byte_cursor *value)
+ {
+ if (value != nullptr)
+ {
+ optionalStorage = Crt::String((const char *)value->ptr, value->len);
+ struct aws_byte_cursor optional_cursor;
+ optional_cursor.ptr = (uint8_t *)optionalStorage.c_str();
+ optional_cursor.len = optionalStorage.size();
+ optional = optional_cursor;
+ }
+ }
+
+ void setPacketStringOptional(Optional<Crt::String> &optional, const aws_byte_cursor *value)
+ {
+ if (value != nullptr)
+ {
+ optional = Crt::String((const char *)value->ptr, value->len);
+ }
+ else
+ {
+ optional.reset();
+ }
+ }
+
+ void setPacketStringOptional(Optional<Crt::String> &optional, Crt::String &&toMove)
+ {
+ if (!toMove.empty())
+ {
+ optional = std::move(toMove);
+ }
+ else
+ {
+ optional.reset();
+ }
+ }
+
+ void setPacketByteBufOptional(
+ Optional<aws_byte_cursor> &optional,
+ ByteBuf &optionalStorage,
+ Allocator *allocator,
+ const aws_byte_cursor *value)
+ {
+ aws_byte_buf_clean_up(&optionalStorage);
+ AWS_ZERO_STRUCT(optionalStorage);
+ if (value != nullptr)
+ {
+ aws_byte_buf_init_copy_from_cursor(&optionalStorage, allocator, *value);
+ optional = aws_byte_cursor_from_buf(&optionalStorage);
+ }
+ else
+ {
+ optional.reset();
+ }
+ }
+ void setUserProperties(
+ Vector<UserProperty> &userProperties,
+ const struct aws_mqtt5_user_property *properties,
+ size_t propertyCount)
+ {
+ for (size_t i = 0; i < propertyCount; ++i)
+ {
+ userProperties.push_back(UserProperty(
+ Aws::Crt::String((const char *)properties[i].name.ptr, properties[i].name.len),
+ Aws::Crt::String((const char *)properties[i].value.ptr, properties[i].value.len)));
+ }
+ }
+ template <typename T> void setNullableFromOptional(const T *&nullable, const Optional<T> &optional)
+ {
+ if (optional.has_value())
+ {
+ nullable = &optional.value();
+ }
+ }
+
+ void s_AllocateUnderlyingUserProperties(
+ aws_mqtt5_user_property *&dst,
+ const Crt::Vector<UserProperty> &userProperties,
+ Allocator *allocator)
+ {
+ if (dst != nullptr)
+ {
+ aws_mem_release(allocator, (void *)dst);
+ dst = nullptr;
+ }
+ if (userProperties.size() > 0)
+ {
+ dst = reinterpret_cast<struct aws_mqtt5_user_property *>(
+ aws_mem_calloc(allocator, userProperties.size(), sizeof(aws_mqtt5_user_property)));
+ AWS_ZERO_STRUCT(*dst);
+ for (size_t index = 0; index < userProperties.size(); ++index)
+ {
+ (dst + index)->name = aws_byte_cursor_from_array(
+ userProperties[index].getName().c_str(), userProperties[index].getName().length());
+ (dst + index)->value = aws_byte_cursor_from_array(
+ userProperties[index].getValue().c_str(), userProperties[index].getValue().length());
+ }
+ }
+ }
+
+ void s_AllocateStringVector(
+ aws_array_list &dst,
+ const Crt::Vector<String> &stringVector,
+ Allocator *allocator)
+ {
+ AWS_ZERO_STRUCT(dst);
+
+ if (aws_array_list_init_dynamic(&dst, allocator, stringVector.size(), sizeof(aws_byte_cursor)) !=
+ AWS_OP_SUCCESS)
+ {
+ return;
+ }
+
+ for (auto &topic : stringVector)
+ {
+ ByteCursor topicCursor = ByteCursorFromString(topic);
+ aws_array_list_push_back(&dst, reinterpret_cast<const void *>(&topicCursor));
+ }
+ }
+
+ void s_AllocateUnderlyingSubscription(
+ aws_mqtt5_subscription_view *&dst,
+ const Crt::Vector<Subscription> &subscriptions,
+ Allocator *allocator)
+ {
+ if (dst != nullptr)
+ {
+ aws_mem_release(allocator, dst);
+ dst = nullptr;
+ }
+
+ aws_array_list subscription_list;
+ AWS_ZERO_STRUCT(subscription_list);
+
+ if (aws_array_list_init_dynamic(
+ &subscription_list, allocator, subscriptions.size(), sizeof(aws_mqtt5_subscription_view)) !=
+ AWS_OP_SUCCESS)
+ {
+ return;
+ }
+
+ for (auto &subscription : subscriptions)
+ {
+
+ aws_mqtt5_subscription_view underlying_subscription;
+ if (subscription.initializeRawOptions(underlying_subscription) != true)
+ {
+ goto clean_up;
+ }
+
+ aws_array_list_push_back(
+ &subscription_list, reinterpret_cast<const void *>(&underlying_subscription));
+ }
+ dst = static_cast<aws_mqtt5_subscription_view *>(subscription_list.data);
+ return;
+
+ clean_up:
+ aws_array_list_clean_up(&subscription_list);
+ }
+
+ ConnectPacket::ConnectPacket(Allocator *allocator) noexcept
+ : m_allocator(allocator), m_keepAliveIntervalSec(1200), m_userPropertiesStorage(nullptr)
+ {
+ // m_clientId.clear();
+ AWS_ZERO_STRUCT(m_usernameCursor);
+ AWS_ZERO_STRUCT(m_passowrdStorage);
+ AWS_ZERO_STRUCT(m_willStorage);
+ }
+
+ ConnectPacket &ConnectPacket::withKeepAliveIntervalSec(uint16_t second) noexcept
+ {
+ m_keepAliveIntervalSec = second;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withClientId(Crt::String client_id) noexcept
+ {
+ m_clientId = std::move(client_id);
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withUserName(Crt::String username) noexcept
+ {
+ m_username = std::move(username);
+ m_usernameCursor = ByteCursorFromString(m_username.value());
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withPassword(Crt::ByteCursor password) noexcept
+ {
+ setPacketByteBufOptional(m_password, m_passowrdStorage, m_allocator, &password);
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withSessionExpiryIntervalSec(uint32_t sessionExpiryIntervalSec) noexcept
+ {
+ m_sessionExpiryIntervalSec = sessionExpiryIntervalSec;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withRequestResponseInformation(bool requestResponseInformation) noexcept
+ {
+ m_requestResponseInformation = requestResponseInformation;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withRequestProblemInformation(bool requestProblemInformation) noexcept
+ {
+ m_requestProblemInformation = requestProblemInformation;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withReceiveMaximum(uint16_t receiveMaximum) noexcept
+ {
+ m_receiveMaximum = receiveMaximum;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withMaximumPacketSizeBytes(uint32_t maximumPacketSizeBytes) noexcept
+ {
+ m_maximumPacketSizeBytes = maximumPacketSizeBytes;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withWillDelayIntervalSec(uint32_t willDelayIntervalSec) noexcept
+ {
+ m_willDelayIntervalSeconds = willDelayIntervalSec;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withWill(std::shared_ptr<PublishPacket> will) noexcept
+ {
+ m_will = will;
+ m_will.value()->initializeRawOptions(m_willStorage);
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withUserProperties(const Vector<UserProperty> &userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withUserProperties(Vector<UserProperty> &&userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ ConnectPacket &ConnectPacket::withUserProperty(UserProperty &&property) noexcept
+ {
+ m_userProperties.push_back(std::move(property));
+ return *this;
+ }
+
+ bool ConnectPacket::initializeRawOptions(
+ aws_mqtt5_packet_connect_view &raw_options,
+ Allocator * /*allocator*/) noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+
+ raw_options.keep_alive_interval_seconds = m_keepAliveIntervalSec;
+ raw_options.client_id = ByteCursorFromString(m_clientId);
+
+ if (m_username.has_value())
+ {
+ raw_options.username = &m_usernameCursor;
+ }
+
+ if (m_password.has_value())
+ {
+ raw_options.password = &m_password.value();
+ }
+
+ if (m_sessionExpiryIntervalSec.has_value())
+ {
+ raw_options.session_expiry_interval_seconds = &m_sessionExpiryIntervalSec.value();
+ }
+
+ if (m_requestResponseInformation.has_value())
+ {
+ m_requestResponseInformationStorage = m_requestResponseInformation.value() ? 1 : 0;
+ raw_options.request_response_information = &m_requestResponseInformationStorage;
+ }
+
+ if (m_requestProblemInformation.has_value())
+ {
+ m_requestProblemInformationStorage = m_requestProblemInformation.value() ? 1 : 0;
+ raw_options.request_problem_information = &m_requestProblemInformationStorage;
+ }
+
+ if (m_receiveMaximum.has_value())
+ {
+ raw_options.receive_maximum = &m_receiveMaximum.value();
+ }
+
+ if (m_maximumPacketSizeBytes.has_value())
+ {
+ raw_options.maximum_packet_size_bytes = &m_maximumPacketSizeBytes.value();
+ }
+
+ if (m_willDelayIntervalSeconds.has_value())
+ {
+ raw_options.will_delay_interval_seconds = &m_willDelayIntervalSeconds.value();
+ }
+
+ if (m_will.has_value())
+ {
+ raw_options.will = &m_willStorage;
+ }
+
+ s_AllocateUnderlyingUserProperties(m_userPropertiesStorage, m_userProperties, m_allocator);
+ raw_options.user_properties = m_userPropertiesStorage;
+ raw_options.user_property_count = m_userProperties.size();
+
+ return true;
+ }
+
+ ConnectPacket::~ConnectPacket()
+ {
+ if (m_userPropertiesStorage != nullptr)
+ {
+ aws_mem_release(m_allocator, m_userPropertiesStorage);
+ m_userProperties.clear();
+ }
+ aws_byte_buf_clean_up(&m_passowrdStorage);
+ }
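+
+ // Illustrative usage sketch (not part of the upstream sources), assuming
+ // ConnectPacket's allocator-taking constructor is publicly accessible:
+ //
+ //   auto connect = std::make_shared<ConnectPacket>(ApiAllocator());
+ //   connect->withClientId("example-device-42").withKeepAliveIntervalSec(60);
+ //   options.withConnectOptions(connect); // 'options' is a Mqtt5ClientOptions instance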
+
+ uint16_t ConnectPacket::getKeepAliveIntervalSec() const noexcept { return m_keepAliveIntervalSec; }
+
+ const Crt::String &ConnectPacket::getClientId() const noexcept { return m_clientId; }
+
+ const Crt::Optional<Crt::String> &ConnectPacket::getUsername() const noexcept { return m_username; }
+
+ const Crt::Optional<Crt::ByteCursor> &ConnectPacket::getPassword() const noexcept { return m_password; }
+
+ const Crt::Optional<uint32_t> &ConnectPacket::getSessionExpiryIntervalSec() const noexcept
+ {
+ return m_sessionExpiryIntervalSec;
+ }
+
+ const Crt::Optional<bool> &ConnectPacket::getRequestResponseInformation() const noexcept
+ {
+ return m_requestResponseInformation;
+ }
+
+ const Crt::Optional<bool> &ConnectPacket::getRequestProblemInformation() const noexcept
+ {
+ return m_requestProblemInformation;
+ }
+
+ const Crt::Optional<uint16_t> &ConnectPacket::getReceiveMaximum() const noexcept
+ {
+ return m_receiveMaximum;
+ }
+
+ const Crt::Optional<uint32_t> &ConnectPacket::getMaximumPacketSizeBytes() const noexcept
+ {
+ return m_maximumPacketSizeBytes;
+ }
+
+ const Crt::Optional<uint32_t> &ConnectPacket::getWillDelayIntervalSec() const noexcept
+ {
+ return m_willDelayIntervalSeconds;
+ }
+
+ const Crt::Optional<std::shared_ptr<PublishPacket>> &ConnectPacket::getWill() const noexcept
+ {
+ return m_will;
+ }
+
+ const Crt::Vector<UserProperty> &ConnectPacket::getUserProperties() const noexcept
+ {
+ return m_userProperties;
+ }
+
+ UserProperty::UserProperty(Crt::String name, Crt::String value) noexcept
+ : m_name(std::move(name)), m_value(std::move(value))
+ {
+ }
+
+ UserProperty::~UserProperty() noexcept {}
+
+ UserProperty::UserProperty(const UserProperty &toCopy) noexcept
+ : m_name(toCopy.getName()), m_value(toCopy.getValue())
+ {
+ }
+
+ UserProperty::UserProperty(UserProperty &&toMove) noexcept
+ : m_name(std::move(toMove.m_name)), m_value(std::move(toMove.m_value))
+ {
+ }
+
+ UserProperty &UserProperty::operator=(const UserProperty &toCopy) noexcept
+ {
+ if (&toCopy != this)
+ {
+ m_name = toCopy.getName();
+ m_value = toCopy.getValue();
+ }
+ return *this;
+ }
+
+ UserProperty &UserProperty::operator=(UserProperty &&toMove) noexcept
+ {
+ if (&toMove != this)
+ {
+ m_name = std::move(toMove.m_name);
+ m_value = std::move(toMove.m_value);
+ }
+ return *this;
+ }
+
+ PublishPacket::PublishPacket(const aws_mqtt5_packet_publish_view &packet, Allocator *allocator) noexcept
+ : m_allocator(allocator), m_qos(packet.qos), m_retain(packet.retain),
+ m_topicName((const char *)packet.topic.ptr, packet.topic.len), m_userPropertiesStorage(nullptr)
+ {
+ AWS_ZERO_STRUCT(m_payloadStorage);
+ AWS_ZERO_STRUCT(m_contentTypeStorage);
+ AWS_ZERO_STRUCT(m_correlationDataStorage);
+ AWS_ZERO_STRUCT(m_payload);
+
+ withPayload(packet.payload);
+
+ setPacketOptional(m_payloadFormatIndicator, packet.payload_format);
+ setPacketOptional(m_messageExpiryIntervalSec, packet.message_expiry_interval_seconds);
+ setPacketStringOptional(m_responseTopic, m_responseTopicString, packet.response_topic);
+ setPacketByteBufOptional(
+ m_correlationData, m_correlationDataStorage, allocator, packet.correlation_data);
+ setPacketByteBufOptional(m_contentType, m_contentTypeStorage, allocator, packet.content_type);
+ setPacketVector(
+ m_subscriptionIdentifiers, packet.subscription_identifiers, packet.subscription_identifier_count);
+ setUserProperties(m_userProperties, packet.user_properties, packet.user_property_count);
+ }
+
+ /* Default constructor */
+ PublishPacket::PublishPacket(Allocator *allocator) noexcept
+ : m_allocator(allocator), m_qos(QOS::AWS_MQTT5_QOS_AT_MOST_ONCE), m_retain(false), m_topicName(""),
+ m_userPropertiesStorage(nullptr)
+ {
+ AWS_ZERO_STRUCT(m_payloadStorage);
+ AWS_ZERO_STRUCT(m_contentTypeStorage);
+ AWS_ZERO_STRUCT(m_correlationDataStorage);
+ AWS_ZERO_STRUCT(m_payload);
+ }
+
+ PublishPacket::PublishPacket(
+ Crt::String topic,
+ ByteCursor payload,
+ Mqtt5::QOS qos,
+ Allocator *allocator) noexcept
+ : m_allocator(allocator), m_qos(qos), m_retain(false), m_topicName(std::move(topic)),
+ m_userPropertiesStorage(nullptr)
+ {
+ AWS_ZERO_STRUCT(m_payloadStorage);
+ AWS_ZERO_STRUCT(m_contentTypeStorage);
+ AWS_ZERO_STRUCT(m_correlationDataStorage);
+ AWS_ZERO_STRUCT(m_payload);
+
+ // Set up the message payload; keep in sync with PublishPacket::withPayload
+ aws_byte_buf_clean_up(&m_payloadStorage);
+ aws_byte_buf_init_copy_from_cursor(&m_payloadStorage, m_allocator, payload);
+ m_payload = aws_byte_cursor_from_buf(&m_payloadStorage);
+ }
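+
+ // Illustrative usage sketch (not part of the upstream sources): publishing a small
+ // payload at QoS 1 through an existing client ('client' is a std::shared_ptr<Mqtt5Client>
+ // obtained from Mqtt5Client::NewMqtt5Client); the callback signature is assumed from
+ // how onPublishCompletion is invoked above:
+ //
+ //   Crt::String message = "hello";
+ //   auto publish = std::make_shared<PublishPacket>(
+ //       "example/topic", ByteCursorFromString(message), QOS::AWS_MQTT5_QOS_AT_LEAST_ONCE, ApiAllocator());
+ //   client->Publish(
+ //       publish,
+ //       [](std::shared_ptr<Mqtt5Client>, int errorCode, std::shared_ptr<PublishResult>) {
+ //           // errorCode is 0 on success
+ //       });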
+
+ PublishPacket &PublishPacket::withPayload(ByteCursor payload) noexcept
+ {
+ aws_byte_buf_clean_up(&m_payloadStorage);
+ aws_byte_buf_init_copy_from_cursor(&m_payloadStorage, m_allocator, payload);
+ m_payload = aws_byte_cursor_from_buf(&m_payloadStorage);
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withQOS(Mqtt5::QOS qos) noexcept
+ {
+ m_qos = qos;
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withRetain(bool retain) noexcept
+ {
+ m_retain = retain;
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withTopic(Crt::String topic) noexcept
+ {
+ m_topicName = std::move(topic);
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withPayloadFormatIndicator(PayloadFormatIndicator format) noexcept
+ {
+ m_payloadFormatIndicator = format;
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withMessageExpiryIntervalSec(uint32_t second) noexcept
+ {
+ m_messageExpiryIntervalSec = second;
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withResponseTopic(ByteCursor responseTopic) noexcept
+ {
+ setPacketStringOptional(m_responseTopic, m_responseTopicString, &responseTopic);
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withCorrelationData(ByteCursor correlationData) noexcept
+ {
+ setPacketByteBufOptional(m_correlationData, m_correlationDataStorage, m_allocator, &correlationData);
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withUserProperties(const Vector<UserProperty> &userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withUserProperties(Vector<UserProperty> &&userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ PublishPacket &PublishPacket::withUserProperty(UserProperty &&property) noexcept
+ {
+ m_userProperties.push_back(std::move(property));
+ return *this;
+ }
+
+ bool PublishPacket::initializeRawOptions(aws_mqtt5_packet_publish_view &raw_options) noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+ raw_options.payload = m_payload;
+ raw_options.qos = m_qos;
+ raw_options.retain = m_retain;
+ raw_options.topic = ByteCursorFromString(m_topicName);
+
+ if (m_payloadFormatIndicator.has_value())
+ {
+ raw_options.payload_format =
+ (aws_mqtt5_payload_format_indicator *)&m_payloadFormatIndicator.value();
+ }
+ if (m_messageExpiryIntervalSec.has_value())
+ {
+ raw_options.message_expiry_interval_seconds = &m_messageExpiryIntervalSec.value();
+ }
+ if (m_responseTopic.has_value())
+ {
+ raw_options.response_topic = &m_responseTopic.value();
+ }
+ if (m_correlationData.has_value())
+ {
+ raw_options.correlation_data = &m_correlationData.value();
+ }
+
+ s_AllocateUnderlyingUserProperties(m_userPropertiesStorage, m_userProperties, m_allocator);
+ raw_options.user_properties = m_userPropertiesStorage;
+ raw_options.user_property_count = m_userProperties.size();
+
+ return true;
+ }
+
+ const ByteCursor &PublishPacket::getPayload() const noexcept { return m_payload; }
+
+ Mqtt5::QOS PublishPacket::getQOS() const noexcept { return m_qos; }
+
+ bool PublishPacket::getRetain() const noexcept { return m_retain; }
+
+ const Crt::String &PublishPacket::getTopic() const noexcept { return m_topicName; }
+
+ const Crt::Optional<PayloadFormatIndicator> &PublishPacket::getPayloadFormatIndicator() const noexcept
+ {
+ return m_payloadFormatIndicator;
+ }
+
+ const Crt::Optional<uint32_t> &PublishPacket::getMessageExpiryIntervalSec() const noexcept
+ {
+ return m_messageExpiryIntervalSec;
+ }
+
+ const Crt::Optional<ByteCursor> &PublishPacket::getResponseTopic() const noexcept
+ {
+ return m_responseTopic;
+ }
+
+ const Crt::Optional<ByteCursor> &PublishPacket::getCorrelationData() const noexcept
+ {
+ return m_correlationData;
+ }
+
+ const Crt::Vector<uint32_t> &PublishPacket::getSubscriptionIdentifiers() const noexcept
+ {
+ return m_subscriptionIdentifiers;
+ }
+
+ const Crt::Optional<ByteCursor> &PublishPacket::getContentType() const noexcept { return m_contentType; }
+
+ const Crt::Vector<UserProperty> &PublishPacket::getUserProperties() const noexcept
+ {
+ return m_userProperties;
+ }
+
+ PublishPacket::~PublishPacket()
+ {
+ aws_byte_buf_clean_up(&m_payloadStorage);
+ aws_byte_buf_clean_up(&m_correlationDataStorage);
+ aws_byte_buf_clean_up(&m_contentTypeStorage);
+
+ if (m_userProperties.size() > 0)
+ {
+ aws_mem_release(m_allocator, m_userPropertiesStorage);
+ m_userProperties.clear();
+ }
+ }
+
+ DisconnectPacket::DisconnectPacket(Allocator *allocator) noexcept
+ : m_allocator(allocator), m_reasonCode(AWS_MQTT5_DRC_NORMAL_DISCONNECTION),
+ m_userPropertiesStorage(nullptr)
+ {
+ }
+
+ bool DisconnectPacket::initializeRawOptions(aws_mqtt5_packet_disconnect_view &raw_options) noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+
+ raw_options.reason_code = m_reasonCode;
+
+ if (m_sessionExpiryIntervalSec.has_value())
+ {
+ raw_options.session_expiry_interval_seconds = &m_sessionExpiryIntervalSec.value();
+ }
+
+ if (m_reasonString.has_value())
+ {
+ m_reasonStringCursor = ByteCursorFromString(m_reasonString.value());
+ raw_options.reason_string = &m_reasonStringCursor;
+ }
+
+ if (m_serverReference.has_value())
+ {
+ m_serverReferenceCursor = ByteCursorFromString(m_serverReference.value());
+ raw_options.server_reference = &m_serverReferenceCursor;
+ }
+
+ s_AllocateUnderlyingUserProperties(m_userPropertiesStorage, m_userProperties, m_allocator);
+ raw_options.user_properties = m_userPropertiesStorage;
+ raw_options.user_property_count = m_userProperties.size();
+
+ return true;
+ }
+
+ DisconnectPacket &DisconnectPacket::withReasonCode(const DisconnectReasonCode code) noexcept
+ {
+ m_reasonCode = code;
+ return *this;
+ }
+
+ DisconnectPacket &DisconnectPacket::withSessionExpiryIntervalSec(const uint32_t second) noexcept
+ {
+ m_sessionExpiryIntervalSec = second;
+ return *this;
+ }
+
+ DisconnectPacket &DisconnectPacket::withReasonString(Crt::String reason) noexcept
+ {
+ m_reasonString = std::move(reason);
+ return *this;
+ }
+
+ DisconnectPacket &DisconnectPacket::withServerReference(Crt::String server_reference) noexcept
+ {
+ m_serverReference = std::move(server_reference);
+ return *this;
+ }
+
+ DisconnectPacket &DisconnectPacket::withUserProperties(const Vector<UserProperty> &userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ DisconnectPacket &DisconnectPacket::withUserProperties(Vector<UserProperty> &&userProperties) noexcept
+ {
+ m_userProperties = std::move(userProperties);
+ return *this;
+ }
+
+ DisconnectPacket &DisconnectPacket::withUserProperty(UserProperty &&property) noexcept
+ {
+ m_userProperties.push_back(std::move(property));
+ return *this;
+ }
+
+ DisconnectReasonCode DisconnectPacket::getReasonCode() const noexcept { return m_reasonCode; }
+
+ const Crt::Optional<uint32_t> &DisconnectPacket::getSessionExpiryIntervalSec() const noexcept
+ {
+ return m_sessionExpiryIntervalSec;
+ }
+
+ const Crt::Optional<Crt::String> &DisconnectPacket::getReasonString() const noexcept
+ {
+ return m_reasonString;
+ }
+
+ const Crt::Optional<Crt::String> &DisconnectPacket::getServerReference() const noexcept
+ {
+ return m_serverReference;
+ }
+
+ const Crt::Vector<UserProperty> &DisconnectPacket::getUserProperties() const noexcept
+ {
+ return m_userProperties;
+ }
+
+ DisconnectPacket::DisconnectPacket(
+ const aws_mqtt5_packet_disconnect_view &packet,
+ Allocator *allocator) noexcept
+ : m_allocator(allocator), m_userPropertiesStorage(nullptr)
+ {
+ m_reasonCode = packet.reason_code;
+
+ setPacketOptional(m_sessionExpiryIntervalSec, packet.session_expiry_interval_seconds);
+ setPacketStringOptional(m_reasonString, packet.reason_string);
+ setPacketStringOptional(m_serverReference, packet.server_reference);
+ setUserProperties(m_userProperties, packet.user_properties, packet.user_property_count);
+ }
+
+ DisconnectPacket::~DisconnectPacket()
+ {
+ if (m_userPropertiesStorage != nullptr)
+ {
+ aws_mem_release(m_allocator, m_userPropertiesStorage);
+ }
+ }
+
+ PubAckPacket::PubAckPacket(const aws_mqtt5_packet_puback_view &packet, Allocator * /*allocator*/) noexcept
+ {
+ m_reasonCode = packet.reason_code;
+ setPacketStringOptional(m_reasonString, packet.reason_string);
+ setUserProperties(m_userProperties, packet.user_properties, packet.user_property_count);
+ }
+
+ PubAckReasonCode PubAckPacket::getReasonCode() const noexcept { return m_reasonCode; }
+
+ const Crt::Optional<Crt::String> &PubAckPacket::getReasonString() const noexcept { return m_reasonString; }
+
+ const Crt::Vector<UserProperty> &PubAckPacket::getUserProperties() const noexcept
+ {
+ return m_userProperties;
+ }
+
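+ // ConnAckPacket copies every optional CONNACK property out of the C view into Crt::Optional
+ // members, so the values remain valid after the underlying aws_mqtt5_packet_connack_view is gone.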
+ ConnAckPacket::ConnAckPacket(
+ const aws_mqtt5_packet_connack_view &packet,
+ Allocator * /*allocator*/) noexcept
+ {
+ m_sessionPresent = packet.session_present;
+ m_reasonCode = packet.reason_code;
+ setPacketOptional(m_sessionExpiryInterval, packet.session_expiry_interval);
+ setPacketOptional(m_receiveMaximum, packet.receive_maximum);
+ setPacketOptional(m_maximumQOS, packet.maximum_qos);
+ setPacketOptional(m_retainAvailable, packet.retain_available);
+ setPacketOptional(m_maximumPacketSize, packet.maximum_packet_size);
+ setPacketStringOptional(m_assignedClientIdentifier, packet.assigned_client_identifier);
+ setPacketOptional(m_topicAliasMaximum, packet.topic_alias_maximum);
+ setPacketStringOptional(m_reasonString, packet.reason_string);
+ setUserProperties(m_userProperties, packet.user_properties, packet.user_property_count);
+ setPacketOptional(m_wildcardSubscriptionsAvaliable, packet.wildcard_subscriptions_available);
+ setPacketOptional(m_subscriptionIdentifiersAvaliable, packet.subscription_identifiers_available);
+ setPacketOptional(m_sharedSubscriptionsAvaliable, packet.shared_subscriptions_available);
+ setPacketOptional(m_serverKeepAlive, packet.server_keep_alive);
+ setPacketStringOptional(m_responseInformation, packet.response_information);
+ setPacketStringOptional(m_serverReference, packet.server_reference);
+ }
+
+ bool ConnAckPacket::getSessionPresent() const noexcept { return m_sessionPresent; }
+
+ ConnectReasonCode ConnAckPacket::getReasonCode() const noexcept { return m_reasonCode; }
+
+ const Crt::Optional<uint32_t> &ConnAckPacket::getSessionExpiryInterval() const noexcept
+ {
+ return m_sessionExpiryInterval;
+ }
+
+ const Crt::Optional<uint16_t> &ConnAckPacket::getReceiveMaximum() const noexcept
+ {
+ return m_receiveMaximum;
+ }
+
+ const Crt::Optional<QOS> &ConnAckPacket::getMaximumQOS() const noexcept { return m_maximumQOS; }
+
+ const Crt::Optional<bool> &ConnAckPacket::getRetainAvailable() const noexcept { return m_retainAvailable; }
+
+ const Crt::Optional<uint32_t> &ConnAckPacket::getMaximumPacketSize() const noexcept
+ {
+ return m_maximumPacketSize;
+ }
+
+ const Crt::Optional<String> &ConnAckPacket::getAssignedClientIdentifier() const noexcept
+ {
+ return m_assignedClientIdentifier;
+ }
+
+ const Crt::Optional<uint16_t> ConnAckPacket::getTopicAliasMaximum() const noexcept
+ {
+ return m_topicAliasMaximum;
+ }
+
+ const Crt::Optional<String> &ConnAckPacket::getReasonString() const noexcept { return m_reasonString; }
+
+ const Vector<UserProperty> &ConnAckPacket::getUserProperty() const noexcept { return m_userProperties; }
+
+ const Crt::Optional<bool> &ConnAckPacket::getWildcardSubscriptionsAvaliable() const noexcept
+ {
+ return m_wildcardSubscriptionsAvaliable;
+ }
+
+ const Crt::Optional<bool> &ConnAckPacket::getSubscriptionIdentifiersAvaliable() const noexcept
+ {
+ return m_subscriptionIdentifiersAvaliable;
+ }
+
+ const Crt::Optional<bool> &ConnAckPacket::getSharedSubscriptionsAvaliable() const noexcept
+ {
+ return m_sharedSubscriptionsAvaliable;
+ }
+
+ const Crt::Optional<uint16_t> &ConnAckPacket::getServerKeepAlive() const noexcept
+ {
+ return m_serverKeepAlive;
+ }
+
+ const Crt::Optional<String> &ConnAckPacket::getResponseInformation() const noexcept
+ {
+ return m_responseInformation;
+ }
+
+ const Crt::Optional<String> &ConnAckPacket::getServerReference() const noexcept
+ {
+ return m_serverReference;
+ }
+
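+ // Subscription describes a single topic-filter entry of a SUBSCRIBE packet and converts itself
+ // into an aws_mqtt5_subscription_view. A minimal sketch (hypothetical topic filter; assumes the
+ // header supplies a default allocator argument):
+ //
+ //     Subscription sub("sensors/+/temperature", QOS::AWS_MQTT5_QOS_AT_LEAST_ONCE);
+ //     sub.withNoLocal(true).withRetainHandlingType(AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE);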
+ Subscription::Subscription(Allocator *allocator)
+ : m_allocator(allocator), m_topicFilter(""), m_qos(QOS::AWS_MQTT5_QOS_AT_MOST_ONCE), m_noLocal(false),
+ m_retain(false), m_retainHnadlingType(AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE)
+
+ {
+ }
+
+ Subscription::Subscription(Crt::String topicFilter, Mqtt5::QOS qos, Allocator *allocator)
+ : m_allocator(allocator), m_topicFilter(std::move(topicFilter)), m_qos(qos), m_noLocal(false),
+ m_retain(false), m_retainHnadlingType(AWS_MQTT5_RHT_SEND_ON_SUBSCRIBE)
+ {
+ }
+
+ Subscription &Subscription::withTopicFilter(Crt::String topicFilter) noexcept
+ {
+ m_topicFilter = std::move(topicFilter);
+ return *this;
+ }
+
+ Subscription &Subscription::withQOS(Mqtt5::QOS qos) noexcept
+ {
+ m_qos = qos;
+ return *this;
+ }
+ Subscription &Subscription::withNoLocal(bool noLocal) noexcept
+ {
+ m_noLocal = noLocal;
+ return *this;
+ }
+ Subscription &Subscription::withRetain(bool retain) noexcept
+ {
+ m_retain = retain;
+ return *this;
+ }
+ Subscription &Subscription::withRetainHandlingType(RetainHandlingType retainHandlingType) noexcept
+ {
+ m_retainHnadlingType = retainHandlingType;
+ return *this;
+ }
+
+ bool Subscription::initializeRawOptions(aws_mqtt5_subscription_view &raw_options) const noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+ raw_options.topic_filter = ByteCursorFromString(m_topicFilter);
+ raw_options.no_local = m_noLocal;
+ raw_options.qos = m_qos;
+ raw_options.retain_as_published = m_retain;
+ raw_options.retain_handling_type = m_retainHnadlingType;
+ return true;
+ }
+
+ Subscription::Subscription(const Subscription &toCopy) noexcept
+ : m_allocator(toCopy.m_allocator), m_topicFilter(toCopy.m_topicFilter), m_qos(toCopy.m_qos),
+ m_noLocal(toCopy.m_noLocal), m_retain(toCopy.m_retain),
+ m_retainHnadlingType(toCopy.m_retainHnadlingType)
+ {
+ }
+
+ Subscription::Subscription(Subscription &&toMove) noexcept
+ : m_allocator(toMove.m_allocator), m_topicFilter(std::move(toMove.m_topicFilter)), m_qos(toMove.m_qos),
+ m_noLocal(toMove.m_noLocal), m_retain(toMove.m_retain),
+ m_retainHnadlingType(toMove.m_retainHnadlingType)
+ {
+ }
+
+ Subscription &Subscription::operator=(const Subscription &toCopy) noexcept
+ {
+ if (&toCopy != this)
+ {
+ m_allocator = toCopy.m_allocator;
+ m_qos = toCopy.m_qos;
+ m_topicFilter = toCopy.m_topicFilter;
+ m_noLocal = toCopy.m_noLocal;
+ m_retain = toCopy.m_retain;
+ m_retainHnadlingType = toCopy.m_retainHnadlingType;
+ }
+ return *this;
+ }
+
+ Subscription &Subscription::operator=(Subscription &&toMove) noexcept
+ {
+ if (&toMove != this)
+ {
+ m_allocator = toMove.m_allocator;
+ m_qos = toMove.m_qos;
+ m_topicFilter = std::move(toMove.m_topicFilter);
+ m_noLocal = toMove.m_noLocal;
+ m_retain = toMove.m_retain;
+ m_retainHnadlingType = toMove.m_retainHnadlingType;
+ }
+ return *this;
+ }
+
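+ // SubscribePacket aggregates one or more Subscription entries plus optional user properties and
+ // converts them into an aws_mqtt5_packet_subscribe_view in initializeRawOptions(). A minimal
+ // sketch (hypothetical filter; assumes default allocator arguments from the header and that the
+ // packet is passed to the Mqtt5 client's Subscribe()):
+ //
+ //     SubscribePacket subscribe;
+ //     subscribe.withSubscription(Subscription("sensors/#", QOS::AWS_MQTT5_QOS_AT_MOST_ONCE));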
+ SubscribePacket::SubscribePacket(Allocator *allocator) noexcept
+ : m_allocator(allocator), m_subscriptionViewStorage(nullptr), m_userPropertiesStorage(nullptr)
+ {
+ }
+
+ SubscribePacket &SubscribePacket::withUserProperties(const Vector<UserProperty> &userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ SubscribePacket &SubscribePacket::withUserProperties(Vector<UserProperty> &&userProperties) noexcept
+ {
+ m_userProperties = std::move(userProperties);
+ return *this;
+ }
+
+ SubscribePacket &SubscribePacket::withUserProperty(UserProperty &&property) noexcept
+ {
+ m_userProperties.push_back(std::move(property));
+ return *this;
+ }
+
+ SubscribePacket &SubscribePacket::withSubscriptionIdentifier(uint32_t identifier) noexcept
+ {
+ m_subscriptionIdentifier = identifier;
+ return *this;
+ }
+
+ SubscribePacket &SubscribePacket::withSubscriptions(const Crt::Vector<Subscription> &subscriptions) noexcept
+ {
+ m_subscriptions = subscriptions;
+
+ return *this;
+ }
+
+ SubscribePacket &SubscribePacket::withSubscriptions(Vector<Subscription> &&subscriptions) noexcept
+ {
+ m_subscriptions = std::move(subscriptions);
+ return *this;
+ }
+
+ SubscribePacket &SubscribePacket::withSubscription(Subscription &&subscription) noexcept
+ {
+ m_subscriptions.push_back(std::move(subscription));
+ return *this;
+ }
+
+ bool SubscribePacket::initializeRawOptions(aws_mqtt5_packet_subscribe_view &raw_options) noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+
+ s_AllocateUnderlyingSubscription(m_subscriptionViewStorage, m_subscriptions, m_allocator);
+ raw_options.subscription_count = m_subscriptions.size();
+ raw_options.subscriptions = m_subscriptionViewStorage;
+
+ s_AllocateUnderlyingUserProperties(m_userPropertiesStorage, m_userProperties, m_allocator);
+ raw_options.user_properties = m_userPropertiesStorage;
+ raw_options.user_property_count = m_userProperties.size();
+
+ return true;
+ }
+
+ SubscribePacket::~SubscribePacket()
+ {
+ if (m_userPropertiesStorage != nullptr)
+ {
+ aws_mem_release(m_allocator, m_userPropertiesStorage);
+ m_userPropertiesStorage = nullptr;
+ }
+
+ if (m_subscriptionViewStorage != nullptr)
+ {
+ aws_mem_release(m_allocator, m_subscriptionViewStorage);
+ m_subscriptionViewStorage = nullptr;
+ }
+ }
+
+ SubAckPacket::SubAckPacket(const aws_mqtt5_packet_suback_view &packet, Allocator * /*allocator*/) noexcept
+ {
+ setPacketStringOptional(m_reasonString, packet.reason_string);
+ setUserProperties(m_userProperties, packet.user_properties, packet.user_property_count);
+ for (size_t i = 0; i < packet.reason_code_count; i++)
+ {
+ m_reasonCodes.push_back(*(packet.reason_codes + i));
+ }
+ }
+
+ const Crt::Optional<Crt::String> &SubAckPacket::getReasonString() const noexcept { return m_reasonString; }
+
+ const Crt::Vector<UserProperty> &SubAckPacket::getUserProperties() const noexcept
+ {
+ return m_userProperties;
+ }
+
+ const Crt::Vector<SubAckReasonCode> &SubAckPacket::getReasonCodes() const noexcept { return m_reasonCodes; }
+
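+ // UnsubscribePacket collects the topic filters to remove; initializeRawOptions() copies them into
+ // an aws_array_list of byte cursors for aws_mqtt5_packet_unsubscribe_view, and the destructor
+ // releases that storage.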
+ UnsubscribePacket::UnsubscribePacket(Allocator *allocator) noexcept
+ : m_allocator(allocator), m_userPropertiesStorage(nullptr)
+ {
+ AWS_ZERO_STRUCT(m_topicFiltersList);
+ }
+
+ UnsubscribePacket &UnsubscribePacket::withTopicFilter(Crt::String topicFilter) noexcept
+ {
+ m_topicFilters.push_back(std::move(topicFilter));
+ return *this;
+ }
+
+ UnsubscribePacket &UnsubscribePacket::withTopicFilters(Crt::Vector<String> topicFilters) noexcept
+ {
+ m_topicFilters = std::move(topicFilters);
+
+ return *this;
+ }
+
+ UnsubscribePacket &UnsubscribePacket::withUserProperties(
+ const Vector<UserProperty> &userProperties) noexcept
+ {
+ m_userProperties = userProperties;
+ return *this;
+ }
+
+ UnsubscribePacket &UnsubscribePacket::withUserProperties(Vector<UserProperty> &&userProperties) noexcept
+ {
+ m_userProperties = std::move(userProperties);
+ return *this;
+ }
+
+ UnsubscribePacket &UnsubscribePacket::withUserProperty(UserProperty &&property) noexcept
+ {
+ m_userProperties.push_back(std::move(property));
+ return *this;
+ }
+
+ bool UnsubscribePacket::initializeRawOptions(aws_mqtt5_packet_unsubscribe_view &raw_options) noexcept
+ {
+ AWS_ZERO_STRUCT(raw_options);
+
+ s_AllocateStringVector(m_topicFiltersList, m_topicFilters, m_allocator);
+ raw_options.topic_filters = static_cast<aws_byte_cursor *>(m_topicFiltersList.data);
+ raw_options.topic_filter_count = m_topicFilters.size();
+
+ s_AllocateUnderlyingUserProperties(m_userPropertiesStorage, m_userProperties, m_allocator);
+ raw_options.user_properties = m_userPropertiesStorage;
+ raw_options.user_property_count = m_userProperties.size();
+
+ return true;
+ }
+
+ UnsubscribePacket::~UnsubscribePacket()
+ {
+ aws_array_list_clean_up(&m_topicFiltersList);
+ AWS_ZERO_STRUCT(m_topicFiltersList);
+
+ if (m_userPropertiesStorage != nullptr)
+ {
+ aws_mem_release(m_allocator, m_userPropertiesStorage);
+ m_userPropertiesStorage = nullptr;
+ }
+ }
+
+ UnSubAckPacket::UnSubAckPacket(const aws_mqtt5_packet_unsuback_view &packet, Allocator *allocator) noexcept
+ {
+ (void)allocator;
+
+ setPacketStringOptional(m_reasonString, packet.reason_string);
+
+ for (size_t i = 0; i < packet.reason_code_count; i++)
+ {
+ m_reasonCodes.push_back(*(packet.reason_codes + i));
+ }
+ setUserProperties(m_userProperties, packet.user_properties, packet.user_property_count);
+ }
+
+ const Crt::Optional<Crt::String> &UnSubAckPacket::getReasonString() const noexcept
+ {
+ return m_reasonString;
+ }
+
+ const Crt::Vector<UserProperty> &UnSubAckPacket::getUserProperties() const noexcept
+ {
+ return m_userProperties;
+ }
+
+ const Crt::Vector<UnSubAckReasonCode> &UnSubAckPacket::getReasonCodes() const noexcept
+ {
+ return m_reasonCodes;
+ }
+
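+ // NegotiatedSettings snapshots the values the client and server agreed on during CONNECT/CONNACK
+ // (maximum QoS, session expiry, receive maximum, keep-alive, feature availability flags, rejoined
+ // session and the resolved client id) by copying them out of the C client's
+ // aws_mqtt5_negotiated_settings.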
+ NegotiatedSettings::NegotiatedSettings(
+ const aws_mqtt5_negotiated_settings &negotiated_settings,
+ Allocator *allocator) noexcept
+ {
+ (void)allocator;
+
+ m_maximumQOS = negotiated_settings.maximum_qos;
+ m_sessionExpiryIntervalSec = negotiated_settings.session_expiry_interval;
+ m_receiveMaximumFromServer = negotiated_settings.receive_maximum_from_server;
+
+ m_maximumPacketSizeBytes = negotiated_settings.maximum_packet_size_to_server;
+ m_serverKeepAliveSec = negotiated_settings.server_keep_alive;
+
+ m_retainAvailable = negotiated_settings.retain_available;
+ m_wildcardSubscriptionsAvaliable = negotiated_settings.wildcard_subscriptions_available;
+ m_subscriptionIdentifiersAvaliable = negotiated_settings.subscription_identifiers_available;
+ m_sharedSubscriptionsAvaliable = negotiated_settings.shared_subscriptions_available;
+ m_rejoinedSession = negotiated_settings.rejoined_session;
+
+ m_clientId = Crt::String(
+ (const char *)negotiated_settings.client_id_storage.buffer,
+ negotiated_settings.client_id_storage.len);
+ }
+
+ Mqtt5::QOS NegotiatedSettings::getMaximumQOS() const noexcept { return m_maximumQOS; }
+
+ uint32_t NegotiatedSettings::getSessionExpiryIntervalSec() const noexcept
+ {
+ return m_sessionExpiryIntervalSec;
+ }
+
+ uint16_t NegotiatedSettings::getReceiveMaximumFromServer() const noexcept
+ {
+ return m_receiveMaximumFromServer;
+ }
+
+ uint32_t NegotiatedSettings::getMaximumPacketSizeBytes() const noexcept { return m_maximumPacketSizeBytes; }
+
+ uint16_t NegotiatedSettings::getServerKeepAlive() const noexcept { return m_serverKeepAliveSec; }
+
+ bool NegotiatedSettings::getRetainAvailable() const noexcept { return m_retainAvailable; }
+
+ bool NegotiatedSettings::getWildcardSubscriptionsAvaliable() const noexcept
+ {
+ return m_wildcardSubscriptionsAvaliable;
+ }
+
+ bool NegotiatedSettings::getSubscriptionIdentifiersAvaliable() const noexcept
+ {
+ return m_subscriptionIdentifiersAvaliable;
+ }
+
+ bool NegotiatedSettings::getSharedSubscriptionsAvaliable() const noexcept
+ {
+ return m_sharedSubscriptionsAvaliable;
+ }
+
+ bool NegotiatedSettings::getRejoinedSession() const noexcept { return m_rejoinedSession; }
+
+ const Crt::String &NegotiatedSettings::getClientId() const noexcept { return m_clientId; }
+
+ PublishResult::PublishResult() : m_ack(nullptr), m_errorCode(0) {}
+
+ PublishResult::PublishResult(std::shared_ptr<PubAckPacket> puback) : m_errorCode(0) { m_ack = puback; }
+
+ PublishResult::PublishResult(int error) : m_ack(nullptr), m_errorCode(error) {}
+
+ PublishResult::~PublishResult() noexcept { m_ack.reset(); }
+
+ } // namespace Mqtt5
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp b/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
new file mode 100644
index 0000000000..e36c591237
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/source/mqtt/MqttClient.cpp
@@ -0,0 +1,816 @@
+/**
+ * Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
+ * SPDX-License-Identifier: Apache-2.0.
+ */
+#include <aws/crt/mqtt/MqttClient.h>
+
+#include <aws/crt/Api.h>
+#include <aws/crt/StlAllocator.h>
+#include <aws/crt/http/HttpProxyStrategy.h>
+#include <aws/crt/http/HttpRequestResponse.h>
+#include <aws/crt/io/Bootstrap.h>
+
+#include <utility>
+
+#define AWS_MQTT_MAX_TOPIC_LENGTH 65535
+
+namespace Aws
+{
+ namespace Crt
+ {
+ namespace Mqtt
+ {
+ void MqttConnection::s_onConnectionInterrupted(aws_mqtt_client_connection *, int errorCode, void *userData)
+ {
+ auto connWrapper = reinterpret_cast<MqttConnection *>(userData);
+ if (connWrapper->OnConnectionInterrupted)
+ {
+ connWrapper->OnConnectionInterrupted(*connWrapper, errorCode);
+ }
+ }
+
+ void MqttConnection::s_onConnectionResumed(
+ aws_mqtt_client_connection *,
+ ReturnCode returnCode,
+ bool sessionPresent,
+ void *userData)
+ {
+ auto connWrapper = reinterpret_cast<MqttConnection *>(userData);
+ if (connWrapper->OnConnectionResumed)
+ {
+ connWrapper->OnConnectionResumed(*connWrapper, returnCode, sessionPresent);
+ }
+ }
+
+ void MqttConnection::s_onConnectionCompleted(
+ aws_mqtt_client_connection *,
+ int errorCode,
+ enum aws_mqtt_connect_return_code returnCode,
+ bool sessionPresent,
+ void *userData)
+ {
+ auto connWrapper = reinterpret_cast<MqttConnection *>(userData);
+ if (connWrapper->OnConnectionCompleted)
+ {
+ connWrapper->OnConnectionCompleted(*connWrapper, errorCode, returnCode, sessionPresent);
+ }
+ }
+
+ void MqttConnection::s_onDisconnect(aws_mqtt_client_connection *, void *userData)
+ {
+ auto connWrapper = reinterpret_cast<MqttConnection *>(userData);
+ if (connWrapper->OnDisconnect)
+ {
+ connWrapper->OnDisconnect(*connWrapper);
+ }
+ }
+
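+ // The *CallbackData structs below carry the user's std::function handler, the owning connection
+ // and the allocator across the C callback boundary; each is heap-allocated with Crt::New and
+ // released either in the completion callback or in the matching clean-up hook.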
+ struct PubCallbackData
+ {
+ PubCallbackData() : connection(nullptr), allocator(nullptr) {}
+
+ MqttConnection *connection;
+ OnMessageReceivedHandler onMessageReceived;
+ Allocator *allocator;
+ };
+
+ static void s_cleanUpOnPublishData(void *userData)
+ {
+ auto callbackData = reinterpret_cast<PubCallbackData *>(userData);
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+
+ void MqttConnection::s_onPublish(
+ aws_mqtt_client_connection *,
+ const aws_byte_cursor *topic,
+ const aws_byte_cursor *payload,
+ bool dup,
+ enum aws_mqtt_qos qos,
+ bool retain,
+ void *userData)
+ {
+ auto callbackData = reinterpret_cast<PubCallbackData *>(userData);
+
+ if (callbackData->onMessageReceived)
+ {
+ String topicStr(reinterpret_cast<char *>(topic->ptr), topic->len);
+ ByteBuf payloadBuf = aws_byte_buf_from_array(payload->ptr, payload->len);
+ callbackData->onMessageReceived(
+ *(callbackData->connection), topicStr, payloadBuf, dup, qos, retain);
+ }
+ }
+
+ struct OpCompleteCallbackData
+ {
+ OpCompleteCallbackData() : connection(nullptr), topic(nullptr), allocator(nullptr) {}
+
+ MqttConnection *connection;
+ OnOperationCompleteHandler onOperationComplete;
+ const char *topic;
+ Allocator *allocator;
+ };
+
+ void MqttConnection::s_onOpComplete(
+ aws_mqtt_client_connection *,
+ uint16_t packetId,
+ int errorCode,
+ void *userData)
+ {
+ auto callbackData = reinterpret_cast<OpCompleteCallbackData *>(userData);
+
+ if (callbackData->onOperationComplete)
+ {
+ callbackData->onOperationComplete(*callbackData->connection, packetId, errorCode);
+ }
+
+ if (callbackData->topic)
+ {
+ aws_mem_release(
+ callbackData->allocator, reinterpret_cast<void *>(const_cast<char *>(callbackData->topic)));
+ }
+
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+
+ struct SubAckCallbackData
+ {
+ SubAckCallbackData() : connection(nullptr), topic(nullptr), allocator(nullptr) {}
+
+ MqttConnection *connection;
+ OnSubAckHandler onSubAck;
+ const char *topic;
+ Allocator *allocator;
+ };
+
+ void MqttConnection::s_onSubAck(
+ aws_mqtt_client_connection *,
+ uint16_t packetId,
+ const struct aws_byte_cursor *topic,
+ enum aws_mqtt_qos qos,
+ int errorCode,
+ void *userData)
+ {
+ auto callbackData = reinterpret_cast<SubAckCallbackData *>(userData);
+
+ if (callbackData->onSubAck)
+ {
+ String topicStr(reinterpret_cast<char *>(topic->ptr), topic->len);
+ callbackData->onSubAck(*callbackData->connection, packetId, topicStr, qos, errorCode);
+ }
+
+ if (callbackData->topic)
+ {
+ aws_mem_release(
+ callbackData->allocator, reinterpret_cast<void *>(const_cast<char *>(callbackData->topic)));
+ }
+
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+
+ struct MultiSubAckCallbackData
+ {
+ MultiSubAckCallbackData() : connection(nullptr), topic(nullptr), allocator(nullptr) {}
+
+ MqttConnection *connection;
+ OnMultiSubAckHandler onSubAck;
+ const char *topic;
+ Allocator *allocator;
+ };
+
+ void MqttConnection::s_onMultiSubAck(
+ aws_mqtt_client_connection *,
+ uint16_t packetId,
+ const struct aws_array_list *topicSubacks,
+ int errorCode,
+ void *userData)
+ {
+ auto callbackData = reinterpret_cast<MultiSubAckCallbackData *>(userData);
+
+ if (callbackData->onSubAck)
+ {
+ size_t length = aws_array_list_length(topicSubacks);
+ Vector<String> topics;
+ topics.reserve(length);
+ QOS qos = AWS_MQTT_QOS_AT_MOST_ONCE;
+ for (size_t i = 0; i < length; ++i)
+ {
+ aws_mqtt_topic_subscription *subscription = NULL;
+ aws_array_list_get_at(topicSubacks, &subscription, i);
+ topics.push_back(
+ String(reinterpret_cast<char *>(subscription->topic.ptr), subscription->topic.len));
+ qos = subscription->qos;
+ }
+
+ callbackData->onSubAck(*callbackData->connection, packetId, topics, qos, errorCode);
+ }
+
+ if (callbackData->topic)
+ {
+ aws_mem_release(
+ callbackData->allocator, reinterpret_cast<void *>(const_cast<char *>(callbackData->topic)));
+ }
+
+ Crt::Delete(callbackData, callbackData->allocator);
+ }
+
+ void MqttConnection::s_connectionInit(
+ MqttConnection *self,
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions)
+ {
+
+ self->m_hostName = String(hostName);
+ self->m_port = port;
+ self->m_socketOptions = socketOptions;
+
+ self->m_underlyingConnection = aws_mqtt_client_connection_new(self->m_owningClient);
+
+ if (self->m_underlyingConnection)
+ {
+ aws_mqtt_client_connection_set_connection_interruption_handlers(
+ self->m_underlyingConnection,
+ MqttConnection::s_onConnectionInterrupted,
+ self,
+ MqttConnection::s_onConnectionResumed,
+ self);
+ }
+ }
+
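+ // s_onWebsocketHandshake wraps the raw aws_http_message in an Http::HttpRequest (placement-new,
+ // since HttpRequest's constructors are private) and hands it to the user's WebsocketInterceptor;
+ // the interceptor's completion lambda forwards the transformed request back to the C layer.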
+ void MqttConnection::s_onWebsocketHandshake(
+ struct aws_http_message *rawRequest,
+ void *user_data,
+ aws_mqtt_transform_websocket_handshake_complete_fn *complete_fn,
+ void *complete_ctx)
+ {
+ auto connection = reinterpret_cast<MqttConnection *>(user_data);
+
+ Allocator *allocator = connection->m_owningClient->allocator;
+ // we have to do this because of private constructors.
+ auto toSeat =
+ reinterpret_cast<Http::HttpRequest *>(aws_mem_acquire(allocator, sizeof(Http::HttpRequest)));
+ toSeat = new (toSeat) Http::HttpRequest(allocator, rawRequest);
+
+ std::shared_ptr<Http::HttpRequest> request = std::shared_ptr<Http::HttpRequest>(
+ toSeat, [allocator](Http::HttpRequest *ptr) { Crt::Delete(ptr, allocator); });
+
+ auto onInterceptComplete =
+ [complete_fn,
+ complete_ctx](const std::shared_ptr<Http::HttpRequest> &transformedRequest, int errorCode) {
+ complete_fn(transformedRequest->GetUnderlyingMessage(), errorCode, complete_ctx);
+ };
+
+ connection->WebsocketInterceptor(request, onInterceptComplete);
+ }
+
+ MqttConnection::MqttConnection(
+ aws_mqtt_client *client,
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ const Crt::Io::TlsContext &tlsContext,
+ bool useWebsocket) noexcept
+ : m_owningClient(client), m_tlsContext(tlsContext), m_tlsOptions(tlsContext.NewConnectionOptions()),
+ m_onAnyCbData(nullptr), m_useTls(true), m_useWebsocket(useWebsocket)
+ {
+ s_connectionInit(this, hostName, port, socketOptions);
+ }
+
+ MqttConnection::MqttConnection(
+ aws_mqtt_client *client,
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ bool useWebsocket) noexcept
+ : m_owningClient(client), m_onAnyCbData(nullptr), m_useTls(false), m_useWebsocket(useWebsocket)
+ {
+ s_connectionInit(this, hostName, port, socketOptions);
+ }
+
+ MqttConnection::~MqttConnection()
+ {
+ if (*this)
+ {
+ aws_mqtt_client_connection_release(m_underlyingConnection);
+
+ if (m_onAnyCbData)
+ {
+ auto pubCallbackData = reinterpret_cast<PubCallbackData *>(m_onAnyCbData);
+ Crt::Delete(pubCallbackData, pubCallbackData->allocator);
+ }
+ }
+ }
+
+ MqttConnection::operator bool() const noexcept { return m_underlyingConnection != nullptr; }
+
+ int MqttConnection::LastError() const noexcept { return aws_last_error(); }
+
+ bool MqttConnection::SetWill(const char *topic, QOS qos, bool retain, const ByteBuf &payload) noexcept
+ {
+ ByteBuf topicBuf = aws_byte_buf_from_c_str(topic);
+ ByteCursor topicCur = aws_byte_cursor_from_buf(&topicBuf);
+ ByteCursor payloadCur = aws_byte_cursor_from_buf(&payload);
+
+ return aws_mqtt_client_connection_set_will(
+ m_underlyingConnection, &topicCur, qos, retain, &payloadCur) == 0;
+ }
+
+ bool MqttConnection::SetLogin(const char *userName, const char *password) noexcept
+ {
+ ByteBuf userNameBuf = aws_byte_buf_from_c_str(userName);
+ ByteCursor userNameCur = aws_byte_cursor_from_buf(&userNameBuf);
+
+ ByteCursor *pwdCurPtr = nullptr;
+ ByteCursor pwdCur;
+
+ if (password)
+ {
+ pwdCur = ByteCursorFromCString(password);
+ pwdCurPtr = &pwdCur;
+ }
+ return aws_mqtt_client_connection_set_login(m_underlyingConnection, &userNameCur, pwdCurPtr) == 0;
+ }
+
+ bool MqttConnection::SetWebsocketProxyOptions(
+ const Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept
+ {
+ m_proxyOptions = proxyOptions;
+ return true;
+ }
+
+ bool MqttConnection::SetHttpProxyOptions(
+ const Http::HttpClientConnectionProxyOptions &proxyOptions) noexcept
+ {
+ m_proxyOptions = proxyOptions;
+ return true;
+ }
+
+ bool MqttConnection::SetReconnectTimeout(uint64_t min_seconds, uint64_t max_seconds) noexcept
+ {
+ return aws_mqtt_client_connection_set_reconnect_timeout(
+ m_underlyingConnection, min_seconds, max_seconds) == 0;
+ }
+
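+ // Connect() translates its arguments into aws_mqtt_connection_options, wires up the websocket
+ // handshake transform and proxy options when configured, and starts the asynchronous connect; the
+ // outcome is reported through OnConnectionCompleted. A minimal sketch (hypothetical client id and
+ // timeouts; assumes `connection` came from MqttClient::NewConnection):
+ //
+ //     connection->OnConnectionCompleted =
+ //         [](MqttConnection &, int errorCode, ReturnCode, bool) { /* inspect errorCode */ };
+ //     connection->Connect("my-client-id", true /*cleanSession*/, 30 /*keepAliveSecs*/,
+ //         5000 /*pingTimeoutMs*/, 0 /*protocolOperationTimeoutMs*/);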
+ bool MqttConnection::Connect(
+ const char *clientId,
+ bool cleanSession,
+ uint16_t keepAliveTime,
+ uint32_t pingTimeoutMs,
+ uint32_t protocolOperationTimeoutMs) noexcept
+ {
+ aws_mqtt_connection_options options;
+ AWS_ZERO_STRUCT(options);
+ options.client_id = aws_byte_cursor_from_c_str(clientId);
+ options.host_name = aws_byte_cursor_from_array(
+ reinterpret_cast<const uint8_t *>(m_hostName.data()), m_hostName.length());
+ options.tls_options =
+ m_useTls ? const_cast<aws_tls_connection_options *>(m_tlsOptions.GetUnderlyingHandle()) : nullptr;
+ options.port = m_port;
+ options.socket_options = &m_socketOptions.GetImpl();
+ options.clean_session = cleanSession;
+ options.keep_alive_time_secs = keepAliveTime;
+ options.ping_timeout_ms = pingTimeoutMs;
+ options.protocol_operation_timeout_ms = protocolOperationTimeoutMs;
+ options.on_connection_complete = MqttConnection::s_onConnectionCompleted;
+ options.user_data = this;
+
+ if (m_useWebsocket)
+ {
+ if (WebsocketInterceptor)
+ {
+ if (aws_mqtt_client_connection_use_websockets(
+ m_underlyingConnection, MqttConnection::s_onWebsocketHandshake, this, nullptr, nullptr))
+ {
+ return false;
+ }
+ }
+ else
+ {
+ if (aws_mqtt_client_connection_use_websockets(
+ m_underlyingConnection, nullptr, nullptr, nullptr, nullptr))
+ {
+ return false;
+ }
+ }
+ }
+
+ if (m_proxyOptions)
+ {
+ struct aws_http_proxy_options proxyOptions;
+ m_proxyOptions->InitializeRawProxyOptions(proxyOptions);
+
+ if (aws_mqtt_client_connection_set_http_proxy_options(m_underlyingConnection, &proxyOptions))
+ {
+ return false;
+ }
+ }
+
+ return aws_mqtt_client_connection_connect(m_underlyingConnection, &options) == AWS_OP_SUCCESS;
+ }
+
+ bool MqttConnection::Disconnect() noexcept
+ {
+ return aws_mqtt_client_connection_disconnect(
+ m_underlyingConnection, MqttConnection::s_onDisconnect, this) == AWS_OP_SUCCESS;
+ }
+
+ aws_mqtt_client_connection *MqttConnection::GetUnderlyingConnection() noexcept
+ {
+ return m_underlyingConnection;
+ }
+
+ bool MqttConnection::SetOnMessageHandler(OnPublishReceivedHandler &&onPublish) noexcept
+ {
+ return SetOnMessageHandler(
+ [onPublish](
+ MqttConnection &connection, const String &topic, const ByteBuf &payload, bool, QOS, bool) {
+ onPublish(connection, topic, payload);
+ });
+ }
+
+ bool MqttConnection::SetOnMessageHandler(OnMessageReceivedHandler &&onMessage) noexcept
+ {
+ auto pubCallbackData = Aws::Crt::New<PubCallbackData>(m_owningClient->allocator);
+
+ if (!pubCallbackData)
+ {
+ return false;
+ }
+
+ pubCallbackData->connection = this;
+ pubCallbackData->onMessageReceived = std::move(onMessage);
+ pubCallbackData->allocator = m_owningClient->allocator;
+
+ if (!aws_mqtt_client_connection_set_on_any_publish_handler(
+ m_underlyingConnection, s_onPublish, pubCallbackData))
+ {
+ m_onAnyCbData = reinterpret_cast<void *>(pubCallbackData);
+ return true;
+ }
+
+ Aws::Crt::Delete(pubCallbackData, pubCallbackData->allocator);
+ return false;
+ }
+
+ uint16_t MqttConnection::Subscribe(
+ const char *topicFilter,
+ QOS qos,
+ OnPublishReceivedHandler &&onPublish,
+ OnSubAckHandler &&onSubAck) noexcept
+ {
+ return Subscribe(
+ topicFilter,
+ qos,
+ [onPublish](
+ MqttConnection &connection, const String &topic, const ByteBuf &payload, bool, QOS, bool) {
+ onPublish(connection, topic, payload);
+ },
+ std::move(onSubAck));
+ }
+
+ uint16_t MqttConnection::Subscribe(
+ const char *topicFilter,
+ QOS qos,
+ OnMessageReceivedHandler &&onMessage,
+ OnSubAckHandler &&onSubAck) noexcept
+ {
+ auto pubCallbackData = Crt::New<PubCallbackData>(m_owningClient->allocator);
+
+ if (!pubCallbackData)
+ {
+ return 0;
+ }
+
+ pubCallbackData->connection = this;
+ pubCallbackData->onMessageReceived = std::move(onMessage);
+ pubCallbackData->allocator = m_owningClient->allocator;
+
+ auto subAckCallbackData = Crt::New<SubAckCallbackData>(m_owningClient->allocator);
+
+ if (!subAckCallbackData)
+ {
+ Crt::Delete(pubCallbackData, m_owningClient->allocator);
+ return 0;
+ }
+
+ subAckCallbackData->connection = this;
+ subAckCallbackData->allocator = m_owningClient->allocator;
+ subAckCallbackData->onSubAck = std::move(onSubAck);
+ subAckCallbackData->topic = nullptr;
+
+ ByteBuf topicFilterBuf = aws_byte_buf_from_c_str(topicFilter);
+ ByteCursor topicFilterCur = aws_byte_cursor_from_buf(&topicFilterBuf);
+
+ uint16_t packetId = aws_mqtt_client_connection_subscribe(
+ m_underlyingConnection,
+ &topicFilterCur,
+ qos,
+ s_onPublish,
+ pubCallbackData,
+ s_cleanUpOnPublishData,
+ s_onSubAck,
+ subAckCallbackData);
+
+ if (!packetId)
+ {
+ Crt::Delete(pubCallbackData, pubCallbackData->allocator);
+ Crt::Delete(subAckCallbackData, subAckCallbackData->allocator);
+ }
+
+ return packetId;
+ }
+
+ uint16_t MqttConnection::Subscribe(
+ const Vector<std::pair<const char *, OnPublishReceivedHandler>> &topicFilters,
+ QOS qos,
+ OnMultiSubAckHandler &&onSubAck) noexcept
+ {
+ Vector<std::pair<const char *, OnMessageReceivedHandler>> newTopicFilters;
+ newTopicFilters.reserve(topicFilters.size());
+ for (const auto &pair : topicFilters)
+ {
+ const OnPublishReceivedHandler &pubHandler = pair.second;
+ newTopicFilters.emplace_back(
+ pair.first,
+ [pubHandler](
+ MqttConnection &connection, const String &topic, const ByteBuf &payload, bool, QOS, bool) {
+ pubHandler(connection, topic, payload);
+ });
+ }
+ return Subscribe(newTopicFilters, qos, std::move(onSubAck));
+ }
+
+ uint16_t MqttConnection::Subscribe(
+ const Vector<std::pair<const char *, OnMessageReceivedHandler>> &topicFilters,
+ QOS qos,
+ OnMultiSubAckHandler &&onSubAck) noexcept
+ {
+ uint16_t packetId = 0;
+ auto subAckCallbackData = Crt::New<MultiSubAckCallbackData>(m_owningClient->allocator);
+
+ if (!subAckCallbackData)
+ {
+ return 0;
+ }
+
+ aws_array_list multiPub;
+ AWS_ZERO_STRUCT(multiPub);
+
+ if (aws_array_list_init_dynamic(
+ &multiPub, m_owningClient->allocator, topicFilters.size(), sizeof(aws_mqtt_topic_subscription)))
+ {
+ Crt::Delete(subAckCallbackData, m_owningClient->allocator);
+ return 0;
+ }
+
+ for (auto &topicFilter : topicFilters)
+ {
+ auto pubCallbackData = Crt::New<PubCallbackData>(m_owningClient->allocator);
+
+ if (!pubCallbackData)
+ {
+ goto clean_up;
+ }
+
+ pubCallbackData->connection = this;
+ pubCallbackData->onMessageReceived = topicFilter.second;
+ pubCallbackData->allocator = m_owningClient->allocator;
+
+ ByteBuf topicFilterBuf = aws_byte_buf_from_c_str(topicFilter.first);
+ ByteCursor topicFilterCur = aws_byte_cursor_from_buf(&topicFilterBuf);
+
+ aws_mqtt_topic_subscription subscription;
+ subscription.on_cleanup = s_cleanUpOnPublishData;
+ subscription.on_publish = s_onPublish;
+ subscription.on_publish_ud = pubCallbackData;
+ subscription.qos = qos;
+ subscription.topic = topicFilterCur;
+
+ aws_array_list_push_back(&multiPub, reinterpret_cast<const void *>(&subscription));
+ }
+
+ subAckCallbackData->connection = this;
+ subAckCallbackData->allocator = m_owningClient->allocator;
+ subAckCallbackData->onSubAck = std::move(onSubAck);
+ subAckCallbackData->topic = nullptr;
+
+ packetId = aws_mqtt_client_connection_subscribe_multiple(
+ m_underlyingConnection, &multiPub, s_onMultiSubAck, subAckCallbackData);
+
+ clean_up:
+ if (!packetId)
+ {
+ size_t length = aws_array_list_length(&multiPub);
+ for (size_t i = 0; i < length; ++i)
+ {
+ aws_mqtt_topic_subscription *subscription = NULL;
+ aws_array_list_get_at_ptr(&multiPub, reinterpret_cast<void **>(&subscription), i);
+ auto pubCallbackData = reinterpret_cast<PubCallbackData *>(subscription->on_publish_ud);
+ Crt::Delete(pubCallbackData, m_owningClient->allocator);
+ }
+
+ Crt::Delete(subAckCallbackData, m_owningClient->allocator);
+ }
+
+ aws_array_list_clean_up(&multiPub);
+
+ return packetId;
+ }
+
+ uint16_t MqttConnection::Unsubscribe(
+ const char *topicFilter,
+ OnOperationCompleteHandler &&onOpComplete) noexcept
+ {
+ auto opCompleteCallbackData = Crt::New<OpCompleteCallbackData>(m_owningClient->allocator);
+
+ if (!opCompleteCallbackData)
+ {
+ return 0;
+ }
+
+ opCompleteCallbackData->connection = this;
+ opCompleteCallbackData->allocator = m_owningClient->allocator;
+ opCompleteCallbackData->onOperationComplete = std::move(onOpComplete);
+ opCompleteCallbackData->topic = nullptr;
+ ByteBuf topicFilterBuf = aws_byte_buf_from_c_str(topicFilter);
+ ByteCursor topicFilterCur = aws_byte_cursor_from_buf(&topicFilterBuf);
+
+ uint16_t packetId = aws_mqtt_client_connection_unsubscribe(
+ m_underlyingConnection, &topicFilterCur, s_onOpComplete, opCompleteCallbackData);
+
+ if (!packetId)
+ {
+ Crt::Delete(opCompleteCallbackData, m_owningClient->allocator);
+ }
+
+ return packetId;
+ }
+
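+ // Publish() copies the topic into allocator-owned storage (released when the operation completes,
+ // or immediately if it fails to enqueue) and forwards the payload by cursor. A minimal sketch
+ // (hypothetical topic and payload):
+ //
+ //     ByteBuf payload = ByteBufFromCString("hello");
+ //     connection->Publish("sensors/test", AWS_MQTT_QOS_AT_LEAST_ONCE, false, payload,
+ //         [](MqttConnection &, uint16_t packetId, int errorCode) { /* check errorCode */ });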
+ uint16_t MqttConnection::Publish(
+ const char *topic,
+ QOS qos,
+ bool retain,
+ const ByteBuf &payload,
+ OnOperationCompleteHandler &&onOpComplete) noexcept
+ {
+
+ auto opCompleteCallbackData = Crt::New<OpCompleteCallbackData>(m_owningClient->allocator);
+ if (!opCompleteCallbackData)
+ {
+ return 0;
+ }
+
+ size_t topicLen = strnlen(topic, AWS_MQTT_MAX_TOPIC_LENGTH) + 1;
+ char *topicCpy =
+ reinterpret_cast<char *>(aws_mem_calloc(m_owningClient->allocator, topicLen, sizeof(char)));
+
+ if (!topicCpy)
+ {
+ Crt::Delete(opCompleteCallbackData, m_owningClient->allocator);
+ return 0;
+ }
+
+ memcpy(topicCpy, topic, topicLen);
+
+ opCompleteCallbackData->connection = this;
+ opCompleteCallbackData->allocator = m_owningClient->allocator;
+ opCompleteCallbackData->onOperationComplete = std::move(onOpComplete);
+ opCompleteCallbackData->topic = topicCpy;
+ ByteCursor topicCur = aws_byte_cursor_from_array(topicCpy, topicLen - 1);
+
+ ByteCursor payloadCur = aws_byte_cursor_from_buf(&payload);
+ uint16_t packetId = aws_mqtt_client_connection_publish(
+ m_underlyingConnection,
+ &topicCur,
+ qos,
+ retain,
+ &payloadCur,
+ s_onOpComplete,
+ opCompleteCallbackData);
+
+ if (!packetId)
+ {
+ aws_mem_release(m_owningClient->allocator, reinterpret_cast<void *>(topicCpy));
+ Crt::Delete(opCompleteCallbackData, m_owningClient->allocator);
+ }
+
+ return packetId;
+ }
+
+ const MqttConnectionOperationStatistics &MqttConnection::GetOperationStatistics() noexcept
+ {
+ aws_mqtt_connection_operation_statistics m_operationStatisticsNative = {0, 0, 0, 0};
+ if (m_underlyingConnection != nullptr)
+ {
+ aws_mqtt_client_connection_get_stats(m_underlyingConnection, &m_operationStatisticsNative);
+ m_operationStatistics.incompleteOperationCount =
+ m_operationStatisticsNative.incomplete_operation_count;
+ m_operationStatistics.incompleteOperationSize =
+ m_operationStatisticsNative.incomplete_operation_size;
+ m_operationStatistics.unackedOperationCount = m_operationStatisticsNative.unacked_operation_count;
+ m_operationStatistics.unackedOperationSize = m_operationStatisticsNative.unacked_operation_size;
+ }
+ return m_operationStatistics;
+ }
+
+ MqttClient::MqttClient(Io::ClientBootstrap &bootstrap, Allocator *allocator) noexcept
+ : m_client(aws_mqtt_client_new(allocator, bootstrap.GetUnderlyingHandle()))
+ {
+ }
+
+ MqttClient::MqttClient(Allocator *allocator) noexcept
+ : m_client(aws_mqtt_client_new(
+ allocator,
+ Crt::ApiHandle::GetOrCreateStaticDefaultClientBootstrap()->GetUnderlyingHandle()))
+ {
+ }
+
+ MqttClient::~MqttClient()
+ {
+ aws_mqtt_client_release(m_client);
+ m_client = nullptr;
+ }
+
+ MqttClient::MqttClient(MqttClient &&toMove) noexcept : m_client(toMove.m_client)
+ {
+ toMove.m_client = nullptr;
+ }
+
+ MqttClient &MqttClient::operator=(MqttClient &&toMove) noexcept
+ {
+ if (&toMove != this)
+ {
+ m_client = toMove.m_client;
+ toMove.m_client = nullptr;
+ }
+
+ return *this;
+ }
+
+ MqttClient::operator bool() const noexcept { return m_client != nullptr; }
+
+ int MqttClient::LastError() const noexcept { return aws_last_error(); }
+
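+ // NewConnection() allocates an MqttConnection with the client's allocator and wraps it in a
+ // shared_ptr whose deleter destroys and releases it through the same allocator. A minimal
+ // end-to-end sketch (hypothetical endpoint; assumes an initialized ApiHandle, a configured
+ // Io::SocketOptions/TlsContext, and the header's default allocator argument for MqttClient):
+ //
+ //     MqttClient client;
+ //     auto connection = client.NewConnection("example.endpoint", 8883, socketOptions, tlsContext, false);
+ //     if (!connection) { /* inspect client.LastError() */ }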
+ std::shared_ptr<MqttConnection> MqttClient::NewConnection(
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ const Crt::Io::TlsContext &tlsContext,
+ bool useWebsocket) noexcept
+ {
+ if (!tlsContext)
+ {
+ AWS_LOGF_ERROR(
+ AWS_LS_MQTT_CLIENT,
+ "id=%p Trying to call MqttClient::NewConnection using an invalid TlsContext.",
+ (void *)m_client);
+ aws_raise_error(AWS_ERROR_INVALID_ARGUMENT);
+ return nullptr;
+ }
+
+ // If you're reading this and asking why it's so complicated: make_shared and allocate_shared
+ // can't be used here because MqttConnection's constructors are private, so we allocate the
+ // memory and placement-new the object manually.
+ Allocator *allocator = m_client->allocator;
+ MqttConnection *toSeat =
+ reinterpret_cast<MqttConnection *>(aws_mem_acquire(allocator, sizeof(MqttConnection)));
+ if (!toSeat)
+ {
+ return nullptr;
+ }
+
+ toSeat = new (toSeat) MqttConnection(m_client, hostName, port, socketOptions, tlsContext, useWebsocket);
+ return std::shared_ptr<MqttConnection>(toSeat, [allocator](MqttConnection *connection) {
+ connection->~MqttConnection();
+ aws_mem_release(allocator, reinterpret_cast<void *>(connection));
+ });
+ }
+
+ std::shared_ptr<MqttConnection> MqttClient::NewConnection(
+ const char *hostName,
+ uint16_t port,
+ const Io::SocketOptions &socketOptions,
+ bool useWebsocket) noexcept
+
+ {
+ // If you're reading this and asking why it's so complicated: make_shared and allocate_shared
+ // can't be used here because MqttConnection's constructors are private, so we allocate the
+ // memory and placement-new the object manually.
+ Allocator *allocator = m_client->allocator;
+ MqttConnection *toSeat =
+ reinterpret_cast<MqttConnection *>(aws_mem_acquire(m_client->allocator, sizeof(MqttConnection)));
+ if (!toSeat)
+ {
+ return nullptr;
+ }
+
+ toSeat = new (toSeat) MqttConnection(m_client, hostName, port, socketOptions, useWebsocket);
+ return std::shared_ptr<MqttConnection>(toSeat, [allocator](MqttConnection *connection) {
+ connection->~MqttConnection();
+ aws_mem_release(allocator, reinterpret_cast<void *>(connection));
+ });
+ }
+ } // namespace Mqtt
+ } // namespace Crt
+} // namespace Aws
diff --git a/contrib/restricted/aws/aws-crt-cpp/ya.make b/contrib/restricted/aws/aws-crt-cpp/ya.make
new file mode 100644
index 0000000000..962f99ecb7
--- /dev/null
+++ b/contrib/restricted/aws/aws-crt-cpp/ya.make
@@ -0,0 +1,102 @@
+# Generated by devtools/yamaker from nixpkgs 23.05.
+
+LIBRARY()
+
+LICENSE(
+ Apache-2.0 AND
+ MIT
+)
+
+LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
+
+VERSION(0.19.8)
+
+ORIGINAL_SOURCE(https://github.com/awslabs/aws-crt-cpp/archive/v0.19.8.tar.gz)
+
+PEERDIR(
+ contrib/restricted/aws/aws-c-auth
+ contrib/restricted/aws/aws-c-cal
+ contrib/restricted/aws/aws-c-common
+ contrib/restricted/aws/aws-c-event-stream
+ contrib/restricted/aws/aws-c-http
+ contrib/restricted/aws/aws-c-io
+ contrib/restricted/aws/aws-c-mqtt
+ contrib/restricted/aws/aws-c-s3
+ contrib/restricted/aws/aws-c-sdkutils
+)
+
+ADDINCL(
+ GLOBAL contrib/restricted/aws/aws-crt-cpp/include
+)
+
+NO_COMPILER_WARNINGS()
+
+NO_UTIL()
+
+CFLAGS(
+ -DAWS_AUTH_USE_IMPORT_EXPORT
+ -DAWS_CAL_USE_IMPORT_EXPORT
+ -DAWS_CHECKSUMS_USE_IMPORT_EXPORT
+ -DAWS_COMMON_USE_IMPORT_EXPORT
+ -DAWS_COMPRESSION_USE_IMPORT_EXPORT
+ -DAWS_CRT_CPP_USE_IMPORT_EXPORT
+ -DAWS_EVENT_STREAM_USE_IMPORT_EXPORT
+ -DAWS_HTTP_USE_IMPORT_EXPORT
+ -DAWS_IO_USE_IMPORT_EXPORT
+ -DAWS_MQTT_USE_IMPORT_EXPORT
+ -DAWS_MQTT_WITH_WEBSOCKETS
+ -DAWS_S3_USE_IMPORT_EXPORT
+ -DAWS_SDKUTILS_USE_IMPORT_EXPORT
+ -DAWS_USE_EPOLL
+ -DCJSON_HIDE_SYMBOLS
+ -DS2N_CLONE_SUPPORTED
+ -DS2N_CPUID_AVAILABLE
+ -DS2N_FALL_THROUGH_SUPPORTED
+ -DS2N_FEATURES_AVAILABLE
+ -DS2N_KYBER512R3_AVX2_BMI2
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD5_SHA1_HASH
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_MD_CTX_SET_PKEY_CTX
+ -DS2N_LIBCRYPTO_SUPPORTS_EVP_RC4
+ -DS2N_MADVISE_SUPPORTED
+ -DS2N_PLATFORM_SUPPORTS_KTLS
+ -DS2N_STACKTRACE
+ -DS2N___RESTRICT__SUPPORTED
+)
+
+SRCS(
+ source/Allocator.cpp
+ source/Api.cpp
+ source/DateTime.cpp
+ source/ImdsClient.cpp
+ source/JsonObject.cpp
+ source/StringUtils.cpp
+ source/Types.cpp
+ source/UUID.cpp
+ source/auth/Credentials.cpp
+ source/auth/Sigv4Signing.cpp
+ source/crypto/HMAC.cpp
+ source/crypto/Hash.cpp
+ source/endpoints/RuleEngine.cpp
+ source/external/cJSON.cpp
+ source/http/HttpConnection.cpp
+ source/http/HttpConnectionManager.cpp
+ source/http/HttpProxyStrategy.cpp
+ source/http/HttpRequestResponse.cpp
+ source/io/Bootstrap.cpp
+ source/io/ChannelHandler.cpp
+ source/io/EventLoopGroup.cpp
+ source/io/HostResolver.cpp
+ source/io/Pkcs11.cpp
+ source/io/SocketOptions.cpp
+ source/io/Stream.cpp
+ source/io/TlsOptions.cpp
+ source/io/Uri.cpp
+ source/iot/Mqtt5Client.cpp
+ source/iot/MqttClient.cpp
+ source/iot/MqttCommon.cpp
+ source/mqtt/Mqtt5Client.cpp
+ source/mqtt/Mqtt5Packets.cpp
+ source/mqtt/MqttClient.cpp
+)
+
+END()